You can create a new website with this list, or embed it in an existing web page by copying & pasting any of the following snippets.

JavaScript (easiest):

    <script src="https://bibbase.org/show?bib=https%3A%2F%2Fraw.githubusercontent.com%2FRoznn%2FEUSIPCO%2Fmain%2Feusipco2015url.bib&jsonp=1"></script>

PHP:

    <?php
    $contents = file_get_contents("https://bibbase.org/show?bib=https%3A%2F%2Fraw.githubusercontent.com%2FRoznn%2FEUSIPCO%2Fmain%2Feusipco2015url.bib&jsonp=1");
    print_r($contents);
    ?>

iFrame (not recommended):

    <iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fraw.githubusercontent.com%2FRoznn%2FEUSIPCO%2Fmain%2Feusipco2015url.bib&jsonp=1"></iframe>

For more details see the documentation.
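
If you prefer to work with the raw BibTeX directly rather than through BibBase, the file behind the snippets above can be fetched and inspected in a few lines. A minimal Python sketch (standard library only; the regex is a rough stand-in for a real BibTeX parser, and it assumes the GitHub URL above still serves the file):

    import re
    import urllib.request

    # Raw BibTeX file, URL-decoded from the "bib=" parameter in the snippets above.
    BIB_URL = "https://raw.githubusercontent.com/Roznn/EUSIPCO/main/eusipco2015url.bib"

    with urllib.request.urlopen(BIB_URL) as response:
        bibtex = response.read().decode("utf-8")

    # Crude field extraction; \btitle avoids matching the booktitle field.
    titles = re.findall(r"\btitle\s*=\s*\{(.+?)\}", bibtex)
    print(len(titles), "entries")
    for title in titles[:5]:
        print("-", title)
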
2015 (578)
Phase reconstruction of spectrograms with linear unwrapping: Application to audio signal restoration.
Magron, P.; Badeau, R.; and David, B.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1-5, Aug 2015.

@InProceedings{7362333,
  author = {P. Magron and R. Badeau and B. David},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Phase reconstruction of spectrograms with linear unwrapping: Application to audio signal restoration},
  year = {2015},
  pages = {1-5},
  abstract = {This paper introduces a novel technique for reconstructing the phase of modified spectrograms of audio signals. From the analysis of mixtures of sinusoids we obtain relationships between phases of successive time frames in the Time-Frequency (TF) domain. To obtain similar relationships over frequencies, in particular within onset frames, we study an impulse model. Instantaneous frequencies and attack times are estimated locally to encompass the class of non-stationary signals such as vibratos. These techniques ensure both the vertical coherence of partials (over frequencies) and the horizontal coherence (over time). The method is tested on a variety of data and demonstrates better performance than traditional consistency-based approaches. We also introduce an audio restoration framework and observe that our technique outperforms traditional methods.},
  keywords = {audio signal processing;signal restoration;time-frequency analysis;modified spectrogram phase reconstruction;linear unwrapping;audio signal restoration;sinusoid mixture analysis;successive time frame phase;time-frequency domain;impulse model;instantaneous frequencies;attack times;horizontal coherence;vertical coherence;Frequency estimation;Time-frequency analysis;Mathematical model;Spectrogram;Coherence;Signal processing algorithms;Phase reconstruction;sinusoidal modeling;linear unwrapping;phase consistency;audio restoration},
  doi = {10.1109/EUSIPCO.2015.7362333},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096803.pdf},
}

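For reference, the time-direction relation that "linear unwrapping" builds on is the standard sinusoidal phase advance: for a steady sinusoid, the STFT phase grows by 2*pi*f*H/fs per hop. A toy Python/NumPy illustration (assumed parameter values; this is not the authors' full algorithm):

    import numpy as np

    # For a steady sinusoid, the phase of frame n+1 follows frame n by
    # 2*pi*f_inst*H/fs; this is the relation propagated along time.
    fs, H = 16000, 256        # sample rate (Hz) and hop size (samples)
    f_inst = 440.0            # locally estimated instantaneous frequency (Hz)
    phase = np.zeros(8)
    for n in range(1, len(phase)):
        phase[n] = phase[n - 1] + 2 * np.pi * f_inst * H / fs
    print(np.angle(np.exp(1j * phase)))   # phases wrapped back to (-pi, pi]
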
Refining fundamental frequency estimates using time warping.
Stöter, F.; Werner, N.; Bayer, S.; and Edler, B.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 6-10, Aug 2015.

@InProceedings{7362334,
  author = {F. Stöter and N. Werner and S. Bayer and B. Edler},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Refining fundamental frequency estimates using time warping},
  year = {2015},
  pages = {6-10},
  abstract = {Algorithms for estimating the fundamental frequency (F0) of a signal vary in stability and accuracy. We propose a method which iteratively improves the estimates of such algorithms by applying in each step a time warp on the input signal based on the previously estimated fundamental frequency. This time warp is designed to lead to a nearly constant F0. A refinement is then calculated through inverse time warping of the result of an F0 estimation applied to the warped signal. The proposed refinement algorithm is not limited to specific estimators or optimized for specific input signal characteristics. The method is evaluated on synthetic audio signals as well as speech recordings and polyphonic music recordings. Results indicate a significant improvement on accuracy when using the proposed refinement in combination with several well-known F0 estimators.},
  keywords = {audio signal processing;frequency estimation;speech synthesis;time warp simulation;refining fundamental frequency estimates;fundamental frequency;inverse time warping;F0 estimation;refinement algorithm;synthetic audio signals;speech recordings;polyphonic music recordings;F0 estimators;Speech;Frequency estimation;Signal processing algorithms;Estimation;Time-frequency analysis;Robustness;Fundamental frequency estimation;pitch tracking;pitch estimation;time warping},
  doi = {10.1109/EUSIPCO.2015.7362334},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101405.pdf},
}

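The warping idea itself can be sketched generically (assumed details, not the paper's implementation): resample the signal along a clock that runs proportionally to the running F0 estimate, so the warped signal has nearly constant F0 before re-estimation and inverse warping.

    import numpy as np

    # Warp time so that the F0 track integrates at a constant rate.
    fs = 8000
    t = np.arange(fs) / fs
    f0 = 200 + 40 * np.sin(2 * np.pi * 2.0 * t)   # vibrato-like F0 track (Hz)
    x = np.sin(2 * np.pi * np.cumsum(f0) / fs)    # test signal following f0
    tau = np.cumsum(f0 / np.mean(f0)) / fs        # warped clock (seconds)
    t_uniform = np.linspace(tau[0], tau[-1], len(x))
    x_warped = np.interp(t_uniform, tau, x)       # nearly constant-F0 version
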
Affect prediction in music using boosted ensemble of filters.
Gupta, R.; Kumar, N.; and Narayanan, S.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 11-15, Aug 2015.

@InProceedings{7362335,
  author = {R. Gupta and N. Kumar and S. Narayanan},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Affect prediction in music using boosted ensemble of filters},
  year = {2015},
  pages = {11-15},
  abstract = {Music influences the affective states of its listeners. For this reason, music is extensively used in various media forms to enhance and induce emotional feeling. Automatic evaluation of affect from music can have impact on music design and can also aid further analysis of music. In this work, we present a novel scheme for affect prediction in music using a Boosted Ensemble of Single feature Filters (BESiF) model. Given a set of frame-wise features, the BESiF model predicts the affective rating as a weighted sum of filtered feature values. The BESiF model improves the Signal to Noise Ratio for arousal and valence prediction by a factor of 1.92 and 1.06, respectively, over the best baseline method. This performance is achieved using only 14 signal features for arousal (16 for valence). We further analyze the transformation of one of the features selected towards arousal prediction.},
  keywords = {music;prediction theory;affect prediction;music;boosted ensemble;single feature filters;BESiF model;frame-wise features;filtered feature values;signal to noise ratio;arousal prediction;valence prediction;baseline method;Training;Predictive models;Mathematical model;Smoothing methods;Boosting;Multiple signal classification;Signal processing algorithms;Affect;Arousal;Valence;Emotion in music;Boosting},
  doi = {10.1109/EUSIPCO.2015.7362335},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104885.pdf},
}

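The stated prediction form, a rating as a weighted sum of per-feature filtered values, can be written out generically as follows (all features, filters and weights here are invented placeholders, not the learned BESiF model):

    import numpy as np

    # rating = sum_k w_k * (h_k * x_k): weighted sum of filtered features,
    # here with simple moving-average filters standing in for learned ones.
    rng = np.random.default_rng(0)
    features = rng.standard_normal((3, 100))        # 3 frame-wise features
    kernels = [np.ones(5) / 5, np.ones(9) / 9, np.ones(3) / 3]
    weights = np.array([0.5, 0.3, 0.2])
    filtered = np.stack([np.convolve(x, h, mode="same")
                         for x, h in zip(features, kernels)])
    rating = weights @ filtered                     # predicted rating per frame
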
Multi-pitch estimation and tracking using Bayesian inference in block sparsity.
Karimian-Azari, S.; Jakobsson, A.; Jensen, J. R.; and Christensen, M. G.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 16-20, Aug 2015.

@InProceedings{7362336,
  author = {S. Karimian-Azari and A. Jakobsson and J. R. Jensen and M. G. Christensen},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Multi-pitch estimation and tracking using Bayesian inference in block sparsity},
  year = {2015},
  pages = {16-20},
  abstract = {In this paper, we consider the problem of multi-pitch estimation and tracking of an unknown number of harmonic audio sources. The regularized least-squares is a solution for simultaneous sparse source selection and parameter estimation. Exploiting block sparsity, the method allows for reliable tracking of the found sources, without posing detailed a priori assumptions of the number of harmonics for each source. The method incorporates a Bayesian prior and assigns data-dependent regularization coefficients to efficiently incorporate both earlier and future data blocks in the tracking of estimates. In comparison with fixed regularization coefficients, the simulation results, using both real and synthetic audio signals, confirm the performance of the proposed method.},
  keywords = {audio signal processing;Bayes methods;harmonics;inference mechanisms;parameter estimation;regression analysis;synthetic audio signals;data-dependent regularization coefficients;parameter estimation;simultaneous sparse source selection;regularized least-squares;harmonic audio sources;block sparsity;Bayesian inference;multipitch tracking;multipitch estimation;Estimation;Harmonic analysis;Dictionaries;Bayes methods;Frequency estimation;Signal to noise ratio;Europe;Multi-pitch estimation;tracking;harmonic signal;regularized least-squares;sparsity},
  doi = {10.1109/EUSIPCO.2015.7362336},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102625.pdf},
}

A low-latency, real-time-capable singing voice detection method with LSTM recurrent neural networks.
Lehner, B.; Widmer, G.; and Bock, S.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 21-25, Aug 2015.

@InProceedings{7362337,
  author = {B. Lehner and G. Widmer and S. Bock},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A low-latency, real-time-capable singing voice detection method with LSTM recurrent neural networks},
  year = {2015},
  pages = {21-25},
  abstract = {Singing voice detection aims at identifying the regions in a music recording where at least one person sings. This is a challenging problem that cannot be solved without analysing the temporal evolution of the signal. Current state-of-the-art methods combine timbral with temporal characteristics, by summarising various feature values over time, e.g. by computing their variance. This leads to more contextual information, but also to increased latency, which is problematic if our goal is on-line, real-time singing voice detection. To overcome this problem and reduce the necessity to include context in the features themselves, we introduce a method that uses Long Short-Term Memory Recurrent Neural Networks (LSTM-RNN). In experiments on several data sets, the resulting singing voice detector outperforms the state-of-the-art baselines in terms of accuracy, while at the same time drastically reducing latency and increasing the time resolution of the detector.},
  keywords = {feature extraction;recurrent neural nets;speech recognition;singing voice detection method;SVD;long short-term memory recurrent neural network;LSTM-RNN;Training;Context;Feature extraction;Recurrent neural networks;Europe;Signal processing;Reliability;singing voice detection;music information retrieval;recurrent neural nets},
  doi = {10.1109/EUSIPCO.2015.7362337},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097385.pdf},
}

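A frame-wise recurrent detector of the kind described can be stated compactly; the PyTorch sketch below is a generic stand-in with invented layer sizes, not the authors' trained network. Emitting one probability per input frame is what keeps the latency at a single frame rather than a long summarisation window.

    import torch
    import torch.nn as nn

    class SingingVoiceLSTM(nn.Module):
        """Frame-wise features in, per-frame voice probability out."""
        def __init__(self, n_features=30, hidden=64):
            super().__init__()
            self.lstm = nn.LSTM(n_features, hidden, batch_first=True)
            self.out = nn.Linear(hidden, 1)

        def forward(self, x):                   # x: (batch, frames, features)
            h, _ = self.lstm(x)
            return torch.sigmoid(self.out(h))   # (batch, frames, 1) in [0, 1]

    probs = SingingVoiceLSTM()(torch.randn(1, 100, 30))
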
Sparse chroma estimation for harmonic non-stationary audio.
Juhlin, M.; Kronvall, T.; Sward, J.; and Jakobsson, A.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 26-30, Aug 2015.

@InProceedings{7362338,
  author = {M. Juhlin and T. Kronvall and J. Sward and A. Jakobsson},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Sparse chroma estimation for harmonic non-stationary audio},
  year = {2015},
  pages = {26-30},
  abstract = {In this work, we extend on our recently proposed block sparse chroma estimator, such that the method also allows for signals with time-varying envelopes. Using a spline-based amplitude modulation of the chroma dictionary, the refined estimator is able to model longer frames than our earlier approach, as well as to model highly time-localized signals, and signals containing sudden bursts, such as trumpet or trombone signals, thus retaining more signal information than other methods for chroma estimation. The performance of the proposed estimator is evaluated on a recorded trumpet signal, clearly illustrating the improved performance, as compared to other used techniques.},
  keywords = {amplitude modulation;audio signals;estimation theory;block sparse chroma estimator;harmonic nonstationary audio;time-varying envelopes;spline-based amplitude modulation;chroma dictionary;time-localized signals;sudden bursts;recorded trumpet signal;Estimation;Harmonic analysis;Splines (mathematics);Europe;Signal processing algorithms;Music;chromagram;amplitude modulation;block sparsity;convex optimization;ADMM},
  doi = {10.1109/EUSIPCO.2015.7362338},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104561.pdf},
}

An adaptive penalty approach to multi-pitch estimation.
Kronvall, T.; Elvander, F.; Adalbjörnsson, S. I.; and Jakobsson, A.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 31-35, Aug 2015.

@InProceedings{7362339,
  author = {T. Kronvall and F. Elvander and S. I. Adalbjörnsson and A. Jakobsson},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {An adaptive penalty approach to multi-pitch estimation},
  year = {2015},
  pages = {31-35},
  abstract = {This work treats multi-pitch estimation, and in particular the common misclassification issue wherein the pitch at half of the true fundamental frequency, here referred to as a suboctave, is chosen instead of the true pitch. Extending on current methods which use an extension of the Group LASSO for pitch estimation, this work introduces an adaptive total variation penalty, which both enforces group- and block-sparsity, and deals with errors due to sub-octaves. The method is shown to outperform current state-of-the-art sparse methods, where the model orders are unknown, while also requiring fewer tuning parameters than these. The method is also shown to outperform several conventional pitch estimation methods, even when these are virtued with oracle model orders.},
  keywords = {signal classification;adaptive sparse penalty;adaptive total variation penalty;multipitch estimation;adaptive penalty approach;Harmonic analysis;Estimation;Tuning;Approximation methods;Europe;Frequency estimation;multi-pitch estimation;block sparsity;adaptive sparse penalty;total variation;ADMM},
  doi = {10.1109/EUSIPCO.2015.7362339},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103397.pdf},
}

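The group-sparse building block that such Group LASSO extensions rely on is the group soft-thresholding operator, which shrinks or zeroes an entire candidate-pitch block at once. A generic sketch of the operator only (not the paper's adaptive total-variation penalty):

    import numpy as np

    def group_soft_threshold(x, lam):
        """Proximal operator of lam * ||x||_2: shrinks the whole group."""
        norm = np.linalg.norm(x)
        if norm <= lam:
            return np.zeros_like(x)
        return (1.0 - lam / norm) * x

    # A weak harmonic group is removed entirely, a strong one only shrunk:
    print(group_soft_threshold(np.array([0.1, -0.2]), lam=0.5))  # -> zeros
    print(group_soft_threshold(np.array([3.0, -4.0]), lam=0.5))  # -> scaled
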
Pitch estimation of stereophonic mixtures of delay and amplitude panned signals.
Hansen, M. W.; Jensen, J. R.; and Christensen, M. G.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 36-40, Aug 2015.

@InProceedings{7362340,
  author = {M. W. Hansen and J. R. Jensen and M. G. Christensen},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Pitch estimation of stereophonic mixtures of delay and amplitude panned signals},
  year = {2015},
  pages = {36-40},
  abstract = {In this paper, a novel method for pitch estimation of stereophonic mixtures is presented, and it is investigated how the performance is affected by the pan parameters of the individual signals of the mixture. The method is based on a signal model that takes into account a stereophonic mixture created by mixing multiple individual channels with different pan parameters, and is hence suited for use in automatic music transcription, source separation and classification systems. Panning is done using both amplitude differences and delays. The performance of the estimator is compared to one single-channel, two multi-channel and one multi-pitch estimator using synthetic and real signals. Experiments show that the proposed method is able to correctly estimate the pitches of a mixture of three real signals when they are separated by more than 25 degrees.},
  keywords = {audio signal processing;delays;pitch estimation;stereophonic mixtures;delay;amplitude panned signals;automatic music transcription;source separation;classification systems;Delays;Maximum likelihood estimation;Harmonic analysis;Europe;Channel estimation;Pitch estimation;multi-channel processing;noise reduction;maximum likelihood},
  doi = {10.1109/EUSIPCO.2015.7362340},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104571.pdf},
}

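The delay-and-amplitude panning described above has a simple generic form: each source appears in the left and right channels with channel-dependent gains and a small time offset. A toy sketch with invented gains and delay (wrap-around from np.roll is ignored here):

    import numpy as np

    fs = 8000
    t = np.arange(fs) / fs
    source = np.sin(2 * np.pi * 220 * t)       # one harmonic source
    g_left, g_right = 0.8, 0.4                 # amplitude-pan gains
    delay = 20                                 # pan delay in samples
    left = g_left * source
    right = g_right * np.roll(source, delay)   # delayed, attenuated copy
    stereo = np.stack([left, right])           # 2 x N mixture to estimate from
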
Discovery of repeated vocal patterns in polyphonic audio: A case study on flamenco music.
Kroher, N.; Pikrakis, A.; Moreno, J.; and Díaz-Báñez, J.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 41-45, Aug 2015.

@InProceedings{7362341,
  author = {N. Kroher and A. Pikrakis and J. Moreno and J. Díaz-Báñez},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Discovery of repeated vocal patterns in polyphonic audio: A case study on flamenco music},
  year = {2015},
  pages = {41-45},
  abstract = {This paper presents a method for the discovery of repeated vocal patterns directly from music recordings. At a first stage, a voice detection algorithm provides a rough segmentation of the recording to vocal parts, based on which an estimate of the average pattern duration is computed. Then, a pattern detector which employs a sequence alignment algorithm is used to yield a ranking of pairs of matches of the detected voiced segments. At a last stage, a clustering algorithm produces the final repeated patterns. Our method was evaluated in the context of flamenco music for which symbolic metadata are very hard to produce, yielding very promising results.},
  keywords = {audio signal processing;meta data;music;speech processing;vocal patterns;polyphonic audio;flamenco music;music recordings;voice detection algorithm;pattern detector;metadata;Signal processing algorithms;Feature extraction;Europe;Signal processing;Clustering algorithms;Algorithm design and analysis;Context;Pattern discovery;flamenco music},
  doi = {10.1109/EUSIPCO.2015.7362341},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103063.pdf},
}

3D video watermarking using DT-DWT to resist synthesis view attack.
Rana, S.; and Sur, A.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 46-50, Aug 2015.

@InProceedings{7362342,
  author = {S. Rana and A. Sur},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {3D video watermarking using DT-DWT to resist synthesis view attack},
  year = {2015},
  pages = {46-50},
  abstract = {In this paper, a 3D video watermarking scheme is proposed for depth image based rendering (DIBR) based multi view video plus depth (MVD) encoding technique. To make the scheme invariant to view synthesis process in DIBR technique, watermark is inserted in a center view which is rendered from left and right views of a 3D video frame. A low pass center view, obtained from the motion compensated temporal filtering over all the frames of a GOP, is used for embedding to reduce the temporal flickering artifacts. To make the scheme invariant to the DIBR process, 2D DT-DWT block coefficients of low-pass center view are used for embedding by exploiting its shift invariance and directional property. A comprehensive set of experiments have been carried out to justify the robustness of the proposed scheme over existing schemes with respect to compression of the 3D-HEVC video codec and synthesis view attack.},
  keywords = {motion compensation;video coding;video watermarking;wavelet transforms;3D video watermarking;synthesis view attack;depth image based rendering;multiview video plus depth encoding technique;synthesis process;3D video frame;motion compensation;temporal filtering;2D DT-DWT block coefficients;low pass center view;3D-HEVC video codec;dual tree discrete wavelet transform;Watermarking;Three-dimensional displays;Rendering (computer graphics);Streaming media;Discrete wavelet transforms;Resists;Robustness;depth-image-based rendering (DIBR);3D high-efficient-video-coding (3D-HEVC);dual-tree discrete-wavelet-transform (DT-DWT);watermarking;synthesis view},
  doi = {10.1109/EUSIPCO.2015.7362342},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104183.pdf},
}

A QR-code based audio watermarking technique for tracing traitors.
Chaabane, F.; Charfeddine, M.; Puech, W.; and Ben Amar, C.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 51-55, Aug 2015.

@InProceedings{7362343,
  author = {F. Chaabane and M. Charfeddine and W. Puech and C. {Ben Amar}},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A QR-code based audio watermarking technique for tracing traitors},
  year = {2015},
  pages = {51-55},
  abstract = {Handling a great number of users and surviving different types of attacks present fundamental challenges of the majority of fingerprinting systems in the tracing traitor field. In this paper, the proposed technique consists in embedding a fingerprint, a QR code, in the audio stream extracted from the media release. Using the QR-code provides several advantages such as supporting a large amount of information in a compact format and damage resiliency. This paper proposes to encode the identifier, which is a parallel concatenation of two tracing codes, Boneh Shaw and Tardos codes, into a QR-code. The proposed approach should not only improve the two-stage tracing strategy by reducing the computational complexity, but also enhance the secure side of the proposed technique by the preprocessing treatment before generating the QR-code.},
  keywords = {audio streaming;audio watermarking;computational complexity;QR codes;Quick Response codes;QR code;audio watermarking technique;fingerprinting systems;tracing traitor field;audio stream;tracing codes;Boneh Shaw codes;Tardos codes;two-stage tracing strategy;complexity computation;preprocessing treatment;Watermarking;Robustness;Media;Europe;Signal processing;Multimedia communication;Streaming media;QR-code;tracing traitors;Boneh Shaw;Tardos;preprocessing},
  doi = {10.1109/EUSIPCO.2015.7362343},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103501.pdf},
}

Horizontal pairwise reversible watermarking.
Dragoi, I.; Coltuc, D.; and Caciula, I.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 56-60, Aug 2015.

@InProceedings{7362344,
  author = {I. Dragoi and D. Coltuc and I. Caciula},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Horizontal pairwise reversible watermarking},
  year = {2015},
  pages = {56-60},
  abstract = {The best results for very low embedding capacity reported so far are provided by a pairwise reversible watermarking scheme that uses paired embedding with rhombus prediction and context smoothness control. Since the horizontal (or vertical) adjacent pixels are more correlated than the diagonal ones, this paper proposes such a pairing for reversible watermarking. The rhombus prediction should be modified in order to match the horizontal/vertical pairing. An improved measure of the context smoothness is also introduced. The proposed horizontal pairing based reversible watermarking scheme outperforms the state of the art diagonal pairing one. Experimental results for standard graylevel test images are provided.},
  keywords = {image watermarking;prediction theory;horizontal pairwise reversible watermarking;very low embedding capacity;rhombus prediction;context smoothness control;horizontal adjacent pixels;horizontal pairing;vertical pairing;standard graylevel test images;Watermarking;Complexity theory;Histograms;Context;Signal processing algorithms;Prediction algorithms;Europe;pairwise reversible watermarking;histogram shifting;prediction-error expansion},
  doi = {10.1109/EUSIPCO.2015.7362344},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104161.pdf},
}

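For context on the embedding primitive named in the keywords: classic prediction-error expansion hides one bit b in a pixel x by doubling its prediction error, x' = p + 2(x - p) + b, which is exactly invertible given the same predictor p. A minimal generic sketch (not the paper's paired scheme; overflow handling omitted):

    def pee_embed(x, p, b):
        """Embed bit b in pixel x given prediction p (overflow ignored)."""
        return p + 2 * (x - p) + b

    def pee_extract(x_marked, p):
        """Recover (original pixel, embedded bit) from a marked pixel."""
        e_marked = x_marked - p
        return p + e_marked // 2, e_marked % 2

    # Python's floor division makes the inverse exact for negative errors too:
    assert pee_extract(pee_embed(100, 98, 1), 98) == (100, 1)
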
Considerations on the benchmarking of media forensics.
Kraetzer, C.; and Dittmann, J.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 61-65, Aug 2015.

@InProceedings{7362345,
  author = {C. Kraetzer and J. Dittmann},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Considerations on the benchmarking of media forensics},
  year = {2015},
  pages = {61-65},
  abstract = {In media forensics, most of the approaches are still far away from being mature enough for admission in court - the ultimate benchmark for any forensic method. The intended contribution of this paper is to facilitate the understanding between the researchers active in this field and the society perspective on media forensics. The intended benefit of this is that it allows these two to better understand each other: On one hand, showing researchers working on the development of forensic methods a precise picture of the compliance requirements installed by the society (here, requirements for potential court admissibility); on the other hand, intending to help non-technicians to understand the challenges that researchers face. To achieve this contribution, a selected prominent example (the photo response non-uniformity (PRNU) based digital camera forensics) is discussed here briefly in the context of the established Daubert criteria as reference imposed as admissibility threshold for legal proceedings.},
  keywords = {digital forensics;law administration;admissibility threshold;legal proceeding;Daubert criteria;compliance precise picture;media forensics benchmarking;Forensics;Media;Tires;Reliability;Fingerprint recognition;Benchmarking;media forensics;court admissibility;Daubert criteria},
  doi = {10.1109/EUSIPCO.2015.7362345},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104167.pdf},
}

On packing laser scanning microscopy images by reversible watermarking: A case study.
Dragoi, I.; Stanciu, S. G.; Coltuc, D.; Tranca, D. E.; Hristu, R.; and Stanciu, G. A.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 66-70, Aug 2015.

@InProceedings{7362346,
  author = {I. Dragoi and S. G. Stanciu and D. Coltuc and D. E. Tranca and R. Hristu and G. A. Stanciu},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {On packing laser scanning microscopy images by reversible watermarking: A case study},
  year = {2015},
  pages = {66-70},
  abstract = {Pairs of images of the same field-of-view collected by two complementary Laser Scanning Microscopy (LSM) work-modes are packed together by embedding one image of the pair into the other by reversible watermarking. One image remains visible and the other can be easily extracted at no extra transmission bandwidth cost. The packing keeps together the data without any risk of confusion. The use of reversible watermarking ensures, if necessary, the recovery of the host at zero distortion. Experimental results for Confocal Laser Scanning Microscopy (CLSM) and Transmission Laser Scanning Microscopy (TLSM) image pairs are provided.},
  keywords = {image watermarking;microscopy;packing laser scanning microscopy images;reversible watermarking;transmission bandwidth cost;confocal laser scanning microscopy;transmission laser scanning microscopy;Watermarking;Microscopy;Image coding;Distortion;Europe;Context;LSM;CLSM;TLSM;reversible watermarking;prediction-error expansion},
  doi = {10.1109/EUSIPCO.2015.7362346},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104225.pdf},
}

Digital image self-recovery using unequal error protection.
Sarreshtedari, S.; Akhaee, M. A.; and Abbasfar, A.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 71-75, Aug 2015.

@InProceedings{7362347,
  author = {S. Sarreshtedari and M. A. Akhaee and A. Abbasfar},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Digital image self-recovery using unequal error protection},
  year = {2015},
  pages = {71-75},
  abstract = {In this paper, an unequal error protection (UEP)-based scheme is presented to generate tamper-proof images, in which the lost content of the original image is recoverable despite the malicious tampering. For this purpose, a representation of the original image is embedded into itself, after being protected by the proper channel coding. Since better protection is considered for the more important bits of the image representation through a dynamic programming (DP) optimization scheme, they survive higher tampering rates than the less important image information. As a result, the quality of the restored image degrades with respect to the true tampering rate.},
  keywords = {channel coding;dynamic programming;image coding;image representation;image restoration;digital image self-recovery;unequal error protection;tampering rate;image restoration quality;image information;tampering rates;DP optimization scheme;dynamic programming optimization scheme;channel coding;image representation;tamper-proof images;UEP-based scheme;Error correction codes;Optimization;Channel coding;Watermarking;Digital images;Image restoration;Europe;Digital image self-recovery;Tamper-proof images;Unequal error protection (UEP)},
  doi = {10.1109/EUSIPCO.2015.7362347},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104587.pdf},
}

Eigen-patch iris super-resolution for iris recognition improvement.
Alonso-Fernandez, F.; Farrugia, R. A.; and Bigun, J.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 76-80, Aug 2015.

@InProceedings{7362348,
  author = {F. Alonso-Fernandez and R. A. Farrugia and J. Bigun},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Eigen-patch iris super-resolution for iris recognition improvement},
  year = {2015},
  pages = {76-80},
  abstract = {Low image resolution will be a predominant factor in iris recognition systems as they evolve towards more relaxed acquisition conditions. Here, we propose a super-resolution technique to enhance iris images based on Principal Component Analysis (PCA) Eigen-transformation of local image patches. Each patch is reconstructed separately, allowing better quality of enhanced images by preserving local information and reducing artifacts. We validate the system using a database of 1,872 near-infrared iris images. Results show the superiority of the presented approach over bilinear or bicubic interpolation, with the eigen-patch method being more resilient to image resolution reduction. We also perform recognition experiments with an iris matcher based on 1D Log-Gabor, demonstrating that verification rates degrade more rapidly with bilinear or bicubic interpolation.},
  keywords = {image resolution;iris recognition;principal component analysis;eigen-patch iris super-resolution;image resolution;iris recognition systems;principal component analysis;PCA Eigen-transformation;near-infrared iris images;bicubic interpolation;bilinear interpolation;image resolution reduction;1D log-gabor;bicubic interpolation;Iris recognition;Image resolution;Image reconstruction;Databases;Iris;Yttrium;Training;Iris hallucination;iris recognition;eigenpatch;super-resolution;Principal Component Analysis},
  doi = {10.1109/EUSIPCO.2015.7362348},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104831.pdf},
}

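The eigen-transformation at the heart of such patch-based methods is ordinary PCA on vectorised patches. A generic sketch with random stand-in data (not the trained iris model): project a patch onto the leading eigen-patches of a training set and reconstruct from the coefficients.

    import numpy as np

    rng = np.random.default_rng(0)
    train = rng.standard_normal((500, 64))      # 500 vectorised 8x8 patches
    mean = train.mean(axis=0)
    _, _, Vt = np.linalg.svd(train - mean, full_matrices=False)
    basis = Vt[:20]                             # top 20 eigen-patches
    patch = rng.standard_normal(64)
    coeffs = basis @ (patch - mean)             # project onto the basis
    reconstruction = mean + basis.T @ coeffs    # low-dimensional approximation
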
ELPA: A new key agreement scheme based on linear prediction of ECG features for WBAN.
Zaghouani, E. K.; Jemai, A.; Benzina, A.; and Attia, R.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 81-85, Aug 2015.

@InProceedings{7362349,
  author = {E. K. Zaghouani and A. Jemai and A. Benzina and R. Attia},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {ELPA: A new key agreement scheme based on linear prediction of ECG features for WBAN},
  year = {2015},
  pages = {81-85},
  abstract = {In this paper, we propose a novel key agreement scheme called ECG Linear Prediction key Agreement (ELPA) with the properties of plug-n-play and transparency to secure inter-sensor communication in Wireless Body Area Networks (WBANs). ELPA is a new physiological based key agreement scheme allowing two nodes belonging to the same WBAN to agree on a symmetric key from ECG signal features. The paper introduces the use of Linear Prediction Coding (LPC), which has always been used for a compression purpose, in hiding the cryptographic key. In fact we prove that concealing the symmetric key using this tool ensures high security level while keeping low computational complexity and communication overhead compared with the state of the art.},
  keywords = {body area networks;cryptography;electrocardiography;linear predictive coding;cryptographic key;LPC;linear prediction coding;symmetric key;physiological based key agreement scheme;WBAN;wireless body area networks;inter-sensor communication;plug-n-play;ELPA;ECG linear prediction key agreement;Electrocardiography;Wireless communication;Receivers;Body area networks;Feature extraction;Discrete cosine transforms;Encoding;WBAN;communication security;ECG features;LPC;communication overhead},
  doi = {10.1109/EUSIPCO.2015.7362349},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105063.pdf},
}

Relay selection for optimized cooperative jamming scheme.
Mabrouk, A.; Tourki, K.; and Hamdi, N.
In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 86-90, Aug 2015.

@InProceedings{7362350,
  author = {A. Mabrouk and K. Tourki and N. Hamdi},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Relay selection for optimized cooperative jamming scheme},
  year = {2015},
  pages = {86-90},
  abstract = {In this paper, we study the problem of secure dual-hop transmission in the presence of an eavesdropper, where a secrecy-enhanced relay selection as well as a destination cooperation are presented to prevent the source information from being eavesdropped. Taking into account the total power budget, a power allocation scheme is investigated to optimize the destination contribution. We present the system performance in terms of secrecy capacity where we derive a closed form expression for its lower bound. Simulation results reveal that a higher power allocation to the jamming signal should be balanced by a closer placement of the relay to the source to get better system performance, and vice versa.},
  keywords = {cooperative communication;jamming;optimisation;relay networks (telecommunication);telecommunication power management;telecommunication security;optimized cooperative jamming scheme;secure dual-hop transmission;secrecy enhanced relay selection;destination cooperation;total power budget;power allocation;closed form expression;relay placement;Relays;Jamming;Resource management;Security;Signal to noise ratio;Europe},
  doi = {10.1109/EUSIPCO.2015.7362350},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104455.pdf},
}

\n \n\n \n \n \n \n \n \n Managing trust in diffusion adaptive networks with malicious agents.\n \n \n \n \n\n\n \n Ntemos, K.; Kalouptsidis, N.; and Kolokotronis, N.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 91-95, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ManagingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362351,\n  author = {K. Ntemos and N. Kalouptsidis and N. Kolokotronis},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Managing trust in diffusion adaptive networks with malicious agents},\n  year = {2015},\n  pages = {91-95},\n  abstract = {In this paper, we consider the problem of information sharing over adaptive networks, where a diffusion strategy is used to estimate a common parameter. We introduce a new model that takes into account the presence of both selfish and malicious intelligent agents that adjust their behavior to maximize their own benefits. The interactions among agents are modeled as a stochastic game with incomplete information and partially observable actions. To stimulate cooperation amongst selfish agents and thwart malicious behavior, a trust management system relying on a voting scheme is employed. Agents act as independent learners, using the Q-learning algorithm. The simulation results illustrate the severe impact of falsified information on estimation accuracy along with the noticeable improvements gained by stimulating cooperation and truth-telling, with the proposed trust management mechanism.},\n  keywords = {learning (artificial intelligence);multi-agent systems;radio networks;stochastic games;telecommunication security;trusted computing;diffusion adaptive wireless network;information sharing;malicious intelligent agent;stochastic game;trust management system;voting scheme;Q-learning algorithm;Estimation;Europe;Games;Sensors;Nickel;Signal processing;Adaptive systems;Trust management;multi-agent systems;independent learning;adaptive networks;voting schemes},\n  doi = {10.1109/EUSIPCO.2015.7362351},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105261.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we consider the problem of information sharing over adaptive networks, where a diffusion strategy is used to estimate a common parameter. We introduce a new model that takes into account the presence of both selfish and malicious intelligent agents that adjust their behavior to maximize their own benefits. The interactions among agents are modeled as a stochastic game with incomplete information and partially observable actions. To stimulate cooperation amongst selfish agents and thwart malicious behavior, a trust management system relying on a voting scheme is employed. Agents act as independent learners, using the Q-learning algorithm. The simulation results illustrate the severe impact of falsified information on estimation accuracy along with the noticeable improvements gained by stimulating cooperation and truth-telling, with the proposed trust management mechanism.\n
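The abstract describes agents as independent learners running Q-learning; the update rule below is the standard tabular one. The state encoding, actions, and rewards are toy assumptions for illustration, not the stochastic game of the paper:

```python
import numpy as np

# Tabular Q-learning for one independent learner (sketch). States stand in
# for coarse trust levels; actions are {0: cooperate, 1: defect}.
n_states, n_actions = 4, 2
alpha, gamma, eps = 0.1, 0.9, 0.1
Q = np.zeros((n_states, n_actions))
rng = np.random.default_rng(0)

def step(s, a):
    # Hypothetical environment: cooperating while trusted pays off.
    r = 1.0 if (a == 0 and s >= 2) else -0.1
    s_next = min(n_states - 1, s + 1) if a == 0 else max(0, s - 1)
    return r, s_next

s = 0
for _ in range(5000):
    a = int(rng.integers(n_actions)) if rng.random() < eps else int(Q[s].argmax())
    r, s_next = step(s, a)
    # Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
    Q[s, a] += alpha * (r + gamma * Q[s_next].max() - Q[s, a])
    s = s_next
print(Q)  # cooperation dominates in the trusted states
```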
\n\n\n
\n\n\n
Block prediction using approximate template matching. Zepeda, J.; Türkan, M.; and Thoreau, D. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 96-100, Aug 2015.
\n\n\n\n \n \n \"BlockPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362352,\n  author = {J. Zepeda and M. Türkan and D. Thoreau},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Block prediction using approximate template matching},\n  year = {2015},\n  pages = {96-100},\n  abstract = {Template matching methods have been shown to offer bit-rate savings of up to 15% when used for in-loop prediction in compression. Yet the required nearest-template search process results in prohibitive complexity. Hence, in this paper we use approximate nearest neighbor search methods to successfully address this drawback of template matching methods. Our approach uses a template index that is updated during the decoding process, yet the incurred overhead pays off in reduced nearest-template search complexity, resulting in a significant gain in template search complexity. Rate-distortion experiments further indicate that there is no rate-distortion penalty resulting from our proposed approximate template search method, and in fact a small gain of 0.1 dB is observed.},\n  keywords = {approximation theory;image matching;prediction theory;rate distortion theory;search problems;block prediction;rate-distortion experiments;reduced nearest-template search complexity;decoding process;template index;approximate nearest neighbor search methods;prohibitive complexity;nearest-template search process results;in-loop prediction;bit-rate savings;template matching methods;Complexity theory;Decoding;Standards;Encoding;Europe;Signal processing;Nearest neighbor searches;Template matching;intra-coding;image compression;approximate nearest neighbor;indexing},\n  doi = {10.1109/EUSIPCO.2015.7362352},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104777.pdf},\n}\n\n
\n
\n\n\n
Template matching methods have been shown to offer bit-rate savings of up to 15% when used for in-loop prediction in compression. Yet the required nearest-template search process results in prohibitive complexity. Hence, in this paper we use approximate nearest neighbor search methods to address this drawback of template matching methods. Our approach uses a template index that is updated during the decoding process; the incurred overhead pays off, resulting in a significant net reduction in nearest-template search complexity. Rate-distortion experiments further indicate that there is no rate-distortion penalty resulting from our proposed approximate template search method; in fact, a small gain of 0.1 dB is observed.
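To make the approximate-search idea concrete, here is a sketch in which a KD-tree queried with a nonzero `eps` answers approximate nearest-template queries; the paper's actual index structure is not specified in this abstract, so the KD-tree and the patch dimensions are stand-in assumptions:

```python
import numpy as np
from scipy.spatial import cKDTree

# Toy approximate nearest-template search over flattened 4x4 patches.
# In a codec, the index would be extended as decoding exposes new patches.
rng = np.random.default_rng(1)
templates = rng.random((10_000, 16))          # candidate templates
tree = cKDTree(templates)

query = rng.random(16)                        # template of block to predict
dist, idx = tree.query(query, k=1, eps=0.5)   # eps > 0 => approximate search
print(f"best template #{idx}, distance {dist:.3f}")
```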
\n\n\n
\n\n\n
Integral images compression scheme based on view extraction. Dricot, A.; Jung, J.; Cagnazzo, M.; Pesquet, B.; and Dufaux, F. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 101-105, Aug 2015.
\n\n\n\n \n \n \"IntegralPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362353,\n  author = {A. Dricot and J. Jung and M. Cagnazzo and B. Pesquet and F. Dufaux},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Integral images compression scheme based on view extraction},\n  year = {2015},\n  pages = {101-105},\n  abstract = {Integral imaging is a glasses-free 3D video technology that captures a light-field representation of a scene. This representation eliminates many of the limitations of current stereoscopic and autostereoscopic techniques. However, integral images have a large resolution and a structure based on microimages which is challenging to encode. In this paper a compression scheme for integral images based on view extraction is proposed. Average BD-rate gains of 15.7% and up to 31.3% are reported over HEVC. Parameters of the proposed coding scheme can take a large range of values. Results are first provided with an exhaustive search of the best configuration. Then an RD criterion is proposed to avoid exhaustive search methods, saving runtime while preserving the gains. Finally, additional runtime savings are reported by exploring how the different parameters interact.},\n  keywords = {data compression;image representation;stereo image processing;video coding;view extraction;integral images compression scheme;glasses-free 3D video technology;light-field scene representation;autostereoscopic techniques;HEVC coding scheme;Image coding;Image reconstruction;Encoding;Imaging;Three-dimensional displays;Image resolution;Bit rate;Integral Imaging;Plenoptic Imaging;Holoscopy;Image and Video Coding;View Extraction},\n  doi = {10.1109/EUSIPCO.2015.7362353},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104423.pdf},\n}\n\n
\n
\n\n\n
\n Integral imaging is a glasses-free 3D video technology that captures a light-field representation of a scene. This representation eliminates many of the limitations of current stereoscopic and autostereoscopic techniques. However, integral images have a large resolution and a structure based on microimages which is challenging to encode. In this paper a compression scheme for integral images based on view extraction is proposed. Average BD-rate gains of 15.7% and up to 31.3% are reported over HEVC. Parameters of the proposed coding scheme can take a large range of values. Results are first provided with an exhaustive search of the best configuration. Then an RD criterion is proposed to avoid exhaustive search methods, saving runtime while preserving the gains. Finally, additional runtime savings are reported by exploring how the different parameters interact.\n
\n\n\n
\n\n\n
A technique for fake 3D (2D-to-3D converted) video recognition. Kakaletsis, E.; and Nikolaidis, N. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 106-109, Aug 2015.
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362354,\n  author = {E. Kakaletsis and N. Nikolaidis},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A technique for fake 3D (2D-to-3D converted) video recognition},\n  year = {2015},\n  pages = {106-109},\n  abstract = {In this paper, we propose a technique for the automatic recognition of {"}fake{"} stereoscopic videos/movies i.e., videos which result from classic 2D videos through a 2D to 3D conversion process. Essentially, the proposed technique distinguishes between 2D movies converted to 3D and real stereoscopic ones. It is based on the difference in sharpness around foreground objects in a converted stereo frame pair caused from the in-painting step that takes place after the generation of the right frame (rendered view) from the left frame (source view). The two variants of the algorithm, one utilizing a two-class Support Vector Machine and another one that follows a threshold based classification approach, use a sharpness metric evaluated on a stripe created around foreground objects such as human figures. Experimental evaluation of the proposed algorithm, which can serve as 3D quality characterization tool, is conducted on several stereoscopic movies with very promising results.},\n  keywords = {stereo image processing;support vector machines;3D video recognition;stereoscopic videos/movies;2D to 3D conversion;2D movies;support vector machine;Three-dimensional displays;Motion pictures;Signal processing algorithms;Measurement;Stereo image processing;Support vector machines;Estimation;3DTV;3D cinema;stereoscopic video;Real 3D video;Fake 3D video;2D to 3D Video Conversion;quality assessment},\n  doi = {10.1109/EUSIPCO.2015.7362354},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102189.pdf},\n}\n\n
\n
\n\n\n
In this paper, we propose a technique for the automatic recognition of "fake" stereoscopic videos/movies, i.e., videos which result from classic 2D videos through a 2D-to-3D conversion process. Essentially, the proposed technique distinguishes between 2D movies converted to 3D and real stereoscopic ones. It is based on the difference in sharpness around foreground objects in a converted stereo frame pair caused by the in-painting step that takes place after the generation of the right frame (rendered view) from the left frame (source view). The two variants of the algorithm, one utilizing a two-class Support Vector Machine and another that follows a threshold-based classification approach, use a sharpness metric evaluated on a stripe created around foreground objects such as human figures. Experimental evaluation of the proposed algorithm, which can serve as a 3D quality characterization tool, is conducted on several stereoscopic movies with very promising results.
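A sketch of the stripe-based sharpness measurement described above, assuming a binary foreground mask is available; variance of the Laplacian serves as the sharpness proxy here, which may differ from the paper's exact metric:

```python
import numpy as np
from scipy import ndimage

# Sharpness on a stripe just outside a foreground object.
def stripe_sharpness(gray, fg_mask, width=5):
    dilated = ndimage.binary_dilation(fg_mask, iterations=width)
    stripe = dilated & ~fg_mask               # ring around the object
    lap = ndimage.laplace(gray.astype(float))
    return lap[stripe].var()

# A converted (rendered) right view tends to be blurrier on this stripe
# than a captured one, so comparing left/right stripe sharpness, by a
# threshold or a two-class SVM, separates fake from real stereo pairs.
rng = np.random.default_rng(0)
img = rng.random((64, 64))
mask = np.zeros((64, 64), dtype=bool)
mask[20:40, 20:40] = True
print(stripe_sharpness(img, mask))
```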
\n\n\n
\n\n\n
Rate-distortion optimised quantisation for HEVC using spatial just noticeable distortion. Dias, A. S.; Siekmann, M.; Bosse, S.; Schwarz, H.; Marpe, D.; and Mrak, M. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 110-114, Aug 2015.
\n\n\n\n \n \n \"Rate-distortionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362355,\n  author = {A. S. Dias and M. Siekmann and S. Bosse and H. Schwarz and D. Marpe and M. Mrak},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Rate-distortion optimised quantisation for HEVC using spatial just noticeable distortion},\n  year = {2015},\n  pages = {110-114},\n  abstract = {Due to the higher requirements associated with Ultra High Definition (UHD) resolutions in terms of memory and transmission bandwidth, the feasibility of UHD video communication applications is strongly dependent on the performance of video compression solutions. Even though the High Efficiency Video Coding (HEVC) standard allows significantly superior rate-distortion performances compared to previous video coding standards, further performance improvements are possible when exploiting the perceptual properties of the Human Visual System (HVS). This paper proposes a novel perceptual-based solution fully compliant with the HEVC standard, where a low complexity Just Noticeable Distortion model is used to drive the encoder's rate-distortion optimised quantisation process. This technique allows a simple and effective way to influence the decisions made at the encoder, based on the limitations of the HVS. The experiments conducted for UHD resolutions show average bitrate savings of 21% with no visual quality degradations when compared to the HEVC reference software.},\n  keywords = {distortion;high definition video;optimisation;quantisation (signal);video codecs;video coding;visual perception;HEVC reference software;encoder rate-distortion optimised quantisation process;just noticeable distortion model;HVS;human visual system;perceptual properties;video coding standards;HEVC standard;high efficiency video coding;video compression;UHD video communication applications;transmission bandwidth;UHD resolutions;ultra high definition resolutions;spatial just noticeable distortion;Europe;Signal processing;5G mobile communication;Conferences;Just Noticeable Distortion;Rate-Distortion Optimised Quantisation;Perceptual Video Compression;HEVC;UHD},\n  doi = {10.1109/EUSIPCO.2015.7362355},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104417.pdf},\n}\n\n
\n
\n\n\n
\n Due to the higher requirements associated with Ultra High Definition (UHD) resolutions in terms of memory and transmission bandwidth, the feasibility of UHD video communication applications is strongly dependent on the performance of video compression solutions. Even though the High Efficiency Video Coding (HEVC) standard allows significantly superior rate-distortion performances compared to previous video coding standards, further performance improvements are possible when exploiting the perceptual properties of the Human Visual System (HVS). This paper proposes a novel perceptual-based solution fully compliant with the HEVC standard, where a low complexity Just Noticeable Distortion model is used to drive the encoder's rate-distortion optimised quantisation process. This technique allows a simple and effective way to influence the decisions made at the encoder, based on the limitations of the HVS. The experiments conducted for UHD resolutions show average bitrate savings of 21% with no visual quality degradations when compared to the HEVC reference software.\n
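As a sketch of how a spatial JND threshold can enter the familiar rate-distortion cost J = D + λR, the function below treats reconstruction error under the local JND as invisible. The clipping rule and names are illustrative assumptions; the paper's integration into HEVC's rate-distortion optimised quantisation is more involved:

```python
import numpy as np

# Perceptually weighted RD cost (sketch): error below the per-pixel JND
# threshold contributes nothing to the perceived distortion term.
def perceptual_rd_cost(orig, recon, bits, jnd_map, lam):
    err = np.abs(orig.astype(float) - recon.astype(float))
    visible = np.maximum(err - jnd_map, 0.0)   # sub-JND error is "free"
    return float((visible ** 2).sum()) + lam * bits
```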
\n\n\n
\n\n\n
Perceptually-friendly rate distortion optimization in high efficiency video coding. Valizadeh, S.; Nasiopoulos, P.; and Ward, R. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 115-119, Aug 2015.
\n\n\n\n \n \n \"Perceptually-friendlyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362356,\n  author = {S. Valizadeh and P. Nasiopoulos and R. Ward},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Perceptually-friendly rate distortion optimization in high efficiency video coding},\n  year = {2015},\n  pages = {115-119},\n  abstract = {We propose the employment of a perceptual video quality metric in measuring the distortion in the High Efficiency Video Coding (HEVC) Standard. The mean square error presently used as quality metric is not a good measure to use, as it poorly correlates with human perception. Integration of a video quality metric based on the characteristics of the Human Visual System (HVS) inside the rate distortion optimization procedure is expected to improve the compression efficiency of the video coding. In this paper, the PSNR-HVS measure is used in the rate distortion optimization process. The compression efficiency of the proposed approach is compared to that used by HEVC, the recent video coding standard. Simulations prove that the proposed approach yields higher compression efficiency and provides better visual quality.},\n  keywords = {data compression;mean square error methods;optimisation;rate distortion theory;video coding;perceptually-friendly rate distortion optimization;perceptual video quality metric;distortion measurement;HEVC standard;high efficiency video coding standard;mean square error;human perception;human visual system;video compression efficiency;PSNR-HVS;rate distortion optimization process;Bit rate;Video recording;Quality assessment;Rate-distortion;Optimization;Distortion measurement;Perceptual video coding;rate distortion optimization (RDO);human visual system (HVS);PSNR-HVS;high efficiency video coding (HEVC)},\n  doi = {10.1109/EUSIPCO.2015.7362356},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105165.pdf},\n}\n\n
\n
\n\n\n
We propose employing a perceptual video quality metric to measure the distortion in the High Efficiency Video Coding (HEVC) Standard. The mean square error presently used as the quality metric is not a good measure, as it poorly correlates with human perception. Integrating a video quality metric based on the characteristics of the Human Visual System (HVS) inside the rate distortion optimization procedure is expected to improve the compression efficiency of the video coding. In this paper, the PSNR-HVS measure is used in the rate distortion optimization process. The compression efficiency of the proposed approach is compared to that of HEVC, the recent video coding standard. Simulations show that the proposed approach yields higher compression efficiency and provides better visual quality.
\n\n\n
\n\n\n
Speaker emotional state classification by DPM models with annealed SMC samplers. Gunsel, B.; Cirakman, O.; and Krajewski, J. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 120-124, Aug 2015.
\n\n\n\n \n \n \"SpeakerPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362357,\n  author = {B. Gunsel and O. Cirakman and J. Krajewski},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Speaker emotional state classification by DPM models with annealed SMC samplers},\n  year = {2015},\n  pages = {120-124},\n  abstract = {We propose a speaker emotional state classification method that employs inference-based Bayesian networks to learn posterior density of emotional speech sequentially. We aim to alleviate difficulty in detecting medium-term states where the required monitoring time is longer compared to short-term emotional states that makes temporal content representation harder. Our inference algorithm takes advantage of the Sequential Monte Carlo (SMC) sampling and recursively approximates the Dirichlet Process Mixtures (DPM) model of the speaker state class density with unknown number of components. After learning the target posterior, classification of speaker states has been performed by a simple minimum distance classifier. Test results obtained on two different datasets demonstrate the proposed method highly reduces the training data length while providing comparable accuracy compared to the existing state-of-the-art techniques.},\n  keywords = {Bayes methods;emotion recognition;inference mechanisms;mixture models;Monte Carlo methods;signal classification;signal representation;signal sampling;speaker recognition;training data reduction;Dirichlet process mixture model;SMC sampling;sequential Monte Carlo sampling;temporal content representation;emotional speech sequentially posterior density;inference-based Bayesian network;annealed SMC sampler;DPM model;speaker emotional state classification;Decision support systems;Europe;Signal processing;Conferences;Yttrium;Graphical models;emotion classification;Dirichlet Process Mixtures model;perceptual audio features;HCI},\n  doi = {10.1109/EUSIPCO.2015.7362357},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103569.pdf},\n}\n\n
\n
\n\n\n
We propose a speaker emotional state classification method that employs inference-based Bayesian networks to sequentially learn the posterior density of emotional speech. We aim to alleviate the difficulty of detecting medium-term states, where the required monitoring time is longer than for short-term emotional states, which makes temporal content representation harder. Our inference algorithm takes advantage of Sequential Monte Carlo (SMC) sampling and recursively approximates the Dirichlet Process Mixtures (DPM) model of the speaker state class density with an unknown number of components. After learning the target posterior, classification of speaker states is performed by a simple minimum distance classifier. Test results obtained on two different datasets demonstrate that the proposed method greatly reduces the required training data length while providing accuracy comparable to existing state-of-the-art techniques.
\n\n\n
\n\n\n
Deep neural networks for audio scene recognition. Petetin, Y.; Laroche, C.; and Mayoue, A. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 125-129, Aug 2015.
\n\n\n\n \n \n \"DeepPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362358,\n  author = {Y. Petetin and C. Laroche and A. Mayoue},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Deep neural networks for audio scene recognition},\n  year = {2015},\n  pages = {125-129},\n  abstract = {These last years, artificial neural networks (ANN) have known a renewed interest since efficient training procedures have emerged to learn the so called deep neural networks (DNN), i.e. ANN with at least two hidden layers. In the same time, the computational auditory scene recognition (CASR) problem which consists in estimating the environment around a device from the received audio signal has been investigated. Most of works which deal with the CASR problem have tried to find well-adapted features for this problem. However, these features are generally combined with a classical classifier. In this paper, we introduce DNN in the CASR field and we show that such networks can provide promising results and perform better than standard classifiers when the same features are used.},\n  keywords = {audio signal processing;belief networks;learning (artificial intelligence);neural nets;signal classification;deep neural networks;computational auditory scene recognition problem;artificial neural networks;ANN;training procedures;CASR problem;deep belief networks;Artificial neural networks;Training;Mel frequency cepstral coefficient;Context;Europe;Signal processing;Deep neural networks;deep belief networks;audio scene recognition},\n  doi = {10.1109/EUSIPCO.2015.7362358},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103783.pdf},\n}\n\n
\n
\n\n\n
In recent years, artificial neural networks (ANN) have seen renewed interest, since efficient training procedures have emerged to learn so-called deep neural networks (DNN), i.e., ANN with at least two hidden layers. At the same time, the computational auditory scene recognition (CASR) problem, which consists in estimating the environment around a device from the received audio signal, has been investigated. Most works dealing with the CASR problem have tried to find well-adapted features for this problem. However, these features are generally combined with a classical classifier. In this paper, we introduce DNN in the CASR field and we show that such networks can provide promising results and perform better than standard classifiers when the same features are used.
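For reference, "deep" above means at least two hidden layers; a minimal two-hidden-layer classifier in scikit-learn looks as follows. The MFCC-like features and scene labels are random stand-ins, not the paper's data or architecture:

```python
import numpy as np
from sklearn.neural_network import MLPClassifier

rng = np.random.default_rng(2)
X = rng.standard_normal((200, 13))   # e.g. 13 MFCC-like features per clip
y = rng.integers(0, 4, size=200)     # 4 hypothetical acoustic scenes

# Two hidden layers of 64 units each, i.e. a small DNN in the above sense.
clf = MLPClassifier(hidden_layer_sizes=(64, 64), max_iter=500, random_state=0)
clf.fit(X, y)
print("train accuracy:", clf.score(X, y))
```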
\n\n\n
\n\n\n
Design of optimal matrices for compressive sensing: Application to environmental sounds. Bouchhima, B.; Amara, R.; and Alouane, M. T. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 130-134, Aug 2015.
\n\n\n\n \n \n \"DesignPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362359,\n  author = {B. Bouchhima and R. Amara and M. T. Alouane},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Design of optimal matrices for compressive sensing: Application to environmental sounds},\n  year = {2015},\n  pages = {130-134},\n  abstract = {In a compressive sensing context, we propose a solution for a full learning of the dictionary composed of the sparsity basis and the measurement matrix. The sparsity basis learning process is achieved using Empirical Mode Decomposition (EMD) and Hilbert transformation. EMD being a data-driven decomposition method, the resulting sparsity basis shows high sparsifying capacities. On the other hand, a gradient method is applied for the design of the measurement matrix. The method integrates the dictionary normalization into the target function. It is shown to support large scale problems and to have a good convergence and high performance. The evaluation of the whole approach is done on a set of environmental sounds, and is based on a couple of key criteria: sparsity degree and incoherence. Experimental results demonstrate that our approach achieves well with regards to mutual coherence reduction and signal reconstruction at low sparsity degrees.},\n  keywords = {compressed sensing;Hilbert transforms;matrix algebra;signal reconstruction;dictionary learning;signal reconstruction;dictionary normalization;data-driven decomposition;Hilbert transformation;EMD;empirical mode decomposition;sparsity basis learning process;measurement matrix;environmental sounds;compressive sensing;optimal matrices design;Signal processing algorithms;Coherence;Dictionaries;Convergence;Compressed sensing;Gradient methods;Europe;Compressive Sensing;EMD;Environmental Sounds;Sparsity;Measurement Matrix;Incoherence},\n  doi = {10.1109/EUSIPCO.2015.7362359},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104363.pdf},\n}\n\n
\n
\n\n\n
In a compressive sensing context, we propose a solution for fully learning the dictionary composed of the sparsity basis and the measurement matrix. The sparsity basis learning process is achieved using Empirical Mode Decomposition (EMD) and the Hilbert transform. EMD being a data-driven decomposition method, the resulting sparsity basis shows high sparsifying capacity. On the other hand, a gradient method is applied for the design of the measurement matrix. The method integrates the dictionary normalization into the target function. It is shown to support large-scale problems and to have good convergence and high performance. The evaluation of the whole approach is done on a set of environmental sounds, and is based on two key criteria: sparsity degree and incoherence. Experimental results demonstrate that our approach performs well with regard to mutual coherence reduction and signal reconstruction at low sparsity degrees.
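The incoherence criterion mentioned above is typically the mutual coherence of the equivalent dictionary D = Phi @ Psi, i.e. the largest absolute inner product between distinct normalized columns, which the measurement-matrix design tries to drive down. A short computation, with random matrices standing in for the learned ones:

```python
import numpy as np

def mutual_coherence(D):
    Dn = D / np.linalg.norm(D, axis=0, keepdims=True)  # unit-norm columns
    G = np.abs(Dn.T @ Dn)                              # absolute Gram matrix
    np.fill_diagonal(G, 0.0)                           # ignore self-products
    return G.max()

rng = np.random.default_rng(3)
Phi = rng.standard_normal((32, 128))                    # measurement matrix
Psi = np.linalg.qr(rng.standard_normal((128, 128)))[0]  # sparsity basis
print("mutual coherence:", mutual_coherence(Phi @ Psi))
```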
\n\n\n
\n\n\n
Shrinkage methods for one-class classification. Nader, P.; Honeine, P.; and Beauseroy, P. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 135-139, Aug 2015.
\n\n\n\n \n \n \"ShrinkagePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362360,\n  author = {P. Nader and P. Honeine and P. Beauseroy},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Shrinkage methods for one-class classification},\n  year = {2015},\n  pages = {135-139},\n  abstract = {Over the last decades, machine learning techniques have been an important asset for detecting nonlinear relations in data. In particular, one-class classification has been very popular in many fields, specifically in applications where the available data refer to a unique class only. In this paper, we propose a sparse approach for one-class classification problems. We define the one-class by the hypersphere enclosing the samples in the Reproducing Kernel Hilbert Space, where the center of this hypersphere depends only on a small fraction of the training dataset. The selection of the most relevant samples is achieved through shrinkage methods, namely Least Angle Regression, Least Absolute Shrinkage and Selection Operator, and Elastic Net. We modify these selection methods and adapt them for estimating the one-class center in the RKHS. We compare our algorithms to well-known one-class methods, and the experimental analysis is conducted on real datasets.},\n  keywords = {compressed sensing;Hilbert spaces;learning (artificial intelligence);regression analysis;shrinkage;signal classification;shrinkage methods;one-class classification;machine learning techniques;kernel Hilbert space;least angle regression;least absolute shrinkage;selection operator;elastic net;Kernel;Signal processing algorithms;Training;Support vector machines;Mathematical model;Correlation;Europe;One-class classification;kernel methods;shrinkage methods},\n  doi = {10.1109/EUSIPCO.2015.7362360},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104413.pdf},\n}\n\n
\n
\n\n\n
Over the last decades, machine learning techniques have been an important asset for detecting nonlinear relations in data. In particular, one-class classification has been very popular in many fields, specifically in applications where the available data refer to a unique class only. In this paper, we propose a sparse approach for one-class classification problems. We define the one-class by the hypersphere enclosing the samples in the Reproducing Kernel Hilbert Space, where the center of this hypersphere depends only on a small fraction of the training dataset. The selection of the most relevant samples is achieved through shrinkage methods, namely Least Angle Regression, Least Absolute Shrinkage and Selection Operator, and Elastic Net. We modify these selection methods and adapt them for estimating the one-class center in the RKHS. We compare our algorithms to well-known one-class methods, and the experimental analysis is conducted on real datasets.
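A rough sketch of the sparse-center idea: approximate the empirical RKHS mean (a natural one-class center) by a sparse combination of training samples, using an off-the-shelf Lasso on the kernel matrix. This mirrors the shrinkage-based selection of a few relevant samples but is only a proxy, not the paper's adapted LARS/LASSO/Elastic Net formulation:

```python
import numpy as np
from sklearn.linear_model import Lasso
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.default_rng(4)
X = rng.standard_normal((100, 5))
K = rbf_kernel(X, X, gamma=0.5)
target = K.mean(axis=1)        # <phi(x_i), mean element> for each sample i

# Sparse weights w such that sum_j w_j * phi(x_j) approximates the center.
w = Lasso(alpha=0.01, max_iter=10_000).fit(K, target).coef_
print("center supported on", np.count_nonzero(w), "of", len(w), "samples")
```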
\n\n\n
\n\n\n
Vector quantization with constrained likelihood for face recognition. Kostadinov, D.; Voloshynovskiy, S.; Diephuis, M.; and Ferdowsi, S. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 140-144, Aug 2015.
\n\n\n\n \n \n \"VectorPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362361,\n  author = {D. Kostadinov and S. Voloshynovskiy and M. Diephuis and S. Ferdowsi},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Vector quantization with constrained likelihood for face recognition},\n  year = {2015},\n  pages = {140-144},\n  abstract = {In this paper, we investigate the problem of visual information encoding and decoding for face recognition. We propose a decomposition representation with vector quantization and constrained likelihood projection. The optimal solution is considered from the point of view of the best achievable classification accuracy by minimizing the probability of error under a given class of distortions. The performance of the proposed model of information encoding/decoding is compared with the performance of those based on sparse representation. The computer simulation results confirm the superiority of the proposed vector quantization based recognition over sparse representation based recognition on several face image databases.},\n  keywords = {error statistics;face recognition;image representation;vector quantisation;face recognition;vector quantization;visual information encoding problem;visual information decoding problem;decomposition representation;constrained likelihood projection;optimal solution;error probability;sparse representation;face image databases;Decoding;Face recognition;Vector quantization;Encoding;Europe;Reliability;quantization;visual information encoding/decoding;face recognition;identification},\n  doi = {10.1109/EUSIPCO.2015.7362361},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104815.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we investigate the problem of visual information encoding and decoding for face recognition. We propose a decomposition representation with vector quantization and constrained likelihood projection. The optimal solution is considered from the point of view of the best achievable classification accuracy by minimizing the probability of error under a given class of distortions. The performance of the proposed model of information encoding/decoding is compared with the performance of those based on sparse representation. The computer simulation results confirm the superiority of the proposed vector quantization based recognition over sparse representation based recognition on several face image databases.\n
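A toy nearest-codeword classifier showing the vector-quantization view of recognition (one mean codeword per class; the paper's codebook construction and constrained-likelihood projection are not reproduced here):

```python
import numpy as np

# Minimal VQ-style classifier: a probe gets the label of its nearest codeword.
def vq_train(X, y):
    return {c: X[y == c].mean(axis=0) for c in np.unique(y)}

def vq_classify(codebook, probe):
    return min(codebook, key=lambda c: np.linalg.norm(probe - codebook[c]))

rng = np.random.default_rng(1)
X = np.vstack([rng.normal(i, 1.0, (20, 8)) for i in range(3)])
y = np.repeat([0, 1, 2], 20)
codebook = vq_train(X, y)
print(vq_classify(codebook, X[25]))   # a sample drawn from class 1
```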
\n\n\n
\n\n\n
Shape-based fish recognition via shape space. Nasreddine, K.; and Benzinou, A. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 145-149, Aug 2015.
\n\n\n\n \n \n \"Shape-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362362,\n  author = {K. Nasreddine and A. Benzinou},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Shape-based fish recognition via shape space},\n  year = {2015},\n  pages = {145-149},\n  abstract = {Automatic fish recognition is a recent research work which is needed to assist marine scientists. Among most discriminative features, the fish outline is very efficient for fish recognition. In a previous work, we proposed a method for pattern recognition (classification and retrieval) based on signal registration and shape geodesics. In this paper, we introduce a preliminary step of pose estimation for accelerating the processing time. We then show that shape geodesics may also be used for outline-based fish recognition. Experiments conducted on the SQUID database which is used as a benchmark to evaluate fish shape recognition, show (1) a reduction in computation time by a factor of ten on average, and (2) the outperformance of the proposed scheme compared to previous methods.},\n  keywords = {aquaculture;image classification;image registration;marine engineering;pose estimation;shape-based fish recognition;shape space;automatic fish recognition;marine scientists;fish outline;pattern recognition;signal registration;shape geodesics;pose estimation;outline-based fish recognition;SQUID database;fish shape recognition;Shape;Databases;SQUIDs;Robustness;Estimation;Benchmark testing;Fish recognition;outline;shape;geodesics},\n  doi = {10.1109/EUSIPCO.2015.7362362},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104943.pdf},\n}\n\n
\n
\n\n\n
Automatic fish recognition is a recent research effort intended to assist marine scientists. Among the most discriminative features, the fish outline is very efficient for fish recognition. In a previous work, we proposed a method for pattern recognition (classification and retrieval) based on signal registration and shape geodesics. In this paper, we introduce a preliminary pose estimation step to accelerate the processing time. We then show that shape geodesics may also be used for outline-based fish recognition. Experiments conducted on the SQUID database, which is used as a benchmark to evaluate fish shape recognition, show (1) a reduction in computation time by a factor of ten on average, and (2) that the proposed scheme outperforms previous methods.
\n\n\n
\n\n\n
Parts-based shape recognition via shape geodesics. Merhy, M.; Benzinou, A.; Nasreddine, K.; Khalil, M.; and Faour, G. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 150-154, Aug 2015.
\n\n\n\n \n \n \"Parts-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362363,\n  author = {M. Merhy and A. Benzinou and K. Nasreddine and M. Khalil and G. Faour},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Parts-based shape recognition via shape geodesics},\n  year = {2015},\n  pages = {150-154},\n  abstract = {The quality of the segmentation process directly affects the performance of the shape recognition. In this paper, we address the problem of shape recognition using only the available shape parts instead of the whole shape. For this purpose, we propose a shape parts recognition strategy that uses a robust distance based on geodesics in the shape space. The proposed combining strategy seeks to handle the contour discontinuity that can occur in edge maps due to various disturbing factors encountered in real images. The experimental validation through the MPEG-7 shape database and some real images demonstrates the efficiency of our proposed approach.},\n  keywords = {differential geometry;shape recognition;shape geodesics;shape parts recognition;MPEG-7 shape database;Shape;Databases;Transform coding;Image segmentation;Europe;Signal processing;Robustness;shape recognition;shape parts;combining strategy;geodesics;real images},\n  doi = {10.1109/EUSIPCO.2015.7362363},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105005.pdf},\n}\n\n
\n
\n\n\n
The quality of the segmentation process directly affects the performance of the shape recognition. In this paper, we address the problem of shape recognition using only the available shape parts instead of the whole shape. For this purpose, we propose a shape parts recognition strategy that uses a robust distance based on geodesics in the shape space. The proposed combining strategy seeks to handle the contour discontinuity that can occur in edge maps due to various disturbing factors encountered in real images. The experimental validation through the MPEG-7 shape database and some real images demonstrates the efficiency of our proposed approach.
\n\n\n
\n\n\n
Exploiting symmetry in two-dimensional clustering-based discriminant analysis for face recognition. Papachristou, K.; Tefas, A.; and Pitas, I. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 155-159, Aug 2015.
\n\n\n\n \n \n \"ExploitingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362364,\n  author = {K. Papachristou and A. Tefas and I. Pitas},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Exploiting symmetry in two-dimensional clustering-based discriminant analysis for face recognition},\n  year = {2015},\n  pages = {155-159},\n  abstract = {Subspace learning techniques are among the most popular methods for face recognition. In this paper, we propose a novel face recognition technique for two dimensional subspace learning which is able to exploit the symmetry nature of human faces. We extend the Two Dimensional Clustering based Discriminant Analysis (2DCDA) by incorporating an appropriate symmetry regularizer into its objective function in order to determine symmetric projection vectors. The proposed Symmetric Two Dimensional Clustering based Discriminant Analysis technique has been applied to the face recognition problem. Experimental results showed that the proposed technique achieves better classification performance in comparison to the standard one.},\n  keywords = {face recognition;learning (artificial intelligence);two dimensional subspace learning;2DCDA;symmetry regularizer;symmetric projection vectors;symmetric two dimensional clustering based discriminant analysis technique;face recognition problem;classification performance;Databases;Standards;Face recognition;Face;Europe;Signal processing;Lighting;face recognition;subspace learning;symmetry regularizer;two-dimensional clustering-based discriminant analysis},\n  doi = {10.1109/EUSIPCO.2015.7362364},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105027.pdf},\n}\n\n
\n
\n\n\n
Subspace learning techniques are among the most popular methods for face recognition. In this paper, we propose a novel face recognition technique for two dimensional subspace learning which is able to exploit the symmetry nature of human faces. We extend the Two Dimensional Clustering based Discriminant Analysis (2DCDA) by incorporating an appropriate symmetry regularizer into its objective function in order to determine symmetric projection vectors. The proposed Symmetric Two Dimensional Clustering based Discriminant Analysis technique has been applied to the face recognition problem. Experimental results showed that the proposed technique achieves better classification performance in comparison to the standard one.
\n\n\n
\n\n\n
Mapping dynamical states to structural classes for Boolean networks using a classification algorithm. Sarbu, S.; Shmulevich, I.; Yli-Harja, O.; Nykter, M.; and Kesseli, J. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 160-164, Aug 2015.
\n\n\n\n \n \n \"MappingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362365,\n  author = {S. Sarbu and I. Shmulevich and O. Yli-Harja and M. Nykter and J. Kesseli},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Mapping dynamical states to structural classes for Boolean networks using a classification algorithm},\n  year = {2015},\n  pages = {160-164},\n  abstract = {Complex systems have received growing interest recently, due to their universal presence in all areas of science and engineering. Complex networks represent a simplified description of the interactions present in such systems. Boolean networks were introduced as models of gene regulatory networks. Simple enough to be computationally tractable, they capture the rich dynamical behaviour of complex networks. Structure-dynamics relationships in Boolean networks have been investigated by inferring a particular structure of a network from the time sequence of its dynamical states. However, general properties of network structures, which can be obtained from their dynamics, are lacking. We create a mapping of dynamical states to structural classes, using time-delayed normalized mutual information, in an ensemble approach. The high accuracy of our classification algorithm proves that structural information is embedded in network dynamics and that we can extract it with information-theoretic methods.},\n  keywords = {Boolean algebra;complex networks;signal classification;dynamical state mapping;structural class;Boolean network;classification algorithm;complex network;gene regulatory network;time sequence;time-delayed normalized mutual information;ensemble approach;information-theoretic method;Boolean functions;Complex networks;Mutual information;Yttrium;Signal processing;Support vector machines;Europe;Boolean networks;structural classes;information theory;classification;feature extraction},\n  doi = {10.1109/EUSIPCO.2015.7362365},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105145.pdf},\n}\n\n
\n
\n\n\n
\n Complex systems have received growing interest recently, due to their universal presence in all areas of science and engineering. Complex networks represent a simplified description of the interactions present in such systems. Boolean networks were introduced as models of gene regulatory networks. Simple enough to be computationally tractable, they capture the rich dynamical behaviour of complex networks. Structure-dynamics relationships in Boolean networks have been investigated by inferring a particular structure of a network from the time sequence of its dynamical states. However, general properties of network structures, which can be obtained from their dynamics, are lacking. We create a mapping of dynamical states to structural classes, using time-delayed normalized mutual information, in an ensemble approach. The high accuracy of our classification algorithm proves that structural information is embedded in network dynamics and that we can extract it with information-theoretic methods.\n
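The feature behind the classifier is time-delayed normalized mutual information between state sequences; a small sketch follows, where the Boolean dynamics are a noisy delayed copy standing in for real network trajectories:

```python
import numpy as np
from sklearn.metrics import normalized_mutual_info_score

# NMI between a Boolean sequence and a delayed partner: NMI(x_t, y_{t+d}).
def delayed_nmi(x, y, d=1):
    return normalized_mutual_info_score(x[:-d], y[d:])

rng = np.random.default_rng(5)
x = rng.integers(0, 2, 1000)
y = np.roll(x, 1) ^ (rng.random(1000) < 0.1)   # delayed copy with 10% flips
print(delayed_nmi(x, y, d=1))                  # high for strong coupling
```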
\n\n\n
\n\n\n
Bootstrap-based SVM aggregation for class imbalance problems. Sukhanov, S.; Merentitis, A.; Debes, C.; Hahn, J.; and Zoubir, A. M. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 165-169, Aug 2015.
\n\n\n\n \n \n \"Bootstrap-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362366,\n  author = {S. Sukhanov and A. Merentitis and C. Debes and J. Hahn and A. M. Zoubir},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bootstrap-based SVM aggregation for class imbalance problems},\n  year = {2015},\n  pages = {165-169},\n  abstract = {Support Vector Machines (SVMs) are considered to be one of the most powerful classification tools, widely used in many applications. However, in numerous scenarios the classes are not equally represented and the predictive performance of SVMs on such data can drop dramatically. Different methods have been proposed to address moderate class imbalance issues, but there are few methods that can be successful at detecting the minority class while also keeping high accuracy, especially when applied to datasets with significant level of imbalance. In this paper, we consider SVM ensembles that are built by using a bootstrap-based undersampling technique. We target to reduce the bias induced by class imbalances via multiple undersampling procedures and then decrease the variance using SVM ensembles. For combining the SVMs, we propose a new technique that deals with class imbalance problems of varying levels. Experiments on several datasets demonstrate the performance of the proposed scheme compared to state-of-the-art balancing methods.},\n  keywords = {learning (artificial intelligence);pattern classification;statistical analysis;support vector machines;bootstrap-based SVM aggregation;support vector machines;class imbalance problems;classification tools;minority class detection;bootstrap-based undersampling technique;bias reduction;multiple undersampling procedures;ensemble learning methods;Support vector machines;Training;Bagging;Signal processing;Signal processing algorithms;Europe;Training data;SVMs;Imbalanced dataset;Undersam-pling;Ensemble learning methods},\n  doi = {10.1109/EUSIPCO.2015.7362366},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104039.pdf},\n}\n\n
\n
\n\n\n
Support Vector Machines (SVMs) are considered to be one of the most powerful classification tools, widely used in many applications. However, in numerous scenarios the classes are not equally represented, and the predictive performance of SVMs on such data can drop dramatically. Different methods have been proposed to address moderate class imbalance issues, but few methods can successfully detect the minority class while also keeping high accuracy, especially when applied to datasets with a significant level of imbalance. In this paper, we consider SVM ensembles that are built by using a bootstrap-based undersampling technique. We aim to reduce the bias induced by class imbalances via multiple undersampling procedures and then decrease the variance using SVM ensembles. For combining the SVMs, we propose a new technique that deals with class imbalance problems of varying levels. Experiments on several datasets demonstrate the performance of the proposed scheme compared to state-of-the-art balancing methods.
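A concrete sketch of the bootstrap-based undersampling ensemble: each member SVM sees all minority samples plus a bootstrap sample of equal size from the majority class, and predictions are combined here by plain majority vote (the paper proposes a more elaborate combiner):

```python
import numpy as np
from sklearn.svm import SVC

def train_ensemble(X, y, n_members=11, seed=0):
    rng = np.random.default_rng(seed)
    minority = np.flatnonzero(y == 1)
    majority = np.flatnonzero(y == 0)
    members = []
    for _ in range(n_members):
        sub = rng.choice(majority, size=minority.size, replace=True)  # bootstrap
        idx = np.concatenate([minority, sub])
        members.append(SVC(kernel="rbf").fit(X[idx], y[idx]))
    return members

def predict(members, X):
    votes = np.stack([m.predict(X) for m in members])
    return (votes.mean(axis=0) > 0.5).astype(int)

rng = np.random.default_rng(1)
X = np.vstack([rng.normal(0.0, 1.0, (500, 4)), rng.normal(1.5, 1.0, (50, 4))])
y = np.r_[np.zeros(500, int), np.ones(50, int)]
print("minority recall:", predict(train_ensemble(X, y), X)[y == 1].mean())
```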
\n\n\n
\n\n\n
Variational Gaussian process for sensor fusion. Rohani, N.; Ruiz, P.; Besler, E.; Molina, R.; and Katsaggelos, A. K. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 170-174, Aug 2015.
\n\n\n\n \n \n \"VariationalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362367,\n  author = {N. Rohani and P. Ruiz and E. Besler and R. Molina and A. K. Katsaggelos},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Variational Gaussian process for sensor fusion},\n  year = {2015},\n  pages = {170-174},\n  abstract = {In this paper, we introduce a new Gaussian Process (GP) classification method for multisensory data. The proposed approach can deal with noisy and missing data. It is also capable of estimating the contribution of each sensor towards the classification task. We use Bayesian modeling to build a GP-based classifier which combines the information provided by all sensors and approximates the posterior distribution of the GP using variational Bayesian inference. During its training phase, the algorithm estimates each sensor's weight and then uses this information to assign a label to each new sample. In the experimental section, we evaluate the classification performance of the proposed method on both synthetic and real data and show its applicability to different scenarios.},\n  keywords = {Bayes methods;Gaussian processes;sensor fusion;signal classification;Gaussian process classification method;multisensory data;Bayesian modeling;GP-based classifier;posterior distribution;variational Bayesian inference;sensor fusion;Robot sensing systems;Signal processing algorithms;Bayes methods;Training;Europe;Signal processing;Gaussian processes;Gaussian process;fusion;Bayesian modeling;variational inference;classification},\n  doi = {10.1109/EUSIPCO.2015.7362367},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097559.pdf},\n}\n\n
\n
\n\n\n
In this paper, we introduce a new Gaussian Process (GP) classification method for multisensory data. The proposed approach can deal with noisy and missing data. It is also capable of estimating the contribution of each sensor towards the classification task. We use Bayesian modeling to build a GP-based classifier which combines the information provided by all sensors and approximates the posterior distribution of the GP using variational Bayesian inference. During its training phase, the algorithm estimates each sensor's weight and then uses this information to assign a label to each new sample. In the experimental section, we evaluate the classification performance of the proposed method on both synthetic and real data and show its applicability to different scenarios.
\n\n\n
\n\n\n
Online path loss estimation for localization using large aperture array signal processing. Dagher, R.; and Mitton, N. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 175-179, Aug 2015.
\n\n\n\n \n \n \"OnlinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362368,\n  author = {R. Dagher and N. Mitton},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Online path loss estimation for localization using large aperture array signal processing},\n  year = {2015},\n  pages = {175-179},\n  abstract = {In this paper, a novel array-based method to estimate the path loss exponent (PLE) is developed. The method is designed as a part of an automatic calibration step, prior to localization of a source transmitting in the near-far field of the array. The method only requires the knowledge of the ranges between the array elements. By making the antenna elements transmit in turn, the array response model in the near-far field is exploited to estimate the current environment PLE. Simulation results show that this method can achieve good performance with one transmission round. The performance of the PLE estimation is investigated in the context of source localization with a sensitivity analysis to the PLE estimation.},\n  keywords = {aperture antennas;array signal processing;calibration;near-field communication;sensitivity analysis;source separation;sensitivity analysis;array response model;antenna elements;source localization;near-far field;automatic calibration;PLE estimation;path loss exponent estimation;large aperture array signal processing;online path loss estimation;Arrays;Estimation;Covariance matrices;Eigenvalues and eigenfunctions;Measurement;Signal processing;Manifolds;Array processing;localization;path loss estimation;spherical wave propagation},\n  doi = {10.1109/EUSIPCO.2015.7362368},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099349.pdf},\n}\n\n
\n
\n\n\n
In this paper, a novel array-based method to estimate the path loss exponent (PLE) is developed. The method is designed as part of an automatic calibration step, prior to localization of a source transmitting in the near-far field of the array. The method only requires knowledge of the ranges between the array elements. By making the antenna elements transmit in turn, the array response model in the near-far field is exploited to estimate the current environment PLE. Simulation results show that this method can achieve good performance with one transmission round. The performance of the PLE estimation is investigated in the context of source localization through a sensitivity analysis with respect to the PLE estimate.
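For intuition about the quantity being estimated: under the usual log-distance model, RSS(d) = P0 - 10*n*log10(d/d0), the PLE n follows from a linear least-squares fit once ranges and received powers are known. This shows the textbook relationship only; the paper's array-response method estimates n differently, and all values below are synthetic:

```python
import numpy as np

# Fit the path loss exponent n in RSS = P0 - 10*n*log10(d/d0) + noise.
rng = np.random.default_rng(6)
n_true, P0, d0 = 3.0, -30.0, 1.0
d = rng.uniform(1.0, 50.0, 200)                       # known ranges [m]
rss = P0 - 10 * n_true * np.log10(d / d0) + rng.normal(0.0, 2.0, d.size)

A = np.column_stack([np.ones_like(d), -10 * np.log10(d / d0)])
(P0_hat, n_hat), *_ = np.linalg.lstsq(A, rss, rcond=None)
print(f"estimated PLE: {n_hat:.2f} (true {n_true})")
```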
\n\n\n
\n\n\n
Optimal sensor-target geometries for Doppler-shift target localization. Nguyen, N. H.; and Doğangay, K. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 180-184, Aug 2015.
\n\n\n\n \n \n \"OptimalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362369,\n  author = {N. H. Nguyen and K. Doğangay},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Optimal sensor-target geometries for Doppler-shift target localization},\n  year = {2015},\n  pages = {180-184},\n  abstract = {Doppler-shift target localization has recently attracted renewed interest due to its wide range of applications. In this paper we analyze the optimal sensor-target geometries for the Doppler-shift target localization problem where the position and velocity of a moving target are estimated from Doppler-shift measurements taken at stationary sensors. The analysis is based on minimizing the estimation uncertainty, which is equivalent to maximizing the determinant of the Fisher information matrix. In particular, the optimal geometries that maximize the estimation accuracy for target position only, velocity only, and both position and velocity, are investigated. The analytical findings are verified by numerical examples.},\n  keywords = {Doppler shift;sensor placement;target tracking;optimal sensor-target geometry;Doppler-shift target localization;Doppler shift measurement;stationary sensor;estimation uncertainty;Fisher information matrix;Sensors;Geometry;Estimation;Velocity measurement;Europe;Signal processing;Position measurement;Optimal sensor placement;Doppler-shift measurement;localization;Fisher information matrix},\n  doi = {10.1109/EUSIPCO.2015.7362369},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103819.pdf},\n}\n\n
\n
\n\n\n
\n Doppler-shift target localization has recently attracted renewed interest due to its wide range of applications. In this paper we analyze the optimal sensor-target geometries for the Doppler-shift target localization problem where the position and velocity of a moving target are estimated from Doppler-shift measurements taken at stationary sensors. The analysis is based on minimizing the estimation uncertainty, which is equivalent to maximizing the determinant of the Fisher information matrix. In particular, the optimal geometries that maximize the estimation accuracy for target position only, velocity only, and both position and velocity, are investigated. The analytical findings are verified by numerical examples.\n
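For readers who want to probe such geometries numerically, the D-optimality criterion can be evaluated directly: form the Fisher information matrix from a finite-difference Jacobian of the Doppler measurement model and compare det(FIM) across candidate layouts. The sketch below is purely illustrative; the wavelength, noise level and sensor count are assumptions, and it does not reproduce the paper's analytical optima.

import numpy as np

def doppler(theta, sensors, wavelength=0.1):
    # theta = [px, py, vx, vy]; each stationary sensor sees the target's
    # radial velocity projected on the target-to-sensor bearing.
    p, v = theta[:2], theta[2:]
    u = sensors - p
    u = u / np.linalg.norm(u, axis=1, keepdims=True)
    return (u @ v) / wavelength

def fim(theta, sensors, sigma=1.0, eps=1e-6):
    J = np.zeros((len(sensors), 4))
    for k in range(4):                      # finite-difference Jacobian
        dt = np.zeros(4); dt[k] = eps
        J[:, k] = (doppler(theta + dt, sensors)
                   - doppler(theta - dt, sensors)) / (2 * eps)
    return J.T @ J / sigma**2

theta = np.array([0.0, 0.0, 10.0, 5.0])     # target position and velocity
rng = np.random.default_rng(1)
best = -np.inf
for _ in range(200):                        # crude random geometry search
    best = max(best, np.linalg.det(fim(theta, rng.uniform(-100, 100, (5, 2)))))
print("best det(FIM) over 200 random 5-sensor geometries:", best)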
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient cooperative localization algorithm in LOS/NLOS environments.\n \n \n \n \n\n\n \n Jin, D.; Yin, F.; Fritsche, C.; Zoubir, A. M.; and Gustafsson, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 185-189, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"EfficientPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362370,\n  author = {D. Jin and F. Yin and C. Fritsche and A. M. Zoubir and F. Gustafsson},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Efficient cooperative localization algorithm in LOS/NLOS environments},\n  year = {2015},\n  pages = {185-189},\n  abstract = {The well-known cooperative localization algorithm, `sum-product algorithm over a wireless network' (SPAWN) has two major shortcomings, a relatively high computational complexity and a large communication load. Using the Gaussian mixture model with a model selection criterion and the sigma-point (SP) methods, we propose the SPAWN-SP to overcome these problems. The SPAWN-SP easily accommodates different localization scenarios due to its high flexibility in message representation. Furthermore, harsh LOS/NLOS environments are considered for the evaluation of cooperative localization algorithms. Our simulation results indicate that the proposed SPAWN-SP demonstrates high localization accuracy in different localization scenarios, thanks to its high flexibility in message representation.},\n  keywords = {computational complexity;cooperative communication;Gaussian processes;mixture models;wireless sensor networks;message representation;SP method;sigma-point method;model selection criterion;Gaussian mixture model;computational complexity;SPAWN;wireless network;sum product algorithm;LOS-NLOS environment;cooperative localization algorithm;Approximation methods;Signal processing algorithms;Complexity theory;Signal processing;Indexes;Parametric statistics;Europe;Cooperative localization;SPAWN;low-complexity;sigma-point methods},\n  doi = {10.1109/EUSIPCO.2015.7362370},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097329.pdf},\n}\n\n
\n
\n\n\n
\n The well-known cooperative localization algorithm, `sum-product algorithm over a wireless network' (SPAWN) has two major shortcomings, a relatively high computational complexity and a large communication load. Using the Gaussian mixture model with a model selection criterion and the sigma-point (SP) methods, we propose the SPAWN-SP to overcome these problems. The SPAWN-SP easily accommodates different localization scenarios due to its high flexibility in message representation. Furthermore, harsh LOS/NLOS environments are considered for the evaluation of cooperative localization algorithms. Our simulation results indicate that the proposed SPAWN-SP demonstrates high localization accuracy in different localization scenarios, thanks to its high flexibility in message representation.\n
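The sigma-point machinery that gives SPAWN-SP its name is the standard unscented transform; a minimal, self-contained version is sketched below (the SPAWN message-passing wiring around it is not shown).

import numpy as np

def unscented(m, P, f, alpha=1.0, beta=2.0, kappa=0.0):
    # Standard unscented transform of a Gaussian belief (m, P) through f.
    n = m.size
    lam = alpha**2 * (n + kappa) - n
    S = np.linalg.cholesky((n + lam) * P)
    X = np.column_stack([m, m[:, None] + S, m[:, None] - S])  # 2n+1 points
    wm = np.full(2 * n + 1, 1.0 / (2 * (n + lam)))
    wc = wm.copy()
    wm[0] = lam / (n + lam)
    wc[0] = wm[0] + 1.0 - alpha**2 + beta
    Y = np.apply_along_axis(f, 0, X)        # propagate each sigma point
    my = Y @ wm
    Py = ((Y - my[:, None]) * wc) @ (Y - my[:, None]).T
    return my, Py

# Example: a range measurement h(x) = ||x|| of a 2-D position belief.
m, P = np.array([3.0, 4.0]), 0.5 * np.eye(2)
my, Py = unscented(m, P, lambda x: np.atleast_1d(np.linalg.norm(x)))
print("propagated mean/cov:", my, Py)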
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bias analysis of an algebraic solution for TDOA localization with sensor location errors.\n \n \n \n \n\n\n \n Yang, A. L.; Guo, B. F.; Le Yang, C.; Min, D. Z.; and Jiang, E. W.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 190-194, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BIASPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362371,\n  author = {A. L. Yang and B. F. Guo and C. {Le Yang} and D. Z. Min and E. W. Jiang},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bias analysis of an algebraic solution for TDOA localization with sensor location errors},\n  year = {2015},\n  pages = {190-194},\n  abstract = {The nonlinearity inherent in the time difference of arrival (TDOA)-based source localization problem leads to biased source location estimates. The estimation bias of the closed-form TDOA positioning technique, the two-stage weighted least squares (TSWLS) method, was established analytically in previous works. This paper extends those theoretical developments, derived for the case without sensor location errors, to investigate the bias of TDOA positioning in the presence of sensor location errors. Specifically, the estimation bias of the algebraic two-stage TDOA localization algorithm is derived. Simulations validate the obtained theoretical results. It is shown that, in contrast to the findings of previous works, where the estimation bias of the two-stage solution mainly comes from its Stage-1 processing, both stages of the localization algorithm considered in this work can introduce significant estimation biases when sensor location errors are present.},\n  keywords = {least squares approximations;time-of-arrival estimation;bias analysis;sensor location errors;time difference of arrival;source localization;two-stage least squares technology;algebraic two-stage TDOA localization algorithm;Phase locked loops;time difference of arrival;bias analysis;source localization;sensor location error},\n  doi = {10.1109/EUSIPCO.2015.7362371},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102605.pdf},\n}\n\n
\n
\n\n\n
\n The nonlinearity inherent in the time difference of arrival (TDOA)-based source localization problem leads to biased source location estimates. The estimation bias of the closed-form TDOA positioning technique, the two-stage weighted least squares (TSWLS) method, was established analytically in previous works. This paper extends those theoretical developments, derived for the case without sensor location errors, to investigate the bias of TDOA positioning in the presence of sensor location errors. Specifically, the estimation bias of the algebraic two-stage TDOA localization algorithm is derived. Simulations validate the obtained theoretical results. It is shown that, in contrast to the findings of previous works, where the estimation bias of the two-stage solution mainly comes from its Stage-1 processing, both stages of the localization algorithm considered in this work can introduce significant estimation biases when sensor location errors are present.\n
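The effect is easy to reproduce in miniature by Monte Carlo: solve a closed-form (Stage-1-style) linearized TDOA system with noisy range differences and perturbed sensor positions, then average the estimation error. All values below are invented; this illustrates the phenomenon, not the paper's TSWLS bias expressions.

import numpy as np

rng = np.random.default_rng(2)
sensors = np.array([[0., 0.], [60., 0.], [0., 70.], [65., 55.], [-40., 45.]])
x_true = np.array([20.0, 30.0])
r = np.linalg.norm(sensors - x_true, axis=1)
d_true = r[1:] - r[0]                      # range differences w.r.t. sensor 0

def stage1(sensors, d):
    # For each i: 2(si - s0)^T x + 2 d_i r0 = ||si||^2 - ||s0||^2 - d_i^2.
    s0, si = sensors[0], sensors[1:]
    A = np.column_stack([2.0 * (si - s0), 2.0 * d])
    b = np.sum(si**2, axis=1) - np.sum(s0**2) - d**2
    return np.linalg.lstsq(A, b, rcond=None)[0][:2]   # drop the r0 unknown

est = np.array([stage1(sensors + rng.normal(0, 0.5, sensors.shape),
                       d_true + rng.normal(0, 0.3, d_true.size))
                for _ in range(10000)])
print("Monte Carlo bias (m):", est.mean(axis=0) - x_true)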
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Selective angle measurements for a 3D-AOA instrumental variable TMA algorithm.\n \n \n \n \n\n\n \n Doğançay, K.; and Arablouei, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 195-199, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SelectivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362372,\n  author = {K. Doğançay and R. Arablouei},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Selective angle measurements for a 3D-AOA instrumental variable TMA algorithm},\n  year = {2015},\n  pages = {195-199},\n  abstract = {The method of instrumental variables has been successfully applied to pseudolinear estimation for angle-of-arrival target motion analysis (TMA). The objective of instrumental variables is to modify the normal equations of a biased least-squares estimator to make it asymptotically unbiased. The instrumental variable (IV) matrix, used in the modified normal equations, is required to be strongly correlated with the data matrix and uncorrelated with the noise in the measurement vector. At small SNR, the correlation between the IV matrix and the data matrix can become weak. The concept of selective angle measurements (SAM) overcomes this problem by allowing some rows of the IV matrix and data matrix to be identical. This paper demonstrates the effectiveness of SAM for a previously proposed 3D angle-only IV TMA algorithm. The performance improvement of SAM is verified by simulation examples.},\n  keywords = {angular measurement;correlation methods;direction-of-arrival estimation;least squares approximations;matrix algebra;selective angle measurement;3D-AOA instrumental variable TMA algorithm;pseudolinear estimation;angle-of-arrival target motion analysis;biased least square estimator;IV matrix;modified normal equation;data matrix;SNR;SAM;Maximum likelihood estimation;Noise measurement;Correlation;Three-dimensional displays;Azimuth;Europe;Signal processing;Selective angle measurements;3D target motion analysis;angle-of-arrival localization;instrumental variables},\n  doi = {10.1109/EUSIPCO.2015.7362372},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096189.pdf},\n}\n\n
\n
\n\n\n
\n The method of instrumental variables has been successfully applied to pseudolinear estimation for angle-of-arrival target motion analysis (TMA). The objective of instrumental variables is to modify the normal equations of a biased least-squares estimator to make it asymptotically unbiased. The instrumental variable (IV) matrix, used in the modified normal equations, is required to be strongly correlated with the data matrix and uncorrelated with the noise in the measurement vector. At small SNR, the correlation between the IV matrix and the data matrix can become weak. The concept of selective angle measurements (SAM) overcomes this problem by allowing some rows of the IV matrix and data matrix to be identical. This paper demonstrates the effectiveness of SAM for a previously proposed 3D angle-only IV TMA algorithm. The performance improvement of SAM is verified by simulation examples.\n
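The IV mechanism itself is compact enough to show generically: least squares is biased when the data matrix shares noise with the observation vector, while an instrument matrix correlated with the data but not with the noise restores consistency. The synthetic example below is only a sketch under those assumptions; the paper's 3D-AOA construction of the IV matrix and the SAM row selection are not reproduced.

import numpy as np

rng = np.random.default_rng(3)
theta = np.array([1.0, -2.0])
A_clean = rng.normal(size=(5000, 2))
noise = rng.normal(scale=0.5, size=5000)
A = A_clean + 0.8 * noise[:, None]          # data matrix contaminated by noise
b = A_clean @ theta + noise                 # observations share the same noise
G = A_clean + rng.normal(scale=0.5, size=A.shape)  # instruments: clean core

theta_ls = np.linalg.lstsq(A, b, rcond=None)[0]    # biased
theta_iv = np.linalg.solve(G.T @ A, G.T @ b)       # asymptotically unbiased
print("LS:", theta_ls, " IV:", theta_iv, " true:", theta)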
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive clustering for multitask diffusion networks.\n \n \n \n \n\n\n \n Chen, J.; Richard, C.; and Sayed, A. H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 200-204, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362373,\n  author = {J. Chen and C. Richard and A. H. Sayed},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive clustering for multitask diffusion networks},\n  year = {2015},\n  pages = {200-204},\n  abstract = {Diffusion LMS was originally conceived for online distributed parameter estimation in single-task environments where agents pursue a common objective. However, estimating distinct but correlated objects (multitask problems) is useful in many applications. To address multitask problems with combine-then-adapt diffusion LMS strategies, we derive an unsupervised strategy that allows each node to continuously select the neighboring nodes with which it should exchange information to improve its estimation accuracy. Simulation experiments illustrate the efficiency of this clustering strategy. In particular, nodes do not know which other nodes share similar objectives.},\n  keywords = {parameter estimation;signal processing;single-task environments;distributed parameter estimation;diffusion LMS;multitask diffusion networks;adaptive clustering;Least squares approximations;Estimation;Signal processing algorithms;Clustering algorithms;Europe;Signal processing;Diffusion LMS;combine-then-adapt;multitask problems;adaptive network;online learning;distributed learning},\n  doi = {10.1109/EUSIPCO.2015.7362373},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104581.pdf},\n}\n\n
\n
\n\n\n
\n Diffusion LMS was originally conceived for online distributed parameter estimation in single-task environments where agents pursue a common objective. However, estimating distinct but correlated objects (multitask problems) is useful in many applications. To address multitask problems with combine-then-adapt diffusion LMS strategies, we derive an unsupervised strategy that allows each node to continuously select the neighboring nodes with which it should exchange information to improve its estimation accuracy. Simulation experiments illustrate the efficiency of this clustering strategy. In particular, nodes do not know which other nodes share similar objectives.\n
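A minimal combine-then-adapt diffusion LMS loop is sketched below for a single shared task and a static, fully connected combination matrix; the paper's actual contribution, learning the combination weights (the clustering) online, is deliberately left out.

import numpy as np

rng = np.random.default_rng(4)
N, M, mu = 10, 2, 0.01
w_true = rng.normal(size=M)                     # common task for all nodes
A = np.full((N, N), 1.0 / N)                    # static doubly stochastic combiner
W = np.zeros((N, M))                            # per-node estimates

for _ in range(2000):
    psi = A @ W                                 # combine neighbours' estimates
    u = rng.normal(size=(N, M))                 # per-node regressors
    d = u @ w_true + 0.1 * rng.normal(size=N)   # noisy measurements
    W = psi + mu * (d - np.sum(u * psi, axis=1))[:, None] * u   # adapt
print("mean deviation from w_true:", np.linalg.norm(W - w_true, axis=1).mean())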
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Environmental field estimation by consensus based dynamic sensor networks and underwater gliders.\n \n \n \n \n\n\n \n Grasso, R.; Braca, P.; Fortunati, S.; Gini, F.; and Greco, M. S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 205-209, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"EnvironmentalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362374,\n  author = {R. Grasso and P. Braca and S. Fortunati and F. Gini and M. S. Greco},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Environmental field estimation by consensus based dynamic sensor networks and underwater gliders},\n  year = {2015},\n  pages = {205-209},\n  abstract = {A coordinated dynamic sensor network of autonomous underwater gliders to estimate 3D time-varying environmental fields is proposed and tested. Each sensor performs local Kalman filter sequential field estimation. A network of surface relay nodes and asynchronous consensus are used to distribute local information among all nodes so that they can converge to an estimate of the global field. Tests using data from real oceanographic forecast models demonstrate the feasibility of the approach with relative error performance within 10%.},\n  keywords = {oceanographic equipment;oceanographic techniques;environmental field estimation;consensus based dynamic sensor networks;coordinated dynamic sensor network;autonomous underwater gliders;3D time-varying environmental fields;local Kalman filter sequential field estimation;surface relay nodes;asynchronous consensus;real oceanographic forecast models;Estimation;Network topology;Ocean temperature;Topology;Switches;Covariance matrices;Convergence;Sensor networks;consensus;distributed estimation;autonomous underwater vehicles},\n  doi = {10.1109/EUSIPCO.2015.7362374},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103687.pdf},\n}\n\n
\n
\n\n\n
\n A coordinated dynamic sensor network of autonomous underwater gliders to estimate 3D time-varying environmental fields is proposed and tested. Each sensor performs local Kalman filter sequential field estimation. A network of surface relay nodes and asynchronous consensus are used to distribute local information among all nodes so that they can converge to an estimate of the global field. Tests using data from real oceanographic forecast models demonstrate the feasibility of the approach with relative error performance within 10%.\n
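The consensus building block is simple to illustrate: repeated averaging with a doubly stochastic weight matrix drives every node to the network-wide mean of the local estimates. All numbers below are invented.

import numpy as np

x = np.array([14.2, 15.1, 13.8, 14.9, 15.4])    # local field estimates
W = np.array([[.6, .2, .0, .2, .0],
              [.2, .6, .2, .0, .0],
              [.0, .2, .6, .0, .2],
              [.2, .0, .0, .6, .2],
              [.0, .0, .2, .2, .6]])            # symmetric, rows sum to 1
for _ in range(100):
    x = W @ x                                   # local exchanges only
print(x)                                        # every entry -> 14.68, the mean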
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Interpolation of graph signals using shift-invariant graph filters.\n \n \n \n \n\n\n \n Segarra, S.; Marques, A. G.; Leus, G.; and Ribeiro, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 210-214, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"InterpolationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362375,\n  author = {S. Segarra and A. G. Marques and G. Leus and A. Ribeiro},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Interpolation of graph signals using shift-invariant graph filters},\n  year = {2015},\n  pages = {210-214},\n  abstract = {New schemes to recover signals defined in the nodes of a graph are proposed. Our focus is on reconstructing bandlimited graph signals, which are signals that admit a sparse representation in a frequency domain related to the structure of the graph. The schemes are designed within the framework of linear shift-invariant graph filters and consider that the seeding signals are injected only at a subset of interpolating nodes. After several sequential applications of the graph-shift operator - which computes linear combinations of the information available at neighboring nodes - the seeding signals are diffused across the graph and the original bandlimited signal is eventually recovered. Conditions under which the recovery is feasible are given, and the corresponding schemes to recover the signal are proposed. Connections with the classical interpolation in the time domain are also discussed.},\n  keywords = {bandlimited signals;compressed sensing;filtering theory;graph theory;interpolation;signal reconstruction;graph signals interpolation;signal recovery;sparse representation;linear shift-invariant graph filters;graph-shift operator;Interpolation;Time-domain analysis;Signal processing;Europe;Frequency-domain analysis;Eigenvalues and eigenfunctions;Kernel;Graph signal processing;Interpolation;Signal reconstruction;Graph shift operator;Graph filter},\n  doi = {10.1109/EUSIPCO.2015.7362375},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105183.pdf},\n}\n\n
\n
\n\n\n
\n New schemes to recover signals defined in the nodes of a graph are proposed. Our focus is on reconstructing bandlimited graph signals, which are signals that admit a sparse representation in a frequency domain related to the structure of the graph. The schemes are designed within the framework of linear shift-invariant graph filters and consider that the seeding signals are injected only at a subset of interpolating nodes. After several sequential applications of the graph-shift operator - which computes linear combinations of the information available at neighboring nodes - the seeding signals are diffused across the graph and the original bandlimited signal is eventually recovered. Conditions under which the recovery is feasible are given, and the corresponding schemes to recover the signal are proposed. Connections with the classical interpolation in the time domain are also discussed.\n
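A toy instance of the mechanism: inject a delta at a single seeding node of a path graph (chosen because its adjacency matrix has distinct eigenvalues), diffuse it with successive graph shifts, and fit the filter taps by least squares so that the filtered seed reproduces a bandlimited target. This sketches the diffusion idea only; the paper's recovery conditions and designs are not reproduced.

import numpy as np

rng = np.random.default_rng(5)
N = 8
S = np.zeros((N, N))
for i in range(N - 1):
    S[i, i + 1] = S[i + 1, i] = 1.0            # path graph as shift operator

lam, V = np.linalg.eigh(S)
x = V[:, -3:] @ rng.normal(size=3)             # bandlimited target (3 modes)

seed = np.zeros(N); seed[0] = 1.0              # seeding signal at node 0 only
K = np.column_stack([np.linalg.matrix_power(S, l) @ seed for l in range(N)])
h, *_ = np.linalg.lstsq(K, x, rcond=None)      # filter taps h_0 .. h_{N-1}
print("recovery error:", np.linalg.norm(K @ h - x))   # numerically zero here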
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed network topology reconstruction in presence of anonymous nodes.\n \n \n \n \n\n\n \n Tran, T.; and Kibangou, A. Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 215-219, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362376,\n  author = {T. Tran and A. Y. Kibangou},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed network topology reconstruction in presence of anonymous nodes},\n  year = {2015},\n  pages = {215-219},\n  abstract = {This paper concerns the problem of reconstructing the network topology from data propagated through the network by means of an average consensus protocol. The proposed method is based on the distributed estimation of graph Laplacian spectral properties. Precisely, the identification of the network topology is implemented by estimating both eigenvalues and eigenvectors of the consensus matrix, which is related to the graph Laplacian matrix. In this paper, we focus the exposition on the estimation of the eigenvectors, since the eigenvalues estimation can be achieved based on recent results of the literature using the same kind of data. We show how the topology can be reconstructed in the presence of anonymous nodes, i.e. nodes that do not disclose their ID.},\n  keywords = {eigenvalues and eigenfunctions;protocols;telecommunication network topology;distributed network topology reconstruction;anonymous nodes;average consensus protocol;distributed estimation;graph Laplacian spectral properties;eigenvalues;eigenvectors;Network topology;Eigenvalues and eigenfunctions;Laplace equations;Symmetric matrices;Matrix decomposition;Topology;Protocols;Network topology Reconstruction;Graph Laplacian spectrum;Eigenvectors;Anonymous nodes;Average Consensus},\n  doi = {10.1109/EUSIPCO.2015.7362376},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105089.pdf},\n}\n\n
\n
\n\n\n
\n This paper concerns the problem of reconstructing the network topology from data propagated through the network by means of an average consensus protocol. The proposed method is based on the distributed estimation of graph Laplacian spectral properties. Precisely, the identification of the network topology is implemented by estimating both eigenvalues and eigenvectors of the consensus matrix, which is related to the graph Laplacian matrix. In this paper, we focus the exposition on the estimation of the eigenvectors, since the eigenvalues estimation can be achieved based on recent results of the literature using the same kind of data. We show how the topology can be reconstructed in the presence of anonymous nodes, i.e. nodes that do not disclose their ID.\n
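If the consensus iterates x_{t+1} = W x_t can be observed, W, and with it the Laplacian and the edge set, is identifiable from data. The sketch below does this centrally with least squares on before/after state pairs; the paper's point is to obtain the same spectral information distributedly, without this centralized shortcut.

import numpy as np

rng = np.random.default_rng(6)
L = np.array([[ 2., -1., -1.,  0.],
              [-1.,  2.,  0., -1.],
              [-1.,  0.,  2., -1.],
              [ 0., -1., -1.,  2.]])            # ring-graph Laplacian
eps = 0.2
W = np.eye(4) - eps * L                         # consensus matrix

X0 = rng.normal(size=(4, 20))                   # states before one iteration
X1 = W @ X0                                     # states after one iteration
W_hat = np.linalg.lstsq(X0.T, X1.T, rcond=None)[0].T
L_hat = (np.eye(4) - W_hat) / eps
print(np.round(L_hat, 6))                       # recovers L, hence the topology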
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Graph linear prediction results in smaller error than standard linear prediction.\n \n \n \n \n\n\n \n Venkitaraman, A.; Chatterjee, S.; and Händel, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 220-224, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"GraphPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362377,\n  author = {A. Venkitaraman and S. Chatterjee and P. Händel},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Graph linear prediction results in smaller error than standard linear prediction},\n  year = {2015},\n  pages = {220-224},\n  abstract = {Linear prediction is a popular strategy employed in the analysis and representation of signals. In this paper, we propose a new linear prediction approach by considering the standard linear prediction in the context of graph signal processing, which has gained significant attention recently. We view the signal to be defined on the nodes of a graph with an adjacency matrix constructed using the coefficients of the standard linear predictor (SLP). We prove theoretically that the graph based linear prediction approach results in an equal or better performance compared with the SLP in terms of the prediction gain. We illustrate the proposed concepts by application to real speech signals.},\n  keywords = {graph theory;prediction theory;speech processing;graph linear prediction;graph signal processing;adjacency matrix;speech signal;Signal processing;Standards;Speech;Europe;Minimization;Predictive models;Fourier transforms;Linear prediction;Graph signal processing;autoregressive model},\n  doi = {10.1109/EUSIPCO.2015.7362377},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104723.pdf},\n}\n\n
\n
\n\n\n
\n Linear prediction is a popular strategy employed in the analysis and representation of signals. In this paper, we propose a new linear prediction approach by considering the standard linear prediction in the context of graph signal processing, which has gained significant attention recently. We view the signal to be defined on the nodes of a graph with an adjacency matrix constructed using the coefficients of the standard linear predictor (SLP). We prove theoretically that the graph based linear prediction approach results in an equal or better performance compared with the SLP in terms of the prediction gain. We illustrate the proposed concepts by application to real speech signals.\n
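The SLP baseline referred to above takes only a few lines: fit the predictor coefficients by least squares and report the prediction gain. The AR(2) signal below is an invented stand-in for speech.

import numpy as np

rng = np.random.default_rng(7)
n, p = 4000, 2
x = np.zeros(n)
for t in range(2, n):                           # synthetic AR(2) signal
    x[t] = 1.3 * x[t - 1] - 0.6 * x[t - 2] + rng.normal(scale=0.1)

A = np.column_stack([x[p - k - 1:n - k - 1] for k in range(p)])
a, *_ = np.linalg.lstsq(A, x[p:], rcond=None)   # SLP coefficients
e = x[p:] - A @ a                               # prediction residual
gain_db = 10 * np.log10(np.var(x[p:]) / np.var(e))
print("SLP coefficients:", a, " prediction gain (dB):", round(gain_db, 1))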
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Application of cyclic block generalized gradient projection methods to Poisson blind deconvolution.\n \n \n \n \n\n\n \n Rebegoldi, S.; Bonettini, S.; and Prato, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 225-229, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ApplicationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362378,\n  author = {S. Rebegoldi and S. Bonettini and M. Prato},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Application of cyclic block generalized gradient projection methods to Poisson blind deconvolution},\n  year = {2015},\n  pages = {225-229},\n  abstract = {The aim of this paper is to consider a modification of a block coordinate gradient projection method with Armijo linesearch along the descent direction, in which the projection on the feasible set is performed according to a variable non-Euclidean metric. The stationarity of the limit points of the resulting scheme has recently been proved under some general assumptions on the generalized gradient projections employed. Here we test some examples of methods belonging to this class on a blind deconvolution problem with data affected by Poisson noise, and we illustrate the impact of the choice of the projection operator on the practical performance of the corresponding algorithm.},\n  keywords = {blind source separation;deconvolution;gradient methods;stochastic processes;Poisson blind deconvolution;cyclic block generalized gradient projection methods application;Armijo linesearch;descent direction;variable nonEuclidean metric;Poisson noise;Decision support systems;Europe;Signal processing;Indexes;Silicon;Programming;Constrained optimization;gradient projection methods;alternating algorithms;nonconvex optimization},\n  doi = {10.1109/EUSIPCO.2015.7362378},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103429.pdf},\n}\n\n
\n
\n\n\n
\n The aim of this paper is to consider a modification of a block coordinate gradient projection method with Armijo linesearch along the descent direction, in which the projection on the feasible set is performed according to a variable non-Euclidean metric. The stationarity of the limit points of the resulting scheme has recently been proved under some general assumptions on the generalized gradient projections employed. Here we test some examples of methods belonging to this class on a blind deconvolution problem with data affected by Poisson noise, and we illustrate the impact of the choice of the projection operator on the practical performance of the corresponding algorithm.\n
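One of the building blocks the paper generalizes, a gradient projection step with Armijo backtracking on the Poisson (Kullback-Leibler) data term, can be sketched as follows; the cyclic block structure and the variable non-Euclidean metric, which are the paper's subject, are omitted.

import numpy as np

rng = np.random.default_rng(8)
m, n = 40, 20
A = rng.random((m, n))
x_true = rng.random(n)
y = rng.poisson(A @ x_true).astype(float)

def kl(x):                                      # Poisson neg. log-likelihood
    Ax = A @ x
    return np.sum(Ax - y * np.log(Ax))

def grad(x):
    return A.T @ (1.0 - y / (A @ x))

x = np.ones(n)
for _ in range(200):
    g, f0, t = grad(x), kl(x), 1.0
    while True:                                 # Armijo backtracking
        z = np.maximum(x - t * g, 1e-9)         # projection on x >= eps
        if kl(z) <= f0 + 1e-4 * g @ (z - x) or t < 1e-12:
            break
        t *= 0.5
    x = z
print("final KL objective:", kl(x))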
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Maximum-a-posteriori estimation with unknown regularisation parameters.\n \n \n \n \n\n\n \n Pereyra, M.; Bioucas-Dias, J. M.; and Figueiredo, M. A. T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 230-234, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Maximum-a-posterioriPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362379,\n  author = {M. Pereyra and J. M. Bioucas-Dias and M. A. T. Figueiredo},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Maximum-a-posteriori estimation with unknown regularisation parameters},\n  year = {2015},\n  pages = {230-234},\n  abstract = {This paper presents two hierarchical Bayesian methods for performing maximum-a-posteriori inference when the value of the regularisation parameter is unknown. The methods are useful for models with homogeneous regularisers (i.e., prior sufficient statistics), including all norms, composite norms and compositions of norms with linear operators. A key contribution of this paper is to show that for these models the normalisation factor of the prior has a closed-form analytic expression. This then enables the development of Bayesian inference techniques to either estimate regularisation parameters from the observed data or, alternatively, to remove them from the model by marginalisation followed by inference with the marginalised model. The effectiveness of the proposed methodologies is illustrated on applications to compressive sensing using an l1-wavelet analysis prior, where they outperform a state-of-the-art SURE-based technique, both in terms of estimation accuracy and computing time.},\n  keywords = {Bayes methods;compressed sensing;image processing;inference mechanisms;maximum likelihood estimation;wavelet transforms;unknown regularisation parameter estimation;maximum-a-posteriori estimation;hierarchical Bayesian method;maximum-a-posteriori inference;closed-form analytic expression;Bayesian inference technique;marginalised model;compressive sensing;l1-wavelet analysis;Bayes methods;Estimation;Signal processing;Computational modeling;Compressed sensing;Signal processing algorithms;Approximation methods;regularisation parameters;maximum-a-posteriori estimation;hierarchical Bayesian inference;inverse problems;statistical signal processing},\n  doi = {10.1109/EUSIPCO.2015.7362379},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104239.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents two hierarchical Bayesian methods for performing maximum-a-posteriori inference when the value of the regularisation parameter is unknown. The methods are useful for models with homogeneous regularisers (i.e., prior sufficient statistics), including all norms, composite norms and compositions of norms with linear operators. A key contribution of this paper is to show that for these models the normalisation factor of the prior has a closed-form analytic expression. This then enables the development of Bayesian inference techniques to either estimate regularisation parameters from the observed data or, alternatively, to remove them from the model by marginalisation followed by inference with the marginalised model. The effectiveness of the proposed methodologies is illustrated on applications to compressive sensing using an l1-wavelet analysis prior, where they outperform a state-of-the-art SURE-based technique, both in terms of estimation accuracy and computing time.\n
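The homogeneity argument behind that closed-form normalisation factor fits in one display. If the regulariser \varphi is positively homogeneous of degree k, so that \lambda\varphi(x) = \varphi(\lambda^{1/k}x) for \lambda > 0, then substituting u = \lambda^{1/k}x in the normalising integral gives

Z(\lambda) = \int_{\mathbb{R}^n} e^{-\lambda\varphi(x)}\,\mathrm{d}x = \int_{\mathbb{R}^n} e^{-\varphi(\lambda^{1/k}x)}\,\mathrm{d}x = \lambda^{-n/k}\, Z(1),

so the prior's dependence on \lambda is known exactly up to the constant Z(1), which cancels in the resulting update rules; for a norm (k = 1) the factor is simply \lambda^{-n}.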
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A parallel block-coordinate approach for primal-dual splitting with arbitrary random block selection.\n \n \n \n \n\n\n \n Repetti, A.; Chouzenoux, E.; and Pesquet, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 235-239, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362380,\n  author = {A. Repetti and E. Chouzenoux and J. Pesquet},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A parallel block-coordinate approach for primal-dual splitting with arbitrary random block selection},\n  year = {2015},\n  pages = {235-239},\n  abstract = {The solution of many applied problems relies on finding the minimizer of a sum of smooth and/or nonsmooth convex functions possibly involving linear operators. In recent years, primal-dual methods have shown their efficiency in solving such minimization problems, their main advantage being their ability to deal with linear operators with no need to invert them. However, when the problem size becomes increasingly large, the implementation of these algorithms can be complicated, due to memory limitation issues. A simple way to overcome this difficulty consists of splitting the numerous original variables into blocks of smaller dimension, corresponding to the available memory, and processing them separately. In this paper we propose a random block-coordinate primal-dual algorithm, converging almost surely to a solution to the considered minimization problem. Moreover, an application to large-size 3D mesh denoising is provided to show the numerical efficiency of our method.},\n  keywords = {image denoising;minimisation;numerical analysis;parallel block-coordinate approach;primal-dual splitting;arbitrary random block selection;nonsmooth convex functions;linear operators;primal-dual methods;random block-coordinate primal-dual algorithm;minimization problem;3D mesh denoising;numerical efficiency;Signal processing algorithms;Three-dimensional displays;Noise reduction;Convex functions;Convergence;Europe;Signal processing;convex optimization;nonsmooth optimization;primal-dual algorithm;stochastic algorithm;parallel algorithm;random block-coordinate approach;proximity operator;mesh denoising},\n  doi = {10.1109/EUSIPCO.2015.7362380},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104633.pdf},\n}\n\n
\n
\n\n\n
\n The solution of many applied problems relies on finding the minimizer of a sum of smooth and/or nonsmooth convex functions possibly involving linear operators. In recent years, primal-dual methods have shown their efficiency in solving such minimization problems, their main advantage being their ability to deal with linear operators with no need to invert them. However, when the problem size becomes increasingly large, the implementation of these algorithms can be complicated, due to memory limitation issues. A simple way to overcome this difficulty consists of splitting the numerous original variables into blocks of smaller dimension, corresponding to the available memory, and processing them separately. In this paper we propose a random block-coordinate primal-dual algorithm, converging almost surely to a solution to the considered minimization problem. Moreover, an application to large-size 3D mesh denoising is provided to show the numerical efficiency of our method.\n
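A deterministic primal-dual iteration of the Chambolle-Pock type being randomized here is sketched below on a small total-variation-like problem; note that the operator K is only applied, never inverted. The random block selection, the paper's actual subject, is omitted, and all sizes are arbitrary.

import numpy as np

rng = np.random.default_rng(9)
n, lam = 50, 0.5
b = np.cumsum(rng.normal(size=n))               # noisy piecewise-smooth data
K = np.eye(n, k=1)[:-1] - np.eye(n)[:-1]        # finite-difference operator
tau = sigma = 0.9 / np.linalg.norm(K, 2)        # ensures tau*sigma*||K||^2 < 1

x = x_bar = np.zeros(n)
y = np.zeros(n - 1)
for _ in range(500):   # min_x 0.5||x - b||^2 + lam*||Kx||_1
    y = np.clip(y + sigma * K @ x_bar, -lam, lam)      # prox of (lam|.|_1)*
    x_new = (x - tau * K.T @ y + tau * b) / (1 + tau)  # prox of 0.5||.-b||^2
    x_bar = 2 * x_new - x
    x = x_new
print("total variation of the solution:", np.abs(K @ x).sum())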
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A primal-dual framework for mixtures of regularizers.\n \n \n \n \n\n\n \n Gozcü, B.; Baldassarre, L.; Tran-Dinh, Q.; Aprile, C.; and Cevher, V.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 240-244, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362381,\n  author = {B. Gozcü and L. Baldassarre and Q. Tran-Dinh and C. Aprile and V. Cevher},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A primal-dual framework for mixtures of regularizers},\n  year = {2015},\n  pages = {240-244},\n  abstract = {Effectively solving many inverse problems in engineering requires leveraging all possible prior information about the structure of the signal to be estimated. This often leads to tackling constrained optimization problems with mixtures of regularizers. Providing a general-purpose optimization algorithm for these cases, with both a guaranteed convergence rate and a fast implementation, remains an important challenge. In this paper, we describe how a recent primal-dual algorithm for non-smooth constrained optimization can be successfully used to tackle these problems. Its simple iterations can be easily parallelized, allowing very efficient computations. Furthermore, the algorithm is guaranteed to achieve an optimal convergence rate for this class of problems. We illustrate its performance on two problems: a compressive magnetic resonance imaging application and an approach for improving the quality of analog-to-digital conversion of amplitude-modulated signals.},\n  keywords = {analogue-digital conversion;compressed sensing;estimation theory;inverse problems;magnetic resonance imaging;optimisation;amplitude-modulated signals;analog-to-digital conversion;compressive magnetic resonance imaging;nonsmooth constrained optimization;primal-dual algorithm;general purpose optimization algorithm;inverse problems;regularizer mixtures;Signal processing algorithms;Magnetic resonance imaging;Optimization;Convergence;Europe;Signal processing;Inverse problems;Constrained Non-Smooth Optimization;Inverse Problems;Compressive Sensing;MRI;ADC},\n  doi = {10.1109/EUSIPCO.2015.7362381},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105073.pdf},\n}\n\n
\n
\n\n\n
\n Effectively solving many inverse problems in engineering requires leveraging all possible prior information about the structure of the signal to be estimated. This often leads to tackling constrained optimization problems with mixtures of regularizers. Providing a general-purpose optimization algorithm for these cases, with both a guaranteed convergence rate and a fast implementation, remains an important challenge. In this paper, we describe how a recent primal-dual algorithm for non-smooth constrained optimization can be successfully used to tackle these problems. Its simple iterations can be easily parallelized, allowing very efficient computations. Furthermore, the algorithm is guaranteed to achieve an optimal convergence rate for this class of problems. We illustrate its performance on two problems: a compressive magnetic resonance imaging application and an approach for improving the quality of analog-to-digital conversion of amplitude-modulated signals.\n
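A mixture of regularizers fits the same primal-dual template by keeping one dual variable per regularizer. The sketch below, with invented sizes and weights, solves min_x 0.5||x - b||^2 + l1*||Dx||_1 + l2*||x||_1; it only illustrates the splitting, not the paper's algorithm or applications.

import numpy as np

rng = np.random.default_rng(10)
n, l1, l2 = 50, 0.4, 0.1
b = np.cumsum(rng.normal(size=n))
D = np.eye(n, k=1)[:-1] - np.eye(n)[:-1]        # finite-difference operator
Lnorm = np.sqrt(np.linalg.norm(D, 2)**2 + 1.0)  # bound on ||[D; I]||
tau = sigma = 0.9 / Lnorm

x = x_bar = np.zeros(n)
y1, y2 = np.zeros(n - 1), np.zeros(n)           # one dual per regularizer
for _ in range(500):
    y1 = np.clip(y1 + sigma * D @ x_bar, -l1, l1)
    y2 = np.clip(y2 + sigma * x_bar, -l2, l2)
    x_new = (x - tau * (D.T @ y1 + y2) + tau * b) / (1 + tau)
    x_bar = 2 * x_new - x
    x = x_new
obj = 0.5 * np.sum((x - b)**2) + l1 * np.abs(D @ x).sum() + l2 * np.abs(x).sum()
print("objective value:", obj)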
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance analysis of MVDR beamformer in WASN with sampling rate offsets and blind synchronization.\n \n \n \n \n\n\n \n Cherkassky, D.; Markovich-Golan, S.; and Gannot, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 245-249, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362382,\n  author = {D. Cherkassky and S. Markovich-Golan and S. Gannot},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Performance analysis of MVDR beamformer in WASN with sampling rate offsets and blind synchronization},\n  year = {2015},\n  pages = {245-249},\n  abstract = {In wireless acoustic sensor networks (WASNs), sampling rate offsets (SROs) between nodes are inevitable, and recognized as one of the challenges that have to be resolved for coherent array processing. A simplified free-space propagation model is considered, with a single desired source impinging on the WASN from the far field and contaminated by diffuse noise. In this paper, we analyze the theoretical performance of a fixed superdirective beamformer (SDBF) in the presence of SROs. The SDBF performance loss due to SROs is manifested as a distortion of the nominal beampattern and an excess noise power at the output of the beamformer. We also propose an iterative algorithm for SRO estimation. The theoretical results are validated by simulation.},\n  keywords = {array signal processing;iterative methods;signal sampling;synchronisation;wireless sensor networks;wireless acoustic sensor networks;WASN;sampling rate offsets;SRO;coherent array processing;simplified free-space propagation;far-field;diffuse noise;fixed superdirective beamformer;fixed SDBF;nominal beampattern;iterative algorithm;blind synchronization;Arrays;Microphones;Speech;Synchronization;Signal processing algorithms;Indexes;Estimation;Blind synchronization;Wireless acoustic sensor network;Sampling rate offset},\n  doi = {10.1109/EUSIPCO.2015.7362382},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096911.pdf},\n}\n\n
\n
\n\n\n
\n In wireless acoustic sensor networks (WASNs), sampling rate offsets (SROs) between nodes are inevitable, and recognized as one of the challenges that have to be resolved for coherent array processing. A simplified free-space propagation model is considered, with a single desired source impinging on the WASN from the far field and contaminated by diffuse noise. In this paper, we analyze the theoretical performance of a fixed superdirective beamformer (SDBF) in the presence of SROs. The SDBF performance loss due to SROs is manifested as a distortion of the nominal beampattern and an excess noise power at the output of the beamformer. We also propose an iterative algorithm for SRO estimation. The theoretical results are validated by simulation.\n
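The fixed SDBF under analysis is an MVDR beamformer designed against a spherically diffuse noise coherence matrix. A single-frequency sketch with an assumed 4-microphone linear array follows; the SRO perturbation analysis itself is not coded here.

import numpy as np

c, f = 343.0, 1000.0                        # speed of sound (m/s), frequency
pos = np.array([0.0, 0.05, 0.10, 0.15])    # assumed mic positions (m)
d = np.exp(-2j * np.pi * f * pos / c)       # steering vector, endfire source

dist = np.abs(pos[:, None] - pos[None, :])
Gamma = np.sinc(2 * f * dist / c)           # diffuse-field coherence matrix
Gamma = Gamma + 1e-3 * np.eye(len(pos))     # diagonal loading

w = np.linalg.solve(Gamma, d)               # MVDR: Gamma^{-1} d, then ...
w = w / (d.conj() @ w)                      # ... normalize so that w^H d = 1
print("white-noise gain (dB):", 10 * np.log10(1.0 / np.real(w.conj() @ w)))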
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Real-time loudspeaker distance estimation with stereo audio.\n \n \n \n \n\n\n \n Nielsen, J. K.; Gaubitch, N. D.; Heusdens, R.; Martinez, J.; Jensen, T. L.; and Jensen, S. H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 250-254, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Real-timePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362383,\n  author = {J. K. Nielsen and N. D. Gaubitch and R. Heusdens and J. Martinez and T. L. Jensen and S. H. Jensen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Real-time loudspeaker distance estimation with stereo audio},\n  year = {2015},\n  pages = {250-254},\n  abstract = {Knowledge of how a number of loudspeakers are positioned relative to a listening position can be used to enhance the listening experience. Usually, these loudspeaker positions are estimated using calibration signals, either audible or psycho-acoustically hidden inside the desired audio signal. In this paper, we propose to use the desired audio signal instead. Specifically, we treat the case of estimating the distance between two loudspeakers playing back a stereo music or speech signal. In this connection, we develop a real-time maximum likelihood estimator and demonstrate that it has a variance in the millimetre range in a real environment for even a modest sampling frequency.},\n  keywords = {audio signal processing;loudspeakers;maximum likelihood estimation;real-time loudspeaker distance estimation;stereo audio signal;listening position;calibration signals;real-time maximum likelihood estimator;modest sampling frequency;Loudspeakers;Transceivers;Delays;Calibration;Microphones;Europe;Signal processing;Loudspeaker localisation;distance estimation;time-of-arrival estimation},\n  doi = {10.1109/EUSIPCO.2015.7362383},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101929.pdf},\n}\n\n
\n
\n\n\n
\n Knowledge of how a number of loudspeakers are positioned relative to a listening position can be used to enhance the listening experience. Usually, these loudspeaker positions are estimated using calibration signals, either audible or psycho-acoustically hidden inside the desired audio signal. In this paper, we propose to use the desired audio signal instead. Specifically, we treat the case of estimating the distance between two loudspeakers playing back a stereo music or speech signal. In this connection, we develop a real-time maximum likelihood estimator and demonstrate that it has a variance in the millimetre range in a real environment for even a modest sampling frequency.\n
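A classic ingredient for this kind of distance measurement is time-delay estimation with GCC-PHAT, sketched below on synthetic signals. The paper's estimator is more elaborate (maximum likelihood on the stereo programme material itself); everything below is an assumption for illustration.

import numpy as np

rng = np.random.default_rng(11)
true_delay = 23                                  # samples
s = rng.normal(size=4096)
x1 = s
x2 = np.roll(s, true_delay) + 0.1 * rng.normal(size=s.size)

n = 2 * s.size
X1, X2 = np.fft.rfft(x1, n), np.fft.rfft(x2, n)
G = X2 * np.conj(X1)
r = np.fft.irfft(G / np.maximum(np.abs(G), 1e-12), n)  # PHAT weighting
lags = np.arange(n); lags[lags > n // 2] -= n          # wrap to +/- lags
print("estimated delay (samples):", lags[np.argmax(r)])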
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n DOA-estimation based on a complex Watson kernel method.\n \n \n \n \n\n\n \n Drude, L.; Jacob, F.; and Haeb-Umbach, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 255-259, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DOA-estimationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362384,\n  author = {L. Drude and F. Jacob and R. Haeb-Umbach},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {DOA-estimation based on a complex Watson kernel method},\n  year = {2015},\n  pages = {255-259},\n  abstract = {This contribution presents a Direction of Arrival (DoA) estimation algorithm based on the complex Watson distribution to incorporate both phase and level differences of captured microphone array signals. The derived algorithm is reviewed in the context of the Generalized State Coherence Transform (GSCT) on the one hand and a kernel density estimation method on the other hand. A thorough simulative evaluation yields insight into parameter selection and provides details on the performance for both directional and omni-directional microphones. A comparison to the well-known Steered Response Power with Phase Transform (SRP-PHAT) algorithm and a state-of-the-art DoA estimator which explicitly accounts for aliasing shows, in particular, the advantages of the presented algorithm if inter-sensor level differences are indicative of the DoA, as with directional microphones.},\n  keywords = {acoustic signal processing;array signal processing;direction-of-arrival estimation;microphone arrays;statistical distributions;complex Watson kernel method;direction of arrival estimation algorithm;complex Watson distribution;phase differences;level differences;microphone array signals;generalized state coherence transform;intersensor level differences;DoA estimator;SRP-PHAT algorithm;steered response power with phase transform algorithm;omnidirectional microphones;directional microphones;parameter selection;kernel density estimation method;GSCT;Signal processing algorithms;Direction-of-arrival estimation;Microphones;Estimation;Arrays;Kernel;Direction of Arrival;sensor array;directional statistics;complex Watson distribution;directional sensors},\n  doi = {10.1109/EUSIPCO.2015.7362384},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102587.pdf},\n}\n\n
\n
\n\n\n
\n This contribution presents a Direction of Arrival (DoA) estimation algorithm based on the complex Watson distribution to incorporate both phase and level differences of captured microphone array signals. The derived algorithm is reviewed in the context of the Generalized State Coherence Transform (GSCT) on the one hand and a kernel density estimation method on the other hand. A thorough simulative evaluation yields insight into parameter selection and provides details on the performance for both directional and omni-directional microphones. A comparison to the well-known Steered Response Power with Phase Transform (SRP-PHAT) algorithm and a state-of-the-art DoA estimator which explicitly accounts for aliasing shows, in particular, the advantages of the presented algorithm if inter-sensor level differences are indicative of the DoA, as with directional microphones.\n
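The scoring idea can be caricatured in a few lines: normalize the per-bin microphone vectors z and scan candidate directions theta with a Watson-type kernel exp(kappa * |a(theta)^H z|^2). The array, kappa and signals below are invented, and the paper's kernel-density and level-difference machinery is not shown.

import numpy as np

rng = np.random.default_rng(12)
c, f = 343.0, 2000.0
mic = np.array([0.0, 0.04, 0.08])                # assumed 3-mic line array

def steer(theta):
    a = np.exp(-2j * np.pi * f * mic * np.cos(theta) / c)
    return a / np.linalg.norm(a)

true_theta = np.deg2rad(60)
Z = steer(true_theta)[:, None] * rng.normal(size=(1, 200))   # 200 TF bins
Z = Z + 0.1 * (rng.normal(size=Z.shape) + 1j * rng.normal(size=Z.shape))
Z = Z / np.linalg.norm(Z, axis=0)                # unit-norm observations

grid = np.deg2rad(np.arange(181.0))
score = [np.sum(np.exp(5.0 * np.abs(steer(t).conj() @ Z)**2)) for t in grid]
print("DoA estimate (deg):", np.rad2deg(grid[np.argmax(score)]))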
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed estimation of cross-correlation functions in ad-hoc microphone arrays.\n \n \n \n \n\n\n \n van Waterschoot, T.\n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 260-264, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362385,\n  author = {T. {van Waterschoot}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed estimation of cross-correlation functions in ad-hoc microphone arrays},\n  year = {2015},\n  pages = {260-264},\n  abstract = {In this paper, we address the problem of estimating the cross-correlation function between two microphone signals recorded in different nodes of an ad-hoc microphone array or wireless acoustic sensor network, where the transmission of the entire microphone signal from one node to another is undesirable due to power and/or bandwidth constraints. We show that instead of directly computing the cross-correlation function, it can be estimated as the solution to a deconvolution problem. This deconvolution problem can be separated into two subproblems, each of which depends on one microphone signal and an auxiliary signal derived from the other microphone signal. Three different strategies for solving this deconvolution problem are proposed, in which the two subproblems are solved jointly (symmetric deconvolution), separately (asymmetric deconvolution) or in a consensus framework (consensus deconvolution). Simulation results illustrate the performance difference in terms of estimation accuracy, noise robustness, and transmission requirements.},\n  keywords = {acoustic communication (telecommunication);correlation methods;deconvolution;estimation theory;microphone arrays;wireless sensor networks;distributed estimation;cross-correlation function estimation;ad-hoc microphone array;microphone signal recording;wireless acoustic sensor network;asymmetric deconvolution problem;symmetric deconvolution problem;consensus deconvolution problem;Deconvolution;Estimation;Ad hoc networks;Array signal processing;Microphone arrays;distributed signal processing;distributed optimization;cross-correlation function;ad-hoc microphone array;wireless acoustic sensor network},\n  doi = {10.1109/EUSIPCO.2015.7362385},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104749.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we address the problem of estimating the cross-correlation function between two microphone signals recorded in different nodes of an ad-hoc microphone array or wireless acoustic sensor network, where the transmission of the entire microphone signal from one node to another is undesirable due to power and/or bandwidth constraints. We show that instead of directly computing the cross-correlation function, it can be estimated as the solution to a deconvolution problem. This deconvolution problem can be separated into two subproblems, each of which depends on one microphone signal and an auxiliary signal derived from the other microphone signal. Three different strategies for solving this deconvolution problem are proposed, in which the two subproblems are solved jointly (symmetric deconvolution), separately (asymmetric deconvolution) or in a consensus framework (consensus deconvolution). Simulation results illustrate the performance difference in terms of estimation accuracy, noise robustness, and transmission requirements.\n
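For reference, the quantity under discussion is the plain cross-correlation function, computed centrally via the FFT in the sketch below (invented signals); the paper's schemes approximate the same function while exchanging only short auxiliary signals between nodes.

import numpy as np

rng = np.random.default_rng(13)
s = rng.normal(size=2048)
x1 = np.convolve(s, [1.0, 0.5, 0.25], mode="same")      # node-1 microphone
x2 = np.roll(x1, 40) + 0.05 * rng.normal(size=s.size)   # node-2 microphone

n = 2 * s.size
R = np.fft.irfft(np.fft.rfft(x2, n) * np.conj(np.fft.rfft(x1, n)), n)
print("CCF peak at lag:", np.argmax(R[:200]))           # ~40 samples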
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Relax and unfold: Microphone localization with Euclidean distance matrices.\n \n \n \n \n\n\n \n Dokmanić, I.; Ranieri, J.; and Vetterli, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 265-269, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RelaxPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362386,\n  author = {I. Dokmanić and J. Ranieri and M. Vetterli},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Relax and unfold: Microphone localization with Euclidean distance matrices},\n  year = {2015},\n  pages = {265-269},\n  abstract = {Recent methods for microphone position calibration work with sound sources at a priori unknown locations. This is convenient for ad hoc arrays, as it requires little additional infrastructure. We propose a flexible localization algorithm by first recognizing the problem as an instance of multidimensional unfolding (MDU) - a classical problem in Euclidean geometry and psychometrics - and then solving the MDU as a special case of Euclidean distance matrix (EDM) completion. We solve the EDM completion using a semidefinite relaxation. In contrast to existing methods, the semidefinite formulation allows us to elegantly handle missing pairwise distance information, but also to incorporate various prior information about the distances between the pairs of microphones or sources, bounds on these distances, or ordinal information such as {"}microphones 1 and 2 are more apart than microphones 1 and 15{"}. The intuition that this should improve the localization performance is justified by numerical experiments.},\n  keywords = {acoustic generators;acoustic signal processing;calibration;mathematical programming;matrix algebra;microphone arrays;multidimensional signal processing;source separation;microphone localization;Euclidean distance matrices;microphone position calibration;sound sources;ad hoc arrays;flexible localization algorithm;multidimensional unfolding;MDU;Euclidean geometry;psychometrics;EDM completion;semidefinite relaxation;missing pairwise distance information handling;localization performance improvement;Microphones;Calibration;Euclidean distance;Geometry;Noise measurement;Symmetric matrices;Europe;Microphone localization;array calibration;microphone array;Euclidean distance matrix;semidefinite relaxation;multidimensional unfolding},\n  doi = {10.1109/EUSIPCO.2015.7362386},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105011.pdf},\n}\n\n
\n
\n\n\n
\n Recent methods for microphone position calibration work with sound sources at a priori unknown locations. This is convenient for ad hoc arrays, as it requires little additional infrastructure. We propose a flexible localization algorithm by first recognizing the problem as an instance of multidimensional unfolding (MDU) - a classical problem in Euclidean geometry and psychometrics - and then solving the MDU as a special case of Euclidean distance matrix (EDM) completion. We solve the EDM completion using a semidefinite relaxation. In contrast to existing methods, the semidefinite formulation allows us to elegantly handle missing pairwise distance information, but also to incorporate various prior information about the distances between the pairs of microphones or sources, bounds on these distances, or ordinal information such as \"microphones 1 and 2 are more apart than microphones 1 and 15\". The intuition that this should improve the localization performance is justified by numerical experiments.\n
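In the noiseless, fully observed case, EDM-based localization collapses to classical multidimensional scaling: double-center the squared-distance matrix and factor the resulting Gram matrix. The sketch below shows that core step; the paper's semidefinite completion is what handles missing and noisy entries and the microphones-versus-sources (MDU) structure.

import numpy as np

rng = np.random.default_rng(14)
X = rng.normal(size=(6, 2))                         # true 2-D positions
D = np.sum((X[:, None, :] - X[None, :, :])**2, -1)  # squared EDM

n = D.shape[0]
J = np.eye(n) - np.ones((n, n)) / n                 # centering operator
G = -0.5 * J @ D @ J                                # Gram matrix
lam, V = np.linalg.eigh(G)
Y = V[:, -2:] * np.sqrt(np.maximum(lam[-2:], 0.0))  # 2-D embedding

D_hat = np.sum((Y[:, None] - Y[None, :])**2, -1)    # Y = X up to rigid motion
print("max squared-distance error:", np.abs(D_hat - D).max())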
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A PEM-based frequency-domain Kalman filter for adaptive feedback cancellation.\n \n \n \n \n\n\n \n Bernardi, G.; van Waterschoot, T.; Wouters, J.; Hillbratt, M.; and Moonen, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 270-274, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362387,\n  author = {G. Bernardi and T. {van Waterschoot} and J. Wouters and M. Hillbratt and M. Moonen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A PEM-based frequency-domain Kalman filter for adaptive feedback cancellation},\n  year = {2015},\n  pages = {270-274},\n  abstract = {Adaptive feedback cancellation (AFC) algorithms are used to solve the problem of acoustic feedback, but, frequently, they do not address the fundamental problem of loudspeaker and source signal correlation, leading to an estimation bias if standard adaptive filtering methods are used. Loudspeaker and source signal prefiltering via the prediction-error method (PEM) can address this problem. In addition, the frequency-domain Kalman filter (FDKF) is an appealing tool for the estimation of the adaptive feedback canceler, given the advantages it offers over other common techniques, such as Wiener filtering. In this paper, we derive an algorithm employing a PEM-based prewhitening and a frequency-domain Kalman filter (PEM-FDKF) for AFC. We demonstrate its improved performance when compared with standard frequency-domain adaptive filter (FDAF) algorithms, in terms of reduced estimation error, achievable amplification and sound quality.},\n  keywords = {adaptive filters;frequency-domain analysis;Kalman filters;source separation;adaptive feedback cancellation algorithms;AFC algorithms;acoustic feedback;loudspeaker;source signal correlation;estimation bias;source signal prefiltering;prediction-error method;frequency-domain Kalman filter;PEM-based prewhitening;PEM-FDKF;frequency-domain adaptive filter algorithms;FDAF algorithms;Frequency control;Loudspeakers;Frequency-domain analysis;Kalman filters;Acoustics;Signal processing algorithms;Correlation;Adaptive feedback cancellation (AFC);prediction-error method (PEM);Kalman filter;source signal modeling},\n  doi = {10.1109/EUSIPCO.2015.7362387},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104243.pdf},\n}\n\n
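To fix ideas about the Kalman-filtering ingredient, the following is a per-bin, diagonal frequency-domain Kalman update under a random-walk model for the feedback path. The PEM prewhitening and the paper's exact partitioned formulation are deliberately omitted, and all names (fdkf_step, psi_s, psi_w) are hypothetical.

```python
import numpy as np

def fdkf_step(W, P, X, D, psi_s, psi_w):
    """One frame, all bins vectorized.
    W: feedback-path estimate per bin; P: its error variance per bin;
    X: (prewhitened) loudspeaker spectrum; D: (prewhitened) microphone spectrum;
    psi_s / psi_w: observation- and process-noise variances."""
    P = P + psi_w                                      # time update (random walk)
    E = D - W * X                                      # innovation (feedback-compensated)
    K = P * np.conj(X) / (P * np.abs(X) ** 2 + psi_s)  # per-bin Kalman gain
    W = W + K * E                                      # measurement update
    P = (1.0 - K * X) * P                              # variance update
    return W, P, E
```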
@InProceedings{7362388,\n  author = {M. Niedźwiecki and M. Meller and M. Gajdzica},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Robust algorithm for active feedback control of narrowband noise},\n  year = {2015},\n  pages = {275-279},\n  abstract = {The problem of active control of narrowband acoustic noise is considered. It is shown that the previously proposed feedback control algorithm called SONIC (self-optimizing narrowband interference canceller), based on minimization of the L2-norm performance measure, can be re-derived using the L1 approach. The resulting robust SONIC algorithm is less sensitive to heavy-tailed measurement noise, such as α-stable noise, than the original SONIC.},\n  keywords = {acoustic noise;feedback;interference suppression;noise abatement;noise measurement;optimisation;active feedback control;narrowband acoustic noise;feedback control algorithm;self-optimizing narrowband interference canceller;SONIC algorithm;heavy-tailed measurement noise;Signal processing algorithms;Robustness;Narrowband;Minimization;Noise measurement;Steady-state;Europe;active noise control;adaptive filtering},\n  doi = {10.1109/EUSIPCO.2015.7362388},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101939.pdf},\n}\n\n
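The L2-to-L1 robustification idea can be illustrated generically (this is not SONIC itself): minimizing a mean-absolute rather than a mean-square criterion replaces the error in an LMS-style update by its sign, which bounds the influence of impulsive, heavy-tailed samples.

```python
import numpy as np

def lms_step(w, x, d, mu):
    e = d - w @ x
    return w + mu * e * x, e            # L2 criterion: gradient of e^2

def sign_error_step(w, x, d, mu):
    e = d - w @ x
    return w + mu * np.sign(e) * x, e   # L1 criterion: gradient of |e|
```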
@InProceedings{7362389,\n  author = {A. Drira and A. O. Boudraa and L. Guillon and A. Komaty},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {On the estimation of time-delays for image source method},\n  year = {2015},\n  pages = {280-284},\n  abstract = {The image source method is based on the study of signals reflected by a sedimentary seabed. It produces an estimation of the geoacoustic parameters of the stratified sediments. The key step of this method is the detection and estimation of the time delays of the signals reflected by the interfaces between the sedimentary strata. The Teager-Kaiser (TK) operator is known as a reliable way to extract from the recorded signal the time delays of the reflected signals. Due to its sensitivity to discontinuities and excellent time resolution, this operator is a good tool to detect and estimate the time delays. We investigate whether the TK operator associated with time-frequency approaches (spectrogram, wavelets) could be of interest for the detection of time delays and perform better than the TK operator applied directly on the reflected signals. The different methods are presented and compared first on synthetic signals. The results show that the use of the wavelet transform improves the quality of the time-delay estimation. The algorithm is finally applied to real signals acquired in an acoustic tank experiment and the estimated parameters are very close to the ground truth.},\n  keywords = {acoustic signal detection;delay estimation;sediments;time-frequency analysis;wavelet transforms;Teager-Kaiser operator;TK operator;time resolution;time-frequency approach;wavelet transform;signal acquisition;acoustic tank experiment;signal detection;stratified sediment geoacoustic parameter estimation;sedimentary seabed;image source method;time-delay estimation;Sonar equipment;Delay effects;Time-frequency analysis;Acoustics;Estimation;Spectrogram;Continuous wavelet transforms;Image method;Medium impulse response;Geoacoustic inversion;Time-delay;Teager-Kaiser energy operator},\n  doi = {10.1109/EUSIPCO.2015.7362389},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
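The discrete Teager-Kaiser energy operator at the heart of this method is essentially one line: psi[n] = x[n]^2 - x[n-1]x[n+1]. A minimal sketch follows, with the caveat that the paper's best-performing variant applies the operator to wavelet coefficients rather than to the raw signal.

```python
import numpy as np

def teager_kaiser(x):
    """Discrete TK energy operator; spikes in psi mark the discontinuities
    left by each reflected arrival, from which time delays are read off."""
    x = np.asarray(x, dtype=float)
    psi = np.zeros_like(x)
    psi[1:-1] = x[1:-1] ** 2 - x[:-2] * x[2:]
    return psi
```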
@InProceedings{7362390,\n  author = {M. Sunohara and M. Osawa and T. Hashiura and M. Tateno},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Occlusion reduction system for hearing aids with an improved transducer and an associated algorithm},\n  year = {2015},\n  pages = {285-289},\n  abstract = {Many hearing aid users have complained about discomfort from their own voice and/or mastication sounds. Such discomfort is caused by increased sound pressure at low frequencies when the ear canal is blocked by the hearing aid itself. This phenomenon is called the {"}occlusion effect{"} and is one of the critical issues for hearing aids. This report proposes an occlusion reduction system based on an active noise control technique using a new acoustic transducer. The proposed system can reduce the increased sound pressure in the ear canal by about 26 dB around 200 Hz. While the proposed system achieves good reduction performance, distorted sounds are frequently perceived through the system. This secondary issue of distorted sounds can also be reduced by controlling the feedback loop gain. Finally, a prototype system with the new transducer and a distortion suppressor algorithm is developed and then evaluated.},\n  keywords = {acoustic transducers;active noise control;hearing aids;occlusion reduction system;hearing aids;occlusion effect;active noise control technique;acoustic transducer;feedback loop gain;distortion suppressor algorithm;Receivers;Acoustic distortion;Ear;Irrigation;Gain;Auditory system;Hearing aids;Occlusion effect;Active noise control;Improved acoustic transducer;Distortion},\n  doi = {10.1109/EUSIPCO.2015.7362390},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096621.pdf},\n}\n\n
@InProceedings{7362391,\n  author = {F. Albu and R. Nakagawa and S. Nordholm},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Proportionate algorithms for two-microphone active feedback cancellation},\n  year = {2015},\n  pages = {290-294},\n  abstract = {In this paper we propose the use of the proportionate principle in order to improve the convergence characteristics of the two microphone method for acoustic feedback cancellation in hearing aids. The reason for using proportionate algorithms is to exploit the sparseness of the adaptive filter coefficients in the transform domain. The convergence improvement can be achieved for both speech and music signals at a moderate increase in numerical complexity over that of a previous solution in the transform domain.},\n  keywords = {adaptive filters;feedback;hearing aids;microphones;proportionate principle;convergence characteristics;two microphone method;acoustic feedback cancellation;hearing aids;adaptive filter coefficients;transform domain;speech signals;music signals;numerical complexity;Signal processing algorithms;Adaptive filters;Microphones;Speech;Finite impulse response filters;Acoustics;Transforms;acoustic feedback cancellation;proportionate adaptive algorithms;two microphones approach;adaptive algorithms},\n  doi = {10.1109/EUSIPCO.2015.7362391},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570092761.pdf},\n}\n\n
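As a concrete member of the proportionate family discussed here, a standard IPNLMS step assigns each coefficient a gain proportional to its magnitude, which is what exploits sparsity. This is a generic sketch, not the paper's transform-domain two-microphone variant; names and default parameters are illustrative.

```python
import numpy as np

def ipnlms_step(w, x, d, mu=0.5, alpha=0.0, eps=1e-8):
    """w: filter taps; x: regressor (most recent sample first); d: desired sample.
    alpha in [-1, 1] blends NLMS (alpha = -1) with fully proportionate updates."""
    L = len(w)
    e = d - w @ x
    # per-tap gains: a floor term plus a magnitude-proportional term
    g = (1 - alpha) / (2 * L) + (1 + alpha) * np.abs(w) / (2 * np.sum(np.abs(w)) + eps)
    w = w + mu * e * g * x / (x @ (g * x) + eps)   # proportionate normalized step
    return w, e
```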
@InProceedings{7362392,\n  author = {V. Patel and N. V. George},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Partial update even mirror Fourier non-linear filters for active noise control},\n  year = {2015},\n  pages = {295-299},\n  abstract = {A non-linear active noise control (ANC) scheme based on an even mirror Fourier non-linear filter has been developed in this paper. A new weight update mechanism for the proposed scheme has been suggested and the range of the learning rate that ensures stability has been derived. The noise mitigation achieved using the new scheme has been compared with that obtained using a functional link artificial neural network (FLANN) based ANC system as well as using a generalized FLANN (GFLANN) based ANC mechanism. The computational complexity of the proposed algorithm has been further reduced by using the concept of partial update signal processing. A simulation study has been carried out to evaluate the effectiveness of the new method. Improved noise reduction at reduced computational load has been provided by the proposed new partial update ANC scheme.},\n  keywords = {active noise control;computational complexity;neural nets;nonlinear filters;signal processing;noise reduction;partial update signal processing;computational complexity;FLANN;functional link artificial neural network;weight update mechanism;active noise control;partial update even mirror Fourier nonlinear filters;Signal processing algorithms;Signal processing;Computational complexity;Europe;Computational modeling;Microphones;Convergence;Active noise control;FLANN;GFLANN;even mirror filter},\n  doi = {10.1109/EUSIPCO.2015.7362392},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570094561.pdf},\n}\n\n
@InProceedings{7362393,\n  author = {M. Schneider and E. A. P. Habets},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Comparison of multichannel doubletalk detectors for acoustic echo cancellation},\n  year = {2015},\n  pages = {300-304},\n  abstract = {In acoustic echo cancellation (AEC) a doubletalk detector (DTD) is typically used to avoid misconvergence of the adaptive filter during simultaneous activity of the near-end acoustic scene and loudspeaker playback. While several single-channel DTDs can be generalized to multiple channels, little attention has so far been paid to the evaluation of the resulting multichannel DTDs. In this paper, different DTDs are reviewed and evaluated. In particular, the influence of the number of loudspeakers is investigated as a new dimension in the experimental evaluation, where up to sixteen loudspeaker channels are considered. The results show that the performance of a DTD is affected by the number of loudspeaker channels whenever the cross-correlation between the loudspeaker signals is considered by the DTD. Moreover, considering this cross-correlation turned out to be necessary for high detection performance.},\n  keywords = {echo suppression;loudspeakers;multichannel doubletalk detectors;acoustic echo cancellation;near-end acoustic scene;loudspeaker playback;single-channel DTD;loudspeaker channels;loudspeaker signals;Loudspeakers;Microphones;Detectors;Correlation;Echo cancellers;Europe;Doubletalk detection;multi-channel;acoustic echo cancellation;comparison;evaluation},\n  doi = {10.1109/EUSIPCO.2015.7362393},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097477.pdf},\n}\n\n
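One plausible multichannel generalization (our illustration, not necessarily one of the exact detectors compared in the paper) is a normalized-cross-correlation statistic in which all loudspeaker channels are stacked into a single regressor, so the inter-loudspeaker cross-correlation mentioned in the conclusions enters through the joint covariance.

```python
import numpy as np

def ncc_dtd_statistic(X, d, eps=1e-10):
    """X: (N, C*L) matrix of stacked loudspeaker regressors over N samples
    (C channels, L taps each); d: length-N microphone signal.
    Returns xi in [0, 1]: near 1 during far-end-only activity, dropping under
    doubletalk (declare doubletalk when xi falls below a threshold, e.g. 0.9)."""
    Rx = X.T @ X + eps * np.eye(X.shape[1])   # joint loudspeaker covariance
    r = X.T @ d                               # cross-correlation with the mic
    return np.sqrt((r @ np.linalg.solve(Rx, r)) / (d @ d + eps))
```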
@InProceedings{7362394,\n  author = {S. Hase and Y. Kajikawa and L. Liu and S. M. Kuo},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-channel ANC system using optimized reference microphones based on time difference of arrival},\n  year = {2015},\n  pages = {305-309},\n  abstract = {A feedforward active noise control (ANC) system using an upstream reference signal can reduce various noises, such as broadband noise, by arranging a reference microphone close to the noise source. However, the performance of the ANC system deteriorates if the noise environment, such as the arrival direction, changes. This is because of the causality constraint: the unwanted noise propagates to the control point faster than the {"}antinoise{"} intended to cancel it. To solve this problem, we propose an ANC system that estimates the arrival direction of noise using multiple reference microphones placed around the control point. This system uses a time difference of arrival technique to estimate the noise source location and then optimize the reference signal. Noise reduction performance is examined through simulations in this paper.},\n  keywords = {active noise control;microphones;time-of-arrival estimation;multichannel ANC system;optimized reference microphones;feedforward active noise control system;broadband noise;microphones;time difference of arrival technique;noise source location;noise reduction performances;Microphones;Microwave integrated circuits;Noise reduction;Feedforward neural networks;Delays;Europe;Multi-channel ANC system;feedforward control;time difference of arrival;causality constraint},\n  doi = {10.1109/EUSIPCO.2015.7362394},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102497.pdf},\n}\n\n
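The TDOA building block can be illustrated with the standard GCC-PHAT estimator between two reference microphones; this is our assumption for illustration, as the paper does not prescribe this exact correlator.

```python
import numpy as np

def gcc_phat_tdoa(x, y, fs):
    """Time difference of arrival (seconds) between signals x and y,
    via the phase transform (PHAT) weighted cross-correlation."""
    n = len(x) + len(y)
    X, Y = np.fft.rfft(x, n), np.fft.rfft(y, n)
    G = X * np.conj(Y)
    cc = np.fft.irfft(G / (np.abs(G) + 1e-12), n)  # whitened cross-correlation
    m = n // 2
    cc = np.concatenate((cc[-m:], cc[: m + 1]))    # reorder lags to [-m, m]
    return (np.argmax(np.abs(cc)) - m) / fs
```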
@InProceedings{7362395,\n  author = {J. Lorente and C. Antoñanzas and M. Ferrer and A. Gonzalez},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Block-based distributed adaptive filter for active noise control in a collaborative network},\n  year = {2015},\n  pages = {310-314},\n  abstract = {This paper considers the implementation of an Active Noise Control (ANC) system over a network of distributed acoustic nodes. Single-channel nodes composed of one microphone, one loudspeaker, and a processor with communication capabilities have been considered. An equivalent solution to the Multiple Error Filtered-x Least Mean Square algorithm (Me-FxLMS) has been chosen because it is a widely used algorithm in ANC systems with centralized processing. The proposed algorithm has been implemented with block-data processing, as is common in practical systems. Furthermore, the algorithm works in the frequency domain and with partitioning of the filters for improving its efficiency. Therefore, we present a new formulation to introduce a distributed algorithm based on the Me-FxLMS together with an incremental collaborative strategy in the network. Results demonstrate that the scalable and versatile distributed algorithm exhibits the same performance as the centralized version. Moreover, the computational complexity and some implementation aspects have been analyzed.},\n  keywords = {active noise control;adaptive filters;computational complexity;frequency-domain analysis;least mean squares methods;loudspeakers;microphones;block-based distributed adaptive filter;active noise control;collaborative network;distributed acoustic nodes;single-channel nodes;microphone;loudspeaker;processor;communication capability;multiple error filtered-x least mean square algorithm;Me-FxLMS;block-data processing;frequency domain;scalable versatile distributed algorithm;computational complexity;Signal processing algorithms;Partitioning algorithms;Acoustics;Microphones;Europe;Loudspeakers;Delays;Distributed Networks;Active Noise Control;Filtered-x Least Mean Square},\n  doi = {10.1109/EUSIPCO.2015.7362395},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104575.pdf},\n}\n\n
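For orientation, the time-domain, single-channel FxLMS core that Me-FxLMS generalizes is sketched below; the paper's contribution is its multiple-error, block frequency-domain, partitioned and distributed counterpart. Names and the sign convention (error = noise plus antinoise through the secondary path) are our choices.

```python
import numpy as np

def fxlms_block(x, e, w, s_hat, mu):
    """x: reference block; e: error-microphone samples aligned with x;
    w: control filter taps; s_hat: FIR estimate of the secondary path."""
    xf = np.convolve(x, s_hat)[: len(x)]     # reference filtered through S-hat
    L = len(w)
    for n in range(L, len(x)):
        # gradient step on e^2 using the filtered reference
        w = w - mu * e[n] * xf[n - L + 1 : n + 1][::-1]
    return w
```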
@InProceedings{7362396,\n  author = {R. Aihara and T. Fujii and T. Nakashika and T. Takiguchi and Y. Ariki},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Noise-robust voice conversion using a small parallel data based on non-negative matrix factorization},\n  year = {2015},\n  pages = {315-319},\n  abstract = {This paper presents a novel framework of voice conversion (VC) based on non-negative matrix factorization (NMF) using a small parallel corpus. In our previous work, a VC technique using NMF for noisy environments has been proposed; it requires parallel exemplars (dictionary), which consist of source exemplars and target exemplars having the same texts uttered by the source and target speakers. The large parallel corpus is used to construct a conversion function in NMF-based VC (in the same way as common GMM-based VC). In this paper, an adaptation matrix in an NMF framework is introduced to adapt the source dictionary to the target dictionary. This adaptation matrix is estimated using a small parallel speech corpus only. The effectiveness of this method is confirmed by comparison with a conventional NMF-based method and a GMM-based method in a noisy environment.},\n  keywords = {matrix decomposition;speech processing;noise-robust voice conversion;nonnegative matrix factorization;large parallel corpus;adaptation matrix;source dictionary;target dictionary;Dictionaries;Speech;Noise measurement;Europe;Signal processing;Estimation;Matrix converters;voice conversion;speaker adaptation;noisy environments;small parallel corpus},\n  doi = {10.1109/EUSIPCO.2015.7362396},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099723.pdf},\n}\n\n
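The exemplar-based conversion step can be sketched as follows: activations of the source spectrogram over a fixed source dictionary are estimated with KL-type multiplicative updates and then applied to the coupled target dictionary. This omits the paper's adaptation matrix and noise handling; all names are illustrative.

```python
import numpy as np

def nmf_vc(V, Ws, Wt, n_iter=100, eps=1e-10):
    """V: source magnitude spectrogram (F x T); Ws, Wt: parallel source and
    target dictionaries (F x K), column-coupled by the shared texts."""
    H = np.random.rand(Ws.shape[1], V.shape[1])    # activations
    ones = np.ones_like(V)
    for _ in range(n_iter):                        # KL multiplicative updates, Ws fixed
        H *= (Ws.T @ (V / (Ws @ H + eps))) / (Ws.T @ ones + eps)
    return Wt @ H                                  # converted spectrogram
```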
@InProceedings{7362397,\n  author = {R. Fraile and N. Sáenz-Lechón and V. J. Osma-Ruiz and J. M. Gutierrez-Arriola},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Characterisation of tremor in normophonic voices},\n  year = {2015},\n  pages = {320-324},\n  abstract = {Vocal tremor is a low frequency instability of the voice that causes modulation of its amplitude and fundamental frequency. Among these two, frequency modulation is more relevant for perception and it has been shown to be present both in normophonic and dysphonic voices and to happen in similar frequency bands for both voice types. This paper presents a characterisation of the frequency modulating signal estimated for normophonic voices in terms of both its spectral characteristics and its statistical distribution. By using the discrete Fourier transform for data non-uniformly spaced in time domain, it is shown that the modulating signal may be either low-pass or band-pass (i.e. oscillating), though the low-pass case dominates in the analysed data. As for the values of the modulating signal, their distribution is shown to fairly fit a Gaussian distribution with a standard deviation that significantly depends on the average fundamental frequency.},\n  keywords = {acoustic signal processing;discrete Fourier transforms;frequency modulation;Gaussian distribution;hearing;medical signal processing;speech processing;time-domain analysis;normophonic voices;vocal tremor;frequency instability;dysphonic voices;frequency modulating signal;spectral characteristics;statistical distribution;discrete Fourier transform;time domain;Gaussian distribution;Frequency modulation;Discrete Fourier transforms;Standards;Frequency estimation;Estimation;Correlation;Acoustic signal analysis;Biomedical acoustics;Frequency modulation;Speech analysis},\n  doi = {10.1109/EUSIPCO.2015.7362397},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104217.pdf},\n}\n\n
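The "DFT for data non-uniformly spaced in time" reduces to direct evaluation of the Fourier sum at the irregular sample instants (here, roughly one fundamental-frequency estimate per glottal cycle). A minimal sketch under that assumption:

```python
import numpy as np

def nudft(t, x, freqs):
    """t: irregular sample instants (s); x: modulating-signal values at t;
    freqs: analysis frequencies (Hz). Returns the complex spectrum."""
    x = x - np.mean(x)   # remove the mean-f0 (DC) component first
    return np.array([np.sum(x * np.exp(-2j * np.pi * f * t)) for f in freqs])
```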
@InProceedings{7362398,\n  author = {A. Casamitjana and M. Sundin and P. Ghosh and S. Chatterjee},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bayesian learning for time-varying linear prediction of speech},\n  year = {2015},\n  pages = {325-329},\n  abstract = {We develop Bayesian learning algorithms for estimation of time-varying linear prediction (TVLP) coefficients of speech. Estimation of TVLP coefficients is a naturally underdetermined problem. We consider sparsity- and subspace-based approaches for dealing with the corresponding underdetermined system. Bayesian learning algorithms are developed to achieve better estimation performance. An expectation-maximization (EM) framework is employed to develop the Bayesian learning algorithms, where we use a combined prior to model a driving noise (glottal signal) that has both sparse and dense statistical properties. The efficiency of the Bayesian learning algorithms is shown using a spectral distortion measure on synthetic signals and formant tracking on real speech signals.},\n  keywords = {Bayes methods;expectation-maximisation algorithm;learning (artificial intelligence);prediction theory;spectral analysis;speech processing;speech time-varying linear prediction;Bayesian learning algorithm;TVLP coefficient estimation;expectation-maximization framework;driving noise;sparse properties;dense statistical properties;synthetic signal;spectral distortion measurement;real speech signal formant tracking;Bayes methods;Speech;Estimation;Signal processing algorithms;Standards;Prediction algorithms;Europe;Time-varying linear prediction;sparsity;Bayesian learning;expectation-maximization},\n  doi = {10.1109/EUSIPCO.2015.7362398},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104537.pdf},\n}\n\n
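As a reference point for the underdetermined problem described here, the plain least-squares version of TVLP expands each prediction coefficient on a small set of basis functions and solves one linear regression; the paper replaces this LS step with sparsity- and subspace-aware Bayesian learning. The names and the polynomial basis below are our choices, not the paper's.

```python
import numpy as np

def tvlp_ls(x, p=10, I=4):
    """Time-varying LP by basis expansion: a_k(n) = sum_i c[k, i] * f_i(n),
    with f_i(n) = (n/N)^i, solved for c by least squares."""
    N = len(x)
    basis = np.vstack([(np.arange(N) / N) ** i for i in range(I)])   # f_i(n)
    rows = [np.outer(x[n - p:n][::-1], basis[:, n]).ravel()          # x[n-k] f_i(n)
            for n in range(p, N)]
    c, *_ = np.linalg.lstsq(np.array(rows), x[p:], rcond=None)
    return c.reshape(p, I)   # c[k, i]: weight of basis i in coefficient a_{k+1}
```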
@InProceedings{7362399,\n  author = {M. Madhavi and H. Patil and B. B. Vachhani},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Spectral transition measure for detection of obstruents},\n  year = {2015},\n  pages = {330-334},\n  abstract = {Obstruents are very important acoustical events (i.e., abrupt-consonantal landmarks) in the speech signal. This paper presents the use of a novel Spectral Transition Measure (STM) to locate the obstruents in the continuous speech signal. The problem of obstruent detection involves detection of phonetic boundaries associated with obstruent sounds. In this paper, we propose the use of STM information derived from the state-of-the-art Mel Frequency Cepstral Coefficients (MFCC) feature set and a newly developed feature set, viz., MFCC-TMP (which uses the Teager Energy Operator (TEO) to implicitly exploit Magnitude and Phase information in the MFCC framework), for obstruent detection. The key idea here is to exploit the capabilities of STM to capture the highly dynamic transitional characteristics associated with obstruent sounds. The experimental setup is developed on the entire TIMIT database. For a 20 ms agreement (tolerance) duration, the obstruent detection rate is found to be 97.59 % with 17.65 % false acceptance using state-of-the-art MFCC-STM and 96.42 % with 12.88 % false acceptance using MFCC-TMP-STM. Finally, STM-based features along with static representation (i.e., MFCC-STM and MFCC-TMP-STM) are evaluated for the phone recognition task.},\n  keywords = {spectral analysis;speech processing;speech recognition;phone recognition task;TIMIT database;TEO;teager energy operator;MFCC-TMP;MFCC feature set;mel frequency cepstral coefficients;phonetic boundaries detection;STM;speech signal;acoustical events;obstruent detection;spectral transition measure;Speech;Mel frequency cepstral coefficient;Feature extraction;Testing;Indexes;Mel frequency cepstral coefficients;obstruents;spectral transition measure;Teager Energy Operator},\n  doi = {10.1109/EUSIPCO.2015.7362399},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103951.pdf},\n}\n\n
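The spectral transition measure in its usual regression-slope form (our reading of the classical definition the paper builds on) averages, per frame, the squared slopes of the cepstral trajectories over a short window, so peaks flag abrupt events such as obstruent boundaries.

```python
import numpy as np

def stm(mfcc, M=2):
    """mfcc: (T, D) frame-level coefficients; returns a length-T STM contour."""
    T, D = mfcc.shape
    k = np.arange(-M, M + 1)
    out = np.zeros(T)
    for t in range(M, T - M):
        # least-squares slope of each coefficient trajectory over 2M+1 frames
        slopes = k @ mfcc[t - M:t + M + 1] / (k @ k)
        out[t] = np.mean(slopes ** 2)
    return out
```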
@InProceedings{7362400,\n  author = {S. Han and T. Fingscheidt},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An improved ADPCM decoder by adaptively controlled quantization interval centroids},\n  year = {2015},\n  pages = {335-339},\n  abstract = {Adaptive differential pulse code modulation (ADPCM) has been standardized in ITU-T Recommendations G.726 and G.722 and is widely used in IP and cordless telephony. Although adaptive quantization and adaptive prediction are employed in ADPCM using a fixed scalar quantization codebook/lookup table, residual correlation of the quantizer input samples is still observed. Exploiting source correlation, it has been shown that scalar quantization performance can be improved by a time-variant quantization interval centroid leading to an adaptive codebook in the decoder. Using an ADPCM encoder and applying this principle to the ADPCM decoder with its own adaptive quantization and prediction, the mean opinion score (MOS) of perceptual evaluation of speech quality (PESQ) is shown to improve by about 0.15 points for low bit rate ADPCM in error-free transmission conditions.},\n  keywords = {adaptive modulation;correlation methods;decoding;differential pulse code modulation;Internet telephony;quantisation (signal);table lookup;improved ADPCM decoder;adaptively controlled quantization interval centroids;adaptive differential pulse code modulation;ITU-T recommendations G.726;ITU-T recommendations G.722;IP telephony;cordless telephony;adaptive prediction;fixed scalar quantization codebook;lookup table;residual correlation;quantizer input samples;source correlation;time-variant quantization interval centroid;ADPCM encoder;mean opinion score;MOS;perceptual evaluation of speech quality;PESQ;low bit rate ADPCM;error-free transmission conditions;Quantization (signal);Decoding;Speech;Standards;Indexes;Speech coding;Europe;ADPCM;probability density function;centroid},\n  doi = {10.1109/EUSIPCO.2015.7362400},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096681.pdf},\n}\n\n
@InProceedings{7362401,\n  author = {N. Chatlani and C. Beaugeant and P. Kroon},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Low complexity single microphone tonal noise reduction in vehicular traffic environments},\n  year = {2015},\n  pages = {340-344},\n  abstract = {A low complexity single microphone Tonal Noise Reduction (TNR) technique is presented for speech enhancement. This method is particularly effective in noisy environments which contain tonal noise sources, such as vehicular horns and alarms. TNR was designed to have low complexity and low memory requirements for use with battery operated communication devices. TNR estimates the probability that such tonal noises, which contaminate the desired speech signal, are present. These noises are then attenuated using the proposed system for noise suppression. This is particularly effective for noise sources with a harmonic spectral structure. The proposed TNR system is able to maintain a balance between the level of noise reduction and speech distortion. Listening tests were performed to confirm the results. TNR can be used together with a general noise reduction system as a postprocessing stage by reducing the residual noise components.},\n  keywords = {computational complexity;microphones;speech enhancement;vehicular ad hoc networks;low complexity single microphone tonal noise reduction;vehicular traffic environments;speech enhancement;battery operated communication devices;noise suppression;noise sources;harmonic spectral structure;speech distortion;residual noise components;Speech;Noise measurement;Noise reduction;Speech enhancement;Signal to noise ratio;Estimation;Single microphone noise reduction;Speech enhancement;Mobile devices},\n  doi = {10.1109/EUSIPCO.2015.7362401},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104673.pdf},\n}\n\n
@InProceedings{7362402,\n  author = {D. H. Hong and J. Y. Lee and N. S. Kim},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Acoustic modeling and parameter generation using relevance vector machines for speech synthesis},\n  year = {2015},\n  pages = {345-349},\n  abstract = {In this paper, we propose a relevance vector machine (RVM) for modeling and generation of a speech feature sequence. In the conventional method, the mean parameter of the hidden Markov model (HMM) state cannot capture temporal correlation among the corresponding data frames. Since the RVM can be utilized to solve a nonlinear regression problem, we apply it to replace the model parameters of the state output distributions. In the proposed system, RVMs are employed to model the statistically representative process of the state or phone segment, which is obtained from normalized training feature sequences by using the semi-parametric nonlinear regression method. We conducted comparative experiments between the proposed RVMs and a conventional HMM. It is shown that the proposed state-level RVM-based method performed better than the conventional technique.},\n  keywords = {acoustic signal processing;correlation methods;hidden Markov models;regression analysis;speech synthesis;acoustic modeling;parameter generation;relevance vector machines;RVM;speech synthesis;speech feature sequence;hidden Markov model;HMM state;temporal correlation;nonlinear regression problem;phone segment;normalized training feature sequences;semiparametric nonlinear regression method;Hidden Markov models;Speech;Speech synthesis;Training;Acoustics;Signal processing algorithms;Clustering algorithms;HMM;RVM;speech synthesis;acoustic modeling;parameter generation},\n  doi = {10.1109/EUSIPCO.2015.7362402},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102747.pdf},\n}\n\n
@InProceedings{7362403,\n  author = {J. Chevelu and D. Lolive},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Do not build your TTS training corpus randomly},\n  year = {2015},\n  pages = {350-354},\n  abstract = {TTS voice building generally relies on a script extracted from a big text corpus while optimizing the coverage of linguistic and phonological events supposedly related to voice acoustic quality. Previous works have shown differences in objective measures between smartly reduced and random corpora, but not when subjective evaluations are performed. In our view, those results stem not from a lack of utility of corpus reduction but from evaluations that smooth out differences. In this article, we highlight those differences in a subjective test by clustering test corpora according to a distance between signals, so as to focus on different synthesized stimuli. The results show that covering appropriate features has a real impact on the perceived quality.},\n  keywords = {speech synthesis;TTS voice building;voice acoustic quality;test corpora;Speech;Europe;Speech synthesis;Greedy algorithms;Pragmatics;Buildings;Corpus reduction;Subjective evaluation;Corpus-based Unit Selection TTS},\n  doi = {10.1109/EUSIPCO.2015.7362403},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104285.pdf},\n}\n\n
@InProceedings{7362404,\n  author = {K. E. Haddad and H. Çakmak and S. Dupont and T. Dutoit},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Breath and repeat: An attempt at enhancing speech-laugh synthesis quality},\n  year = {2015},\n  pages = {355-358},\n  abstract = {In this work, we present a study dedicated to improving speech-laugh synthesis quality. The impact of two factors is evaluated. The first factor is the addition of breath intake sounds after laughter bursts in speech. The second is the repetition of the word interrupted by laughs in the speech-laugh sentences. Several configurations are evaluated through subjective perceptual tests. We report an improvement of the synthesized speech-laugh naturalness when the breath intake sounds are added. We were, however, unable to draw a conclusion concerning a possible positive impact of the repetition of the interrupted words on the speech-laugh synthesis quality.},\n  keywords = {hidden Markov models;speech enhancement;speech synthesis;breath and repeat;speech-laugh synthesis quality enhancement;speech-laugh sentences;subjective perceptual tests;synthesized speech-laugh naturalness;hidden Markov model;Hidden Markov models;Speech;Acoustics;Databases;Signal processing;Bismuth;Europe;HMM-based;laughter;synthesis;speech-laugh},\n  doi = {10.1109/EUSIPCO.2015.7362404},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104647.pdf},\n}\n\n
@InProceedings{7362405,
  author = {N. {Ben Rejeb} and I. Bousnina and M. B. {Ben Salah} and A. Samet},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Mobile velocity and direction of movement estimation in NLOS multipath environment},
  year = {2015},
  pages = {359-363},
  abstract = {In this paper, we propose a new method to jointly estimate the Mobile Velocity (MV) and the Direction of Movement (DM). We exploit the NLOS multipath environment with Uniform Linear Arrays (ULAs) at the receiver. We consider the Gaussian and the Laplacian angular distributions for the incoming angles of arrival, as they are the most widely used in the literature. The proposed method uses the magnitudes and phases of the received signals' Cross-Correlation Functions (CCFs). We take as a benchmark the Two-Rays (TR) approach for the MV estimate. Performance is assessed via Monte Carlo simulation. Using the Root Mean Square Error (RMSE) as a measure of performance, our new estimator performs well over wide MV and DM ranges and outperforms the TR one for the MV estimation.},
  keywords = {correlation methods;estimation theory;Gaussian distribution;Laplace equations;mean square error methods;mobile radio;Monte Carlo methods;multipath channels;mobile velocity;MV estimation;movement estimation;NLOS multipath environment;direction of movement;uniform linear arrays;ULA;Gaussian distribution;Laplacian angular distribution;angle of arrivals;received signal cross-correlation functions;CCF;two-ray approach;TR approach;Monte Carlo simulation;root mean square error;RMSE;Europe;Mobile communication;Monte Carlo methods;AWGN;Yttrium;Gold;mobile velocity;direction of movement;cross-correlation function;SIMO configuration;NLOS multipath environment},
  doi = {10.1109/EUSIPCO.2015.7362405},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105091.pdf},
}
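The raw ingredient of such estimators is the cross-correlation function between array elements, whose magnitude and phase over lags encode Doppler (hence velocity) and direction information. A rough sketch of computing a CCF from a sum-of-sinusoids fading simulation; the fading model, quarter-wavelength element spacing, and all parameters are assumptions, not the paper's setup.

import numpy as np

# Sketch: CCF between two ULA elements under multipath fading.
fs, fd, N, P = 1000.0, 50.0, 4096, 64    # sample rate, max Doppler, samples, paths
t = np.arange(N) / fs
rng = np.random.default_rng(0)
aoa = rng.uniform(0, 2*np.pi, P)         # angles of arrival of the paths
ph = rng.uniform(0, 2*np.pi, P)          # random path phases
x1 = np.sum(np.exp(1j*(2*np.pi*fd*np.cos(aoa)[:, None]*t + ph[:, None])), axis=0)
# second element, lambda/4 away: extra geometric phase per path
x2 = np.sum(np.exp(1j*(2*np.pi*fd*np.cos(aoa)[:, None]*t + ph[:, None]
                       + (np.pi/2)*np.sin(aoa)[:, None])), axis=0)
lags = np.arange(-20, 21)
ccf = np.array([np.mean(x1[20+l:N-20+l] * np.conj(x2[20:N-20])) for l in lags])
# The estimator in the paper exploits |ccf| and angle(ccf) over lags;
# here we merely display the zero-lag values.
print(np.abs(ccf[20]), np.angle(ccf[20]))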
@InProceedings{7362406,
  author = {Y. Chen and C. Clemente and J. J. Soraghan and S. Weiss},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Partial Fractional Fourier Transform (PFRFT)-OFDM for underwater acoustic communication},
  year = {2015},
  pages = {364-368},
  abstract = {Communication over doubly selective channels (both time and frequency selective) suffers from significant intercarrier interference (ICI). This problem is severe in underwater acoustic communications. In this paper, a new Partial Fractional Fourier Transform (PFrFT) based orthogonal frequency division multiplexing (OFDM) scheme is presented for dealing with such challenges. A band minimum mean square error (BMMSE) weight combining equalizer based on the Least Squares MINRES (LSMR) iterative algorithm is used in the proposed communication system. Simulation results demonstrate significant BER performance improvements (up to 8 dB) over traditional orthogonal-based methods and those based on partial FFT demodulation and the Discrete Fractional Fourier Transform (DFrFT), with only a moderate computational complexity increase.},
  keywords = {computational complexity;discrete Fourier transforms;equalisers;error statistics;intercarrier interference;iterative methods;least squares approximations;OFDM modulation;underwater acoustic communication;wireless channels;partial fractional Fourier transform;PFrFT-OFDM;underwater acoustic communication;doubly selective channels;time selective channel;frequency selective channel;intercarrier interference;ICI;orthogonal frequency division multiplex;band minimum mean square error weight combining equalizer;BMMSE weight combining equalizer;least square MINRES iterative algorithm;LSMR iterative algorithm;BER performance improvement;partial FFT demodulation;discrete fractional Fourier transform;DFrFT;moderate computational complexity;OFDM;Complexity theory;Fourier transforms;Equalizers;Demodulation;Signal to noise ratio;Bit error rate;Orthogonal frequency division multiplexing (OFDM);Partial Fractional Fourier Transform (PFrFT);Banded Minimum mean square error (BMMSE);Least Square MINRES (LSMR)},
  doi = {10.1109/EUSIPCO.2015.7362406},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105075.pdf},
}
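The BMMSE-LSMR combination amounts to solving, per received block, a regularized least-squares problem with a banded channel matrix. A generic sketch using SciPy's lsmr, which solves min ||Hx - y||^2 + damp^2 ||x||^2 and therefore coincides with linear MMSE for unit-power symbols and white noise; the banded matrix below is an assumed ICI profile, not the paper's PFrFT system matrix.

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import lsmr

# Sketch: MMSE-style equalization of one received block via LSMR.
rng = np.random.default_rng(1)
K, B = 64, 2                               # subcarriers, ICI half-bandwidth
bands = [rng.standard_normal(K - abs(k)) * 0.5**abs(k) for k in range(-B, B+1)]
H = diags(bands, offsets=list(range(-B, B+1))).tocsr()   # banded channel (assumed)
x = rng.choice([-1.0, 1.0], K)             # BPSK symbols
snr_lin = 10**(15/10)
y = H @ x + rng.standard_normal(K) / np.sqrt(snr_lin)
# damp = noise std / symbol std gives the Tikhonov/MMSE regularizer
xhat = lsmr(H, y, damp=1/np.sqrt(snr_lin))[0]
print(np.mean(np.sign(xhat) != x))         # raw BER of the sketch

The appeal of LSMR here is that it only touches H through matrix-vector products, so the banded structure keeps the per-iteration cost linear in the number of subcarriers.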
@InProceedings{7362407,
  author = {V. Gouldieff and K. Berberidis and J. Palicot},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {MISO estimation of asynchronously mixed BPSK sources},
  year = {2015},
  pages = {369-373},
  abstract = {In this paper, a method is proposed for extracting discrete sources from underdetermined, finite-bandwidth, delayed mixtures of BPSK sources received by a single-antenna receiver. Unlike most existing algorithms, which treat the unavoidable delay between the sources as undesirable, the proposed method takes advantage of such a delay. Indeed, it turns out that it is possible to recover the symbols even if there is neither gain nor phase diversity. The complexity of the proposed algorithm is quite low, which makes it efficient for real-time source separation. The effectiveness of the method is illustrated via numerical simulations for different scenarios.},
  keywords = {phase shift keying;radiocommunication;source separation;MISO estimation;asynchronously mixed BPSK sources;discrete sources extraction;finite bandwidth BPSK source;BPSK source delayed mixtures;single antenna receiver;Signal processing algorithms;Gold;Estimation;Binary phase shift keying;Europe;Delays;Blind source separation;BPSK;underdetermined mixture;MISO;time diversity},
  doi = {10.1109/EUSIPCO.2015.7362407},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102599.pdf},
}
@InProceedings{7362408,
  author = {M. T. Alkhodary and T. Ballal and T. Y. Al-Naffouri and A. H. Muqaibel},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Low-sampling-rate M-ary multiple access UWB communications in multipath channels},
  year = {2015},
  pages = {374-378},
  abstract = {The desirable characteristics of ultra-wideband (UWB) technology are challenged by formidable sampling frequency requirements, performance degradation in the presence of multi-user interference, and receiver complexity due to the channel estimation process. In this paper, a low-rate-sampling technique is used to implement M-ary multiple access UWB communications, in both the detection and channel estimation stages. A novel approach is used for multiple-access-interference (MAI) cancelation for the purpose of channel estimation. Results show reasonable performance of the proposed receiver for different numbers of users operating many times below the Nyquist rate.},
  keywords = {channel estimation;interference suppression;multi-access systems;multipath channels;radio receivers;signal detection;signal sampling;ultra wideband communication;wireless channels;Nyquist rate;MAI cancelation;multiple access interference cancelation;channel estimation process;receiver complexity;multiuser interference;ultra wideband technology;multipath channel;low-sampling-rate M-ary multiple access UWB communication;Receivers;Channel estimation;Correlation;Interference;Modulation;Europe;Low Sampling;Channel Estimation;Ultra-Wideband;Multiple-access-interference cancelation},
  doi = {10.1109/EUSIPCO.2015.7362408},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103967.pdf},
}
@InProceedings{7362409,
  author = {I. Santos and J. J. Murillo-Fuentes and P. M. Olmos},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Block expectation propagation equalization for ISI channels},
  year = {2015},
  pages = {379-383},
  abstract = {Modern communication systems use high-order modulations and channels with memory. However, as the memory of the channels and the order of the constellations grow, optimal equalizers such as the BCJR algorithm become computationally intractable, as their complexity increases exponentially with the number of taps and the modulation size. In this paper, we propose a novel low-complexity hard and soft output equalizer based on the Expectation Propagation (EP) algorithm that provides high-accuracy posterior probability estimates at the input of the channel decoder, with computational complexity similar to that of the linear MMSE. We experimentally show that this quasi-optimal solution outperforms classical solutions, reducing the bit error probability at low complexity when LDPC channel decoding is used and avoiding the curse of dimensionality in channel memory and constellation size.},
  keywords = {block codes;computational complexity;equalisers;intersymbol interference;least mean squares methods;parity check codes;probability;wireless channels;block expectation propagation equalization;ISI channels;high-order modulations;BCJR algorithm;soft output equalizer;high-accuracy posterior probability estimations;channel decoder;computational complexity;linear MMSE;bit error probability;LDPC channel decoding;quasioptimal solution;Signal processing algorithms;Approximation algorithms;Approximation methods;Complexity theory;Decoding;Equalizers;Expectation propagation;BCJR algorithm;low complexity;channel equalization;ISI},
  doi = {10.1109/EUSIPCO.2015.7362409},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104395.pdf},
}
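The core EP operation is moment matching: the discrete symbol posterior implied by a Gaussian cavity message is projected back onto a Gaussian with the same mean and variance. A self-contained sketch for one symbol of a 4-PAM alphabet (the alphabet, the uniform prior, and the cavity values are illustrative, not the paper's full message-passing schedule):

import numpy as np

# EP moment matching for one symbol: given a Gaussian cavity N(m, v)
# over the transmitted symbol and a discrete alphabet A, compute the
# mean/variance of the tilted (discrete) posterior and return the
# Gaussian with those moments.
def ep_project(m, v, alphabet):
    logw = -(alphabet - m)**2 / (2*v)      # log-weight of each symbol
    w = np.exp(logw - logw.max())          # stabilized exponentiation
    w /= w.sum()
    mean = np.sum(w * alphabet)
    var = np.sum(w * (alphabet - mean)**2)
    return mean, max(var, 1e-9)            # floor variance for stability

A = np.array([-3.0, -1.0, 1.0, 3.0])       # 4-PAM constellation
print(ep_project(0.4, 1.5, A))             # moments of the tilted posterior

Because every symbol keeps a Gaussian approximation, the block equalizer can reuse linear-MMSE machinery, which is where the claimed complexity advantage over BCJR comes from.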
@InProceedings{7362410,
  author = {M. Fuhrwerk and J. Peissig and M. Schellmann},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {On the design of an FBMC based air interface enabling channel adaptive pulse shaping per sub-band},
  year = {2015},
  pages = {384-388},
  abstract = {By application of pulse shaping, the FBMC (Filter Bank Multi Carrier) based offset-QAM-OFDM (OQAM-OFDM) modulation scheme offers a new degree of freedom in designing mobile communication systems. In this contribution we investigate the coexistence performance, in terms of interference isolation, of individually configured sub-bands, i.e. individual prototype filter functions (PFF) and/or subcarrier spacing per sub-band, in the context of multi-user or multi-service scenarios as envisaged for 5G. To that end, we analyze the synchronization requirements of different PFFs suggested in the literature for OQAM-OFDM and determine the required amount of guard bands with respect to the applied PFFs and subcarrier spacing configurations. The simulation results show that the required amount of guard bands for OQAM-OFDM systems is independent of the time offset between different users. As a rule of thumb, a minimum co-user interference isolation of 20 dB requires an in-band guard band whose width equals one subcarrier spacing of the user with the largest subcarrier spacing.},
  keywords = {5G mobile communication;channel bank filters;interference suppression;mobile communication;OFDM modulation;pulse shaping;quadrature amplitude modulation;in-band guard bands;co-user interference isolation;subcarrier spacing;5G;PFF;prototype filter functions;mobile communication systems;degree of freedom;OQAM-OFDM modulation scheme;offset-QAM-OFDM modulation scheme;channel adaptive pulse shaping per subband;FBMC based air interface;filter bank multicarrier;Interference;OFDM;Time-frequency analysis;Silicon;Europe;Pulse shaping methods;Frequency synchronization;OQAM-OFDM;Pulse shaping;Prototype filter;FBMC;Coexistence;MTC},
  doi = {10.1109/EUSIPCO.2015.7362410},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104543.pdf},
}
@InProceedings{7362411,
  author = {R. {Dos Santos Fagundes} and D. Lejeune and A. Mansour and F. {Le Roy} and R. Lababidi},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Wideband high dynamic range surveillance},
  year = {2015},
  pages = {389-393},
  abstract = {In recent radio-communication applications, receivers may be jammed by high-power unwanted signals. In this case, the received signal can be considered as the sum of a strong unwanted signal and a very weak target signal. Even though the two signals do not overlap in the frequency domain, processing the weak signal becomes very hard, as it can vanish at the output of the Analog-to-Digital Converter. To avoid this scenario, many nonlinear circuits have been proposed in the literature. Our study focuses on the separation of a weak signal and a very strong signal, both wideband and very close in the frequency domain. Several circuits have been implemented and simulated, and the proposed circuit diagram is presented. Finally, simulations are presented and discussed.},
  keywords = {analogue-digital conversion;frequency-domain analysis;jamming;radio receivers;radiocommunication;wideband high dynamic range surveillance;radiocommunication applications;high power unwanted signals;target signal;frequency domain;analog digital converter;nonlinear circuits;wideband signals;circuit diagram;Dynamic range;Optical fiber amplifiers;Receivers;Gain control;Jamming;Radar applications;Dynamic range;AGC;Logarithmic Amplifier;OFDM;electronic warfare;EM surveillance;Radio Cognitive},
  doi = {10.1109/EUSIPCO.2015.7362411},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570094843.pdf},
}
@InProceedings{7362412,
  author = {Z. Koldovsky and P. Tichavsky},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Sparse reconstruction of incomplete relative transfer function: Discrete and continuous time domain},
  year = {2015},
  pages = {394-398},
  abstract = {Relative transfer functions (RTFs) between microphones can often be estimated accurately, but only for certain frequencies. For example, this happens in situations where the RTF is estimated from a noise-free signal of a target source whose spectrum does not span the whole frequency range. By combining a conventional RTF estimator and a selection of the active frequencies, an incomplete measurement of the RTF is obtained. We propose to retrieve the whole RTF estimate by finding the sparsest representation of the incomplete measurement in the discrete or continuous time domain, and compare both approaches. The RTF estimates are evaluated in terms of the attenuation rate, which measures the target signal cancellation at the output of a blocking matrix. Experiments show that the reconstructed estimate can achieve significantly better attenuation than the initial (complete) estimate.},
  keywords = {microphones;signal reconstruction;sparse reconstruction;relative transfer function;discrete time domain;continuous time domain;microphones;noise-free signal;target signal cancellation;blocking matrix;Atomic measurements;Microphones;Discrete Fourier transforms;Delays;Transfer functions;Time-domain analysis;Frequency estimation;Relative Transfer Function;Blocking Matrix;Generalized Sidelobe Canceller;Sparse Approximation;LASSO;Atomic Norm;Semidefinite programming},
  doi = {10.1109/EUSIPCO.2015.7362412},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570086997.pdf},
}
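In the discrete case, finding the sparsest time-domain relative impulse response consistent with RTF samples on a subset of frequencies is a LASSO problem with a partial DFT operator. A minimal ISTA sketch of that variant (the sizes, sparsity level, and set of reliable frequencies are assumptions; the paper's continuous-time formulation relies on atomic-norm machinery not shown here):

import numpy as np

# ISTA sketch: recover a sparse time-domain relative impulse response g
# from RTF values known only on a subset of DFT bins.
rng = np.random.default_rng(2)
N, S = 256, 5
g = np.zeros(N)
g[rng.choice(N, S, replace=False)] = rng.standard_normal(S)
F = np.fft.fft(np.eye(N)) / np.sqrt(N)             # unitary DFT matrix
keep = np.sort(rng.choice(N, 80, replace=False))   # "reliable" frequencies
A, y = F[keep], F[keep] @ g                        # incomplete RTF measurement

lam, L = 0.01, 1.0           # step 1/L is safe: rows of a unitary matrix, ||A||_2 <= 1
x = np.zeros(N, dtype=complex)
for _ in range(500):                                # proximal gradient (ISTA)
    r = x - A.conj().T @ (A @ x - y) / L
    x = np.exp(1j*np.angle(r)) * np.maximum(np.abs(r) - lam/L, 0)  # complex soft-threshold
print(np.linalg.norm(x.real - g) / np.linalg.norm(g))   # relative recovery error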
@InProceedings{7362413,
  author = {X. Li and R. Horaud and L. Girin and S. Gannot},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Local relative transfer function for sound source localization},
  year = {2015},
  pages = {399-403},
  abstract = {The relative transfer function (RTF), i.e. the ratio of acoustic transfer functions between two sensors, can be used for sound source localization/beamforming based on a microphone array. The RTF is usually defined with respect to a unique reference sensor. Choosing the reference sensor may be a difficult task, especially in dynamic acoustic environments and setups. In this paper we propose to use a locally normalized RTF, local-RTF for short, as an acoustic feature to characterize the source direction. The local-RTF takes a neighboring sensor as the reference channel for a given sensor. The estimated local-RTF vector can thus avoid the adverse effects of a noisy unique reference and has a smaller estimation error than conventional RTF estimators. We propose two estimators for the local-RTF and concatenate the values across sensors and frequencies to form a high-dimensional vector which is utilized for source localization. Experiments with real-world signals show the merit of this approach.},
  keywords = {acoustic signal processing;array signal processing;direction-of-arrival estimation;microphone arrays;transfer functions;reference sensor;microphone array;acoustic transfer functions;sound source localization;local relative transfer function;Sensors;Microphones;Acoustics;Channel estimation;Signal to noise ratio;Arrays;Transfer functions;microphone array;relative transfer function;sound source localization},
  doi = {10.1109/EUSIPCO.2015.7362413},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096283.pdf},
}
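Both the global and the local feature reduce to ratios of cross- and auto-power spectra, differing only in the choice of reference channel. A small sketch under that reading (the shapes, the plain time-averaging, and the neighbor-as-reference convention are assumptions, not the paper's two estimators):

import numpy as np

# RTF-style feature per frequency bin: cross-PSD against a reference
# channel divided by the reference auto-PSD.
def rtf_feature(X, ref=0):
    """X: (sensors, frames) complex STFT coefficients of one frequency bin."""
    num = np.mean(X * np.conj(X[ref]), axis=1)      # cross-PSD vs reference
    den = np.mean(np.abs(X[ref])**2)                # reference auto-PSD
    return num / den

def local_rtf(X):
    # "local" variant: sensor i is referenced to its neighbor i-1
    return np.array([np.mean(X[i] * np.conj(X[i-1])) /
                     np.mean(np.abs(X[i-1])**2)
                     for i in range(1, X.shape[0])])

X = np.random.randn(4, 100) + 1j*np.random.randn(4, 100)
print(rtf_feature(X).shape, local_rtf(X).shape)     # (4,) and (3,)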
@InProceedings{7362414,
  author = {M. Taseska and E. A. P. Habets},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Relative transfer function estimation exploiting instantaneous signals and the signal subspace},
  year = {2015},
  pages = {404-408},
  abstract = {Multichannel noise reduction can be achieved without distorting the desired signals, provided that the relative transfer functions (RTFs) of the sources are known. Many RTF estimators require periods where only one source is active, which limits their applicability in practice. We propose an RTF estimator that does not require such periods. A time-varying RTF is computed per time-frequency (TF) bin that corresponds to the dominant source at that bin. We demonstrate that a minimum variance distortionless response (MVDR) filter based on the proposed RTF estimate can extract multiple sources with low distortion. The MVDR filter has maximum degrees of freedom and hence achieves significantly better noise reduction compared to a linearly constrained minimum variance filter that uses a separate RTF for each source.},
  keywords = {filtering theory;speech enhancement;relative transfer function estimation;instantaneous signals;signal subspace;multichannel noise reduction;RTF estimators;time-frequency bin;minimum variance distortionless response;MVDR filter;degrees of freedom;Speech;Microphones;Estimation;Noise reduction;Distortion;Transfer functions;Eigenvalues and eigenfunctions;Relative transfer function;speech enhancement;noise reduction;MVDR filter},
  doi = {10.1109/EUSIPCO.2015.7362414},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097843.pdf},
}
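Given an RTF steering vector h and a noise covariance R, the MVDR weights have the standard closed form w = R^{-1} h / (h^H R^{-1} h), which is distortionless toward h. A minimal numerical sketch with toy values (the vector h and covariance R below are made up, not estimated as in the paper):

import numpy as np

# MVDR beamformer from an RTF steering vector and noise covariance.
def mvdr(h, R):
    Rinv_h = np.linalg.solve(R, h)          # R^{-1} h without explicit inverse
    return Rinv_h / (h.conj() @ Rinv_h)     # normalize so that w^H h = 1

M = 4
h = np.ones(M, dtype=complex)               # toy RTF (sensor 0 as reference)
R = np.eye(M) + 0.3*np.ones((M, M))         # toy noise covariance (Hermitian PD)
w = mvdr(h, R)
print(np.allclose(w.conj() @ h, 1.0))       # distortionless constraint holds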
@InProceedings{7362415,
  author = {N. Ito and S. Araki and T. Nakatani},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Permutation-free clustering of relative transfer function features for blind source separation},
  year = {2015},
  pages = {409-413},
  abstract = {This paper describes an application of relative transfer functions (RTFs) to underdetermined blind source separation (BSS). A clustering-based BSS approach has the advantage that it can even deal with the underdetermined case, where the sources outnumber the microphones. Among others, clustering of a normalized observation vector (NOV) has proven effective for BSS even under reverberation. We here point out that the NOV gives information about the RTFs of the dominant source, and hence call it the RTF feature. Most previous BSS methods are limited in that they undergo significant performance degradation when the number of sources is not known precisely. This paper introduces our recently developed method for joint BSS and source counting based on permutation-free clustering of the RTF features. We demonstrate the effectiveness of the method in experiments with reverberant mixtures of an unknown number of sources with a reverberation time of up to 440 ms.},
  keywords = {blind source separation;feature extraction;transfer functions;permutation-free clustering;blind source separation;clustering-based BSS;relative transfer functions;RTF;normalized observation vector;NOV;reverberant mixtures;Time-frequency analysis;Amplitude modulation;Transfer functions;Frequency modulation;Speech;Reverberation;Source separation;Blind source separation;source counting;relative transfer functions;clustering;permutation problem},
  doi = {10.1109/EUSIPCO.2015.7362415},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102453.pdf},
}
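A minimal sketch of the normalized observation vector itself: per TF bin, the channel vector is scaled to unit norm and the phase of a reference channel is removed, leaving only spatial cues. This is the generic feature; the source counting and the permutation-free clustering that constitute the paper's contribution are not reproduced here.

import numpy as np

# Normalized observation vector (NOV) of one TF bin.
def nov(x, ref=0):
    """x: (channels,) complex STFT coefficients of one TF bin."""
    x = x / (np.linalg.norm(x) + 1e-12)           # remove source magnitude
    return x * np.exp(-1j * np.angle(x[ref]))     # remove reference phase

bins = np.random.randn(1000, 3) + 1j*np.random.randn(1000, 3)
feats = np.array([nov(b) for b in bins])
# Clustering these features (k-means, GMMs, ...) groups TF bins by
# dominant source; a naive per-frequency clustering would still face the
# inter-frequency permutation problem that the cited method avoids.
print(feats.shape)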
@InProceedings{7362416,
  author = {S. Meier and W. Kellermann},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Analysis of the performance and limitations of ICA-based relative impulse response identification},
  year = {2015},
  pages = {414-418},
  abstract = {Estimating impulse responses for a single source is a crucial problem for many applications in audio signal processing, such as source extraction. Since absolute impulse responses are hard to identify, relative impulse responses or, equivalently, relative transfer functions are identified instead. Independent Component Analysis (ICA) for convolutive mixtures offers the possibility of determining relative impulse responses implicitly by separating the target source from interfering sources. In this paper, fundamental limitations of relative transfer function (RTF) estimation are analyzed by calculating least-squares (LS)-optimal estimates in adverse scenarios, where the influence of scatterers and reverberation on the performance must be accounted for. Hereupon, ICA-based RTF estimation in the TRINICON framework is compared with the LS-optimal estimates.},
  keywords = {audio signal processing;convolution;independent component analysis;least squares approximations;mixture models;source separation;transient response;ICA-based relative impulse response identification limitation;ICA-based relative impulse response identification performance analysis;audio signal processing;relative transfer function identification;independent component analysis;convolutive mixtures;target source separation;least-squares optimal estimation;scatterer influence;reverberation influence;Estimation;Microphones;Blind source separation;Europe;Transfer functions;Probability density function;Independent component analysis;impulse response estimation;relative transfer functions},
  doi = {10.1109/EUSIPCO.2015.7362416},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104133.pdf},
}
@InProceedings{7362417,
  author = {A. Deleforge and S. Gannot and W. Kellermann},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Towards a generalization of relative transfer functions to more than one source},
  year = {2015},
  pages = {419-423},
  abstract = {We propose a natural way to generalize relative transfer functions (RTFs) to more than one source. We first prove that such a generalization is not possible using a single multichannel spectro-temporal observation, regardless of the number of microphones. We then introduce a new transform for multichannel multi-frame spectrograms, i.e., containing several channels and time frames in each time-frequency bin. This transform allows a natural generalization which satisfies the three key properties of RTFs, namely, they can be directly estimated from observed signals, they capture spatial properties of the sources and they do not depend on emitted signals. Through simulated experiments, we show how this new method can localize multiple simultaneously active sound sources using short spectro-temporal windows, without relying on source separation.},
  keywords = {telecommunication channels;relative transfer functions;multichannel spectro-temporal observation;microphones;multiframe spectrograms;time-frequency bin;natural generalization;spatial properties;active sound sources;spectro-temporal windows;Spectrogram;Transfer functions;Microphones;Transforms;Acoustics;Time-frequency analysis;Relative Transfer Function;Grassmannian manifolds;Plucker Embedding;Multiple sound sources localization},
  doi = {10.1109/EUSIPCO.2015.7362417},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104361.pdf},
}
@InProceedings{7362418,
  author = {A. J. Casson},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Opportunities and challenges for ultra low power signal processing in wearable healthcare},
  year = {2015},
  pages = {424-428},
  abstract = {Wearable devices are starting to revolutionise healthcare by allowing the unobtrusive and long term monitoring of a range of body parameters. Embedding more advanced signal processing algorithms into the wearable itself can: reduce system power consumption; increase device functionality; and enable closed-loop recording-stimulation with minimal latency; amongst other benefits. The design challenge is in realising algorithms within the very limited power budgets available. Wearable algorithms are now emerging to answer this challenge. Using a new review, and examples from a case study on EEG analysis, this article overviews the state-of-the-art in wearable algorithms. It demonstrates the opportunities and challenges, highlighting the open challenge of performance assessment and measuring variability.},
  keywords = {bioelectric potentials;body sensor networks;electroencephalography;health care;medical signal processing;neurophysiology;telemedicine;ultralow power signal processing;wearable healthcare;unobtrusive monitoring;long term monitoring;closed-loop recording-stimulation;EEG analysis;Signal processing algorithms;Algorithm design and analysis;Signal processing;Electrocardiography;Biomedical monitoring;Power demand;Electroencephalography;Wearables;power;performance metrics},
  doi = {10.1109/EUSIPCO.2015.7362418},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096227.pdf},
}
@InProceedings{7362419,
  author = {B. Hunyadi and M. {De Vos} and W. {Van Paesschen} and S. {Van Huffel}},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Exploring the epileptic network with parallel ICA of interictal EEG-FMRI},
  year = {2015},
  pages = {429-433},
  abstract = {The ultimate goal of the EEG-fMRI analysis in refractory focal epilepsy is the precise localization of the epileptogenic zone (EZ) to facilitate successful surgery. Many studies have shown that simultaneous GLM-based EEG-correlated fMRI analysis can identify fMRI voxels which covary with the timing of interictal spikes assessed on EEG. However, this type of analysis often does not reveal a single focus but an extensive epileptic network. In this paper we investigate whether parallel independent component analysis (ICA), a data-driven, symmetric integration approach, can disentangle this network. We assume that ICA of EEG and ICA of fMRI will reveal different temporal and spatial aspects of this network, respectively. We hypothesize that by matching these different epilepsy-related EEG and fMRI components, we can gain deeper insight into the neural processes this extensive network represents. We tested parallel ICA on 12 refractory epilepsy patients who underwent full presurgical evaluation and showed concordant data (excluding EEG-fMRI) pointing to a single epileptic focus. Our results show that parallel ICA has an added value, as it can help the interpretation of the GLM results and pinpoint the EZ. Furthermore, it might help to understand how the various aspects of epileptic activity are reflected in EEG and fMRI.},
  keywords = {bioelectric potentials;biomedical MRI;electroencephalography;independent component analysis;medical disorders;medical image processing;neurophysiology;spatiotemporal phenomena;surgery;interictal EEG-FMRI analysis;refractory focal epilepsy;epileptogenic zone;GLM-based EEG-correlated fMRI analysis;parallel independent component analysis;data-driven approach;symmetric integration approach;neural processes;presurgical evaluation;Electroencephalography;Epilepsy;Surfaces;Integrated circuits;Signal processing;Surgery;Europe;ICA;parallel ICA;EEG;fMRI;epilepsy},
  doi = {10.1109/EUSIPCO.2015.7362419},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096239.pdf},
}
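A rough sketch of the parallel-ICA skeleton: temporal ICA on EEG, spatial ICA on fMRI, then component pairing by correlating time courses on the fMRI grid. All shapes, the crude power envelope standing in for HRF convolution and resampling, and the correlation-based matching are assumptions; the paper's pairing relies on epilepsy-specific criteria.

import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.default_rng(3)
eeg = rng.standard_normal((64, 50000))          # channels x EEG samples
fmri = rng.standard_normal((200, 30000))        # fMRI volumes x voxels

ica_t = FastICA(n_components=10, random_state=0)
eeg_src = ica_t.fit_transform(eeg.T).T          # (10, 50000) temporal sources

ica_s = FastICA(n_components=10, random_state=0)
maps = ica_s.fit_transform(fmri.T).T            # (10, 30000) spatial maps
fmri_tc = ica_s.mixing_.T                       # (10, 200) component time courses

# crude power envelope of each EEG source, block-averaged to 200 volumes
env = (eeg_src**2).reshape(10, 200, 250).mean(axis=2)
match = np.corrcoef(env, fmri_tc)[:10, 10:]     # 10x10 cross-correlation matrix
print(np.abs(match).argmax(axis=1))             # best fMRI match per EEG IC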
@InProceedings{7362420,
  author = {D. Wedekind and A. Trumpp and F. Andreotti and F. Gaetjen and S. Rasche and K. Matschke and H. Malberg and S. Zaunseder},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Assessment of source separation techniques to extract vital parameters from videos},
  year = {2015},
  pages = {434-438},
  abstract = {Camera-based photoplethysmography is a contactless means to assess vital parameters, such as heart rate and respiratory rate. In the field of camera-based photoplethysmography, blind source separation (BSS) techniques have been extensively applied to cope with artifacts and noise. Despite their wide usage, there is no consensus that common BSS approaches contribute to an improved analysis of camera-based photoplethysmograms (cbPPG). This contribution compares previously proposed multispectral BSS techniques to a novel spatial BSS approach for heart rate extraction from cbPPG. Our analysis indicates that applying BSS techniques does not necessarily improve the analysis of cbPPG; signal properties such as the signal-to-noise ratio should be considered before applying BSS techniques.},
  keywords = {blind source separation;medical signal processing;photoplethysmography;source separation techniques;vital parameter extraction;camera-based photoplethysmography;respiratory rate;blind source separation techniques;heart rate extraction;cbPPG;BSS approach;signal-to-noise-ratio;signal properties;BSS techniques;Signal to noise ratio;Heart rate;Videos;Principal component analysis;Europe;Blind source separation;Camera-based Photoplethysmography;Blind Source Separation;Independent Component Analysis;Principal Component Analysis;Heart Rate},
  doi = {10.1109/EUSIPCO.2015.7362420},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096241.pdf},
}
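A sketch of the multispectral-BSS pipeline being assessed: unmix the mean R/G/B traces of a skin region, score each component with a simple spectral SNR in the cardiac band, and keep the best one. This matches the paper's point that SNR should inform whether BSS is applied at all; the synthetic data and the SNR definition below are assumptions.

import numpy as np
from sklearn.decomposition import FastICA

# Synthetic 30 fps, 20 s recording: a weak 72 bpm pulse mixed into R/G/B.
fs, T = 30.0, 20.0
t = np.arange(int(fs*T)) / fs
pulse = 0.02*np.sin(2*np.pi*1.2*t)                  # 1.2 Hz = 72 bpm
rng = np.random.default_rng(4)
rgb = np.outer([0.3, 0.8, 0.5], pulse) + 0.1*rng.standard_normal((3, t.size))

src = FastICA(n_components=3, random_state=0).fit_transform(rgb.T).T
f = np.fft.rfftfreq(t.size, 1/fs)
band = (f > 0.7) & (f < 3.0)                        # 42-180 bpm cardiac band
spec = np.abs(np.fft.rfft(src, axis=1))**2
snr = spec[:, band].max(axis=1) / spec[:, band].mean(axis=1)   # peak/mean score
best = snr.argmax()
hr = 60 * f[band][spec[best, band].argmax()]
print(f"estimated heart rate: {hr:.1f} bpm")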
@InProceedings{7362421,
  author = {A. Garde and P. Dehkordi and D. Wensley and J. M. Ansermino and G. A. Dumont},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Using oximetry dynamics to screen for sleep disordered breathing at varying thresholds of severity},
  year = {2015},
  pages = {439-443},
  abstract = {Sleep disordered breathing (SDB) is highly prevalent in children and causes daytime sleepiness, growth failure and developmental delay. Polysomnography (PSG), the gold standard to diagnose SDB, provides an estimate of severity, called the Apnea Hypopnea Index (AHI). PSG is costly and resource intensive; therefore we propose using the Phone Oximeter, a pulse oximeter integrated into a phone that measures blood oxygen saturation (SpO2), as an at-home screening tool. In clinical practice, an AHI of 2-5 indicates mild SDB, but an AHI ≥ 5 usually prompts SDB treatment. Thus, we studied the performance of the Phone Oximeter as an SDB screening tool at varying thresholds of AHI. We analyzed the SpO2 of 146 children, recorded by the Phone Oximeter, alongside conventional PSG. Time-frequency characterization of SpO2 dynamics resulted in identification of 77% and 86% of children with an AHI ≥ 2 and AHI ≥ 5, respectively, using a multiple logistic regression model.},
  keywords = {blood;medical disorders;mobile radio;oximetry;paediatrics;patient treatment;pneumodynamics;regression analysis;sleep;multiple logistic regression model;time-frequency characterization;SDB treatment;at-home screening tool;blood oxygen saturation;phone oximeter;Apnea Hypopnea Index;polysomnography;developmental delay;growth failure;daytime sleepiness;severity threshold;sleep disordered breathing;oximetry dynamics;Sleep;Pediatrics;Indexes;Standards;Mathematical model;Frequency modulation;Logistics;Sleep disordered breathing;Apnea hypopnea index;Phone Oximeter;Polysomnography;Pulse oximetry},
  doi = {10.1109/EUSIPCO.2015.7362421},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101407.pdf},
}
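Screening of this kind reduces to a multiple logistic regression on features of the overnight SpO2 trace. A toy sketch with scikit-learn; the features (mean, variability, desaturation count, low-frequency power), the 94% desaturation threshold, and the synthetic labels are illustrative, not the paper's feature set or data.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

def spo2_features(spo2):
    """Illustrative summary features of one overnight SpO2 trace (1 Hz)."""
    desats = np.sum(np.diff((spo2 < 94).astype(int)) == 1)   # desaturation events
    pxx = np.abs(np.fft.rfft(spo2 - spo2.mean()))**2
    return [spo2.mean(), spo2.std(), desats, pxx[1:50].sum()]

rng = np.random.default_rng(5)
X = np.array([spo2_features(96 + rng.standard_normal(3600)) for _ in range(146)])
y = rng.integers(0, 2, 146)                 # toy "AHI >= threshold" labels
clf = LogisticRegression(max_iter=1000)
print(cross_val_score(clf, X, y, cv=5).mean())   # ~chance on random labels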
\n
\n\n\n
\n Sleep disordered breathing (SDB) is highly prevalent in children and causes daytime sleepiness, growth failure and developmental delay. Polysomnography (PSG), the gold standard to diagnose SDB, provides an estimate of severity, called the Apnea Hypoapnea Index (AHI). PSG is costly and resource intensive; therefore we propose using the Phone Oximeter, a pulse oximeter integrated into a phone that measures blood oxygen saturation (SpO2), as an at-home screening tool. In clinical practice, an AHI of 2-5 indicates mild SDB, but an AHI ≥ 5 usually prompts SDB treatment. Thus, we studied the performance of the Phone Oximeter as an SDB screening tool at varying thresholds of AHI. We analyzed the SpO2 of 146 children, recorded by the Phone Oximeter, alongside conventional PSG. Time-frequency characterization of SpO2 dynamics resulted in identification of 77% and 86% of children with an AHI ≥ 2 and AHI ≥ 5, respectively, using a multiple logistic regression model.\n
\n\n\n
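A minimal sketch of the screening idea in the abstract above: summarize each SpO2 trace by a few features (here mean, variability and low-frequency spectral power, which are illustrative assumptions, not the paper's exact feature set) and fit a multiple logistic regression for a chosen AHI threshold. The cohort below is synthetic.

```python
# Hedged sketch: logistic-regression screening at an AHI threshold from
# SpO2 features. Data and features are illustrative assumptions.
import numpy as np
from scipy.signal import periodogram
from scipy.integrate import trapezoid
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
fs = 1.0                       # 1 Hz SpO2 sampling (assumption)
n_subjects, n_samples = 146, 3600

def spo2_features(x, fs):
    """Mean, variability and low-frequency power of an SpO2 trace."""
    f, pxx = periodogram(x, fs=fs)
    band = (f >= 0.01) & (f <= 0.1)          # desaturation-cycle band
    return [x.mean(), x.std(), trapezoid(pxx[band], f[band])]

# Synthetic cohort: "positive" subjects get extra slow desaturation cycles.
y = rng.integers(0, 2, n_subjects)           # 1 ~ AHI above threshold
X = []
for label in y:
    t = np.arange(n_samples) / fs
    trace = 97 + 0.5 * rng.standard_normal(n_samples)
    if label:
        trace += 2.0 * np.sin(2 * np.pi * 0.03 * t)
    X.append(spo2_features(trace, fs))
X = np.asarray(X)

clf = LogisticRegression(max_iter=1000).fit(X, y)
print("in-sample AUC:", roc_auc_score(y, clf.predict_proba(X)[:, 1]))
```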
\n\n\n
\n \n\n \n \n \n \n \n \n Measurement of cardiovascular state using attractor reconstruction analysis.\n \n \n \n \n\n\n \n Charlton, P. H.; Camporota, L.; Smith, J.; Nandi, M.; Christie, M.; Aston, P. J.; and Beale, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 444-448, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MeasurementPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362422,\n  author = {P. H. Charlton and L. Camporota and J. Smith and M. Nandi and M. Christie and P. J. Aston and R. Beale},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Measurement of cardiovascular state using attractor reconstruction analysis},\n  year = {2015},\n  pages = {444-448},\n  abstract = {Attractor reconstruction (AR) analysis has been used previously to quantify the variability in arterial blood pressure (ABP) signals. Since ABP signals are only available in a minority of clinical scenarios, we sought to determine whether AR could also be performed on more widely available photoplethysmogram (PPG) signals. AR analysis was performed on simultaneous ABP and PPG signals before, during and after a change in cardiovascular state. A novel quality metric was used to eliminate windows of low quality AR. A high level of agreement was found between the detected periodicity of each signal. The remaining cardiovascular parameters derived using AR analysis exhibited similar trends between the two signals in response to the change in state, although there was poor agreement between their absolute values. This demonstrates the feasibility of applying AR to the PPG signal, increasing the range of patients in whom cardiovascular state can be measured using AR analysis.},\n  keywords = {bioelectric potentials;blood pressure measurement;blood vessels;cardiovascular system;medical signal detection;medical signal processing;photoplethysmography;signal reconstruction;cardiovascular state;attractor reconstruction analysis;arterial blood pressure signals;photoplethysmogram signals;Optimized production technology;Time series analysis;Europe;Signal processing;Arterial blood pressure;Reliability;Delay effects;attractor reconstruction (AR);arterial blood pressure (ABP);photoplethysmogram (PPG);eHealth},\n  doi = {10.1109/EUSIPCO.2015.7362422},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103815.pdf},\n}\n\n
\n
\n\n\n
\n Attractor reconstruction (AR) analysis has been used previously to quantify the variability in arterial blood pressure (ABP) signals. Since ABP signals are only available in a minority of clinical scenarios, we sought to determine whether AR could also be performed on more widely available photoplethysmogram (PPG) signals. AR analysis was performed on simultaneous ABP and PPG signals before, during and after a change in cardiovascular state. A novel quality metric was used to eliminate windows of low quality AR. A high level of agreement was found between the detected periodicity of each signal. The remaining cardiovascular parameters derived using AR analysis exhibited similar trends between the two signals in response to the change in state, although there was poor agreement between their absolute values. This demonstrates the feasibility of applying AR to the PPG signal, increasing the range of patients in whom cardiovascular state can be measured using AR analysis.\n
\n\n\n
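A minimal sketch of the first step of AR analysis described above: a Takens delay embedding that maps a scalar waveform into a 3-D attractor. The synthetic PPG-like signal and the fixed delay are illustrative assumptions, not the authors' quality-controlled pipeline.

```python
# Hedged sketch: delay embedding of a periodic waveform into R^3.
import numpy as np

fs = 100.0                                   # Hz (assumption)
t = np.arange(0, 10, 1 / fs)
ppg = np.sin(2 * np.pi * 1.2 * t) + 0.3 * np.sin(2 * np.pi * 2.4 * t)

def delay_embed(x, dim=3, tau=int(fs / 3)):
    """Rows are (x[k], x[k - tau], ..., x[k - (dim-1) tau])."""
    n = len(x) - (dim - 1) * tau
    return np.column_stack([x[i * tau:i * tau + n] for i in range(dim)])

attractor = delay_embed(ppg)
print(attractor.shape)   # points trace closed loops for periodic signals
```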
\n\n\n
\n \n\n \n \n \n \n \n Energy efficient monitoring of activities of daily living using wireless acoustic sensor networks in clean and noisy conditions.\n \n \n \n\n\n \n Vuegen, L.; Van Den Broeck, B.; Karsmakers, P.; Van hamme, H.; and Vanrumste, B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 449-453, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362423,\n  author = {L. Vuegen and B. {Van Den Broeck} and P. Karsmakers and H. {Van hamme} and B. Vanrumste},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Energy efficient monitoring of activities of daily living using wireless acoustic sensor networks in clean and noisy conditions},\n  year = {2015},\n  pages = {449-453},\n  abstract = {This work examines the use of a Wireless Acoustic Sensor Network (WASN) for the classification of clinically relevant activities of daily living (ADL) from elderly people. The aim of this research is to automatically compile a summary report about the performed ADLs which can be easily interpreted by caregivers. In this work the classification performance of the WASN will be evaluated in both clean and noisy conditions. Moreover, the computational complexity of the WASN and solutions to reduce the required computational costs are examined as well. The obtained classification results indicate that the computational cost can be reduced by a factor of 2.43 without a significant loss in accuracy. In addition, the WASN yields a 1.4% to 4.8% increase in classification accuracy in noisy conditions compared to single microphone solutions.},\n  keywords = {computational complexity;medical signal processing;patient monitoring;signal classification;wireless sensor networks;energy efficiency monitoring;computational cost;computational complexity;elderly people;ADL;activities of daily living classification;WASN;wireless acoustic sensor network;Support vector machines;Mel frequency cepstral coefficient;Noise measurement;Acoustic sensors;Wireless sensor networks;Wireless Acoustic Sensor Networks;health monitoring;activity classification;noise robustness},\n  doi = {10.1109/EUSIPCO.2015.7362423},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n This work examines the use of a Wireless Acoustic Sensor Network (WASN) for the classification of clinically relevant activities of daily living (ADL) from elderly people. The aim of this research is to automatically compile a summary report about the performed ADLs which can be easily interpreted by caregivers. In this work the classification performance of the WASN will be evaluated in both clean and noisy conditions. Moreover, the computational complexity of the WASN and solutions to reduce the required computational costs are examined as well. The obtained classification results indicate that the computational cost can be reduced by a factor of 2.43 without a significant loss in accuracy. In addition, the WASN yields a 1.4% to 4.8% increase in classification accuracy in noisy conditions compared to single microphone solutions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Structured sparsity through reweighting and application to diffusion MRI.\n \n \n \n \n\n\n \n Auria, A.; Daducci, A.; Thiran, J. P.; and Wiaux, Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 454-458, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"StructuredPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362424,\n  author = {A. Auria and A. Daducci and J. P. Thiran and Y. Wiaux},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Structured sparsity through reweighting and application to diffusion MRI},\n  year = {2015},\n  pages = {454-458},\n  abstract = {We consider the problem of multiple correlated sparse signals reconstruction and propose a new implementation of structured sparsity through a reweighting scheme. We present a particular application for diffusion Magnetic Resonance Imaging data and show how this procedure can be used for fibre orientation reconstruction in the white matter of the brain. In that framework, our structured sparsity prior can be used to exploit the fundamental coherence between fibre directions in neighbour voxels. Our method approaches the ℓ0 minimisation through a reweighted ℓ1-minimisation scheme. The weights are here defined in such a way to promote correlated sparsity between neighbour signals.},\n  keywords = {magnetic resonance imaging;medical image processing;minimisation;signal reconstruction;structured sparsity;diffusion MRI;multiple correlated sparse signals reconstruction;reweighting scheme;diffusion magnetic resonance imaging data;fibre orientation reconstruction;ℓ0 minimisation;reweighted ℓ1-minimisation scheme;neighbour signals;Sociology;Statistics;Magnetic resonance imaging;Dictionaries;Minimization;Signal processing;Europe;structured sparsity;convex optimisation},\n  doi = {10.1109/EUSIPCO.2015.7362424},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096827.pdf},\n}\n\n
\n
\n\n\n
\n We consider the problem of multiple correlated sparse signals reconstruction and propose a new implementation of structured sparsity through a reweighting scheme. We present a particular application for diffusion Magnetic Resonance Imaging data and show how this procedure can be used for fibre orientation reconstruction in the white matter of the brain. In that framework, our structured sparsity prior can be used to exploit the fundamental coherence between fibre directions in neighbour voxels. Our method approaches the ℓ0 minimisation through a reweighted ℓ1-minimisation scheme. The weights are here defined in such a way to promote correlated sparsity between neighbour signals.\n
\n\n\n
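A minimal sketch of the reweighting idea in the abstract above, on a generic compressed-sensing problem rather than the paper's diffusion-MRI dictionary: a weighted ℓ1 (ISTA) solver is run repeatedly, with each coefficient's weight built from neighbourhood-averaged magnitudes so that correlated (clustered) supports are favoured. The neighbourhood size and penalty level are assumptions.

```python
# Hedged sketch: structured sparsity via reweighted l1 minimisation.
import numpy as np

rng = np.random.default_rng(1)
m, n = 40, 100
A = rng.standard_normal((m, n)) / np.sqrt(m)
x_true = np.zeros(n); x_true[48:52] = [1.0, 1.5, 1.2, 0.8]  # clustered support
y = A @ x_true + 0.01 * rng.standard_normal(m)

def ista(A, y, w, lam=0.02, iters=500):
    """Weighted l1: min 0.5||Ax - y||^2 + lam * sum_i w_i |x_i|."""
    L = np.linalg.norm(A, 2) ** 2                # Lipschitz constant
    x = np.zeros(A.shape[1])
    for _ in range(iters):
        g = x - (A.T @ (A @ x - y)) / L
        x = np.sign(g) * np.maximum(np.abs(g) - lam * w / L, 0.0)
    return x

w = np.ones(n)
for _ in range(5):                               # reweighting outer loop
    x = ista(A, y, w)
    # neighbourhood-averaged magnitudes -> small weights near active clusters
    mag = np.convolve(np.abs(x), np.ones(3) / 3, mode="same")
    w = 1.0 / (mag + 1e-3)
print("recovered support:", np.flatnonzero(np.abs(x) > 0.05))
```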
\n\n\n
\n \n\n \n \n \n \n \n \n Super-resolution of positive spikes by Toeplitz low-rank approximation.\n \n \n \n \n\n\n \n Condat, L.; and Hirabayashi, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 459-463, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Super-resolutionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362425,\n  author = {L. Condat and A. Hirabayashi},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Super-resolution of positive spikes by Toeplitz low-rank approximation},\n  year = {2015},\n  pages = {459-463},\n  abstract = {Super-resolution consists in recovering the fine details of a signal from low-resolution measurements. Here we consider the estimation of Dirac pulses with positive amplitudes at arbitrary locations, from noisy lowpass-filtered samples. Maximum-likelihood estimation of the unknown parameters amounts to a difficult nonconvex matrix problem of structured low rank approximation. To solve it, we propose a new heuristic iterative algorithm, yielding state-of-the-art results.},\n  keywords = {approximation theory;concave programming;filtering theory;iterative methods;low-pass filters;matrix algebra;maximum likelihood estimation;signal resolution;signal sampling;Toeplitz low-rank approximation;superresolution positive spike;Dirac pulse estimation;noisy lowpass-filtered sample;maximum-likelihood estimation;parameter estimation;nonconvex matrix problem;structured low rank approximation;heuristic iterative algorithm;Signal processing algorithms;Maximum likelihood estimation;Approximation methods;Noise reduction;Europe;Signal processing;Dirac pulses;sparse spike deconvolution;super-resolution;structured low rank approximation},\n  doi = {10.1109/EUSIPCO.2015.7362425},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100837.pdf},\n}\n\n
\n
\n\n\n
\n Super-resolution consists in recovering the fine details of a signal from low-resolution measurements. Here we consider the estimation of Dirac pulses with positive amplitudes at arbitrary locations, from noisy lowpass-filtered samples. Maximum-likelihood estimation of the unknown parameters amounts to a difficult nonconvex matrix problem of structured low rank approximation. To solve it, we propose a new heuristic iterative algorithm, yielding state-of-the-art results.\n
\n\n\n
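For orientation, here is a generic Cadzow-style structured low-rank approximation: alternating projections between the rank-K matrices and the Toeplitz matrices built from a sample sequence. It is related to, but simpler than, the authors' heuristic (in particular it ignores the positivity constraint); the signal below, a sum of K = 2 complex exponentials, is the Fourier-domain face of two spikes.

```python
# Hedged sketch: Cadzow denoising on a Toeplitz data matrix.
import numpy as np
from scipy.linalg import toeplitz, svd

def cadzow(s, K, iters=20):
    n = len(s); p = n // 2 + 1
    T = toeplitz(s[p - 1:], s[p - 1::-1])        # (n-p+1) x p Toeplitz
    for _ in range(iters):
        U, d, Vh = svd(T, full_matrices=False)
        T = (U[:, :K] * d[:K]) @ Vh[:K]          # project onto rank K
        # project back onto Toeplitz: average each constant diagonal
        for k in range(-T.shape[0] + 1, T.shape[1]):
            idx = np.eye(*T.shape, k=k, dtype=bool)
            T[idx] = T[idx].mean()
    return T

rng = np.random.default_rng(2)
t = np.arange(32)
s = np.exp(2j * np.pi * 0.11 * t) + 0.7 * np.exp(2j * np.pi * 0.32 * t)
s += 0.05 * (rng.standard_normal(32) + 1j * rng.standard_normal(32))
T = cadzow(s, K=2)
# two dominant singular values should remain after the iterations
print(np.round(svd(T, compute_uv=False)[:4], 3))
```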
\n\n\n
\n \n\n \n \n \n \n \n \n Hybrid sparse and low-rank time-frequency signal decomposition.\n \n \n \n \n\n\n \n Févotte, C.; and Kowalski, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 464-468, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"HybridPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362426,\n  author = {C. Févotte and M. Kowalski},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Hybrid sparse and low-rank time-frequency signal decomposition},\n  year = {2015},\n  pages = {464-468},\n  abstract = {We propose a new hybrid (or morphological) generative model that decomposes a signal into two (and possibly more) layers. Each layer is a linear combination of localised atoms from a time-frequency dictionary. One layer has a low-rank time-frequency structure while the other has a sparse structure. The time-frequency resolutions of the dictionaries describing each layer may be different. Our contribution builds on the recently introduced Low-Rank Time-Frequency Synthesis (LRTFS) model and proposes an iterative algorithm similar to the popular iterative shrinkage/thresholding algorithm. We illustrate the capacities of the proposed model and estimation procedure on a tonal + transient audio decomposition example.},\n  keywords = {audio signal processing;signal resolution;time-frequency analysis;hybrid generative model;morphological generative model;low-rank time-frequency signal decomposition;sparse signal decomposition;time-frequency resolutions;low-rank time-frequency synthesis model;LRTFS model;iterative algorithm;tonal-transient audio decomposition;Time-frequency analysis;Dictionaries;Atomic layer deposition;Transient analysis;Estimation;Signal processing;Sparse matrices;Low-rank time-frequency synthesis;sparse component analysis;hybrid/morphological decompositions;non-negative matrix factorisation},\n  doi = {10.1109/EUSIPCO.2015.7362426},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100947.pdf},\n}\n\n
\n
\n\n\n
\n We propose a new hybrid (or morphological) generative model that decomposes a signal into two (and possibly more) layers. Each layer is a linear combination of localised atoms from a time-frequency dictionary. One layer has a low-rank time-frequency structure while the other has a sparse structure. The time-frequency resolutions of the dictionaries describing each layer may be different. Our contribution builds on the recently introduced Low-Rank Time-Frequency Synthesis (LRTFS) model and proposes an iterative algorithm similar to the popular iterative shrinkage/thresholding algorithm. We illustrate the capacities of the proposed model and estimation procedure on a tonal + transient audio decomposition example.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Linear embeddings of low-dimensional subsets of a Hilbert space to Rm.\n \n \n \n\n\n \n Puy, G.; Davies, M.; and Gribonval, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 469-473, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362427,\n  author = {G. Puy and M. Davies and R. Gribonval},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Linear embeddings of low-dimensional subsets of a Hilbert space to Rm},\n  year = {2015},\n  pages = {469-473},\n  abstract = {We consider the problem of embedding a low-dimensional set, M, from an infinite-dimensional Hilbert space, H, to a finite-dimensional space. Defining appropriate random linear projections, we propose two constructions of linear maps that have the restricted isometry property (RIP) on the secant set of M with high probability. The first one is optimal in the sense that it only needs a number of projections essentially proportional to the intrinsic dimension of M to satisfy the RIP. The second one, which is based on a variable density sampling technique, is computationally more efficient, while potentially requiring more measurements.},\n  keywords = {compressed sensing;Hilbert spaces;probability;linear embeddings;low-dimensional subsets;infinite-dimensional Hilbert space;finite-dimensional space;random linear projections;restricted isometry property;RIP;secant set;high probability;intrinsic dimension;compressed sensing;Hilbert space;Europe;Signal processing;Compressed sensing;Sparse matrices;Manifolds;Compressed sensing;restricted isometry property;box-counting dimension;variable density sampling},\n  doi = {10.1109/EUSIPCO.2015.7362427},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n We consider the problem of embedding a low-dimensional set, M, from an infinite-dimensional Hilbert space, H, to a finite-dimensional space. Defining appropriate random linear projections, we propose two constructions of linear maps that have the restricted isometry property (RIP) on the secant set of M with high probability. The first one is optimal in the sense that it only needs a number of projections essentially proportional to the intrinsic dimension of M to satisfy the RIP. The second one, which is based on a variable density sampling technique, is computationally more efficient, while potentially requiring more measurements.\n
\n\n\n
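A minimal numerical illustration of the RIP statement in the abstract above, under simplifying assumptions: the low-dimensional set is a circle embedded in R^n, and the map is a plain i.i.d. Gaussian projection (the paper's two constructions are more refined). The distortion is checked on randomly sampled normalised secants.

```python
# Hedged sketch: near-isometry of a random projection on normalised secants.
import numpy as np

rng = np.random.default_rng(3)
n, m, npts = 200, 24, 300
theta = rng.uniform(0, 2 * np.pi, npts)
# a 1-D manifold in R^n: the first two coordinates trace a circle
M = np.zeros((npts, n)); M[:, 0], M[:, 1] = np.cos(theta), np.sin(theta)

Phi = rng.standard_normal((m, n)) / np.sqrt(m)   # random linear map to R^m

i, j = rng.integers(0, npts, (2, 2000))          # random point pairs
keep = i != j
S = M[i[keep]] - M[j[keep]]
S /= np.linalg.norm(S, axis=1, keepdims=True)    # unit secants
dist = np.abs(np.linalg.norm(S @ Phi.T, axis=1) - 1.0)
print("max RIP distortion on sampled secants:", dist.max())
```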
\n\n\n
\n \n\n \n \n \n \n \n \n Large scale 3D image reconstruction in optical interferometry.\n \n \n \n \n\n\n \n Schutz, A.; Ferrari, A.; Mary, D.; Thiébaut, E.; and Soulez, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 474-478, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"LargePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362428,\n  author = {A. Schutz and A. Ferrari and D. Mary and E. Thiébaut and F. Soulez},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Large scale 3D image reconstruction in optical interferometry},\n  year = {2015},\n  pages = {474-478},\n  abstract = {Astronomical optical interferometers (OI) sample the Fourier transform of the intensity distribution of a source at the observation wavelength. Because of rapid atmospheric perturbations, the phases of the complex Fourier samples (visibilities) cannot be directly exploited, and instead linear relationships between the phases are used (phase closures and differential phases). Consequently, specific image reconstruction methods have been devised in the last few decades. Modern polychromatic OI instruments are now paving the way to multiwavelength imaging. This paper presents the derivation of a spatio-spectral ({"}3D{"}) image reconstruction algorithm called PAINTER (Polychromatic opticAl INTErferometric Reconstruction software). The algorithm is able to solve large scale problems. It relies on an iterative process, which alternates estimation of polychromatic images and of complex visibilities. The complex visibilities are not only estimated from squared moduli and closure phases, but also from differential phases, which help to better constrain the polychromatic reconstruction. Simulations on synthetic data illustrate the efficiency of the algorithm.},\n  keywords = {Fourier transforms;image reconstruction;iterative methods;light interferometry;iterative process;polychromatic optical interferometric reconstruction software;PAINTER;spatio-spectral image reconstruction algorithm;multiwavelength imaging;polychromatic OI instrument;Fourier transform;astronomical optical interferometer;large scale 3D image reconstruction;Image reconstruction;Optical interferometry;Signal processing algorithms;Telescopes;Atmospheric measurements;Three-dimensional displays;Optical imaging;ADMM;irregular sampling;phases estimation;proximal operator;optical interferometry},\n  doi = {10.1109/EUSIPCO.2015.7362428},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103499.pdf},\n}\n\n
\n
\n\n\n
\n Astronomical optical interferometers (OI) sample the Fourier transform of the intensity distribution of a source at the observation wavelength. Because of rapid atmospheric perturbations, the phases of the complex Fourier samples (visibilities) cannot be directly exploited, and instead linear relationships between the phases are used (phase closures and differential phases). Consequently, specific image reconstruction methods have been devised in the last few decades. Modern polychromatic OI instruments are now paving the way to multiwavelength imaging. This paper presents the derivation of a spatio-spectral (\"3D\") image reconstruction algorithm called PAINTER (Polychromatic opticAl INTErferometric Reconstruction software). The algorithm is able to solve large scale problems. It relies on an iterative process, which alternates estimation of polychromatic images and of complex visibilities. The complex visibilities are not only estimated from squared moduli and closure phases, but also from differential phases, which help to better constrain the polychromatic reconstruction. Simulations on synthetic data illustrate the efficiency of the algorithm.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast non-negative orthogonal least squares.\n \n \n \n \n\n\n \n Yaghoobi, M.; and Davies, M. E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 479-483, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"FastPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362429,\n  author = {M. Yaghoobi and M. E. Davies},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Fast non-negative orthogonal least squares},\n  year = {2015},\n  pages = {479-483},\n  abstract = {An important class of sparse signals is the non-negative sparse signals. While numerous greedy techniques have been introduced for low-complexity sparse approximations, there are few non-negative versions. Among such a large class of greedy techniques, one successful method, which is called the Orthogonal Least Squares (OLS) algorithm, is based on the maximum residual energy reduction at each iteration. However, the basic implementation of the OLS is computationally slow. The OLS algorithm has a fast implementation based on the QR matrix factorisation of the dictionary. The extension of such a technique to the non-negative domain is possible. In this paper, we present a fast implementation of the non-negative OLS (NNOLS). The computational complexity of the algorithm is compared with the basic implementation, where the new method is faster by two orders of magnitude. We also show that, if the basic implementation of NNOLS is not computationally feasible for moderate size problems, the proposed method is tractable. We also show that the proposed algorithm is even faster than an approximate implementation of the non-negative OLS algorithm.},\n  keywords = {computational complexity;greedy algorithms;least squares approximations;matrix decomposition;signal processing;nonnegative orthogonal least square algorithm;nonnegative sparse signal;sparse signals;greedy technique;low-complexity sparse approximation;nonnegative OLS algorithm;residual energy reduction;QR matrix factorisation;NNOLS;computational complexity;Signal processing algorithms;Least squares approximations;Dictionaries;Signal processing;Approximation algorithms;Europe;Non-negative sparse approximations;Orthogonal Least Squares;Efficient Implementations;Non-negative Orthogonal Least Squares and QR Matrix Factorization},\n  doi = {10.1109/EUSIPCO.2015.7362429},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104893.pdf},\n}\n\n
\n
\n\n\n
\n An important class of sparse signals is the non-negative sparse signals. While numerous greedy techniques have been introduced for low-complexity sparse approximations, there are few non-negative versions. Among such a large class of greedy techniques, one successful method, which is called the Orthogonal Least Squares (OLS) algorithm, is based on the maximum residual energy reduction at each iteration. However, the basic implementation of the OLS is computationally slow. The OLS algorithm has a fast implementation based on the QR matrix factorisation of the dictionary. The extension of such a technique to the non-negative domain is possible. In this paper, we present a fast implementation of the non-negative OLS (NNOLS). The computational complexity of the algorithm is compared with the basic implementation, where the new method is faster by two orders of magnitude. We also show that, if the basic implementation of NNOLS is not computationally feasible for moderate size problems, the proposed method is tractable. We also show that the proposed algorithm is even faster than an approximate implementation of the non-negative OLS algorithm.\n
\n\n\n
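A sketch of the basic (slow) non-negative OLS loop that the paper accelerates: at each step, the atom whose addition yields the smallest non-negative least-squares residual is selected. This is the baseline implementation the abstract calls computationally slow, not the authors' fast QR-based variant.

```python
# Hedged sketch: basic NNOLS via repeated scipy NNLS solves.
import numpy as np
from scipy.optimize import nnls

def nnols(A, y, k):
    support, n = [], A.shape[1]
    for _ in range(k):
        best = None
        for j in set(range(n)) - set(support):
            _, res = nnls(A[:, support + [j]], y)   # residual 2-norm
            if best is None or res < best[0]:
                best = (res, j)
        support.append(best[1])
    coef, _ = nnls(A[:, support], y)
    x = np.zeros(n); x[support] = coef
    return x

rng = np.random.default_rng(4)
A = rng.standard_normal((30, 60))
x_true = np.zeros(60); x_true[[7, 20, 41]] = [1.0, 0.5, 2.0]
y = A @ x_true
print(np.flatnonzero(nnols(A, y, 3)))   # expected: [ 7 20 41 ]
```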
\n\n\n
\n \n\n \n \n \n \n \n \n Split-Gaussian particle filter.\n \n \n \n \n\n\n \n Kokkala, J.; and Sarkka, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 484-488, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Split-GaussianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362430,\n  author = {J. Kokkala and S. Sarkka},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Split-Gaussian particle filter},\n  year = {2015},\n  pages = {484-488},\n  abstract = {This paper is concerned with the use of split-Gaussian importance distributions in sequential importance resampling based particle filtering. We present novel particle filtering algorithms using the split-Gaussian importance distributions and compare their performance with several alternatives. Using a univariate nonlinear reference model, we compare the performance of the importance distributions by monitoring the effective number of particles. When using adaptive resampling, the split-Gaussian approximation has the best performance, and the Laplace approximation performs better than importance distributions based on unscented and extended Kalman filters. In addition, we also consider a two-dimensional target-tracking example where the Laplace approximation is not available in closed form and propose fitting the split-Gaussian importance distribution starting from an unscented Kalman filter based approximation.},\n  keywords = {approximation theory;Gaussian distribution;Kalman filters;nonlinear filters;particle filtering (numerical methods);signal sampling;target tracking;tracking filters;two-dimensional target tracking;extended Kalman filter;unscented Kalman filter;Laplace approximation;split-Gaussian approximation;adaptive resampling;univariate nonlinear reference model;split-Gaussian importance distribution;resampling based particle filtering;split-Gaussian particle filter;Approximation methods;Signal processing algorithms;Kalman filters;Approximation algorithms;Atmospheric measurements;Particle measurements;Europe;split-normal distribution;split-Gaussian distribution;particle filter;importance distribution},\n  doi = {10.1109/EUSIPCO.2015.7362430},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097457.pdf},\n}\n\n
\n
\n\n\n
\n This paper is concerned with the use of split-Gaussian importance distributions in sequential importance resampling based particle filtering. We present novel particle filtering algorithms using the split-Gaussian importance distributions and compare their performance with several alternatives. Using a univariate nonlinear reference model, we compare the performance of the importance distributions by monitoring the effective number of particles. When using adaptive resampling, the split-Gaussian approximation has the best performance, and the Laplace approximation performs better than importance distributions based on unscented and extended Kalman filters. In addition, we also consider a two-dimensional target-tracking example where the Laplace approximation is not available in closed form and propose fitting the split-Gaussian importance distribution starting from an unscented Kalman filter based approximation.\n
\n\n\n
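A minimal sketch of the building block of such importance distributions: sampling a univariate split-Gaussian (split-normal) with mode mu and half-widths sig1 (left) and sig2 (right). The fitting of these parameters inside a particle filter is omitted.

```python
# Hedged sketch: drawing from a split-normal distribution.
import numpy as np

def sample_split_gaussian(mu, sig1, sig2, size, rng):
    """Mass sig1/(sig1+sig2) left of the mode; halves are scaled |N(0,1)|."""
    u = rng.uniform(size=size)
    z = np.abs(rng.standard_normal(size))
    left = u < sig1 / (sig1 + sig2)
    return np.where(left, mu - sig1 * z, mu + sig2 * z)

rng = np.random.default_rng(5)
x = sample_split_gaussian(0.0, 1.0, 3.0, 100_000, rng)
print("P(X < mode) ~", (x < 0).mean())   # ~ 1/(1+3) = 0.25
```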
\n\n\n
\n \n\n \n \n \n \n \n \n Application of sequential Quasi-Monte Carlo to autonomous positioning.\n \n \n \n \n\n\n \n Chopin, N.; and Gerber, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 489-493, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ApplicationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362431,\n  author = {N. Chopin and M. Gerber},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Application of sequential Quasi-Monte Carlo to autonomous positioning},\n  year = {2015},\n  pages = {489-493},\n  abstract = {SMC (Sequential Monte Carlo) algorithms (also known as particle filters) are popular methods to approximate filtering (and related) distributions of state-space models. However, they converge at the slow 1/√N rate, which may be an issue in real-time data-intensive scenarios. We give a brief outline of SQMC (Sequential Quasi-Monte Carlo), a variant of SMC based on low-discrepancy point sets proposed by [1], which converges at a faster rate, and we illustrate the greater performance of SQMC on autonomous positioning problems.},\n  keywords = {Monte Carlo methods;signal processing;state-space methods;sequential Quasi-Monte Carlo;autonomous positioning;particle filters;state-space models;low-discrepancy point sets;autonomous positioning problems;signal processing;Yttrium;Signal processing algorithms;Monte Carlo methods;Vehicles;Signal processing;Europe;Approximation algorithms;Low-discrepancy point sets;Particle filtering;Quasi-Monte Carlo},\n  doi = {10.1109/EUSIPCO.2015.7362431},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104259.pdf},\n}\n\n
\n
\n\n\n
\n SMC (Sequential Monte Carlo) algorithms (also known as particle filters) are popular methods to approximate filtering (and related) distributions of state-space models. However, they converge at the slow 1/√N rate, which may be an issue in real-time data-intensive scenarios. We give a brief outline of SQMC (Sequential Quasi-Monte Carlo), a variant of SMC based on low-discrepancy point sets proposed by [1], which converges at a faster rate, and we illustrate the greater performance of SQMC on autonomous positioning problems.\n
\n\n\n
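A toy illustration of the quasi-Monte Carlo idea behind SQMC, under the assumption that a scrambled Sobol point set pushed through the Gaussian inverse CDF stands in for the paper's low-discrepancy constructions: at equal sample size, the QMC estimate of a smooth expectation is typically far more accurate than i.i.d. Monte Carlo.

```python
# Hedged sketch: MC vs QMC estimation of E[f(X)], X ~ N(0, I_2).
import numpy as np
from scipy.stats import norm, qmc

f = lambda x: np.cos(x).prod(axis=1)
exact = np.exp(-1.0)            # E[cos(X1)] E[cos(X2)] = (e^{-1/2})^2

rng = np.random.default_rng(6)
n = 2 ** 12
mc = f(rng.standard_normal((n, 2))).mean()

sobol = qmc.Sobol(d=2, scramble=True, seed=6)
qmc_est = f(norm.ppf(sobol.random(n))).mean()
print(f"MC err {abs(mc - exact):.2e}  QMC err {abs(qmc_est - exact):.2e}")
```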
\n\n\n
\n \n\n \n \n \n \n \n \n Estimation of gene expression by a bank of particle filters.\n \n \n \n \n\n\n \n Bugallo, M. F.; Taşdemir, Ç.; and Djurić, P. M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 494-498, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"EstimationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362432,\n  author = {M. F. Bugallo and Ç. Taşdemir and P. M. Djurić},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Estimation of gene expression by a bank of particle filters},\n  year = {2015},\n  pages = {494-498},\n  abstract = {This paper addresses the problem of joint estimation of time series of gene expressions and identification of the coefficients of gene interactions defining the network. The proposed method exploits a state-space structure describing the system so that a bank of particle filters can be used to efficiently track each of the time series separately. Since each gene interacts with some of the other genes, the individual filters need to exchange information about the states (genes) that they track. The analytical derivation of the posterior distribution of the states given the observed data allows for marginalization of the matrix describing the interactions in the network and for efficient implementation of the method. Computer simulations reveal a promising performance of the proposed approach when compared to the conventional particle filter that attempts to track the time series of all the genes and which, as a result, suffers from the curse-of-dimensionality.},\n  keywords = {biology;channel bank filters;estimation theory;matrix algebra;particle filtering (numerical methods);time series;particle filters;gene expression time series estimation;gene interactions;state-space structure;matrix marginalization;curse-of-dimensionality;Estimation;Gene expression;Covariance matrices;Time series analysis;Kalman filters;Approximation methods;Standards;Gene regulatory network;particle filtering;dimensionality reduction},\n  doi = {10.1109/EUSIPCO.2015.7362432},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105209.pdf},\n}\n\n
\n
\n\n\n
\n This paper addresses the problem of joint estimation of time series of gene expressions and identification of the coefficients of gene interactions defining the network. The proposed method exploits a state-space structure describing the system so that a bank of particle filters can be used to efficiently track each of the time series separately. Since each gene interacts with some of the other genes, the individual filters need to exchange information about the states (genes) that they track. The analytical derivation of the posterior distribution of the states given the observed data allows for marginalization of the matrix describing the interactions in the network and for efficient implementation of the method. Computer simulations reveal a promising performance of the proposed approach when compared to the conventional particle filter that attempts to track the time series of all the genes and which, as a result, suffers from the curse-of-dimensionality.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Parallel interacting Markov adaptive importance sampling.\n \n \n \n\n\n \n Martino, L.; Elvira, V.; Luengo, D.; and Corander, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 499-503, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362433,\n  author = {L. Martino and V. Elvira and D. Luengo and J. Corander},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Parallel interacting Markov adaptive importance sampling},\n  year = {2015},\n  pages = {499-503},\n  abstract = {Monte Carlo (MC) methods are widely used for statistical inference in signal processing applications. A well-known class of MC methods is importance sampling (IS) and its adaptive extensions. In this work, we introduce an iterated importance sampler using a population of proposal densities, which are adapted according to an MCMC technique over the population of location parameters. The novel algorithm provides a global estimation of the variables of interest iteratively, using all the samples weighted according to the deterministic mixture scheme. Numerical results, on a multi-modal example and a localization problem in wireless sensor networks, show the advantages of the proposed schemes.},\n  keywords = {iterative methods;Markov processes;Monte Carlo methods;sensor placement;signal processing;wireless sensor networks;parallel interacting Markov adaptive importance sampling;Monte Carlo methods;statistical inference;signal processing;iterated importance sampler;proposal density population;location parameter population;global variable estimation;deterministic mixture scheme;localization problem;wireless sensor networks;Proposals;Monte Carlo methods;Sociology;Signal processing algorithms;Probability density function;Signal processing;Adaptive importance sampling;MCMC methods;parallel chains;Bayesian inference},\n  doi = {10.1109/EUSIPCO.2015.7362433},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n Monte Carlo (MC) methods are widely used for statistical inference in signal processing applications. A well-known class of MC methods is importance sampling (IS) and its adaptive extensions. In this work, we introduce an iterated importance sampler using a population of proposal densities, which are adapted according to an MCMC technique over the population of location parameters. The novel algorithm provides a global estimation of the variables of interest iteratively, using all the samples weighted according to the deterministic mixture scheme. Numerical results, on a multi-modal example and a localization problem in wireless sensor networks, show the advantages of the proposed schemes.\n
\n\n\n
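A minimal sketch of the deterministic mixture weighting used above: each draw is weighted by the target density over the mixture of all proposal densities, rather than its own proposal only. The MCMC adaptation of the proposal locations is omitted; the toy target and the fixed proposal grid are assumptions.

```python
# Hedged sketch: deterministic-mixture multiple importance sampling.
import numpy as np
from scipy.stats import multivariate_normal as mvn

target_logpdf = lambda x: mvn.logpdf(x, mean=[1.0, 1.0])   # toy target

rng = np.random.default_rng(7)
centers = rng.uniform(-3, 3, size=(8, 2))                  # proposal means
cov = np.eye(2)
samples = np.vstack([rng.multivariate_normal(c, cov, 50) for c in centers])

# deterministic mixture: q_mix(x) = (1/M) sum_m q_m(x)
qmix = np.mean([mvn.pdf(samples, mean=c, cov=cov) for c in centers], axis=0)
w = np.exp(target_logpdf(samples)) / qmix
w /= w.sum()
print("estimated mean:", w @ samples)    # should approach [1, 1]
```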
\n\n\n
\n \n\n \n \n \n \n \n Extended GLRT detectors of correlation and sphericity: The undersampled regime.\n \n \n \n\n\n \n Mestre, X.; and Vallet, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 504-508, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362434,\n  author = {X. Mestre and P. Vallet},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Extended GLRT detectors of correlation and sphericity: The undersampled regime},\n  year = {2015},\n  pages = {504-508},\n  abstract = {Detecting the presence of one or multiple signals with unknown spatial signature can be addressed by testing the structure of the observation covariance matrix. The problem can be typically formulated as a sphericity test, which checks whether the spatial covariance matrix is proportional to the identity (white noise), or as a correlation test, which checks whether this matrix has a diagonal structure. When the number of samples is higher than the number of antennas, one can address this problem by formulating the generalized likelihood ratio test (GLRT), which basically compares the arithmetic and geometric means of the eigenvalues of the sample covariance/coherence matrix. The GLRT can be trivially extended to the undersampled regime by selecting only the positive sample eigenvalues. This paper investigates the asymptotic behavior of these extended GLRTs by determining the asymptotic law of the associated statistics under both hypotheses. The analysis is asymptotic in both the sample size (number of snapshots) and the observation dimension (number of antennas).},\n  keywords = {array signal processing;correlation methods;covariance matrices;eigenvalues and eigenfunctions;signal detection;correlation extended GLRT detector;sphericity extended GLRT detector;undersampled regime;multiple signal detection;unknown spatial signature detection;spatial covariance matrix;diagonal structure testing;generalized likelihood ratio test;antennas;eigenvalues;coherence matrix;Correlation;Covariance matrices;Eigenvalues and eigenfunctions;Europe;Signal processing;Arrays;Antenna measurements;GLRT;sphericity test;correlation test;random matrix theory;central limit theorem},\n  doi = {10.1109/EUSIPCO.2015.7362434},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n Detecting the presence of one or multiple signals with unknown spatial signature can be addressed by testing the structure of the observation covariance matrix. The problem can be typically formulated as a sphericity test, which checks whether the spatial covariance matrix is proportional to the identity (white noise), or as a correlation test, which checks whether this matrix has a diagonal structure. When the number of samples is higher than the number of antennas, one can address this problem by formulating the generalized likelihood ratio test (GLRT), which basically compares the arithmetic and geometric means of the eigenvalues of the sample covariance/coherence matrix. The GLRT can be trivially extended to the undersampled regime by selecting only the positive sample eigenvalues. This paper investigates the asymptotic behavior of these extended GLRTs by determining the asymptotic law of the associated statistics under both hypotheses. The analysis is asymptotic in both the sample size (number of snapshots) and the observation dimension (number of antennas).\n
\n\n\n
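A minimal numerical sketch of the extended statistic described above: the ratio of geometric to arithmetic mean of the positive eigenvalues of the sample covariance, which stays well defined when there are fewer snapshots than antennas. The test sizes and the rank-one alternative are toy assumptions.

```python
# Hedged sketch: sphericity GLRT statistic in the undersampled regime.
import numpy as np

def sphericity_glrt(X):
    """X: M antennas x N snapshots, N < M allowed."""
    M, N = X.shape
    R = X @ X.conj().T / N
    ev = np.linalg.eigvalsh(R)
    ev = ev[ev > 1e-10 * ev.max()]        # keep positive eigenvalues only
    return np.exp(np.mean(np.log(ev))) / ev.mean()

rng = np.random.default_rng(8)
M, N = 20, 10                             # undersampled: N < M
noise = rng.standard_normal((M, N)) + 1j * rng.standard_normal((M, N))
sig = np.outer(rng.standard_normal(M), rng.standard_normal(N))
# the ratio is larger under pure noise and collapses when a strong
# signal adds a dominant eigenvalue
print("H0:", sphericity_glrt(noise))
print("H1:", sphericity_glrt(noise + 3 * sig))
```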
\n\n\n
\n \n\n \n \n \n \n \n \n Nonparametric simultaneous sparse recovery: An application to source localization.\n \n \n \n \n\n\n \n Ollila, E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 509-513, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"NonparametricPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362435,\n  author = {E. Ollila},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Nonparametric simultaneous sparse recovery: An application to source localization},\n  year = {2015},\n  pages = {509-513},\n  abstract = {We consider the multichannel sparse recovery problem, where the objective is to find good recovery of jointly sparse unknown signal vectors from the given multiple measurement vectors, which are different linear combinations of the same known elementary vectors. Many popular greedy or convex algorithms perform poorly under non-Gaussian heavy-tailed noise conditions or in the face of outliers. In this paper, we propose the use of mixed ℓp,q norms on the data fidelity (residual matrix) term and the conventional ℓ0,2-norm constraint on the signal matrix to promote row-sparsity. We devise a greedy pursuit algorithm based on the simultaneous normalized iterative hard thresholding (SNIHT) algorithm. Simulation studies highlight the effectiveness of the proposed approaches to cope with different noise environments (i.i.d., row i.i.d., etc.) and outliers. The usefulness of the methods is illustrated in a source localization application with sensor arrays.},\n  keywords = {compressed sensing;iterative methods;matrix algebra;nonparametric simultaneous multichannel sparse recovery problem;convex algorithms;greedy algorithms;nonGaussian heavy-tailed noise conditions;data fidelity term;signal matrix;simultaneous normalized iterative hard thresholding algorithm;SNIHT algorithm;source localization;compressed sensing;Yttrium;Signal processing algorithms;Robustness;Signal to noise ratio;Sparse matrices;Minimization;multichannel sparse recovery;compressed sensing;robustness;iterative hard thresholding},\n  doi = {10.1109/EUSIPCO.2015.7362435},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096183.pdf},\n}\n\n
\n
\n\n\n
\n We consider the multichannel sparse recovery problem, where the objective is to find good recovery of jointly sparse unknown signal vectors from the given multiple measurement vectors, which are different linear combinations of the same known elementary vectors. Many popular greedy or convex algorithms perform poorly under non-Gaussian heavy-tailed noise conditions or in the face of outliers. In this paper, we propose the use of mixed ℓp,q norms on the data fidelity (residual matrix) term and the conventional ℓ0,2-norm constraint on the signal matrix to promote row-sparsity. We devise a greedy pursuit algorithm based on the simultaneous normalized iterative hard thresholding (SNIHT) algorithm. Simulation studies highlight the effectiveness of the proposed approaches to cope with different noise environments (i.i.d., row i.i.d., etc.) and outliers. The usefulness of the methods is illustrated in a source localization application with sensor arrays.\n
\n\n\n
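A plain simultaneous IHT loop for the MMV model Y = AX, to show the row-thresholding that SNIHT builds on: after each gradient step, only the k rows of X with the largest ℓ2 norms are kept. The paper's normalized step size and robust ℓp,q residual norms are omitted here.

```python
# Hedged sketch: simultaneous iterative hard thresholding for Y = AX.
import numpy as np

def simultaneous_iht(A, Y, k, iters=200):
    X = np.zeros((A.shape[1], Y.shape[1]))
    step = 1.0 / np.linalg.norm(A, 2) ** 2
    for _ in range(iters):
        X = X + step * A.T @ (Y - A @ X)          # gradient step
        rows = np.linalg.norm(X, axis=1)          # row-sparsity proxy
        X[np.argsort(rows)[:-k]] = 0.0            # keep k largest rows
    return X

rng = np.random.default_rng(9)
A = rng.standard_normal((32, 64)) / np.sqrt(32)
X_true = np.zeros((64, 5)); X_true[[5, 17, 40]] = rng.standard_normal((3, 5))
Y = A @ X_true
X_hat = simultaneous_iht(A, Y, k=3)
print("recovered rows:", np.flatnonzero(np.linalg.norm(X_hat, axis=1)))
```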
\n\n\n
\n \n\n \n \n \n \n \n \n Performance bounds under misspecification model for MIMO radar application.\n \n \n \n \n\n\n \n Ren, C.; El Korso, M. N.; Galy, J.; Chaumette, E.; Larzabal, P.; and Renaux, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 514-518, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362436,\n  author = {C. Ren and M. N. {El Korso} and J. Galy and E. Chaumette and P. Larzabal and A. Renaux},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Performance bounds under misspecification model for MIMO radar application},\n  year = {2015},\n  pages = {514-518},\n  abstract = {Recent tools based on misspecified lower bounds on the mean square error allow one to predict the mean square error behavior more accurately than the classical lower bounds in the presence of model errors. These bounds are helpful since model errors exist in practice due to system imperfections. In this paper, we are interested in direction of arrival and direction of departure estimation in a MIMO radar context with array element position errors. A closed-form expression is derived for the misspecified Cramér-Rao bound (or Huber limit) for any antenna geometry. A comparison of the misspecified Cramér-Rao bound with the classical Cramér-Rao bound and with the maximum likelihood estimator mean square error highlights the tightness improvement resulting from the use of the proposed bound.},\n  keywords = {antenna arrays;direction-of-arrival estimation;maximum likelihood estimation;mean square error methods;MIMO radar;radar antennas;radar signal processing;misspecification model;MIMO radar application;direction of arrival estimation;direction of departure estimation;Cramér-Rao bound;antenna geometry;maximum likelihood estimator mean square error;Maximum likelihood estimation;Arrays;MIMO radar;Data models;Receiving antennas;Covariance matrices;Misspecified Cramér-Rao bound;Huber limit;error model;MIMO radar},\n  doi = {10.1109/EUSIPCO.2015.7362436},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096303.pdf},\n}\n\n
\n
\n\n\n
\n Recent tools based on misspecified lower bounds on the mean square error allow one to predict the mean square error behavior more accurately than the classical lower bounds in the presence of model errors. These bounds are helpful since model errors exist in practice due to system imperfections. In this paper, we are interested in direction of arrival and direction of departure estimation in a MIMO radar context with array element position errors. A closed-form expression is derived for the misspecified Cramér-Rao bound (or Huber limit) for any antenna geometry. A comparison of the misspecified Cramér-Rao bound with the classical Cramér-Rao bound and with the maximum likelihood estimator mean square error highlights the tightness improvement resulting from the use of the proposed bound.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bistatic coherent MIMO clutter rank analysis.\n \n \n \n \n\n\n \n Bell, K.; Johnson, J.; Baker, C.; Smith, G.; and Rangaswamy, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 519-523, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BistaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362437,\n  author = {K. Bell and J. Johnson and C. Baker and G. Smith and M. Rangaswamy},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bistatic coherent MIMO clutter rank analysis},\n  year = {2015},\n  pages = {519-523},\n  abstract = {The rank of the clutter covariance matrix in a bistatic coherent multiple-input multiple-output (MIMO) radar system with arbitrary planar arrays in both the transmitter and receiver is examined. The analysis provides further generalization of {"}Brennan's rule{"} results available for linear arrays in monostatic coherent MIMO and bistatic space-time adaptive processing (STAP) systems. We first extend the two-dimensional (2D) monostatic STAP results of Varadarajan and Krolik (VK) to monostatic MIMO systems with planar arrays. We then use the VK bistatic STAP approach and determine conditions under which a four-dimensional (4D) bistatic MIMO system can be modeled as an equivalent 2D monostatic MIMO system, and apply the 2D results. The analytical expressions are validated against the numerically calculated rank of the theoretical clutter covariance matrix.},\n  keywords = {covariance matrices;MIMO radar;planar antenna arrays;radar clutter;space-time adaptive processing;monostatic coherent MIMO;bistatic space-time adaptive processing systems;bistatic STAP systems;two-dimensional monostatic STAP results;Varadarajan and Krolik;VK bistatic STAP approach;four-dimensional bistatic MIMO system;equivalent 2D monostatic MIMO system;linear arrays;Brennan's rule;arbitrary planar arrays;bistatic coherent MIMO radar system;bistatic coherent multiple-input multiple-output radar system;clutter covariance matrix;Clutter;MIMO;Planar arrays;Array signal processing;Doppler effect;Radar;MIMO radar;STAP;bistatic;monostatic;clutter rank},\n  doi = {10.1109/EUSIPCO.2015.7362437},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103117.pdf},\n}\n\n
\n
\n\n\n
\n The rank of the clutter covariance matrix in a bistatic coherent multiple-input multiple-output (MIMO) radar system with arbitrary planar arrays in both the transmitter and receiver is examined. The analysis provides further generalization of \"Brennan's rule\" results available for linear arrays in monostatic coherent MIMO and bistatic space-time adaptive processing (STAP) systems. We first extend the two-dimensional (2D) monostatic STAP results of Varadarajan and Krolik (VK) to monostatic MIMO systems with planar arrays. We then use the VK bistatic STAP approach and determine conditions under which a four-dimensional (4D) bistatic MIMO system can be modeled as an equivalent 2D monostatic MIMO system, and apply the 2D results. The analytical expressions are validated against the numerically calculated rank of the theoretical clutter covariance matrix.\n
\n\n\n
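For context, a numerical check of the classical baseline that the paper generalises: Brennan's rule, rank ≈ N + β(M − 1), for a side-looking monostatic linear-array STAP clutter covariance. The array, pulse count and β below are toy assumptions; the bistatic MIMO extension in the paper is not reproduced here.

```python
# Hedged sketch: Brennan's rule for monostatic linear-array STAP clutter.
import numpy as np

N, M = 8, 8                   # antennas, pulses
d_over_lam, beta = 0.5, 1.0   # half-wavelength spacing; beta = 2 v Tr / d
theta = np.linspace(-np.pi / 2, np.pi / 2, 721)   # clutter patch angles

R = np.zeros((N * M, N * M), dtype=complex)
for th in theta:
    a_s = np.exp(2j * np.pi * d_over_lam * np.sin(th) * np.arange(N))
    # normalised Doppler of a patch: f_d Tr = beta * (d/lam) * sin(theta)
    a_t = np.exp(2j * np.pi * beta * d_over_lam * np.sin(th) * np.arange(M))
    v = np.kron(a_t, a_s)                         # space-time steering vector
    R += np.outer(v, v.conj())

rank = np.linalg.matrix_rank(R, tol=1e-6 * np.linalg.norm(R, 2))
print("numerical rank:", rank, " Brennan's rule:", N + int(beta * (M - 1)))
```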
\n\n\n
\n \n\n \n \n \n \n \n \n Asymptotic detection performance of the robust ANMF.\n \n \n \n \n\n\n \n Pascal, F.; and Ovarlez, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 524-528, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AsymptoticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362438,\n  author = {F. Pascal and J. Ovarlez},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Asymptotic detection performance of the robust ANMF},\n  year = {2015},\n  pages = {524-528},\n  abstract = {This paper presents two different approaches to derive the asymptotic distributions of the robust Adaptive Normalized Matched Filter (ANMF) under both H0 and H1 hypotheses. More precisely, the ANMF has originally been derived under the assumption of partially homogeneous Gaussian noise, i.e. where the variance is different between the observation under test and the set of secondary data. We propose in this work to relax the Gaussian hypothesis: we analyze the ANMF built with robust estimators, namely the M-estimators and the Tyler's estimator, under the Complex Elliptically Symmetric (CES) distributions framework. In this context, we derive two asymptotic distributions for this robust ANMF. Firstly, we combine the asymptotic properties of the robust estimators and the Gaussian-based distribution of the ANMF at finite distance. Secondly, we directly derive the asymptotic distribution of the robust ANMF.},\n  keywords = {adaptive estimation;adaptive filters;Gaussian distribution;Gaussian noise;matched filters;signal detection;robust ANMF asymptotic detection performance;asymptotic distribution;robust adaptive normalized matched filter;H0 hypotheses;H1 hypotheses;partially homogeneous Gaussian noise;M-estimator;Tyler estimator;complex elliptically symmetric distribution framework;CES distribution framework;Gaussian-based distribution;Robustness;Covariance matrices;Matched filters;Probability density function;Signal to noise ratio;Mathematical model;Adaptive Normalized Match Filter;M-estimators;Tyler's estimator;Complex Elliptically Symmetric distributions;non-Gaussian detection;robust estimation theory},\n  doi = {10.1109/EUSIPCO.2015.7362438},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103535.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents two different approaches to derive the asymptotic distributions of the robust Adaptive Normalized Matched Filter (ANMF) under both H0 and H1 hypotheses. More precisely, the ANMF has originally been derived under the assumption of partially homogeneous Gaussian noise, i.e. where the variance is different between the observation under test and the set of secondary data. We propose in this work to relax the Gaussian hypothesis: we analyze the ANMF built with robust estimators, namely the M-estimators and the Tyler's estimator, under the Complex Elliptically Symmetric (CES) distributions framework. In this context, we derive two asymptotic distributions for this robust ANMF. Firstly, we combine the asymptotic properties of the robust estimators and the Gaussian-based distribution of the ANMF at finite distance. Secondly, we directly derive the asymptotic distribution of the robust ANMF.\n
\n\n\n
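A toy sketch of the robust ANMF construction analysed above: Tyler's fixed-point scatter estimator on secondary data, plugged into the ANMF statistic |p^H R^{-1} y|^2 / ((p^H R^{-1} p)(y^H R^{-1} y)). The dimensions, texture model and steering vector are illustrative assumptions.

```python
# Hedged sketch: Tyler's estimator + ANMF detection statistic.
import numpy as np

def tyler(X, iters=50):
    """X: p x N secondary data; returns a trace-normalised scatter matrix."""
    p, N = X.shape
    R = np.eye(p, dtype=complex)
    for _ in range(iters):
        Ri = np.linalg.inv(R)
        q = np.real(np.einsum('in,ij,jn->n', X.conj(), Ri, X))
        R = (p / N) * (X / q) @ X.conj().T       # fixed-point update
        R *= p / np.trace(R).real                # normalise trace to p
    return R

def anmf(y, p_steer, R):
    Ri = np.linalg.inv(R)
    num = np.abs(p_steer.conj() @ Ri @ y) ** 2
    den = np.real(p_steer.conj() @ Ri @ p_steer) * np.real(y.conj() @ Ri @ y)
    return num / den

rng = np.random.default_rng(10)
p, N = 8, 64
X = rng.standard_normal((p, N)) + 1j * rng.standard_normal((p, N))
X *= np.sqrt(rng.gamma(2.0, size=N))             # heavy-tailed (CES) texture
p_steer = np.exp(2j * np.pi * 0.1 * np.arange(p))
R = tyler(X)
print("H0:", anmf(X[:, 0], p_steer, R))          # toy noise-only check
print("H1:", anmf(X[:, 0] + 3 * p_steer, p_steer, R))
```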
\n\n\n
\n \n\n \n \n \n \n \n \n Oversampled receive array calibration.\n \n \n \n \n\n\n \n Abramovich, Y. I.; and Antonio, G. S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 529-533, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OversampledPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362439,\n  author = {Y. I. Abramovich and G. S. Antonio},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Oversampled receive array calibration},\n  year = {2015},\n  pages = {529-533},\n  abstract = {The problem of receive antenna array calibration in cases where the array is strongly spatially {"}over-sampled{"} is addressed in this paper. We suggest a new technique wherein spatially distributed strong clutter returns can be used for calibration with the goal of minimizing the power at the output of a number of antenna finger-beams steered into the invisible domain. The calibration algorithm is analyzed using simulation results and real over-the-horizon radar (OTHR) data to illustrate the effectiveness of the proposed technique.},\n  keywords = {antenna arrays;calibration;clutter;radar antennas;receiving antennas;oversampled receive array calibration;receive antenna array calibration;spatially distributed strong clutter;over-the-horizon radar;OTHR data;Arrays;Calibration;Clutter;Antenna arrays;Receiving antennas;Mutual coupling},\n  doi = {10.1109/EUSIPCO.2015.7362439},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103839.pdf},\n}\n\n
\n
\n\n\n
\n The problem of receive antenna array calibration in cases where the array is strongly spatially \"over-sampled\" is addressed in this paper. We suggest a new technique wherein spatially distributed strong clutter returns can be used for calibration with the goal of minimizing the power at the output of a number of antenna finger-beams steered into the invisible domain. The calibration algorithm is analyzed using simulation results and real over-the-horizon radar (OTHR) data to illustrate the effectiveness of the proposed technique.\n
\n\n\n
\n\n\n
@InProceedings{7362440,
  author = {D. Battaglino and A. Mesaros and L. Lepauloux and L. Pilati and N. Evans},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Acoustic context recognition for mobile devices using a reduced complexity SVM},
  year = {2015},
  pages = {534-538},
  abstract = {Automatic context recognition enables mobile devices to react to changes in the environment and different situations. While many different sensors can be used for context recognition, the use of acoustic cues is among the most popular and successful. Current approaches to acoustic context recognition (ACR) are too costly in terms of computation and memory requirements to support an always-listening mode. This paper describes our work to develop a reduced complexity, efficient approach to ACR involving support vector machine classifiers. The principal hypothesis is that a significant fraction of training data contains information redundant to classification. Through clustering, training data can thus be selectively decimated in order to reduce the number of support vectors needed to represent discriminative hyperplanes. This represents a significant saving in terms of computational and memory efficiency, with only modest degradations in classification accuracy.},
  keywords = {acoustic signal processing;mobile computing;pattern clustering;signal classification;support vector machines;SVM;automatic context recognition;mobile devices;sensors;acoustic cues;acoustic context recognition;ACR;always-listening mode;reduced complexity;support vector machine classifiers;clustering;computational efficiency;memory efficiency;classification accuracy;Context;Support vector machines;Training;Training data;Mobile handsets;Complexity theory;Hidden Markov models;Acoustic Context Recognition;mobile devices contextualization;SVM;k-means;LDA},
  doi = {10.1109/EUSIPCO.2015.7362440},
  issn = {2076-1465},
  month = {Aug},
}
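The decimation idea described in the abstract can be sketched in a few lines, assuming scikit-learn: cluster each class with k-means and train the SVM on the centroids only, which bounds the number of candidate support vectors. The cluster count per class is a hypothetical knob; the paper's actual features, kernel and selection rule may differ.

```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.svm import SVC

def decimate_and_train(X, y, clusters_per_class=50):
    Xr, yr = [], []
    for label in np.unique(y):
        Xc = X[y == label]
        k = min(clusters_per_class, len(Xc))
        km = KMeans(n_clusters=k, n_init=10).fit(Xc)
        Xr.append(km.cluster_centers_)          # centroids replace raw frames
        yr.append(np.full(k, label))
    return SVC(kernel='rbf').fit(np.vstack(Xr), np.concatenate(yr))
```
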
@InProceedings{7362441,
  author = {A. S. Lalos and K. Moustakas},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Energy efficient telemonitoring of wheezes},
  year = {2015},
  pages = {539-543},
  abstract = {Wheezes are abnormal continuous adventitious lung sounds that are strongly related to patients with obstructive airways diseases. Wireless telemonitoring of these sounds facilitates early diagnosis (short and long term) and management of chronic inflammatory diseases of the airways (e.g., asthma) through the use of an accurate and energy-efficient mHealth system. This requires low-complexity breath compression schemes with a high compression ratio. To this end, we propose a compressed sensing (CS) based compression/reconstruction solution that enables wheeze detection from a small number of linearly encoded samples, by exploiting the block sparsity of the breath eigenspectrum during reconstruction at the receiver. Simulation studies, carried out with publicly available breath sounds, show the energy-efficiency benefits of the proposed CS scheme compared to traditional CS recovery approaches.},
  keywords = {bioacoustics;diseases;lung;medical disorders;patient diagnosis;patient monitoring;pneumodynamics;telemedicine;wireless sensor networks;wheeze telemonitoring;abnormal continuous adventitious lung sound;obstructive airways disease;wireless telemonitoring;early chronic inflammatory disease diagnosis;chronic inflammatory disease management;asthma;low complexity breath compression scheme;high compression ratio;compression-reconstruction solution;wheeze detection;breath eigenspectrum;breath sound;compressed sensing recovery approach;Principal component analysis;Transforms;Symmetric matrices;Signal processing;Diseases;Signal processing algorithms;Encoding;Compressed Sensing;Wheezes;Time-Frequency Analysis;PCA},
  doi = {10.1109/EUSIPCO.2015.7362441},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095839.pdf},
}
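A generic compressed-sensing round trip conveys the encoder/decoder asymmetry the abstract exploits: a cheap random linear encoder on the sensor, and all the reconstruction work at the receiver. This sketch uses a Gaussian encoder with plain OMP recovery in a DCT basis, whereas the paper relies on block sparsity of the breath eigenspectrum, so every choice below is illustrative.

```python
import numpy as np
from scipy.fft import idct
from sklearn.linear_model import OrthogonalMatchingPursuit

rng = np.random.default_rng(0)
n, m, k = 256, 64, 8                            # frame length, measurements, sparsity
Psi = idct(np.eye(n), axis=0, norm='ortho')     # DCT synthesis basis (columns)
c = np.zeros(n)
c[rng.choice(n, k, replace=False)] = rng.standard_normal(k)
x = Psi @ c                                     # a k-sparse stand-in "breath frame"
Phi = rng.standard_normal((m, n)) / np.sqrt(m)  # low-complexity linear encoder
y = Phi @ x                                     # what the sensor would transmit
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=k, fit_intercept=False).fit(Phi @ Psi, y)
x_hat = Psi @ omp.coef_                         # receiver-side reconstruction
```
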
@InProceedings{7362442,
  author = {Z. Wang and C. F. Chan},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Quaternion common factor decomposition of head-related impulse response},
  year = {2015},
  pages = {544-548},
  abstract = {The hypercomplex quaternion is an extension of the complex number and is widely used in computer graphics, image processing and multidimensional linear time-invariant systems. In this paper, quaternion algebra is applied to head-related impulse response (HRIR) modeling. Four HRIRs measured at different elevations or for different ears are used to construct a quaternion impulse response. A two-dimensional quaternion common factor decomposition (QCFD) algorithm is developed to represent each quaternion impulse response as the convolution of two factor impulse responses. With the proposed QCFD algorithm, quaternion impulse responses with the same elevation share the same elevation factor, while quaternion impulse responses with the same azimuth share the same azimuth factor. Experimental results show that the QCFD algorithm performs better than the traditional two-dimensional common factor decomposition (CFD) algorithm.},
  keywords = {matrix decomposition;signal processing;hypercomplex number quaternion;multidimensional linear time invariant systems;quaternion algebra;head related impulse response modeling;quaternion impulse response;2D quaternion common factor decomposition algorithm;Quaternions;Signal processing algorithms;Ear;Convolution;Azimuth;Computational fluid dynamics;Algebra;HRIR;Quaternion;CFD},
  doi = {10.1109/EUSIPCO.2015.7362442},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570086691.pdf},
}
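Since the entry hinges on quaternion algebra, a small sketch of the Hamilton product and the resulting (non-commutative) quaternion convolution may help; packing four HRIRs into one (N, 4) sequence is an assumption made here for illustration, not the paper's exact construction.

```python
import numpy as np

def qmul(p, q):
    """Hamilton product of quaternions stored as (..., 4) arrays (w, x, y, z)."""
    pw, px, py, pz = np.moveaxis(p, -1, 0)
    qw, qx, qy, qz = np.moveaxis(q, -1, 0)
    return np.stack([pw*qw - px*qx - py*qy - pz*qz,
                     pw*qx + px*qw + py*qz - pz*qy,
                     pw*qy - px*qz + py*qw + pz*qx,
                     pw*qz + px*qy - py*qx + pz*qw], axis=-1)

def qconv(a, b):
    """Linear convolution of quaternion sequences a: (Na, 4), b: (Nb, 4).
    Order matters: each sample of a multiplies b from the left."""
    out = np.zeros((len(a) + len(b) - 1, 4))
    for i, qa in enumerate(a):
        out[i:i + len(b)] += qmul(qa[None, :], b)
    return out
```
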
@InProceedings{7362443,
  author = {A. Adami and M. Schoeffler and J. Herre},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Re-panning of directional signals and its impact on localization},
  year = {2015},
  pages = {549-553},
  abstract = {For multichannel audio reproduction systems, it is crucial to set up the speakers correctly according to the multichannel format's specification. In particular, the predefined angle of every speaker with respect to the listening position must be strictly respected to avoid spatial distortions of virtual sources, the so-called phantom sources. In a normal living-room environment, a specification-compliant setup is usually not possible, so the resulting audio scene may differ substantially from the originally intended scene, i.e., the phantom sources' positions change. To mitigate these spatial distortions, we propose a re-panning method for directional signals. The method groups pairs of adjacent loudspeakers into segments, analyses the directions of arrival (DOAs) within each segment by means of a direct-ambience decomposition, and re-renders the direct components with respect to the actual reproduction setup. The re-panning method was perceptually evaluated by means of a localization listening test.},
  keywords = {acoustic signal processing;architectural acoustics;direction-of-arrival estimation;loudspeakers;directional signal repanning;speaker localization;multichannel audio reproduction system;multichannel format specification;spatial distortions;virtual source;phantom source;living room environment;repanning method;adjacent loudspeaker;direction of arrival estimation;direct ambience decomposition;direct component rerendering;Loudspeakers;Phantoms;Silicon;Direction-of-arrival estimation;Europe;Principal component analysis;Spatial audio;format conversion;localization},
  doi = {10.1109/EUSIPCO.2015.7362443},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095311.pdf},
}
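Re-rendering a direct component over an actual loudspeaker pair can be illustrated with two-dimensional vector base amplitude panning, a standard technique that matches the pairwise segments described above; the angles and the energy normalisation below are illustrative, not the paper's exact panning law.

```python
import numpy as np

def pair_gains(theta, a1, a2):
    """Gains for a phantom source at angle theta over speakers at angles a1, a2."""
    L = np.array([[np.cos(a1), np.cos(a2)],
                  [np.sin(a1), np.sin(a2)]])      # speaker unit vectors as columns
    p = np.array([np.cos(theta), np.sin(theta)])  # desired phantom-source direction
    g = np.linalg.solve(L, p)                     # solve g1*l1 + g2*l2 = p
    return g / np.linalg.norm(g)                  # energy-preserving normalisation

# e.g. a component analysed at 10 degrees, rendered on an actual +/-30 degree pair
g_left, g_right = pair_gains(np.deg2rad(10), np.deg2rad(30), np.deg2rad(-30))
```
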
@InProceedings{7362444,
  author = {M. Hansson-Sandsten},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Classification of bird song syllables using singular vectors of the multitaper spectrogram},
  year = {2015},
  pages = {554-558},
  abstract = {Classification of song similarities and differences within one bird species is a subtle problem where the true answer is more or less unknown. In this paper, the singular vectors obtained by decomposing the multitaper spectrogram are proposed as feature vectors for classification. The approach is especially advantageous for signals consisting of several components whose amplitudes, as well as time and frequency locations, exhibit stochastic variations. The approach is evaluated and compared to other methods on simulated data and on bird song syllables recorded from the great reed warbler. The results show that in classification tasks where strong, similar components are present in all signals but the structure of the weaker components differs between classes, the singular vectors of the multitaper spectrogram decomposition can be useful as features.},
  keywords = {acoustic signal processing;signal classification;singular value decomposition;time-frequency analysis;bird song syllable classification;multitaper spectrogram;feature vector;time-frequency location;singular vector decomposition;Time-frequency analysis;Spectrogram;Birds;Europe;Noise measurement;Stochastic processes;time-frequency;multitaper;spectrogram;SVD;bird song},
  doi = {10.1109/EUSIPCO.2015.7362444},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104343.pdf},
}
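A minimal sketch of this feature extraction, assuming SciPy: average the spectrograms obtained with K orthogonal DPSS tapers and keep the leading singular vectors of the resulting time-frequency matrix. The values of nperseg, NW, K and the number of retained vectors are illustrative.

```python
import numpy as np
from scipy.signal import spectrogram
from scipy.signal.windows import dpss

def multitaper_svd_features(x, fs, nperseg=256, NW=3, K=5, n_vectors=3):
    tapers = dpss(nperseg, NW, K)                       # (K, nperseg) DPSS windows
    S = np.mean([spectrogram(x, fs, window=w, nperseg=nperseg,
                             noverlap=nperseg // 2)[2] for w in tapers], axis=0)
    U, s, Vt = np.linalg.svd(S, full_matrices=False)    # decompose the TF matrix
    return U[:, :n_vectors], Vt[:n_vectors]             # frequency- and time-side vectors
```
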
@InProceedings{7362445,
  author = {M. Esfahanian and H. Zhuang and N. Erdol and E. Gerstein},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Comparison of two methods for detection of North Atlantic Right Whale upcalls},
  year = {2015},
  pages = {559-563},
  abstract = {In this paper, a study is carried out on detecting North Atlantic Right Whale upcalls from measurements made with passive acoustic monitoring devices. Preprocessed spectrograms of upcalls are subjected to two different feature-extraction tasks, one based on the extraction of time-frequency features from upcall contours, and the other employing a Local Binary Pattern (LBP) operator to extract salient texture features of the upcalls. Several classifiers are then used to evaluate the effectiveness of both the contour-based and texture-based features for upcall detection. Detection results reveal that popular classifiers such as Linear Discriminant Analysis, Support Vector Machine, and TreeBagger can achieve high detection rates. Furthermore, using LBP features for call detection improves accuracy by about 3% to 4% over time-frequency features when an identical classifier is used.},
  keywords = {acoustic signal detection;audio signal processing;feature extraction;signal classification;time-frequency analysis;identical classifier;contour-based feature extraction;salient texture feature extraction;local binary pattern operator;time-frequency feature extraction;passive acoustic monitoring device;north atlantic right whale upcall detection;Spectrogram;Feature extraction;Whales;Support vector machines;Time-frequency analysis;Europe;North Atlantic Right Whale;Local Binary Patterns;Spectral Denoising;Upcall Detection},
  doi = {10.1109/EUSIPCO.2015.7362445},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105267.pdf},
}
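The texture-based branch can be sketched with scikit-image's LBP operator applied to a (denoised) spectrogram, with the LBP histogram as the classifier input; P, R and the 'uniform' mapping are illustrative choices rather than the paper's exact settings.

```python
import numpy as np
from skimage.feature import local_binary_pattern

def lbp_features(spec_db, P=8, R=1):
    """Histogram of uniform LBP codes over a 2-D spectrogram (dB values)."""
    lbp = local_binary_pattern(spec_db, P, R, method='uniform')  # codes in 0..P+1
    hist, _ = np.histogram(lbp, bins=P + 2, range=(0, P + 2), density=True)
    return hist
```
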
@InProceedings{7362446,
  author = {A. Ciobanu and C. Negrescu and V. A. Niţă and R. A. Dobre and D. Stanomir},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {High accuracy frequency analysis using instantaneous frequency attractors},
  year = {2015},
  pages = {564-568},
  abstract = {This paper presents an improved version of the instantaneous frequency attractors (IFA) algorithm, introducing several refinements that reduce the number of spurious components, especially at low signal-to-noise ratios (SNRs), and increase the accuracy of the frequency estimator. We perform multilevel comparison tests against both previous versions of the IFA algorithm and another high-precision frequency estimation method based on the derivative of the signal. The test results confirm the superior frequency accuracy of the IFAs under different conditions: low SNR, small frequency distance between components, and varying analysis frame length. The number of spurious components is also significantly reduced.},
  keywords = {frequency estimation;signal processing;high accuracy frequency analysis;instantaneous frequency attractors;spurious components;low signal to noise ratios;Algorithm design and analysis;Band-pass filters;Signal to noise ratio;Frequency estimation;Signal processing algorithms;Signal resolution;Estimation;instantaneous frequency attractor;high frequency accuracy},
  doi = {10.1109/EUSIPCO.2015.7362446},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105545.pdf},
}
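For orientation, here is the textbook instantaneous-frequency estimate (the phase derivative of the analytic signal), which is the quantity IFA-type algorithms track for each band-pass-filtered component; this is a baseline sketch, not the IFA algorithm itself.

```python
import numpy as np
from scipy.signal import hilbert

def instantaneous_frequency(x, fs):
    """IF in Hz from the unwrapped phase of the analytic signal (length N-1)."""
    phase = np.unwrap(np.angle(hilbert(x)))
    return np.diff(phase) * fs / (2 * np.pi)
```
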
@InProceedings{7362447,
  author = {G. Fuchs},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A robust speech/music discriminator for switched audio coding},
  year = {2015},
  pages = {569-573},
  abstract = {Switching between speech coding and generic audio coding schemes was recently proven to be very efficient for coding a large range of audio material at low bit-rates. However, it strongly relies on a robust classification of the input signal. The aim of this paper is to design a reliable speech and music discriminator (SMD) for such an application. The main focus was on achieving a good trade-off between accuracy, reactivity and stability of the decision while keeping the delay and complexity reasonably low. To this end, short-term and long-term features are dissociated before being conveyed to two different classifiers. The two classifier outputs are combined into a final decision using a hysteresis. Objective measures show that a more reliable switching decision is achievable. The SMD was successfully implemented in MPEG Unified Speech and Audio Coding (USAC), where it allows the codec to achieve unprecedented audio quality.},
  keywords = {audio coding;music;speech coding;audio quality;MPEG USAC;MPEG unified speech-audio coding;switching decision;SMD;robust signal classification;generic audio coding scheme;speech coding;switched audio coding;robust speech-music discriminator;Speech;Speech coding;Switches;Audio coding;Delays;Feature extraction;Speech and Music Discrimination;Speech},
  doi = {10.1109/EUSIPCO.2015.7362447},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096727.pdf},
}
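The hysteresis combination of the two classifier outputs can be sketched as a simple state-dependent threshold: a switch is only taken when the combined score leaves a dead zone, trading a little reactivity for decision stability. The 50/50 weighting and the thresholds below are illustrative assumptions.

```python
def smd_decision(short_term_score, long_term_score, prev_is_speech,
                 enter=0.6, leave=0.4):
    """Return True for 'speech' given two scores in [0, 1] (1 = speech-like)."""
    score = 0.5 * short_term_score + 0.5 * long_term_score
    if prev_is_speech:
        return score > leave   # stay in speech mode unless score drops clearly
    return score > enter       # switch to speech mode only on clear evidence
```
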
@InProceedings{7362448,
  author = {S. Hafezi and A. H. Moore and P. A. Naylor},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Modelling source directivity in room impulse response simulation for spherical microphone arrays},
  year = {2015},
  pages = {574-578},
  abstract = {In this work we present a new room impulse response simulation for spherical microphone arrays taking into account source directivity. We calculate the emission angle of the sound ray leaving the source based on the location of the image and the receiver using Allen and Berkley's image method. We provide an implementation of a room impulse response simulator for a spherical microphone array including a directional source with arbitrary directivity. We validate our implementation considering the zeroth and the first-order reflections. Our results show a worst-case directional gain error of 7% in comparison with theoretical predictions.},
  keywords = {microphone arrays;transient response;room impulse response simulation;spherical microphone array;source directivity;emission angle calculation;sound ray;Berkley image method;Allen image method;arbitrary directivity;first-order reflection;zeroth reflection;directional gain error;Receivers;Azimuth;Microphone arrays;Europe;Signal processing;Standards;directivity;directional;simulation;spherical microphone arrays;room impulse response},
  doi = {10.1109/EUSIPCO.2015.7362448},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104421.pdf},
}
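A sketch of how a directivity weight might be attached to one image source: compute the emission angle at the image source from its (mirrored) orientation and the ray towards the receiver, then evaluate a pattern such as a cardioid. The mirroring convention and the cardioid pattern are assumptions for illustration, not the paper's model.

```python
import numpy as np

def directivity_gain(image_pos, receiver_pos, image_orientation):
    """Gain of one image source for a receiver, given the image's unit orientation."""
    ray = receiver_pos - image_pos
    ray = ray / np.linalg.norm(ray)                  # unit emission direction
    cos_theta = float(np.dot(ray, image_orientation))
    return 0.5 * (1.0 + cos_theta)                   # cardioid pattern as an example
```
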
@InProceedings{7362449,
  author = {J. Paulus},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Perceptual loudness compensation in interactive object-based audio coding systems},
  year = {2015},
  pages = {579-583},
  abstract = {Changing the rendering through interactivity in object-based audio coding may change the overall signal loudness. This paper proposes a method for estimating the change in the overall loudness using loudness information of the partial mixes and the rendering description. The method has been designed for a dialogue enhancement application scenario. The results of the method are compared with reference values from measurements, and they match well, with a mean absolute error of 0.11 LU. A subjective listening test is conducted to study the amount of amplification applied by the test participants to a probe signal simulating the result of an interactive rendering when comparing it with a reference signal of the default mix. The average level adjustment reflects the change in signal loudness caused by the modification.},
  keywords = {audio coding;interactive systems;loudness;speech enhancement;average level adjustment;probe signal;subjective listening test;dialogue enhancement application scenario;rendering description;loudness information;overall signal loudness;object-based audio coding;Rendering (computer graphics);Gain;Decoding;Audio coding;Europe;Signal processing;audio;loudness;object-based coding;listening test},
  doi = {10.1109/EUSIPCO.2015.7362449},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096825.pdf},
}
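One way to see the estimation problem: if per-object loudness values L_i (in LUFS) and user gains g_i are known, and the objects are assumed mutually uncorrelated so their powers add, the rendered-mix loudness follows from energy summation. The independence assumption is ours for illustration, not necessarily the paper's estimator.

```python
import numpy as np

def mix_loudness(L, g):
    """Approximate mix loudness from per-object loudness L (LUFS) and gains g."""
    L, g = np.asarray(L, float), np.asarray(g, float)
    return 10 * np.log10(np.sum(g ** 2 * 10 ** (L / 10)))

# loudness change (in LU) caused by boosting the second object by 6 dB
delta_LU = mix_loudness([-23, -30], [1.0, 2.0]) - mix_loudness([-23, -30], [1.0, 1.0])
```
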
@InProceedings{7362450,
  author = {T. Jähnel and T. Bäckström and B. Schubert},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Envelope modeling for speech and audio processing using distribution quantization},
  year = {2015},
  pages = {584-588},
  abstract = {Envelope models are common in speech and audio processing: for example, linear prediction is used for modeling the spectral envelope of speech, whereas audio coders use scale factor bands for perceptual masking models. In this work we introduce an envelope model called distribution quantizer (DQ), with the objective of combining the accuracy of linear prediction and the flexibility of scale factor bands. We evaluate the performance of envelope models with respect to their ability to reduce entropy as well as their correlation to the original signal magnitude. The experiments show that in terms of entropy, distribution quantization and linear prediction are comparable, whereas for correlation, distribution quantization is better. Furthermore, the coefficients of distribution quantization are independent and thus more flexible and easier to quantize than linear predictive coefficients.},
  keywords = {audio coding;hearing;quantisation (signal);speech intelligibility;speech processing;speech processing;audio processing;distribution quantization;envelope models;linear prediction;spectral envelope;audio coders;scale factor band;perceptual masking models;Predictive models;Entropy;Correlation;Quantization (signal);Speech;Speech coding;Frequency-domain analysis;Speech coding;linear predictive coding;signal modeling},
  doi = {10.1109/EUSIPCO.2015.7362450},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096285.pdf},
}
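The linear-prediction envelope this entry compares against can be sketched with librosa's LPC fit followed by an evaluation of the all-pole model 1/A(z) on the FFT grid; the prediction order and FFT size are illustrative.

```python
import numpy as np
import librosa
from scipy.signal import freqz

def lpc_envelope(frame, sr, order=16, n_fft=512):
    """Spectral envelope (dB) of one frame via an all-pole LPC model."""
    a = librosa.lpc(frame.astype(float), order=order)     # A(z) coefficients, a[0] = 1
    w, h = freqz([1.0], a, worN=n_fft // 2 + 1, fs=sr)    # evaluate 1/A(z) on the grid
    return w, 20 * np.log10(np.abs(h) + 1e-12)
```
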
@InProceedings{7362451,
  author = {J. K. Nielsen and T. L. Jensen and J. R. Jensen and M. G. Christensen and S. H. Jensen},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A fast algorithm for maximum likelihood-based fundamental frequency estimation},
  year = {2015},
  pages = {589-593},
  abstract = {Periodic signals are encountered in many applications. Such signals can be modelled by a weighted sum of sinusoidal components whose frequencies are integer multiples of a fundamental frequency. Given a data set, the fundamental frequency can be estimated in many ways including a maximum likelihood (ML) approach. Unfortunately, the ML estimator has a very high computational complexity, and the more inaccurate, but faster correlation-based estimators are therefore often used instead. In this paper, we propose a fast algorithm for the evaluation of the ML cost function for complex-valued data over all frequencies on a Fourier grid and up to a maximum model order. The proposed algorithm significantly reduces the computational complexity to a level not far from the complexity of the popular harmonic summation method which is an approximate ML estimator.},
  keywords = {computational complexity;frequency estimation;maximum likelihood estimation;signal processing;maximum likelihood-based fundamental frequency estimation;periodic signal;ML estimator;computational complexity;correlation-based estimator;Fourier grid;maximum model order;harmonic summation method;Signal processing algorithms;Mathematical model;Cost function;Computational modeling;Frequency estimation;Complexity theory;Approximation algorithms;Fundamental frequency estimation;Levinson algorithm;Durbin algorithm;non-linear least squares;fast implementation;MATLAB;C++},
  doi = {10.1109/EUSIPCO.2015.7362451},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101959.pdf},
}
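The harmonic summation method mentioned as the approximate-ML reference is compact enough to sketch directly: pick the fundamental on the FFT grid that maximises the summed power of its first L harmonics. Grid resolution, search band and harmonic count below are illustrative.

```python
import numpy as np

def harmonic_summation_f0(x, fs, n_fft=8192, f_min=60.0, f_max=400.0, L=5):
    """Estimate f0 (Hz) by harmonic summation over an FFT grid."""
    X2 = np.abs(np.fft.rfft(x, n_fft)) ** 2
    best_f0, best_score = 0.0, -np.inf
    for k0 in range(max(1, int(f_min * n_fft / fs)), int(f_max * n_fft / fs) + 1):
        idx = np.arange(1, L + 1) * k0       # bins of the first L harmonics
        idx = idx[idx < len(X2)]
        score = X2[idx].sum()
        if score > best_score:
            best_f0, best_score = k0 * fs / n_fft, score
    return best_f0
```
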
@InProceedings{7362452,
  author = {M. O. Ameziane and C. Garnier and Y. Delignon and E. Duflos and F. Septier},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Particle filtering with a soft detection based near-optimal importance function for visual tracking},
  year = {2015},
  pages = {594-598},
  abstract = {Particle filters are currently widely used for visual tracking. In order to improve their performance, we propose to enrich the observation model with soft detection information and to derive a near-optimal proposal to efficiently propagate particles in the state space. This information, which reflects probabilities about the object location, is more reliable than the usual binary output, which can yield false or missed detections. Moreover, our proposal not only incorporates the observations as in previous works, but relies on a close approximation of the optimal importance function. The resulting PF achieves high tracking accuracy and has the advantage of coping with unpredictable and abrupt movements.},
  keywords = {computer vision;particle filtering (numerical methods);particle filtering;soft detection based near-optimal importance function;visual tracking;observation model;soft detection information;near-optimal proposal;information reflecting probabilities;optimal importance function;Proposals;Visualization;Band-pass filters;Image color analysis;Histograms;Legged locomotion;Europe;Visual tracking;Monte-Carlo methods;particle filtering;optimal importance function;soft detection},
  doi = {10.1109/EUSIPCO.2015.7362452},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104599.pdf},
}
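For contrast with the near-optimal proposal, the baseline bootstrap (SIR) particle filter propagates particles by the motion prior alone and weights them by the likelihood; `motion` and `likelihood` are hypothetical callables standing in for a concrete tracking model.

```python
import numpy as np

def bootstrap_pf_step(particles, weights, motion, likelihood, rng):
    """One SIR step: propagate by the prior, reweight, resample when ESS is low."""
    particles = motion(particles, rng)              # sample from the motion prior
    weights = weights * likelihood(particles)       # weight by the observation model
    weights = weights / weights.sum()
    n = len(weights)
    if 1.0 / np.sum(weights ** 2) < n / 2:          # effective sample size check
        idx = rng.choice(n, size=n, p=weights)
        particles, weights = particles[idx], np.full(n, 1.0 / n)
    return particles, weights
```
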
@InProceedings{7362453,
  author = {I. G. Maqueda and N. P. {de la Blanca} and R. Molina and A. K. Katsaggelos},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Fast millimeter wave threat detection algorithm},
  year = {2015},
  pages = {599-603},
  abstract = {Millimeter Wave (MMW) imaging systems are currently being used to detect hidden threats. Unfortunately, the current performance of detection algorithms is very poor due to the presence of severe noise, the low resolution of MMW images and, in general, the poor quality of the acquired images. In this paper we present a new real-time MMW threat detection algorithm based on tailored de-noising, body and threat segmentation, and a threat detection process that outperforms currently existing detection procedures. A complete comparison with a state-of-the-art threat detection algorithm is presented in the experimental section.},
  keywords = {image denoising;image segmentation;millimetre wave detectors;millimetre wave imaging;millimeter wave threat detection algorithm;millimeter wave imaging systems;denoising;threat segmentation;Signal processing algorithms;Image segmentation;Standards;Detection algorithms;Classification algorithms;Algorithm design and analysis;Europe;Millimeter wave imaging;image processing;Security},
  doi = {10.1109/EUSIPCO.2015.7362453},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103163.pdf},
}
@InProceedings{7362454,
  author = {A. S. Lin and B. Z. Luo and C. J. Zhang and D. E. Saucan},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Generalized Ricci curvature based sampling and reconstruction of images},
  year = {2015},
  pages = {604-608},
  abstract = {We introduce a novel method of image sampling based on viewing grayscale images as manifolds with density, and sampling them according to the generalized Ricci curvature introduced by Bakry, Emery and Ledoux. A variation of this approach, due to Morgan and his students, is also considered. This new paradigm generalizes ideas and results that are by now common in imaging and graphics. We apply the new algorithm to natural and range images, as well as cartoons, and show that the proposed method produces results similar to those obtained with more standard approaches. Furthermore, we show that our approach extends naturally to other types of images, in particular to MRI and CT, where its potential applications are greatest, as well as to meshes.},
  keywords = {differential geometry;image reconstruction;image sampling;Ricci curvature;image reconstruction;image sampling;grayscale images;Manifolds;Image reconstruction;Gray-scale;Laplace equations;Imaging;Surface reconstruction;Image coding;Weighted manifolds;generalized Ricci curvature;image sampling and reconstruction},
  doi = {10.1109/EUSIPCO.2015.7362454},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103763.pdf},
}
@InProceedings{7362455,
  author = {W. Chen and C. Lee and J. Chiang},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Scene-aware high dynamic range imaging},
  year = {2015},
  pages = {609-613},
  abstract = {High dynamic range (HDR) images offer better visual quality by allowing a wider range of luminance, bringing the visual experience much closer to reality. In this paper, a scene-aware HDR imaging system is proposed. According to the characteristics of the scene to be captured, the proposed system first decides whether it is necessary to activate the HDR imaging mode. If the dynamic range of the scene is limited, no HDR imaging is performed. Otherwise, two or three low dynamic range (LDR) images with different exposures are captured to render high-quality HDR images in an efficient way. Experimental results show that the proposed system is capable of generating HDR images from either two or three LDR images, with quality comparable to existing techniques in which three LDR images are always used.},
  keywords = {image processing;scene-aware high dynamic range imaging;high dynamic range images;HDR image system;HDR imaging mode;low dynamic range;Histograms;Imaging;Dynamic range;Visualization;Europe;Signal processing;Image recognition;High dynamic range image;scene recognition},
  doi = {10.1109/EUSIPCO.2015.7362455},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103301.pdf},
}
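The merge step of such a system can be sketched with OpenCV's exposure-fusion APIs (Debevec response calibration and merge, then tone mapping for display); this is a generic pipeline, not the paper's scene-aware logic, and the gamma value is illustrative.

```python
import cv2
import numpy as np

def merge_ldr(images, exposure_times):
    """Merge a list of 8-bit BGR exposures into a displayable HDR rendering."""
    times = np.asarray(exposure_times, dtype=np.float32)
    response = cv2.createCalibrateDebevec().process(images, times)
    hdr = cv2.createMergeDebevec().process(images, times, response)
    ldr = cv2.createTonemap(gamma=2.2).process(hdr)       # float image, roughly [0, 1]
    return np.clip(ldr * 255, 0, 255).astype('uint8')
```
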
@InProceedings{7362456,
  author = {P. Neubert and P. Prötzel},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Benchmarking superpixel descriptors},
  year = {2015},
  pages = {614-618},
  abstract = {Superpixels are useful intermediate representations for many computer vision tasks. While the segmentation step is well studied, the subsequent creation of meaningful descriptors lacks this foundation. Superpixels have properties similar to affine covariant regions (keypoints), but there are fundamental differences that have led to a different set of commonly used descriptors. In this paper we work towards general insights on the requirements and properties of superpixel descriptors, as well as a framework for experimental comparison. More precisely, we want to answer the question: given superpixels from different images, what can superpixel descriptors tell us about the ground-truth overlap of the segments in the world? We propose and discuss an evaluation methodology based on image sequences with ground-truth optical flow. Further, we present results for several types of superpixel descriptors and discuss the influence of the segmentation algorithm used, as well as the problem of visual ambiguity in oversegmentations.},
  keywords = {computer vision;image resolution;image segmentation;superpixel descriptor benchmarking;computer vision tasks;affine covariant regions;keypoints;evaluation methodology;segmentation algorithm;visual ambiguity problem;oversegmentations;Image segmentation;Optical imaging;Signal processing algorithms;Visualization;Image color analysis;Europe;Signal processing;superpixels;descriptors;benchmark},
  doi = {10.1109/EUSIPCO.2015.7362456},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105525.pdf},
}
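A minimal descriptor pipeline of the kind such a benchmark evaluates, assuming scikit-image: segment with SLIC and describe each superpixel by its mean Lab colour; the segment count and compactness are illustrative, and real benchmarks would compare several descriptor types.

```python
import numpy as np
from skimage.segmentation import slic
from skimage.color import rgb2lab

def mean_lab_descriptors(image_rgb, n_segments=300):
    """One 3-D mean-Lab descriptor per SLIC superpixel."""
    labels = slic(image_rgb, n_segments=n_segments, compactness=10)
    lab = rgb2lab(image_rgb)
    return np.array([lab[labels == s].mean(axis=0) for s in np.unique(labels)])
```
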
@InProceedings{7362457,
  author = {R. {Ben Salah} and O. Alata and B. Tremblais and L. Thomas and L. David},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Particle volume reconstruction based on a marked point process and application to TOMO-PIV},
  year = {2015},
  pages = {619-623},
  abstract = {In this paper, we propose a new tomographic reconstruction method, called IOD-PVRMPP, to reconstruct 3D particle volumes from 2D particle images provided by the Tomographic Particle Image Velocimetry (Tomo-PIV) technique. Our method, based on marked point processes (or object processes), allows the problem to be solved in a parsimonious way. It facilitates the introduction of prior knowledge and avoids the memory problems inherent in the voxel-based approaches used by classical tomographic reconstruction methods. The reconstruction of a 3D particle set is obtained by minimizing an energy function, which defines the marked point process. To this end, we use a simulated annealing algorithm based on the Reversible Jump Markov Chain Monte Carlo (RJMCMC) method. To speed up the convergence of the simulated annealing, we develop an initialization method that provides the initial distribution of 3D particles by detecting 2D particles located in the projection images. Using synthetic data, we show that the IOD-PVRMPP method gives better results than the MinLOS-MART method for different seeding densities.},
  keywords = {image reconstruction;Markov processes;Monte Carlo methods;simulated annealing;tomography;initialization method;particle volume reconstruction;reversible jump Markov chain Monte Carlo method;simulated annealing algorithm;marked point processes;tomographic particle image velocimetry technique;2D particle images;3D particle volumes;tomographic reconstruction method;TOMO-PIV;Image reconstruction;Three-dimensional displays;Reconstruction algorithms;Simulated annealing;Europe;Signal processing;Marked Point Processes or Object Processes;Tomography Reconstruction;Simulated Annealing;RJMCMC;Tomo-PIV},
  doi = {10.1109/EUSIPCO.2015.7362457},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104905.pdf},
}
@InProceedings{7362458,
  author = {K. G. Gopan and N. Sinha and J. D. Babu},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {EEG signal classification in non-linear framework with filtered training data},
  year = {2015},
  pages = {624-628},
  abstract = {Electroencephalographic (EEG) signals are produced in the brain by the firing of neurons. Any anomaly found in the EEG indicates an abnormality associated with brain functioning. The efficacy of automated EEG analysis depends on the features chosen to represent the time series, the classifier used, and the quality of the training data. In this work, we present automated analysis of EEG time series acquired from two different groups. Non-linear features are used to capture the characteristics of the EEG in each case, since they portray the non-linear dependencies of the different parameters associated with the EEG. In the first case, we present classification between alcoholics and controls; in the second, classification between epileptics and controls. In the classification, we address the issue of training-data quality: in the proposed scheme, the training data are filtered prior to classification. This approach led to at least a 10% improvement in classification accuracy.},
  keywords = {electroencephalography;filtering theory;medical signal processing;signal classification;time series;EEG signal classification;filtered training data;electroencephalographic signals;brain;EEG time series analysis;alcoholics;epileptic;Electroencephalography;Time series analysis;Training data;Training;Entropy;Correlation;Support vector machines;EEG;Non-Linear Analysis;k-Means Clustering;Support Vector Machine;Fuzzy k-NN},
  doi = {10.1109/EUSIPCO.2015.7362458},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096699.pdf},
}
@InProceedings{7362459,
  author = {S. Guérit and L. Jacques and B. Macq and J. A. Lee},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Post-reconstruction deconvolution of PET images by total generalized variation regularization},
  year = {2015},
  pages = {629-633},
  abstract = {Improving the quality of positron emission tomography (PET) images, affected by low resolution and high levels of noise, is a challenging task in nuclear medicine and radiotherapy. This work proposes a restoration method, applied after tomographic reconstruction of the images and targeting clinical situations where raw data are often not accessible. Based on inverse problem methods, our contribution introduces the recently developed total generalized variation (TGV) norm to regularize PET image deconvolution. Moreover, we stabilize this procedure with additional image constraints such as positivity and photometry invariance. A criterion for automatically updating and adjusting the regularization parameter in the case of Poisson noise is also presented. Experiments are conducted on both synthetic data and real patient images.},
  keywords = {deconvolution;image restoration;inverse problems;medical image processing;photometry;positron emission tomography;radioisotope imaging;stochastic processes;patient images;synthetic data;Poisson noise;photometry invariance;image constraints;inverse problem methods;tomographic reconstruction;restoration method;radiotherapy;nuclear medicine;PET image deconvolution;positron emission tomography;TGV;total generalized variation regularization;post-reconstruction deconvolution;Positron emission tomography;Deconvolution;TV;Photometry;Photonics;PET imaging;total generalized variation;deconvolution;Poisson noise;inverse problem},
  doi = {10.1109/EUSIPCO.2015.7362459},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105079.pdf},
}

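A minimal sketch of the regularized-deconvolution idea, assuming a Gaussian PSF and substituting plain total variation (scikit-image's denoise_tv_chambolle) for the paper's TGV regularizer; the step size and TV weight are hypothetical. It keeps the two constraints the abstract names: positivity and photometry (flux) invariance.

    import numpy as np
    from scipy.ndimage import gaussian_filter
    from skimage.restoration import denoise_tv_chambolle

    def deconvolve_poisson_tv(y, psf_sigma, n_iter=50, step=0.5, tv_weight=0.02):
        """Proximal-gradient sketch: gradient step on the Poisson negative
        log-likelihood, TV proximal step (a stand-in for TGV), then
        positivity and flux-preserving projections."""
        x = np.maximum(y, 1e-6)
        flux = y.sum()
        for _ in range(n_iter):
            Hx = gaussian_filter(x, psf_sigma)             # blur with Gaussian PSF
            grad = gaussian_filter(1.0 - y / np.maximum(Hx, 1e-6), psf_sigma)
            x = x - step * grad                            # likelihood gradient step
            x = denoise_tv_chambolle(x, weight=tv_weight)  # TV prox (TGV stand-in)
            x = np.maximum(x, 0)                           # positivity
            x *= flux / max(x.sum(), 1e-6)                 # photometry invariance
        return x
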
@InProceedings{7362460,
  author = {S. Matsutake and M. Yamashita and S. Matsunaga},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Abnormal-respiration detection by considering correlation of observation of adventitious sounds},
  year = {2015},
  pages = {634-638},
  abstract = {We propose a classification method to distinguish between normal and abnormal respiration by considering the correlation of the observation frequencies of adventitious sounds between auscultation points. This method is based on the fact that adventitious sounds are frequently observed in lung sounds from multiple points. We use the product of the correlation score and the abnormality score, which indicates the likelihood that a candidate is abnormal, for lung sounds from different points. When using lung sounds from eight points, the proposed method achieved a classification performance of 92.0% between normal and abnormal respiration, compared with 84.1% for the baseline method that does not consider the other lung sounds. Our approach to the classification of healthy subjects and patients also achieved a higher classification rate of 90.8%.},
  keywords = {medical signal detection;signal classification;abnormal-respiration detection;classification method;adventitious sounds;observation frequencies;auscultation points;lung sounds;Lungs;Correlation;Acoustics;Hidden Markov models;Mathematical model;Europe;Signal processing;lung sound;classification;adventitious sound;auscultation;pulmonary emphysema},
  doi = {10.1109/EUSIPCO.2015.7362460},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103287.pdf},
}

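The fusion rule itself is a per-point product of two scores. A toy sketch, where the score values, the mean pooling, and the decision threshold are hypothetical stand-ins:

    import numpy as np

    def classify_respiration(abnormality, correlation, threshold=0.5):
        """Fuse per-auscultation-point scores as the abstract describes:
        the product of the abnormality score (likelihood a candidate is
        abnormal) and the correlation score between points, compared
        against a decision threshold (pooling and threshold hypothetical)."""
        fused = abnormality * correlation        # score product per point
        return fused.mean() > threshold

    # Scores for lung sounds recorded at eight auscultation points.
    abnormality = np.array([0.9, 0.8, 0.7, 0.9, 0.6, 0.8, 0.9, 0.7])
    correlation = np.array([0.8, 0.9, 0.6, 0.7, 0.8, 0.9, 0.7, 0.8])
    print(classify_respiration(abnormality, correlation))  # True -> abnormal
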
@InProceedings{7362461,
  author = {A. Chittora and H. A. Patil},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Classification of normal and pathological infant cries using bispectrum features},
  year = {2015},
  pages = {639-643},
  abstract = {In this paper, a bispectrum-based feature extraction method is proposed for the classification of normal vs. pathological infant cries. The bispectrum, a two-dimensional (2-D) feature, is computed for all segments of normal as well as pathological cries. A tensor is formed from these bispectrum features, and for feature reduction the higher-order singular value decomposition (HOSVD) is applied. Our experimental results show 70.56% average classification accuracy with a support vector machine (SVM) classifier, whereas the baseline features, viz. MFCC, LPC and PLP, gave classification accuracies of 52.41%, 61.27% and 57.41%, respectively. To show the effectiveness of the proposed feature extraction method, a comparison with other feature extraction methods, which use the diagonal slice and the peaks and their locations as feature vectors, is given as well.},
  keywords = {feature extraction;signal classification;singular value decomposition;support vector machines;tensors;vectors;PLP;LPC;MFCC;SVM classifier;support vector machine classifier;HOSVD;higher order singular value decomposition theorem;feature reduction;tensor;normal infant cry classification;bispectrum-based feature extraction method;pathological infant cry classification;Pathology;Feature extraction;Tensile stress;Speech;Signal processing;Mel frequency cepstral coefficient;Pain;Higher order signal processing cumulant;bispectrum;higher order singular value decomposition theorem},
  doi = {10.1109/EUSIPCO.2015.7362461},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104157.pdf},
}

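The feature-reduction step (truncated HOSVD of the stacked bispectrum tensor) can be written with plain NumPy mode unfoldings; the tensor size and mode ranks below are hypothetical.

    import numpy as np

    def hosvd_reduce(T, ranks):
        """Truncated HOSVD: take the leading left singular vectors of each
        mode unfolding and project the tensor onto those subspaces."""
        core = T
        for mode, r in enumerate(ranks):
            unfold = np.moveaxis(core, mode, 0).reshape(core.shape[mode], -1)
            U, _, _ = np.linalg.svd(unfold, full_matrices=False)
            core = np.moveaxis(
                np.tensordot(U[:, :r].T, np.moveaxis(core, mode, 0), axes=1),
                0, mode)
        return core  # reduced core tensor used as the feature representation

    # Toy stack: 40 cry segments, each with a 32x32 bispectrum magnitude.
    T = np.abs(np.random.default_rng(0).normal(size=(40, 32, 32)))
    features = hosvd_reduce(T, ranks=(40, 8, 8)).reshape(40, -1)
    print(features.shape)  # (40, 64): one reduced feature vector per segment
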
@InProceedings{7362462,
  author = {A. Rubiano and J. L. Ramirez and M. N. {El Korso} and L. Gallimard and N. Jouandeau and O. Polit},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Elbow flexion and extension identification using surface electromyography signals},
  year = {2015},
  pages = {644-648},
  abstract = {In this paper, a new approach is presented for the analysis and identification of surface electromyography (EMG) signals of the biceps and triceps muscles. The objective of this study is the accurate classification of elbow flexion and extension movements. We propose a cropping method based on the agreement between movement changes and the EMG signal, using the upper-limb kinematics. Then, we extract and select several well-known features in the time and frequency domains. The selected features are used as inputs to our support vector machine classifier, which is designed using an optimal weight vector criterion. Afterward, the training and test steps are performed in the proposed scheme. Finally, numerical simulations assess the accuracy of the classification, as well as the robustness of the proposed approach under noisy measurements.},
  keywords = {electromyography;medical signal processing;signal classification;support vector machines;elbow flexion;extension identification;surface electromyography signals;EMG signal;triceps muscle;biceps muscle;upper limb kinematic;support vector machine classifier;optimal weight vector criterion;feature extraction;Electromyography;Feature extraction;Elbow;Support vector machines;Kinematics;Quaternions;Software;Electromyography;feature extraction;upper limb kinematic;support vector machine;quaternions},
  doi = {10.1109/EUSIPCO.2015.7362462},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103795.pdf},
}

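A compact stand-in for the feature-extraction-plus-SVM pipeline, using three classic time-domain EMG features (MAV, RMS, zero-crossing count) on synthetic biceps/triceps windows; the feature subset and the toy signal model are assumptions, not the paper's selection.

    import numpy as np
    from sklearn.svm import SVC

    def emg_features(window):
        """Classic time-domain EMG features: mean absolute value,
        root mean square, and zero-crossing count."""
        mav = np.mean(np.abs(window))
        rms = np.sqrt(np.mean(window ** 2))
        zc = np.sum(np.diff(np.sign(window)) != 0)
        return [mav, rms, zc]

    rng = np.random.default_rng(0)
    # Toy windows: flexion (label 1) = stronger biceps, weaker triceps.
    X, y = [], []
    for label in (0, 1):
        for _ in range(50):
            biceps = rng.normal(0, 1.0 + label, 200)   # hypothetical scaling
            triceps = rng.normal(0, 2.0 - label, 200)
            X.append(emg_features(biceps) + emg_features(triceps))
            y.append(label)
    clf = SVC(kernel="rbf").fit(X, y)
    print(clf.score(X, y))
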
@InProceedings{7362463,
  author = {I. Garali and M. Adel and S. Bourennane and E. Guedj},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A novel feature selection in the case of brain PET image classification},
  year = {2015},
  pages = {649-653},
  abstract = {Positron Emission Tomography (PET) imaging is important for diagnosing neurodegenerative diseases such as Alzheimer's Disease (AD). Computer-aided diagnosis methods can process and quantitatively analyze these images in order to better characterize them and extract meaningful information for medical diagnosis. This paper presents a novel computer-aided diagnosis technique for brain PET image classification in the case of AD. Brain images are first segmented into Regions Of Interest (ROI) using an atlas. Computing statistical parameters on these regions, we define a Separation Power Factor (SPF) associated with each region. This factor quantifies the ability of each region to separate AD from Healthy Control (HC) brain images. Ranking the selected regions according to their SPF and inputting them to a Support Vector Machine (SVM) classifier yields a better classification accuracy rate than inputting the same number of ranked regions extracted with four other classical feature selection methods.},
  keywords = {brain;diseases;feature selection;image classification;medical image processing;neurophysiology;positron emission tomography;statistical analysis;support vector machines;brain PET image classification;positron emission tomography;neurodegenerative disease diagnosis;Alzheimer disease;computer-aided diagnosis technique;brain PET image segmentation;statistical parameters;separation power factor;support vector machine classifier;feature selection methods;Support vector machines;Positron emission tomography;Diseases;Brain;Vegetation;Databases;Europe;Computer-Aided diagnosis (CAD);Support Vector Machine (SVM);Voxel-Based Analysis (VBA);Classification;Receiver Operating Characteristic (ROC);Alzheimer's Disease (AD);Feature Selection},
  doi = {10.1109/EUSIPCO.2015.7362463},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097199.pdf},
}

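The abstract does not give the SPF formula, so the sketch below ranks regions with a Fisher-style separation score as a stand-in, then feeds the top-ranked regions to an SVM; the group sizes, region count, and number of kept regions are hypothetical.

    import numpy as np
    from sklearn.svm import SVC

    def separation_score(x_ad, x_hc):
        """Fisher-style separation per region -- a stand-in for the paper's
        Separation Power Factor, whose definition is not in the abstract."""
        return (x_ad.mean() - x_hc.mean()) ** 2 / (x_ad.var() + x_hc.var() + 1e-12)

    rng = np.random.default_rng(0)
    n_regions = 116                                # e.g., an atlas-sized ROI set
    X_ad = rng.normal(0.0, 1.0, (30, n_regions))   # mean ROI uptake, AD group
    X_hc = rng.normal(0.3, 1.0, (30, n_regions))   # mean ROI uptake, controls

    scores = np.array([separation_score(X_ad[:, r], X_hc[:, r])
                       for r in range(n_regions)])
    top = np.argsort(scores)[::-1][:20]            # keep the 20 best regions
    X = np.vstack([X_ad[:, top], X_hc[:, top]])
    y = np.repeat([1, 0], 30)
    print(SVC().fit(X, y).score(X, y))
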
@InProceedings{7362464,
  author = {S. Nie and J. Yu and P. Chen and J. Zhang and Y. Wang},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A novel method with a deep network and directional edges for automatic detection of a fetal head},
  year = {2015},
  pages = {654-658},
  abstract = {In this paper, we propose a novel method for the automatic detection of the fetal head in 2D ultrasound images. Fetal head detection is a challenging task, as ultrasound images usually have poor quality, the structures contained in the images are complex, and the gray-scale distribution is highly variable. Our approach is based on a deep belief network and a modified circle detection method. The whole process can be divided into two steps: first, a deep learning architecture is applied to search the whole image and determine the patch that contains the entire fetal head; second, a modified circle detection method based on the Hough transform is used to detect the position and size of the fetal head. To validate our method, experiments are performed on both synthetic data and clinical ultrasound data, and the proposed method shows good performance.},
  keywords = {belief networks;Hough transforms;medical signal detection;ultrasonic imaging;deep network;directional edges;automatic detection;2D ultrasound images;fetal head detection;gray scale distribution;deep belief network;modified circle detection;deep learning architecture;Hough transform;synthetic data;clinic ultrasound data;Head;Image edge detection;Ultrasonic imaging;Magnetic heads;Training;Europe;Signal processing;Fetal head;deep learning;circle detection},
  doi = {10.1109/EUSIPCO.2015.7362464},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096689.pdf},
}

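The second stage (circle detection via the Hough transform) can be sketched with OpenCV's cv2.HoughCircles on a synthetic patch standing in for the deep network's output; all detector parameters below are hypothetical.

    import cv2
    import numpy as np

    # Synthetic stand-in for the patch the deep network would return:
    # a bright ring on a dim background.
    patch = np.full((256, 256), 40, np.uint8)
    cv2.circle(patch, (128, 130), 70, 200, thickness=4)
    patch = cv2.GaussianBlur(patch, (7, 7), 2)

    circles = cv2.HoughCircles(
        patch, cv2.HOUGH_GRADIENT, dp=1, minDist=100,
        param1=80, param2=30, minRadius=40, maxRadius=120)
    if circles is not None:
        x, y, r = circles[0, 0]   # position and size of the detected head
        print(f"center=({x:.0f},{y:.0f}) radius={r:.0f}")
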
@InProceedings{7362465,
  author = {I. Pirnog and R. O. Preda and C. Oprea and C. Paleologu},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Automatic lesion segmentation for melanoma diagnostics in macroscopic images},
  year = {2015},
  pages = {659-663},
  abstract = {Detailed segmentation of pigmented skin lesions is an important requirement in computer-aided applications for melanoma assessment. In particular, accurate segmentation is necessary for image-guided evaluation of skin lesion characteristics. In this paper, we present a new histogram-thresholding approach for detailed segmentation of skin lesions based on histogram analysis of the saturation component in the hue-saturation-value (HSV) color space. The proposed technique is specifically developed to handle the complex variability of features in macroscopic color images taken in uncontrolled environments. A dataset of 30 cases with manual segmentation was used for evaluation. We compare our results with two of the most important existing segmentation techniques. To report the similarity between automatic and manual segmentation, we used the Dice similarity coefficient (DSC), the true detection rate (TDR), and the false positive rate (FPR). Experimental results show that the proposed method has high precision and low computational complexity.},
  keywords = {feature extraction;image colour analysis;image recognition;image segmentation;medical image processing;pigments;lesion segmentation;melanoma diagnostics;macroscopic images;pigmented skin lesions;computer aided applications;melanoma assessment;histogram analysis;saturation color component;hue-saturation-value color space;HSV color space;macroscopic color images;dice similarity coefficient;DSC;true detection rate;TDR;false positive rate;FPR;Image segmentation;Lesions;Skin;Malignant tumors;Gray-scale;Histograms;Image color analysis;lesion;segmentation;saturation;skin cancer},
  doi = {10.1109/EUSIPCO.2015.7362465},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104367.pdf},
}

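A minimal sketch of saturation-channel thresholding plus the DSC metric, using Otsu's rule as a stand-in for the paper's specific histogram analysis of the S component:

    import numpy as np
    from skimage.color import rgb2hsv
    from skimage.filters import threshold_otsu

    def segment_lesion(rgb):
        """Threshold the HSV saturation channel; Otsu's rule stands in
        for the paper's histogram analysis."""
        s = rgb2hsv(rgb)[..., 1]
        return s > threshold_otsu(s)

    def dice(pred, truth):
        inter = np.logical_and(pred, truth).sum()
        return 2.0 * inter / (pred.sum() + truth.sum() + 1e-12)

    rng = np.random.default_rng(0)
    img = rng.uniform(0.6, 0.9, (64, 64, 3))   # pale "skin", low saturation
    img[20:44, 20:44] = [0.4, 0.2, 0.15]       # darker, more saturated "lesion"
    truth = np.zeros((64, 64), bool)
    truth[20:44, 20:44] = True
    print(dice(segment_lesion(img), truth))
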
@InProceedings{7362466,
  author = {A. Bernardini and K. J. Werner and A. Sarti and J. O. Smith},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Modeling a class of multi-port nonlinearities in wave digital structures},
  year = {2015},
  pages = {664-668},
  abstract = {Wave Digital Structures (WDS) are particularly interesting for applications of interactive modeling of nonlinear (NL) elements in the context of Virtual Analog modeling. NL circuits, however, often include multiple nonlinearities or multi-port nonlinearities, which cannot readily be accommodated by traditional WDS. In this work we present a novel method for modeling in the WD domain a class of multi-port NL elements that are obtained as the interconnection of linear and NL resistive bipoles. Our technique is based on a Piece-Wise Linear approximation of the individual bipoles that constitute the multi-port element. The method generalizes the existing solutions that are available in the literature as it enables the modeling of arbitrary interconnections between outer ports of the nonlinearity and individual ports of the local NL bipoles.},
  keywords = {approximation theory;piecewise linear techniques;wave digital filters;local NL bipoles;arbitrary interconnections modeling;piecewise linear approximation;NL resistive bipoles;linear bipoles;multi-port NL elements;WD domain;virtual analog modeling;nonlinear elements;WDS;wave digital structures;Ports (Computers);Resistance;Mathematical model;Signal processing;Matrices;Europe;Context modeling;Circuit simulation;physical modeling sound synthesis;non linear signal processing;wave digital filters},
  doi = {10.1109/EUSIPCO.2015.7362466},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104375.pdf},
}

@InProceedings{7362467,
  author = {L. Fuster and M. {de Diego} and M. Ferrer and A. Gonzalez and G. Pinero},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Nonlinear filtered-X second-order adaptive Volterra filters for listening-room compensation},
  year = {2015},
  pages = {669-673},
  abstract = {The presence of nonlinearities as well as reverberation effects severely degrades the audio quality of sound reproduction systems. In this context, many adaptive strategies have been developed to compensate for room effects. However, when nonlinear distortion becomes significant, room equalization requires suitable solutions to tackle this problem. Linearization of loudspeakers has been investigated in depth, but its combination with room equalization systems may not be so straightforward, mainly when the nonlinearities exhibit memory. In this paper, the nonlinear system is modeled as a Volterra filter representing the loudspeaker, connected in tandem with a linear filter corresponding to the electroacoustic path, including the enclosure and the microphone setup. Based on this structure, we introduce a nonlinear filtered-x second-order adaptive Volterra filter that uses the virtual path concept to preprocess the audio signals. Simulation results validate the performance of the new approach.},
  keywords = {adaptive filters;audio signal processing;loudspeakers;nonlinear distortion;nonlinear filters;nonlinear filtered-X second-order adaptive Volterra filters;listening-room compensation;reverberation effects;audio quality;sound reproduction systems;adaptive strategy;nonlinear distortion;room equalization;loudspeaker linearization;room equalization systems;linear filter;electroacoustic path;microphone setup;virtual path concept;audio signal preprocessing;Signal processing algorithms;Nonlinear distortion;Loudspeakers;Kernel;Europe;Adaptive systems;Nonlinear systems;Adaptive equalization;Volterra filters;nonlinear distortions;Virtual channel},
  doi = {10.1109/EUSIPCO.2015.7362467},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104917.pdf},
}

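At the core of the scheme is a second-order Volterra model of the loudspeaker. Its output can be written directly as a linear term plus a quadratic term; the kernels and memory length below are hypothetical, and the filtered-x adaptation loop is omitted.

    import numpy as np

    def volterra2_output(x, h1, h2):
        """Output of a second-order Volterra filter:
        y[n] = sum_i h1[i] x[n-i] + sum_{i,j} h2[i,j] x[n-i] x[n-j]."""
        N = len(h1)
        y = np.zeros(len(x))
        for n in range(len(x)):
            past = x[max(0, n - N + 1):n + 1][::-1]    # x[n], x[n-1], ...
            past = np.pad(past, (0, N - len(past)))
            y[n] = h1 @ past + past @ h2 @ past        # linear + quadratic terms
        return y

    rng = np.random.default_rng(0)
    h1 = 0.5 * rng.normal(size=4)                      # hypothetical kernels
    h2 = 0.05 * rng.normal(size=(4, 4))
    print(volterra2_output(rng.normal(size=8), h1, h2))
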
@InProceedings{7362468,
  author = {S. S. Payal and V. J. Mathews},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Equalization of nonlinear systems modeled using the Burgers equation},
  year = {2015},
  pages = {674-678},
  abstract = {This paper describes a model-based corrector for distortions due to propagation of acoustic waveforms in air in a cylindrical waveguide at high sound pressure levels. The nonlinear distortions are modeled using the Burgers wave propagation model, accounting for dissipation and boundary layer dispersion effects. The corrector was designed to mitigate these distortions in signals obtained at predefined distances in the waveguide. This compensator is derived from the Burgers model and is independent of the stimulus used. Results demonstrating a substantial reduction in the intermodulation distortion and harmonic distortion in a specific frequency band of interest over a multitude of test input stimuli are included in this paper.},
  keywords = {acoustic distortion;circular waveguides;harmonic distortion;intermodulation distortion;nonlinear systems;acoustic waveforms;nonlinear distortions;cylindrical waveguide;high sound pressure levels;substantial reduction;intermodulation distortion;harmonic distortion;Mathematical model;Acoustic distortion;Propagation;Distortion measurement;Computational modeling;Acoustics;nonlinear acoustics;nonlinear distortion;inverse problems;backpropagation algorithms;nonlinear systems},
  doi = {10.1109/EUSIPCO.2015.7362468},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104997.pdf},
}

@InProceedings{7362469,
  author = {M. Granados-Cruz and Y. S. Shmaliy and S. H. Khan and C. K. Ahn and S. Zhao},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {New results in nonlinear state estimation using extended unbiased FIR filtering},
  year = {2015},
  pages = {679-683},
  abstract = {This paper discusses two algorithms for extended unbiased FIR (EFIR) filtering of nonlinear discrete-time state-space models used in tracking and state estimation. The basic algorithm employs the extended nonlinear state and observation equations. The modified algorithm utilizes a nonlinear-to-linear conversion of the observation equation, provided by a batch EFIR filter with small memory. Unlike the extended Kalman filter (EKF), both EFIR algorithms ignore the noise statistics and demonstrate better robustness against temporary model uncertainties. These algorithms require an optimal horizon in order to minimize the mean square error. Applications are given for robot indoor self-localization utilizing radio frequency identification tags.},
  keywords = {FIR filters;indoor radio;Kalman filters;mean square error methods;nonlinear filters;radiofrequency identification;state estimation;state-space methods;nonlinear state estimation;extended unbiased FIR filtering;nonlinear discrete-time state-space models;state estimation;tracking;extended nonlinear state;observation equations;nonlinear-to-linear conversion;extended Kalman filter;EFIR algorithms;mean square error;robot indoor self-localization;radio frequency identification tags;Signal processing algorithms;Hidden Markov models;Mathematical model;Kalman filters;Robots;Finite impulse response filters;Europe},
  doi = {10.1109/EUSIPCO.2015.7362469},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095943.pdf},
}

@InProceedings{7362470,
  author = {G. L. Sicuranza and A. Carini},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Least-square approximation of second-order nonlinear systems using quasi-perfect periodic sequences},
  year = {2015},
  pages = {684-688},
  abstract = {We consider the identification of nonlinear filters using periodic sequences. Perfect periodic sequences have already been proposed for this purpose. A periodic sequence is called perfect for a nonlinear filter if it causes the basis functions to be orthogonal and the autocorrelation matrix to be diagonal. In this paper, we introduce for the same purpose the quasi-perfect periodic sequences. We define a periodic sequence as quasi-perfect for a nonlinear filter if the resulting auto-correlation matrix is highly sparse. The sequence is obtained by means of a simple combinatorial rule and is formed by samples having few discrete levels. These characteristics allow an efficient implementation of the least-squares method for the approximation of certain linear-in-the-parameters nonlinear filters. A real-world experiment shows the good performance obtained.},
  keywords = {correlation methods;least squares approximations;nonlinear filters;second-order nonlinear system least-square approximation;quasiperfect periodic sequence;nonlinear filter identification;orthogonal basis function;diagonal autocorrelation matrix;combinatorial rule;linear-in-the-parameters nonlinear filter approximation;Correlation;Sparse matrices;Nonlinear systems;Signal processing algorithms;Europe;Signal processing;Piecewise linear approximation;Least-squares approximation;second-order nonlinear systems;quasi-perfect periodic sequences;sparse auto-correlation matrix},
  doi = {10.1109/EUSIPCO.2015.7362470},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570092419.pdf},
}

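The least-squares identification itself is a single linear solve once the linear-in-the-parameters regressors are built. The sketch below uses a few-level periodic input, as the abstract describes, with a memory-3 second-order model; all sizes are hypothetical.

    import numpy as np

    def basis_matrix(x, N):
        """Regressors of a memory-N second-order filter: delayed samples
        and their pairwise products (linear-in-the-parameters model).
        np.roll gives circular delays, consistent with periodic input."""
        X = np.column_stack([np.roll(x, i) for i in range(N)])
        quads = [X[:, i] * X[:, j] for i in range(N) for j in range(i, N)]
        return np.column_stack([X] + quads)

    rng = np.random.default_rng(0)
    x = np.tile(rng.choice([-1.0, 0.0, 1.0], size=64), 4)  # few-level periodic input
    A = basis_matrix(x, N=3)
    theta_true = rng.normal(size=A.shape[1])
    y = A @ theta_true + 0.01 * rng.normal(size=len(x))
    theta_hat, *_ = np.linalg.lstsq(A, y, rcond=None)
    print(np.max(np.abs(theta_hat - theta_true)))          # small estimation error
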
@InProceedings{7362471,
  author = {A. Zaki and S. Chatterjee and L. K. Rasmussen},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Universal algorithm for compressive sampling},
  year = {2015},
  pages = {689-693},
  abstract = {In a standard compressive sampling (CS) setup, we develop a universal algorithm in which multiple CS reconstruction algorithms participate and their outputs are fused to achieve better reconstruction performance. The new method, called the universal algorithm for CS (UACS), is iterative in nature and has a restricted isometry property (RIP) based theoretical convergence guarantee. It is shown that if one participating algorithm in the design has a converging recurrence inequality relation, then UACS also holds a converging recurrence inequality relation over iterations. An example of UACS is presented and studied through simulations, demonstrating its flexibility and performance improvement.},
  keywords = {compressed sensing;convergence of numerical methods;iterative methods;universal algorithm;compressive sampling setup;multiple CS reconstruction algorithms;restricted isometry property based theoretical convergence guarantee;RIP based theoretical convergence guarantee;UACS;recurrence inequality;Signal processing algorithms;Algorithm design and analysis;Matching pursuit algorithms;Reconstruction algorithms;Radiation detectors;Signal processing;Europe;Compressive sampling;greedy algorithms;iterative fusion;restricted isometry property},
  doi = {10.1109/EUSIPCO.2015.7362471},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104603.pdf},
}

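A heavily simplified fusion sketch in the spirit of this abstract: two participating reconstructions (plain OMP runs here) are fused by least squares on the union of their supports, then pruned to the k largest entries. The participating algorithms and the fusion rule are stand-ins; the paper's UACS iterates such a step with RIP-based guarantees.

    import numpy as np

    def omp(A, y, k):
        """Plain OMP, used here as one participating algorithm."""
        r, S = y.copy(), []
        for _ in range(k):
            S.append(int(np.argmax(np.abs(A.T @ r))))
            xs, *_ = np.linalg.lstsq(A[:, S], y, rcond=None)
            r = y - A[:, S] @ xs
        x = np.zeros(A.shape[1])
        x[S] = xs
        return x

    def fuse(estimates, A, y, k):
        """Least squares on the union of supports, then keep the k largest
        entries -- a simple stand-in for the paper's fusion rule."""
        union = np.flatnonzero(np.any([e != 0 for e in estimates], axis=0))
        xs, *_ = np.linalg.lstsq(A[:, union], y, rcond=None)
        x = np.zeros(A.shape[1])
        x[union] = xs
        keep = np.argsort(np.abs(x))[::-1][:k]
        x[np.setdiff1d(np.arange(len(x)), keep)] = 0
        return x

    rng = np.random.default_rng(0)
    m, n, k = 30, 80, 4
    A = rng.normal(size=(m, n)) / np.sqrt(m)
    x0 = np.zeros(n)
    x0[rng.choice(n, k, replace=False)] = 1.0
    y = A @ x0
    x = fuse([omp(A, y, k), omp(A, y, 2 * k)], A, y, k)
    print(np.linalg.norm(x - x0))
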
@InProceedings{7362472,
  author = {S. Narayanan and S. K. Sahoo and A. Makur},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Greedy pursuits assisted basis pursuit for compressive sensing},
  year = {2015},
  pages = {694-698},
  abstract = {Fusion-based Compressive Sensing (CS) reconstruction algorithms combine multiple CS reconstruction algorithms, which work on different principles, to obtain a better signal estimate. Examples include Fusion of Algorithms for Compressed Sensing (FACS) and the Committee Machine Approach for Compressed Sensing (CoMACS). However, these algorithms involve solving a least squares problem which may be ill-conditioned. Modified CS algorithms such as Modified Basis Pursuit (Mod-BP) ensure that a sparse signal can be efficiently reconstructed when a part of its support is known. Since Mod-BP makes use of available signal knowledge to improve upon BP, we propose to employ multiple Greedy Pursuits (GPs) to derive a partial support for Mod-BP. As Mod-BP makes use of signal knowledge derived using GPs, we term our proposed algorithm Greedy Pursuits Assisted Basis Pursuit (GPABP). Experimental results show that our proposed algorithm performs better than the state-of-the-art algorithms FACS and its variants.},
  keywords = {compressed sensing;greedy algorithms;least squares approximations;greedy pursuits assisted basis pursuit;compressive sensing;CS reconstruction algorithms;fusion of algorithms for compressed sensing;least squares problem;modified CS algorithms;modified basis pursuit;Signal processing algorithms;Compressed sensing;Reconstruction algorithms;Matching pursuit algorithms;Reliability;Europe;Signal processing;Fusion of Algorithms;Basis Pursuit;Greedy Pursuit;Modified Basis Pursuit},
  doi = {10.1109/EUSIPCO.2015.7362472},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105361.pdf},
}

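A hedged sketch of this pipeline: two greedy pursuits (scikit-learn's OrthogonalMatchingPursuit) supply a partial support T, and a modified basis pursuit then penalizes only coefficients outside T, solved here with cvxpy. Taking the intersection of the two supports is my agreement rule; the paper's derivation of the partial support may differ.

    import numpy as np
    import cvxpy as cp
    from sklearn.linear_model import OrthogonalMatchingPursuit

    rng = np.random.default_rng(0)
    m, n, k = 40, 100, 5
    A = rng.normal(size=(m, n)) / np.sqrt(m)
    x0 = np.zeros(n)
    x0[rng.choice(n, k, replace=False)] = rng.normal(size=k)
    y = A @ x0

    # Step 1: greedy pursuits propose supports; keep the atoms they agree on.
    supports = []
    for n_atoms in (k, 2 * k):          # two OMP runs as the "multiple GPs"
        omp = OrthogonalMatchingPursuit(
            n_nonzero_coefs=n_atoms, fit_intercept=False).fit(A, y)
        supports.append(set(np.flatnonzero(omp.coef_)))
    T = sorted(set.intersection(*supports))   # partial support estimate

    # Step 2: modified basis pursuit -- penalize only entries outside T.
    x = cp.Variable(n)
    mask = np.ones(n)
    mask[T] = 0.0
    cp.Problem(cp.Minimize(cp.norm1(cp.multiply(mask, x))),
               [A @ x == y]).solve()
    print(np.linalg.norm(x.value - x0))
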
@InProceedings{7362473,
  author = {H. Kim and E. Ollila and V. Koivunen},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {New robust LASSO method based on ranks},
  year = {2015},
  pages = {699-703},
  abstract = {The LASSO (Least Absolute Shrinkage and Selection Operator) has been a popular technique for simultaneous linear regression estimation and variable selection. Robust approaches to the LASSO are needed in the case of heavy-tailed errors or severe outliers. We propose a novel robust LASSO method with a non-parametric flavor: it solves a criterion function based on the ranks of the residuals with a LASSO penalty. The criterion is based on pairwise differences of residuals in the least absolute deviation (LAD) loss, leading to a bounded influence function. With this criterion we can easily incorporate other penalties, such as the fused LASSO for group sparsity and smoothness. For both methods, we propose efficient algorithms for computing the solutions. Our simulation study and application examples (image denoising, prostate cancer data analysis) show that our method outperforms the usual LS/LASSO methods for either heavy-tailed errors or outliers, offering better variable selection than another robust competitor, the LAD-LASSO method.},
  keywords = {cancer;estimation theory;image denoising;medical image processing;regression analysis;shrinkage;least absolute shrinkage and selection operator;robust LASSO method;linear regression estimation;least absolute deviation;LAD loss;image denoising;prostate cancer data analysis;LS-LASSO methods;Robustness;Linear programming;Signal processing;Image denoising;Europe;Computational modeling;Prostate cancer;LASSO;penalized regression;sparse regression;group sparsity;robust},
  doi = {10.1109/EUSIPCO.2015.7362473},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104625.pdf},
}

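In symbols, the rank-based criterion this abstract describes (pairwise residual differences under an LAD loss, plus an l1 penalty) can be written as below; the pairwise normalization constant is a conventional choice, not a detail taken from the paper:

    \min_{\beta}\;
    \frac{1}{\binom{n}{2}} \sum_{i<j}
    \bigl| (y_i - \mathbf{x}_i^{\top}\beta) - (y_j - \mathbf{x}_j^{\top}\beta) \bigr|
    \;+\; \lambda \|\beta\|_1
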
@InProceedings{7362474,
  author = {A. Ejaz and E. Ollila and V. Koivunen},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Randomized simultaneous orthogonal matching pursuit},
  year = {2015},
  pages = {704-708},
  abstract = {In this paper, we develop the randomized simultaneous orthogonal matching pursuit (RandSOMP) algorithm, which computes an approximation of the Bayesian minimum mean-squared error (MMSE) estimate of an unknown row-sparse signal matrix. The approximation is based on greedy iterations, as in SOMP, and it elegantly incorporates prior knowledge of the probability distributions of the signal and noise matrices into the estimation process. Unlike the exact MMSE estimator, which is computationally intractable, the Bayesian greedy pursuit approach offers a computationally feasible way to approximate the MMSE estimate. Our simulations illustrate that the proposed RandSOMP algorithm outperforms SOMP both in terms of mean-squared error and probability of exact support recovery. The benefits of RandSOMP are further illustrated in direction-of-arrival estimation with sensor arrays and in image denoising.},
  keywords = {Bayes methods;direction-of-arrival estimation;greedy algorithms;image denoising;image fusion;iterative methods;least mean squares methods;matrix algebra;signal denoising;statistical distributions;randomized simultaneous orthogonal matching pursuit algorithm;RandSOMP algorithm;Bayesian minimum mean-squared error estimate;MMSE estimate;unknown rowsparse signal matrix;greedy iterations;signal probability distribution;noise matrices;exact support recovery probability;direction-of-arrival estimation;sensor arrays;image denoising;Decision support systems;Yttrium;Europe;Signal to noise ratio;Conferences;Bayes;minimum mean-squared error (MMSE);multichannel sparse recovery;compressed sensing},
  doi = {10.1109/EUSIPCO.2015.7362474},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104453.pdf},
}

@InProceedings{7362475,
  author = {A. Rakotomamonjy and S. Koço and L. Ralaivola},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {More efficient sparsity-inducing algorithms using inexact gradient},
  year = {2015},
  pages = {709-713},
  abstract = {In this paper, we tackle the problem of adapting a set of classic sparsity-inducing methods to cases where the gradient of the objective function is either difficult or very expensive to compute. Our contributions are two-fold: first, we propose methodologies for computing fair estimates of inexact gradients; second, we propose novel stopping criteria for computing these gradients. For each contribution we provide theoretical background and justification. In the experimental part, we study the impact of the proposed methods on two well-known algorithms, Frank-Wolfe and Orthogonal Matching Pursuit. Results on toy datasets show that inexact gradients can be as useful as exact ones, provided the appropriate stopping criterion is used.},
  keywords = {signal processing;orthogonal matching pursuit;Frank-Wolfe;inexact gradient;sparsity-inducing algorithms;Signal processing algorithms;Approximation methods;Signal processing;Indexes;Matching pursuit algorithms;Europe;Linear programming;sparse learning;greedy algorithms;inexact gradient;randomization},
  doi = {10.1109/EUSIPCO.2015.7362475},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104305.pdf},
}

@InProceedings{7362476,\n  author = {J. T. Geiger and K. Helwani},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Improving event detection for audio surveillance using Gabor filterbank features},\n  year = {2015},\n  pages = {714-718},\n  abstract = {Acoustic event detection in surveillance scenarios is an important but difficult problem. Realistic systems are struggling with noisy recording conditions. In this work, we propose to use Gabor filterbank features to detect target events in different noisy background scenes. These features capture spectro-temporal modulation frequencies in the signal, which makes them suited for the detection of non-stationary sound events. A single-class detector is constructed for each of the different target events. In a hierarchical framework, the separate detectors are combined to a multi-class detector. Experiments are performed using a database of four different target sounds and four background scenarios. On average, the proposed features outperform conventional features in all tested noise levels, in terms of detection and classification performance.},\n  keywords = {acoustic signal processing;audio databases;audio signal processing;Gabor filters;modulation;signal classification;signal detection;surveillance;audio surveillance;Gabor filterbank features;acoustic event detection;realistic systems;noisy recording conditions;noisy background scenes;spectrotemporal modulation frequencies;nonstationary sound events;single-class detector;target sounds;noise levels;Decision support systems;Europe;Signal processing;Conferences;Audio surveillance;event detection;Gabor features;noise robustness},\n  doi = {10.1109/EUSIPCO.2015.7362476},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095283.pdf},\n}\n\n
\n
\n\n\n
\n Acoustic event detection in surveillance scenarios is an important but difficult problem. Realistic systems struggle with noisy recording conditions. In this work, we propose to use Gabor filterbank features to detect target events in different noisy background scenes. These features capture spectro-temporal modulation frequencies in the signal, which makes them well suited for the detection of non-stationary sound events. A single-class detector is constructed for each of the different target events. In a hierarchical framework, the separate detectors are combined into a multi-class detector. Experiments are performed using a database of four different target sounds and four background scenarios. On average, the proposed features outperform conventional features at all tested noise levels, in terms of both detection and classification performance.\n
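The following sketch shows how spectro-temporal Gabor filterbank features of this kind are commonly computed from a log spectrogram; the kernel size, envelope width, and modulation frequencies are illustrative assumptions, not the paper's settings.

```python
import numpy as np
from scipy.signal import fftconvolve

def gabor_kernel(omega_t, omega_f, size=23, sigma=6.0):
    """2-D Gabor: complex sinusoid at (temporal, spectral) modulation
    frequencies omega_t / omega_f (rad/sample) under a Gaussian envelope."""
    r = np.arange(size) - size // 2
    tt, ff = np.meshgrid(r, r, indexing="ij")
    env = np.exp(-(tt**2 + ff**2) / (2 * sigma**2))
    return env * np.exp(1j * (omega_t * tt + omega_f * ff))

def gabor_features(log_spec, mod_freqs=(0.1, 0.2, 0.4)):
    """Stack magnitude responses of a small spectro-temporal filterbank.
    log_spec: (frames, bands) log spectrogram."""
    feats = []
    for wt in mod_freqs:
        for wf in mod_freqs:
            k = gabor_kernel(wt, wf)
            feats.append(np.abs(fftconvolve(log_spec, k, mode="same")))
    return np.stack(feats, axis=-1)        # (frames, bands, n_filters)
```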
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n HOG and subband power distribution image features for acoustic scene classification.\n \n \n \n \n\n\n \n Bisot, V.; Essid, S.; and Richard, G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 719-723, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"HOGPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362477,\n  author = {V. Bisot and S. Essid and G. Richard},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {HOG and subband power distribution image features for acoustic scene classification},\n  year = {2015},\n  pages = {719-723},\n  abstract = {Acoustic scene classification is a difficult problem mostly due to the high density of events concurrently occurring in audio scenes. In order to capture the occurrences of these events we propose to use the Subband Power Distribution (SPD) as a feature. We extract it by computing the histogram of amplitude values in each frequency band of a spectrogram image. The SPD allows us to model the density of events in each frequency band. Our method is evaluated on a large acoustic scene dataset using support vector machines. We outperform the previous methods when using the SPD in conjunction with the histogram of gradients. To reach further improvement, we also consider the use of an approximation of the earth mover's distance kernel to compare histograms in a more suitable way. Using the so-called Sinkhorn kernel improves the results on most of the feature configurations. Best performances reach a 92.8% F1 score.},\n  keywords = {acoustic imaging;acoustic signal processing;audio signal processing;gradient methods;image classification;support vector machines;histogram of gradients;HOG;subband power distribution image features;SPD;acoustic scene classification;spectrogram image;acoustic scene dataset;support vector machines;earth mover distance kernel;Sinkhorn kernel;audio scenes;Feature extraction;Spectrogram;Acoustics;Histograms;Kernel;Time-frequency analysis;Support vector machines;Acoustic scene classification;subband power distribution image;Sinkhorn distance;support vector machine},\n  doi = {10.1109/EUSIPCO.2015.7362477},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103579.pdf},\n}\n\n
\n
\n\n\n
\n Acoustic scene classification is a difficult problem, mostly due to the high density of events concurrently occurring in audio scenes. In order to capture the occurrences of these events, we propose to use the Subband Power Distribution (SPD) as a feature. We extract it by computing the histogram of amplitude values in each frequency band of a spectrogram image. The SPD allows us to model the density of events in each frequency band. Our method is evaluated on a large acoustic scene dataset using support vector machines. We outperform previous methods when using the SPD in conjunction with the histogram of gradients. For further improvement, we also consider an approximation of the earth mover's distance kernel to compare histograms in a more suitable way. Using the so-called Sinkhorn kernel improves the results on most of the feature configurations. The best configuration reaches a 92.8% F1 score.\n
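Since the SPD is described directly (a per-band histogram of spectrogram amplitudes), a minimal sketch is straightforward; the bin count and dB range below are assumptions.

```python
import numpy as np

def subband_power_distribution(spec_db, n_bins=32, lo=-80.0, hi=0.0):
    """SPD feature: for each frequency band of a spectrogram image,
    histogram the dB amplitude values seen across time and normalize,
    so each row models the density of event energies in that band.
    spec_db: (freq_bands, frames) spectrogram in dB."""
    spec_db = np.clip(spec_db, lo, hi)
    edges = np.linspace(lo, hi, n_bins + 1)
    spd = np.stack([np.histogram(band, bins=edges)[0] for band in spec_db])
    return spd / spd.sum(axis=1, keepdims=True)   # (freq_bands, n_bins)
```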
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Feature learning with deep scattering for urban sound analysis.\n \n \n \n \n\n\n \n Salamon, J.; and Bello, J. P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 724-728, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"FeaturePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362478,\n  author = {J. Salamon and J. P. Bello},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Feature learning with deep scattering for urban sound analysis},\n  year = {2015},\n  pages = {724-728},\n  abstract = {In this paper we evaluate the scattering transform as an alternative signal representation to the mel-spectrogram in the context of unsupervised feature learning for urban sound classification. We show that we can obtain comparable (or better) performance using the scattering transform whilst reducing both the amount of training data required for feature learning and the size of the learned codebook by an order of magnitude. In both cases the improvement is attributed to the local phase invariance of the representation. We also observe improved classification of sources in the background of the auditory scene, a result that provides further support for the importance of temporal modulation in sound segregation.},\n  keywords = {audio signal processing;learning (artificial intelligence);signal classification;feature learning;deep scattering;urban sound analysis;scattering transform;signal representation;mel-spectrogram;unsupervised feature learning;urban sound classification;local phase invariance;temporal modulation;sound segregation;Scattering;Transforms;Signal processing algorithms;Clustering algorithms;Modulation;Spectrogram;Algorithm design and analysis;Unsupervised learning;scattering transform;acoustic event classification;urban;machine learning},\n  doi = {10.1109/EUSIPCO.2015.7362478},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103721.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we evaluate the scattering transform as an alternative signal representation to the mel-spectrogram in the context of unsupervised feature learning for urban sound classification. We show that we can obtain comparable (or better) performance using the scattering transform whilst reducing both the amount of training data required for feature learning and the size of the learned codebook by an order of magnitude. In both cases the improvement is attributed to the local phase invariance of the representation. We also observe improved classification of sources in the background of the auditory scene, a result that provides further support for the importance of temporal modulation in sound segregation.\n
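As a rough illustration of a first-order scattering transform (modulus of band-pass filtered signal followed by local averaging), here is a numpy sketch; the Gaussian band-pass filters are a crude stand-in for the Morlet filters used in practice, and all constants are illustrative.

```python
import numpy as np

def scattering_order1(x, n_filters=8, T=1024):
    """First-order scattering coefficients S1 = avg(|x * psi_j|):
    analytic band-pass filters defined in the Fourier domain, modulus,
    then local averaging (the low-pass phi) over windows of length T.
    Assumes len(x) >= T."""
    N = len(x)
    X = np.fft.fft(x)
    freqs = np.fft.fftfreq(N)               # cycles/sample in [-0.5, 0.5)
    coeffs = []
    for j in range(n_filters):
        xi = 0.4 * 2.0 ** (-j)              # center frequency, one per octave
        sigma = xi / 4.0
        psi = np.exp(-0.5 * ((freqs - xi) / sigma) ** 2)  # band-pass filter
        u = np.abs(np.fft.ifft(X * psi))    # modulus nonlinearity
        s = u[: N - N % T].reshape(-1, T).mean(axis=1)    # local average
        coeffs.append(s)
    return np.stack(coeffs)                 # (n_filters, N // T)
```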
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic recognition of environmental sound events using all-pole group delay features.\n \n \n \n \n\n\n \n Diment, A.; Cakir, E.; Heittola, T.; and Virtanen, T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 729-733, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362479,\n  author = {A. Diment and E. Cakir and T. Heittola and T. Virtanen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Automatic recognition of environmental sound events using all-pole group delay features},\n  year = {2015},\n  pages = {729-733},\n  abstract = {A feature based on the group delay function from all-pole models (APGD) is proposed for environmental sound event recognition. The commonly used spectral features take into account merely the magnitude information, whereas the phase is overlooked due to the complications related to its interpretation. Additional information concealed in the phase is hypothesised to be beneficial for sound event recognition. The APGD is an approach to inferring phase information, which has shown applicability for speech and music analysis and is now studied in environmental audio. The evaluation is performed within a multi-label deep neural network (DNN) framework on a diverse real-life dataset of environmental sounds. It shows performance improvement compared to the baseline log mel-band energy case. Combined with the magnitude-based features, APGD demonstrates further improvement.},\n  keywords = {music;neural nets;speech processing;speech recognition;environmental sound event recognition;automatic recognition;all-pole group delay features;group delay function;phase information;speech analysis;music analysis;environmental audio;multilabel deep neural network;DNN;diverse real-life dataset;Delays;Discrete cosine transforms;Feature extraction;Computational modeling;Signal processing;Europe;Neural networks;Phase spectrum;sound event recognition;audio classification;neural networks},\n  doi = {10.1109/EUSIPCO.2015.7362479},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103759.pdf},\n}\n\n
\n
\n\n\n
\n A feature based on the group delay function from all-pole models (APGD) is proposed for environmental sound event recognition. The commonly used spectral features take into account only the magnitude information, whereas the phase is overlooked due to the complications related to its interpretation. Additional information concealed in the phase is hypothesised to be beneficial for sound event recognition. The APGD is an approach to inferring phase information which has shown its applicability for speech and music analysis and is now studied in environmental audio. The evaluation is performed within a multi-label deep neural network (DNN) framework on a diverse real-life dataset of environmental sounds. It shows a performance improvement over the baseline log mel-band energy features. Combined with the magnitude-based features, APGD demonstrates further improvement.\n
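A plausible per-frame sketch of the APGD idea, assuming the usual pipeline of Yule-Walker AR fitting followed by the group delay of the resulting all-pole filter; the model order, frame handling, and any post-processing of the feature are assumptions, not the paper's exact settings.

```python
import numpy as np
from scipy.linalg import solve_toeplitz
from scipy.signal import group_delay

def apgd_frame(frame, order=12, n_freqs=64):
    """All-pole group delay feature for one windowed frame: fit an AR
    model via the Yule-Walker equations, then take the group delay of
    the all-pole filter 1/A(z) (a phase-derived, magnitude-free cue).
    frame length must exceed `order`."""
    # biased autocorrelation at lags 0..order
    r = np.correlate(frame, frame, mode="full")[len(frame) - 1:]
    a = solve_toeplitz(r[:order], -r[1:order + 1])   # AR coefficients
    A = np.concatenate(([1.0], a))                   # denominator A(z)
    _, gd = group_delay((np.array([1.0]), A), w=n_freqs)
    return gd                                        # (n_freqs,) feature
```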
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Spatial-feature-based acoustic scene analysis using distributed microphone array.\n \n \n \n \n\n\n \n Imoto, K.; and Ono, N.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 734-738, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Spatial-feature-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362480,\n  author = {K. Imoto and N. Ono},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Spatial-feature-based acoustic scene analysis using distributed microphone array},\n  year = {2015},\n  pages = {734-738},\n  abstract = {In this paper we propose a robust and efficient method to utilize the spatial information provided by a distributed microphone array for acoustic scene analysis. In our approach, similarly to the cepstrum, which is widely used as a spectral feature, the logarithm of the amplitude in multichannel observation is converted to a feature vector by a linear orthogonal transformation. Then, the spatial information of the acoustic scene is represented in the spatial feature space. This approach does not require the positions of the microphones and is not sensitive to the synchronization mismatch of channels, both of which make the method suitable for use with a distributed microphone array. Experimental results using reallife environmental sounds show the validity of our approach even when a smaller feature dimension than the original one is used.},\n  keywords = {acoustic signal processing;cepstral analysis;feature extraction;microphone arrays;transforms;spatial cepstrum;linear orthogonal transformation;feature vector;multichannel observation;amplitude logarithm;spatial information utilization;distributed microphone array;spatial-feature-based acoustic scene analysis;Cepstrum;Microphones;Feature extraction;TV;Arrays;Image analysis;Acoustic scene analysis;distributed microphone array;spatial cepstrum;symmetric microphone array;isotropic sound field},\n  doi = {10.1109/EUSIPCO.2015.7362480},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104903.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we propose a robust and efficient method to utilize the spatial information provided by a distributed microphone array for acoustic scene analysis. In our approach, similarly to the cepstrum, which is widely used as a spectral feature, the logarithm of the amplitude of the multichannel observation is converted to a feature vector by a linear orthogonal transformation. The spatial information of the acoustic scene is then represented in the spatial feature space. This approach does not require the positions of the microphones and is not sensitive to synchronization mismatch between channels, both of which make the method suitable for use with a distributed microphone array. Experimental results using real-life environmental sounds show the validity of our approach even when a smaller feature dimension than the original one is used.\n
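A minimal sketch of the described construction: the log of per-channel amplitudes followed by a linear orthogonal transform (PCA here), mirroring the log-then-orthogonal-transform recipe of the cepstrum. It assumes per-frame channel amplitudes are already available and is not the authors' exact implementation.

```python
import numpy as np

def spatial_feature(amplitudes, n_components=4):
    """Spatial-cepstrum-style feature from a distributed mic array.
    amplitudes: (frames, channels) non-negative per-channel amplitudes."""
    logp = np.log(amplitudes + 1e-12)            # log amplitude, cf. cepstrum
    logp -= logp.mean(axis=0, keepdims=True)
    cov = logp.T @ logp / len(logp)
    w, V = np.linalg.eigh(cov)                   # orthogonal basis (PCA)
    order = np.argsort(w)[::-1][:n_components]   # keep leading components
    return logp @ V[:, order]                    # (frames, n_components)
```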
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hyperspectral applications in the global transportation infrastructure.\n \n \n \n \n\n\n \n Bridgelall, R.; Rafert, J. B.; and Tolliver, D.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 739-743, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"HyperspectralPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362481,\n  author = {R. Bridgelall and J. B. Rafert and D. Tolliver},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Hyperspectral applications in the global transportation infrastructure},\n  year = {2015},\n  pages = {739-743},\n  abstract = {Hyperspectral remote sensing is an emerging field with potential applications in the observation, management, and maintenance of the global transportation infrastructure. This study introduces a general analytical framework to link transportation systems analysis and hyperspectral analysis. The authors introduce a range of applications that would benefit from the capabilities of hyperspectral remote sensing. They selected three critical but unrelated applications and identified both the spatial and spectral information of their key operational characteristics to demonstrate the hyperspectral utility. The specific scenario studies exemplifies the general approach of utilizing the outputs of hyperspectral analysis to improve models that practitioners currently use to analyze a variety of transportation problems including roadway congestion forecasting, railway condition monitoring, and pipeline risk management.},\n  keywords = {geophysical image processing;railways;transportation;hyperspectral utility;roadway congestion forecasting;railway condition monitoring;pipeline risk management;transportation system analysis;hyperspectral remote sensing;global transportation infrastructure;Hyperspectral imaging;Vehicles;Analytical models;Resistance;Hyperspectral image processing;intelligent transportation systems;remote sensing;smart infrastructure;unmanned aircraft systems},\n  doi = {10.1109/EUSIPCO.2015.7362481},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570092793.pdf},\n}\n\n
\n
\n\n\n
\n Hyperspectral remote sensing is an emerging field with potential applications in the observation, management, and maintenance of the global transportation infrastructure. This study introduces a general analytical framework to link transportation systems analysis and hyperspectral analysis. The authors introduce a range of applications that would benefit from the capabilities of hyperspectral remote sensing. They selected three critical but unrelated applications and identified both the spatial and spectral information of their key operational characteristics to demonstrate the hyperspectral utility. The specific scenario studies exemplify the general approach of utilizing the outputs of hyperspectral analysis to improve models that practitioners currently use to analyze a variety of transportation problems, including roadway congestion forecasting, railway condition monitoring, and pipeline risk management.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multilinear spectral unmixing of hyperspectral multiangle images.\n \n \n \n \n\n\n \n Veganzones, M. A.; Cohen, J.; Farias, R. C.; Marrero, R.; Chanussot, J.; and Comon, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 744-748, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MultilinearPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362482,\n  author = {M. A. Veganzones and J. Cohen and R. C. Farias and R. Marrero and J. Chanussot and P. Comon},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Multilinear spectral unmixing of hyperspectral multiangle images},\n  year = {2015},\n  pages = {744-748},\n  abstract = {Spectral unmixing is one of the most important and studied topics in hyperspectral image analysis. By means of spectral unmixing it is possible to decompose a hyperspectral image in its spectral components, the so-called endmembers, and their respective fractional spatial distributions, so-called abundance maps. New hyperspectral missions will allow to acquire hyperspectral images in new ways, for instance, in temporal series or in multi-angular acquisitions. Working with these incoming huge databases of multi-way hyperspec-tral images will raise new challenges to the hyperspectral community. Here, we propose the use of compression-based non-negative tensor canonical polyadic (CP) decompositions to analyze this kind of datasets. Furthermore, we show that the non-negative CP decomposition could be understood as a multi-linear spectral unmixing technique. We evaluate the proposed approach by means of Mars synthetic datasets built upon multi-angular in-lab hyperspectral acquisitions.},\n  keywords = {data compression;hyperspectral imaging;image coding;matrix decomposition;multilinear spectral unmixing technique;hyperspectral multiangle images;hyperspectral image analysis;fractional spatial distributions;multiangular acquisitions;image compression;nonnegative tensor canonical polyadic decompositions;Mars synthetic datasets;Tensile stress;Hyperspectral imaging;Europe;Matrix decomposition;Image coding;Least squares approximations;Multilinear spectral unmixing;hyper-spectral multiangle images;multiway analysis;Canonical Polyadic;nonnegative tensor decomposition},\n  doi = {10.1109/EUSIPCO.2015.7362482},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103621.pdf},\n}\n\n
\n
\n\n\n
\n Spectral unmixing is one of the most important and most studied topics in hyperspectral image analysis. By means of spectral unmixing it is possible to decompose a hyperspectral image into its spectral components, the so-called endmembers, and their respective fractional spatial distributions, the so-called abundance maps. New hyperspectral missions will make it possible to acquire hyperspectral images in new ways, for instance as temporal series or as multi-angular acquisitions. Working with these incoming huge databases of multi-way hyperspectral images will raise new challenges for the hyperspectral community. Here, we propose the use of compression-based non-negative tensor canonical polyadic (CP) decompositions to analyze this kind of dataset. Furthermore, we show that the non-negative CP decomposition can be understood as a multilinear spectral unmixing technique. We evaluate the proposed approach by means of Mars synthetic datasets built upon multi-angular in-lab hyperspectral acquisitions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 'On the fly' dimensionality reduction for hyperspectral image acquisition.\n \n \n \n \n\n\n \n Zabalza, J.; Ren, J.; and Marshall, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 749-753, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"'OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362483,\n  author = {J. Zabalza and J. Ren and S. Marshall},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {'On the fly' dimensionality reduction for hyperspectral image acquisition},\n  year = {2015},\n  pages = {749-753},\n  abstract = {Hyperspectral imaging (HSI) devices produce 3-D hyper-cubes of a spatial scene in hundreds of different spectral bands, generating large data sets which allow accurate data processing to be implemented. However, the large dimensionality of hypercubes leads to subsequent implementation of dimensionality reduction techniques such as principal component analysis (PCA), where the covariance matrix is constructed in order to perform such analysis. In this paper, we describe how the covariance matrix of an HSI hypercube can be computed in real time `on the fly' during the data acquisition process. This offers great potential for HSI embedded devices to provide not only conventional HSI data but also preprocessed information.},\n  keywords = {covariance matrices;data acquisition;hypercube networks;hyperspectral imaging;image processing;principal component analysis;hyperspectral image acquisition;on the fly dimensionality reduction;3D hypercubes;spatial scene;spectral bands;principal component analysis;PCA;covariance matrix;HSI hypercube;data acquisition;Covariance matrices;Real-time systems;Hypercubes;Principal component analysis;Signal processing;Europe;Cameras;Covariance matrix;data reduction;hypercube;hyperspectral cameras;principal component analysis (PCA)},\n  doi = {10.1109/EUSIPCO.2015.7362483},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104345.pdf},\n}\n\n
\n
\n\n\n
\n Hyperspectral imaging (HSI) devices produce 3-D hypercubes of a spatial scene in hundreds of different spectral bands, generating large data sets on which accurate data processing can be performed. However, the large dimensionality of hypercubes calls for dimensionality reduction techniques such as principal component analysis (PCA), for which the covariance matrix must be constructed. In this paper, we describe how the covariance matrix of an HSI hypercube can be computed in real time, 'on the fly', during the data acquisition process. This offers great potential for HSI embedded devices to provide not only conventional HSI data but also preprocessed information.\n
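The 'on the fly' computation rests on the fact that the covariance only needs the running sums of x and x xᵀ, which can be accumulated chunk by chunk (e.g. one scan line at a time) during acquisition. A minimal sketch, with class and method names of my own choosing:

```python
import numpy as np

class StreamingCovariance:
    """Accumulate the covariance of spectral vectors while a hyperspectral
    sensor delivers pixels, so PCA can start as soon as the acquisition
    ends, with no second pass over the stored hypercube."""
    def __init__(self, n_bands):
        self.n = 0
        self.s = np.zeros(n_bands)               # running sum of x
        self.ss = np.zeros((n_bands, n_bands))   # running sum of x x^T

    def update(self, pixels):
        """pixels: (k, n_bands) chunk, e.g. one scan line."""
        self.n += len(pixels)
        self.s += pixels.sum(axis=0)
        self.ss += pixels.T @ pixels

    def covariance(self):
        mu = self.s / self.n
        return self.ss / self.n - np.outer(mu, mu)
```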
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An analysis of collaborative representation schemes for the classification of hyperspectral images.\n \n \n \n \n\n\n \n Dalla Mura, M.; Bioucas-Dias, J. M.; and Chanussot, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 754-758, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362484,\n  author = {M. {Dalla Mura} and J. M. Bioucas-Dias and J. Chanussot},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An analysis of collaborative representation schemes for the classification of hyperspectral images},\n  year = {2015},\n  pages = {754-758},\n  abstract = {Collaborative-based representation classifiers have widely spread in the latest years achieving remarkable results in signal and image processing tasks. In this paper, we consider these approaches for the hyperspectral image classification. Specifically, we focus on collaborative and sparse representation classifiers and we perform an investigation on the role of the different regularizations and constraints that can be considered with respect to the classification performance. In addition, we propose to consider the Nearest Subspace Classifier with regularization which, from the experiments, has proven to be a competitive classification technique. Experimental results have been conducted considering both spectral and spatial features of a real hyperspectral image.},\n  keywords = {geophysical image processing;hyperspectral imaging;image classification;image representation;collaborative representation scheme;hyperspectral image classification;collaborative-based representation classifier;image processing;sparse representation classifier;nearest subspace classifier;Collaboration;Training;Hyperspectral imaging;Dictionaries;Europe;Signal processing;Sparse representation classification;collaborative classiication;nearest subspace classiier;hyper-spectral imaging;remote sensing},\n  doi = {10.1109/EUSIPCO.2015.7362484},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105153.pdf},\n}\n\n
\n
\n\n\n
\n Collaborative representation-based classifiers have spread widely in recent years, achieving remarkable results in signal and image processing tasks. In this paper, we consider these approaches for hyperspectral image classification. Specifically, we focus on collaborative and sparse representation classifiers, and we investigate the role that the different regularizations and constraints play in classification performance. In addition, we propose to consider the Nearest Subspace Classifier with regularization, which our experiments show to be a competitive classification technique. Experiments were conducted considering both spectral and spatial features of a real hyperspectral image.\n
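For concreteness, a sketch of a standard collaborative representation classifier (in the CRC-RLS style, with the class-wise regularized residual rule); this is the generic method such papers analyze, not the specific regularization variants studied here.

```python
import numpy as np

def crc_classify(D, labels, y, lam=0.01):
    """Collaborative representation classifier (CRC-RLS style):
    code y over the whole training dictionary with a ridge penalty,
    then assign the class whose atoms best reconstruct y.
    D: (d, n) training samples as columns (l2-normalized); labels: (n,)."""
    P = np.linalg.solve(D.T @ D + lam * np.eye(D.shape[1]), D.T)  # ridge
    alpha = P @ y                                  # collaborative code
    best, best_res = None, np.inf
    for c in np.unique(labels):
        mask = labels == c
        res = np.linalg.norm(y - D[:, mask] @ alpha[mask])
        res /= np.linalg.norm(alpha[mask]) + 1e-12  # CRC's regularized residual
        if res < best_res:
            best, best_res = c, res
    return best
```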
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n New hierarchical joint classification method for SAR-optical multiresolution remote sensing data.\n \n \n \n\n\n \n Hedhli, I.; Moser, G.; Serpico, S. B.; and Zerubia, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 759-763, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362485,\n  author = {I. Hedhli and G. Moser and S. B. Serpico and J. Zerubia},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {New hierarchical joint classification method for SAR-optical multiresolution remote sensing data},\n  year = {2015},\n  pages = {759-763},\n  abstract = {In this paper, we develop a novel classification approach for multiresolution, multisensor (optical and synthetic aperture radar), and/or multiband images. Accurate and time-efficient classification methods are particularly important tools to support rapid and reliable assessment of the ground changes. Given the huge amount and variety of data available currently from last-generation satellite missions, the main difficulty is to develop a classifier that can take benefit of multiband, multiresolution, and multisensor input imagery. The proposed method addresses the problem of multisensor fusion of SAR with optical data for classification purposes, and allows input data collected at multiple resolutions and additional multiscale features derived through wavelets to be fused.},\n  keywords = {feature extraction;geophysical image processing;image classification;image fusion;image resolution;optical information processing;radar imaging;remote sensing by radar;synthetic aperture radar;wavelet transforms;hierarchical joint classification method;SAR-optical multiresolution remote sensing data;multisensor images;multiband images;synthetic aperture radar;ground changes assessment;last-generation satellite missions;multisensor fusion;optical data;multiscale features;Image resolution;Optical sensors;Optical imaging;Signal resolution;Optical signal processing;Synthetic aperture radar;Multisensor;multiresolution remote sensing images;supervised classification;hierarchical Markov random fields},\n  doi = {10.1109/EUSIPCO.2015.7362485},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n In this paper, we develop a novel classification approach for multiresolution, multisensor (optical and synthetic aperture radar), and/or multiband images. Accurate and time-efficient classification methods are particularly important tools to support rapid and reliable assessment of ground changes. Given the huge amount and variety of data currently available from last-generation satellite missions, the main difficulty is to develop a classifier that can benefit from multiband, multiresolution, and multisensor input imagery. The proposed method addresses the problem of multisensor fusion of SAR with optical data for classification purposes, and allows input data collected at multiple resolutions, together with additional multiscale features derived through wavelets, to be fused.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n From biometric to forensic hashing: Challenges in digital crime scene trace analysis.\n \n \n \n \n\n\n \n Vielhauer, C.; and Dittmann, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 764-768, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"FromPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362486,\n  author = {C. Vielhauer and J. Dittmann},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {From biometric to forensic hashing: Challenges in digital crime scene trace analysis},\n  year = {2015},\n  pages = {764-768},\n  abstract = {The known BioHash concept introduced e.g. for handwriting biometrics offers possibility of template protection or to derive individual keys (e.g. crypto keys for further protection). In our paper we introduce two forensic use cases: (A) the forensic investigation of a BioHash found during digital forensics and (B) the application of the BioHash to latent crime scene traces in digitized forensics. Firstly, we elaborate the design of the BioHash in the known two operation modes with their essential parameter settings. Secondly we analyze, which forensic information can be derived and interpreted from publicly available data by introducing four investigation purposes. Further, we show that the BioHash can be used for a privacy-preserving search or to enhance reproducibility of varying features in crime scene forensics.},\n  keywords = {biometrics (access control);digital forensics;police data processing;BioHash concept;biometric hashing;forensic hashing;digital crime scene trace analysis;template protection;digital forensics;forensic information;privacy-preserving search;crime scene forensics;Forensics;TV;Robustness;Writing;Sensitivity;Error analysis;Semantics;Biometrics;Passive forensic analysis},\n  doi = {10.1109/EUSIPCO.2015.7362486},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095405.pdf},\n}\n\n
\n
\n\n\n
\n The well-known BioHash concept, introduced e.g. for handwriting biometrics, offers the possibility of template protection and of deriving individual keys (e.g. crypto keys for further protection). In our paper we introduce two forensic use cases: (A) the forensic investigation of a BioHash found during digital forensics, and (B) the application of the BioHash to latent crime scene traces in digitized forensics. First, we elaborate on the design of the BioHash in its two known operation modes with their essential parameter settings. Second, we analyze which forensic information can be derived and interpreted from publicly available data by introducing four investigation purposes. Further, we show that the BioHash can be used for a privacy-preserving search or to enhance the reproducibility of varying features in crime scene forensics.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Can contact-free measurement of heartbeat signal be used in forensics?.\n \n \n \n \n\n\n \n Haque, M. A.; Nasrollah, K.; and Moeslund, T. B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 769-773, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CanPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362487,\n  author = {M. A. Haque and K. Nasrollah and T. B. Moeslund},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Can contact-free measurement of heartbeat signal be used in forensics?},\n  year = {2015},\n  pages = {769-773},\n  abstract = {Biometrics and soft biometrics characteristics are of great importance in forensics applications for identifying criminals and law enforcement. Developing new biometrics and soft biometrics are therefore of interest of many applications, among them forensics. Heartbeat signals have been previously used as biometrics, but they have been measured using contact-based sensors. This paper extracts heartbeat signals, using a contact-free method by a simple webcam. The extracted signals in this case are not as precise as those that can be extracted using contact-based sensors. However, the contact-free extracted heartbeat signals are shown in this paper to have some potentials to be used as soft biometrics. Promising experimental results on a public database, have shown that utilizing these signals can improve the accuracy of spoofing detection in a face recognition system.},\n  keywords = {biometrics (access control);cardiology;digital forensics;face recognition;medical signal detection;soft biometrics characteristics;forensics applications;contact-based sensors;webcam;contact-free extracted heartbeat signals;spoofing detection;face recognition system;Face;Videos;Heart beat;Feature extraction;Biometrics (access control);Forensics;Forensics;biometrics;soft biometrics;heartbeat signals},\n  doi = {10.1109/EUSIPCO.2015.7362487},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096247.pdf},\n}\n\n
\n
\n\n\n
\n Biometric and soft-biometric characteristics are of great importance in forensic applications for identifying criminals and supporting law enforcement. Developing new biometrics and soft biometrics is therefore of interest to many applications, among them forensics. Heartbeat signals have previously been used as biometrics, but they have been measured using contact-based sensors. This paper extracts heartbeat signals using a contact-free method based on a simple webcam. The signals extracted in this way are not as precise as those obtained with contact-based sensors. However, this paper shows that contact-free extracted heartbeat signals have some potential to be used as soft biometrics. Promising experimental results on a public database show that utilizing these signals can improve the accuracy of spoofing detection in a face recognition system.\n
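A minimal sketch of the standard contact-free pipeline such work builds on: average the green channel over a face region per frame, band-pass to plausible pulse rates, and read off the dominant spectral peak. The cutoffs and filter order are assumptions, not the paper's settings.

```python
import numpy as np
from scipy.signal import butter, filtfilt

def heart_rate_from_webcam(green_trace, fps=30.0):
    """Estimate heart rate (bpm) from the mean green-channel intensity
    of a face ROI sampled once per video frame."""
    x = np.asarray(green_trace, dtype=float)
    x = x - x.mean()
    # keep roughly 42-240 bpm (0.7-4.0 Hz), normalized to Nyquist
    b, a = butter(3, [0.7 / (fps / 2), 4.0 / (fps / 2)], btype="band")
    x = filtfilt(b, a, x)
    spec = np.abs(np.fft.rfft(x)) ** 2
    freqs = np.fft.rfftfreq(len(x), d=1.0 / fps)
    return 60.0 * freqs[np.argmax(spec)]   # dominant peak, in bpm
```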
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Beyond the eye of the beholder: On a forensic descriptor of the eye region.\n \n \n \n \n\n\n \n Zeinstra, C. G.; Veldhuis, R. N. J.; and Spreeuwers, L. J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 774-778, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BeyondPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362488,\n  author = {C. G. Zeinstra and R. N. J. Veldhuis and L. J. Spreeuwers},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Beyond the eye of the beholder: On a forensic descriptor of the eye region},\n  year = {2015},\n  pages = {774-778},\n  abstract = {The task of forensic facial experts is to assess the likelihood whether a suspect is depicted on crime scene images. They typically (a) use morphological analysis when comparing parts of the facial region, and (b) combine this partial evidence into a final judgment. Facial parts can be considered as soft biometric modalities and in recent years have been studied in the biometric community. In this paper we focus on the region around the eye from a forensic perspective by applying the FISWG feature list of the eye modality. We compare existing work from the soft biometric perspective based on a texture descriptor with our approach.},\n  keywords = {face recognition;feature extraction;gaze tracking;image forensics;eye region forensic descriptor;beholder eye;forensic facial experts;crime scene image;morphological analysis;soft biometric modalities;biometric community;FISWG feature list;eye modality;texture descriptor;Shape;Iris;Forensics;Image color analysis;Face;Iris recognition;Eyelids;Soft biometrics;eye region;forensics;FISWG},\n  doi = {10.1109/EUSIPCO.2015.7362488},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097247.pdf},\n}\n\n
\n
\n\n\n
\n The task of forensic facial experts is to assess the likelihood that a suspect is depicted in crime scene images. They typically (a) use morphological analysis when comparing parts of the facial region, and (b) combine this partial evidence into a final judgment. Facial parts can be considered soft biometric modalities and have been studied in the biometric community in recent years. In this paper we focus on the region around the eye from a forensic perspective by applying the FISWG feature list of the eye modality. We compare existing work from the soft biometric perspective, based on a texture descriptor, with our approach.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Blind biometric source sensor recognition using advanced PRNU fingerprints.\n \n \n \n \n\n\n \n Debiasi, L.; and Uhl, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 779-783, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BlindPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362489,\n  author = {L. Debiasi and A. Uhl},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Blind biometric source sensor recognition using advanced PRNU fingerprints},\n  year = {2015},\n  pages = {779-783},\n  abstract = {Previous device identification studies on the iris sensors of the CASIA-Iris V4 database using PRNU fingerprints showed high variations regarding the differentiability of the sensors. These variations may have been caused by the usage of multiple sensors of the same model for the image acquisition. Since no speciic documentation on this exists we investigate the presence of multiple image sensors in the data sets. The images under investigation, furthermore, show a strong correlation regarding their content, therefore we make use of different PRNU enhancements approaches based on weighting the PRNU depending on the image content. The enhanced PRNU is used in conjunction with different forensic techniques to detect the presence of multiple sensors in the data sets. Finally, the results of the enhancement approaches and the results without any PRNU enhancement are compared and an assessment on whether multiple sensor instances have been used in the data sets is given.},\n  keywords = {fingerprint identification;blind biometric source sensor recognition;advanced PRNU fingerprints;CASIA-Iris V4 database;image acquisition;multiple image sensors;enhancements approaches;forensic techniques;Forensics;Databases;Fingerprint recognition;Correlation;Signal processing;Digital images;Iris;Digital image forensics;Biometric sensor forensics;PRNU;Sensor identification},\n  doi = {10.1109/EUSIPCO.2015.7362489},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105061.pdf},\n}\n\n
\n
\n\n\n
\n Previous device identification studies on the iris sensors of the CASIA-Iris V4 database using PRNU fingerprints showed high variations in the differentiability of the sensors. These variations may have been caused by the use of multiple sensors of the same model for the image acquisition. Since no specific documentation on this exists, we investigate the presence of multiple image sensors in the data sets. Furthermore, the images under investigation show a strong correlation in their content; we therefore make use of different PRNU enhancement approaches based on weighting the PRNU depending on the image content. The enhanced PRNU is used in conjunction with different forensic techniques to detect the presence of multiple sensors in the data sets. Finally, the results of the enhancement approaches are compared with the results without any PRNU enhancement, and an assessment is given of whether multiple sensor instances have been used in the data sets.\n
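For orientation, a sketch of the basic (unenhanced) PRNU pipeline: noise residuals from a denoising filter, a maximum-likelihood-style fingerprint estimate, and a normalized correlation detector. The Gaussian denoiser stands in for the stronger wavelet denoisers used in PRNU work, and the content-dependent weighting the paper studies is omitted.

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def noise_residual(img, sigma=1.0):
    """Crude denoising residual W = I - F(I); real PRNU pipelines use
    wavelet-based denoisers, a Gaussian filter only sketches the idea.
    img: 2-D float array (one grayscale image)."""
    return img - gaussian_filter(img, sigma)

def prnu_fingerprint(images):
    """ML-style estimate K ~ sum(W_i * I_i) / sum(I_i^2) over same-size images."""
    num = sum(noise_residual(i) * i for i in images)
    den = sum(i * i for i in images) + 1e-12
    return num / den

def ncc(a, b):
    """Normalized correlation used to match a residual to a fingerprint."""
    a, b = a - a.mean(), b - b.mean()
    return float((a * b).sum() / (np.linalg.norm(a) * np.linalg.norm(b)))
```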
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Wideband speech coding with hybrid digital-analog transmission.\n \n \n \n \n\n\n \n Rüngeler, M.; Kleifgen, F.; and Vary, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 784-788, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"WidebandPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362490,\n  author = {M. Rüngeler and F. Kleifgen and P. Vary},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Wideband speech coding with hybrid digital-analog transmission},\n  year = {2015},\n  pages = {784-788},\n  abstract = {Efficient digital transmission of speech requires source coding which comes at the price of unavoidable quantization errors. Thus, even in clear channel conditions, the quality of the decoded speech signal is limited due to the quantization errors. Hybrid Digital-Analog (HDA) codes circumvent this limitation by additionally transmitting the quantization error with quasi-analog methods (discrete-time, quasi-continuous-amplitude) with neither increasing the total transmission power, nor the occupied frequency bandwidth on the radio channel. So far, the HDA concept has mainly been applied to random parameters. In this paper, the HDA concept is adapted to the transmission of wideband speech signals using PCM and ADPCM coding. By experimental verification it is shown that the HDA concept may outperform conventional purely digital transmission systems at all channel qualities while additionally eliminating the quality saturation effect.},\n  keywords = {quantisation (signal);speech coding;wideband speech coding;hybrid digital-analog transmission;digital transmission;quantization errors;decoded speech signal;quantization errors;hybrid digital-analog codes;quantization error;quasi-analog methods;HDA concept;ADPCM coding;digital transmission systems;channel qualities;quality saturation effect;Speech;Quantization (signal);Speech coding;Decoding;Digital-analog conversion;Phase change materials;Hybrid Digital-Analog (HDA) transmission;lattice ADPCM speech coding},\n  doi = {10.1109/EUSIPCO.2015.7362490},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095877.pdf},\n}\n\n
\n
\n\n\n
\n Efficient digital transmission of speech requires source coding, which comes at the price of unavoidable quantization errors. Thus, even in clear channel conditions, the quality of the decoded speech signal is limited by the quantization errors. Hybrid Digital-Analog (HDA) codes circumvent this limitation by additionally transmitting the quantization error with quasi-analog methods (discrete-time, quasi-continuous-amplitude), increasing neither the total transmission power nor the occupied frequency bandwidth on the radio channel. So far, the HDA concept has mainly been applied to random parameters. In this paper, the HDA concept is adapted to the transmission of wideband speech signals using PCM and ADPCM coding. Experimental verification shows that the HDA concept can outperform conventional purely digital transmission systems at all channel qualities while additionally eliminating the quality saturation effect.\n
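A toy numerical illustration of the HDA principle (not the paper's ADPCM system): the PCM part is assumed error-free, and the quantization error is sent as a discrete-time, quasi-continuous-amplitude value over an AWGN channel, so reconstruction quality keeps improving as the channel gets cleaner instead of saturating at the quantizer's floor.

```python
import numpy as np

def hda_transmit(x, n_bits=4, noise_std=0.05, rng=None):
    """Toy hybrid digital-analog link for samples x in [-1, 1]:
    uniform PCM (digital part, assumed error-free here) plus the
    quantization error sent in quasi-analog form over an AWGN channel."""
    rng = rng or np.random.default_rng(0)
    step = 2.0 / (2 ** n_bits)
    xq = np.clip(np.round(x / step) * step, -1.0, 1.0)  # PCM reconstruction
    err = x - xq                                        # analog refinement
    scale = 1.0 / step                                  # fill the channel range
    rx = (err * scale + noise_std * rng.standard_normal(x.shape)) / scale
    return xq + rx   # cleaner channel -> rx closer to err -> better quality
```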
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Harmonic model for MDCT based audio coding with LPC envelope.\n \n \n \n \n\n\n \n Moriya, T.; Kamamoto, Y.; Harada, N.; Bäckström, T.; Helmrich, C.; and Fuchs, G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 789-793, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"HarmonicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362491,\n  author = {T. Moriya and Y. Kamamoto and N. Harada and T. Bäckström and C. Helmrich and G. Fuchs},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Harmonic model for MDCT based audio coding with LPC envelope},\n  year = {2015},\n  pages = {789-793},\n  abstract = {Conventional music coders, based on a modified discrete cosine transform (MDCT) suffer greatly when lowering their bit-rate and delay. In particular, tonal music signals are penalized by short analysis windows and the variable length coding of the quantized MDCT coefficients demands a significant amount of bits for coding the harmonic structure. For solving such an issue, the paper proposes a frequency-domain harmonic model aiming to amend the probability model of the variable length coding of the quantized MDCT coefficients. The new model was combined successfully with an envelope based arithmetic coding at rate lower than 10 kbps, and with a context based arithmetic coding at higher bit rates in the recent 3 GPP EVS (Enhanced Voice Services) codec standard. Objective and subjective quality tests indicate that the proposed harmonic model enhances the quality of music for low-delay audio coding.},\n  keywords = {arithmetic codes;audio coding;discrete cosine transforms;frequency-domain analysis;linear predictive coding;music;probability;variable length codes;audio coding;LPC envelope;music coder;modified discrete cosine transform;tonal music signal;short analysis window;variable length coding;quantized MDCT coefficient;frequency-domain harmonic model;probability model;arithmetic coding;3GPP EVS codec standard;enhanced voice service;linear predictive coding;Harmonic analysis;Encoding;Power harmonic filters;Context;Frequency-domain analysis;Indexes;Speech;MDCT;envelope;harmonic interval;arithmetic coding;EVS},\n  doi = {10.1109/EUSIPCO.2015.7362491},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096669.pdf},\n}\n\n
\n
\n\n\n
\n Conventional music coders based on the modified discrete cosine transform (MDCT) suffer greatly when lowering their bit-rate and delay. In particular, tonal music signals are penalized by short analysis windows, and the variable-length coding of the quantized MDCT coefficients demands a significant number of bits for coding the harmonic structure. To solve this issue, the paper proposes a frequency-domain harmonic model aiming to amend the probability model of the variable-length coding of the quantized MDCT coefficients. The new model was combined successfully with an envelope-based arithmetic coding at rates lower than 10 kbps, and with a context-based arithmetic coding at higher bit rates, in the recent 3GPP EVS (Enhanced Voice Services) codec standard. Objective and subjective quality tests indicate that the proposed harmonic model enhances the quality of music for low-delay audio coding.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Low-complexity semi-parametric joint-stereo audio transform coding.\n \n \n \n \n\n\n \n Helmrich, C. R.; Niedermeier, A.; Bayer, S.; and Edler, B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 794-798, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Low-complexityPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362492,\n  author = {C. R. Helmrich and A. Niedermeier and S. Bayer and B. Edler},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Low-complexity semi-parametric joint-stereo audio transform coding},\n  year = {2015},\n  pages = {794-798},\n  abstract = {Traditional audio codecs based on real-valued transforms utilize separate and largely independent algorithmic schemes for parametric coding of noise-like or high-frequency spectral components as well as channel pairs. It is shown that in the frequency-domain part of coders such as Extended HE-AAC, these schemes can be unified into a single algorithmic block located at the core of the modified discrete cosine transform path, enabling greater flexibility like semi-parametric coding and large savings in codec delay and complexity. This paper focuses on the stereo coding aspect of this block and demonstrates that, by using specially chosen spectral configurations when deriving the parametric side-information in the encoder, perceptual artifacts can be reduced and the spatial processing in the decoder can remain real-valued. Listening tests confirm the benefit of our proposal at intermediate bit-rates.},\n  keywords = {audio coding;codecs;discrete cosine transforms;frequency-domain analysis;low-complexity semiparametric audio transform coding;joint-stereo audio transform coding;audio codecs;real-valued transforms;channel pairs;frequency domain;extended HE-AAC;single algorithmic block;discrete cosine transform path;codec delay;parametric side-information;spatial processing;Encoding;Decoding;Transform coding;Filling;Codecs;Delays;Frequency-domain analysis;Audio coding;decorrelation;MDCT;stereo},\n  doi = {10.1109/EUSIPCO.2015.7362492},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102685.pdf},\n}\n\n
\n
\n\n\n
\n Traditional audio codecs based on real-valued transforms utilize separate and largely independent algorithmic schemes for parametric coding of noise-like or high-frequency spectral components as well as channel pairs. It is shown that in the frequency-domain part of coders such as Extended HE-AAC, these schemes can be unified into a single algorithmic block located at the core of the modified discrete cosine transform path, enabling greater flexibility like semi-parametric coding and large savings in codec delay and complexity. This paper focuses on the stereo coding aspect of this block and demonstrates that, by using specially chosen spectral configurations when deriving the parametric side-information in the encoder, perceptual artifacts can be reduced and the spatial processing in the decoder can remain real-valued. Listening tests confirm the benefit of our proposal at intermediate bit-rates.\n
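As background for the joint-stereo block discussed here, a minimal mid/side transform sketch; the paper's scheme operates on MDCT bands with parametric side information, which this plain waveform-domain version does not attempt to reproduce.

```python
import numpy as np

def ms_encode(left, right):
    """Orthonormal mid/side transform, the starting point of most
    joint-stereo tools: mid carries the common content, side the
    inter-channel difference (often cheap or parametric to code)."""
    mid = (left + right) / np.sqrt(2.0)
    side = (left - right) / np.sqrt(2.0)
    return mid, side

def ms_decode(mid, side):
    """Exact inverse of ms_encode."""
    return (mid + side) / np.sqrt(2.0), (mid - side) / np.sqrt(2.0)
```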
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A quasi-orthogonal, invertible, and perceptually relevant time-frequency transform for audio coding.\n \n \n \n \n\n\n \n Derrien, O.; Necciarf, T.; and Balazs, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 799-803, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362493,\n  author = {O. Derrien and T. Necciarf and P. Balazs},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A quasi-orthogonal, invertible, and perceptually relevant time-frequency transform for audio coding},\n  year = {2015},\n  pages = {799-803},\n  abstract = {We describe ERB-MDCT, an invertible real-valued time-frequency transform based on MDCT, which is widely used in audio coding (e.g. MP3 and AAC). ERB-MDCT was designed similarly to ERBLet, a recent invertible transform with a resolution evolving across frequency to match the perceptual ERB frequency scale, while the frequency scale in most invertible transforms (e.g. MDCT) is uniform. ERB-MDCT has mostly the same frequency scale as ERBLet, but the main improvement is that atoms are quasi-orthogonal, i.e. its redundancy is close to 1. Furthermore, the energy is more sparse in the time-frequency plane. Thus, it is more suitable for audio coding than ERBLet.},\n  keywords = {audio coding;time-frequency analysis;transform coding;transforms;quasiorthogonal time-frequency transform;perceptually relevant time-frequency transform;ERB-MDCT;invertible real-valued time-frequency transform;audio coding;perceptual ERB frequency scale;equivalent rectangular bandwidth;Redundancy;Transforms;Audio coding;Signal resolution;Time-frequency analysis;Bandwidth;Non-stationary time-frequency transforms;ERB filters;MDCT;Audio coding},\n  doi = {10.1109/EUSIPCO.2015.7362493},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570092829.pdf},\n}\n\n
\n
\n\n\n
\n We describe ERB-MDCT, an invertible real-valued time-frequency transform based on the MDCT, which is widely used in audio coding (e.g. MP3 and AAC). ERB-MDCT was designed similarly to ERBLet, a recent invertible transform whose resolution evolves across frequency to match the perceptual ERB frequency scale, whereas the frequency scale of most invertible transforms (e.g. the MDCT) is uniform. ERB-MDCT has mostly the same frequency scale as ERBLet, but the main improvement is that its atoms are quasi-orthogonal, i.e. its redundancy is close to 1. Furthermore, the energy is sparser in the time-frequency plane. This makes it more suitable for audio coding than ERBLet.\n
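The ERB scale referred to here follows the standard Glasberg-Moore formulas; below is a short sketch of how band centers equally spaced on that scale can be laid out (the 8 kHz range and 40 bands are illustrative choices, not the paper's design).

```python
import numpy as np

def erb_bandwidth(f_hz):
    """Glasberg & Moore ERB: ERB(f) = 24.7 * (4.37 f/1000 + 1) Hz."""
    return 24.7 * (4.37 * f_hz / 1000.0 + 1.0)

def hz_to_erbscale(f_hz):
    """Number of ERBs below f: E(f) = 21.4 * log10(4.37 f/1000 + 1)."""
    return 21.4 * np.log10(4.37 * f_hz / 1000.0 + 1.0)

# Band centers equally spaced on the ERB scale, as an ERB-resolution
# transform would allocate its frequency bins (invert E(f) numerically):
grid_hz = np.linspace(0.0, 8000.0, 4096)
centers = np.interp(np.linspace(0.0, hz_to_erbscale(8000.0), 40),
                    hz_to_erbscale(grid_hz), grid_hz)
```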
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Comparison of windowing schemes for speech coding.\n \n \n \n \n\n\n \n Fischer, J.; and Bäckström, T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 804-808, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ComparisonPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362494,\n  author = {J. Fischer and T. Bäckström},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Comparison of windowing schemes for speech coding},\n  year = {2015},\n  pages = {804-808},\n  abstract = {The majority of speech coding algorithms are based on the code excited linear prediction (CELP) paradigm, modelling the speech signal by linear prediction. This coding approach offers the advantage of a very short algorithmic delay, due to the windowing scheme based on rectangular windowing of the residual of the linear predictor. Although widely used, the performance and structural choices of this windowing scheme have not been extensively documented. In this paper we introduce three alternative windowing schemes, as alternatives to the one already used in CELP codecs. These windowing schemes differ in their handling of transitions between frames. Our subject evaluation shows that omitting the error feedback loop yields an increase in perceptual quality at scenarios with high quantization noise. In addition, objective measures show that while error feedback improves the accuracy slightly at high bitrates, at low bitrates it causes a degradation in quality, resulting in a lower SNR.},\n  keywords = {codecs;quantisation (signal);speech coding;error feedback;quantization noise;perceptual quality;CELP codecs;residual rectangular windowing;algorithmic delay;speech signal modelling;CELP paradigm;code-excited linear prediction paradigm;speech coding algorithm;windowing scheme;Signal to noise ratio;Finite impulse response filters;Speech coding;Speech;Quantization (signal);Speech processing;Europe;speech coding;windowing;source modelling;linear prediction},\n  doi = {10.1109/EUSIPCO.2015.7362494},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095867.pdf},\n}\n\n
\n
\n\n\n
\n The majority of speech coding algorithms are based on the code excited linear prediction (CELP) paradigm, modelling the speech signal by linear prediction. This coding approach offers the advantage of a very short algorithmic delay, due to the windowing scheme based on rectangular windowing of the residual of the linear predictor. Although widely used, the performance and structural choices of this windowing scheme have not been extensively documented. In this paper we introduce three windowing schemes as alternatives to the one already used in CELP codecs. These windowing schemes differ in their handling of transitions between frames. Our subjective evaluation shows that omitting the error feedback loop yields an increase in perceptual quality in scenarios with high quantization noise. In addition, objective measures show that while error feedback improves the accuracy slightly at high bitrates, at low bitrates it causes a degradation in quality, resulting in a lower SNR.\n
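For context, a minimal sketch of the baseline idea (rectangular windowing of the linear-prediction residual) is given below, using the textbook autocorrelation method for LPC. It is not the authors' codec; frame length, order, and names are illustrative.

```python
import numpy as np
from scipy.linalg import solve_toeplitz
from scipy.signal import lfilter

def lpc(frame, order):
    """Autocorrelation-method LPC: returns A(z) = [1, -a1, ..., -ap]."""
    r = np.correlate(frame, frame, mode="full")[len(frame) - 1:]
    a = solve_toeplitz((r[:order], r[:order]), r[1:order + 1])
    return np.concatenate(([1.0], -a))

def residual_rectangular(speech, order=16, frame_len=160):
    """Rectangular windowing of the linear-prediction residual: each frame
    of the residual is cut out with a plain rectangular window, which is
    what keeps the algorithmic delay short."""
    res = []
    for start in range(0, len(speech) - frame_len + 1, frame_len):
        frame = speech[start:start + frame_len]
        a = lpc(frame, order)
        res.append(lfilter(a, [1.0], frame))  # apply A(z) to the frame
    return np.concatenate(res)
```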
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hybrid method for multi-exposure image fusion based on weighted mean and sparse representation.\n \n \n \n \n\n\n \n Sakai, T.; Kimura, D.; Yoshida, T.; and Iwahashi, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 809-813, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"HybridPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362495,\n  author = {T. Sakai and D. Kimura and T. Yoshida and M. Iwahashi},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Hybrid method for multi-exposure image fusion based on weighted mean and sparse representation},\n  year = {2015},\n  pages = {809-813},\n  abstract = {We propose a hybrid method for multi-exposure image fusion in this paper. The fusion blends some images capturing the same scene with different exposure times and produces a high quality image. Based on the pixel-wise weighted mean, many methods have been actively proposed, but their resultant images have blurred edges and textures because of the mean procedure. To overcome the disadvantages, the proposed method separately fuses the means and details of input images. The details are fused based on sparse representation, and the results keep their sharpness. Consequently, the resultant fused images are fine with sharp edges and textures. Through simulations, we show that the proposed method outperforms previous methods objectively and perceptually.},\n  keywords = {image capture;image fusion;image restoration;multiexposure image fusion;sparse representation;image capture;weighted mean method;input image detail;Image edge detection;Image fusion;Fuses;Europe;Signal processing;Laplace equations;Image color analysis;Multi-exposure image fusion;high-dynamic-range imaging;weighted mean;sparse representation},\n  doi = {10.1109/EUSIPCO.2015.7362495},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103703.pdf},\n}\n\n
\n
\n\n\n
\n We propose a hybrid method for multi-exposure image fusion in this paper. The fusion blends several images of the same scene captured with different exposure times and produces a high-quality image. Many methods based on the pixel-wise weighted mean have been proposed, but their resultant images have blurred edges and textures because of the averaging procedure. To overcome these disadvantages, the proposed method separately fuses the means and details of the input images. The details are fused based on sparse representation, and the results keep their sharpness. Consequently, the resulting fused images exhibit sharp edges and textures. Through simulations, we show that the proposed method outperforms previous methods objectively and perceptually.\n
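A minimal sketch of the weighted-mean half of such a hybrid method is shown below, using a generic well-exposedness weight; the sparse-representation detail fusion that the paper adds is deliberately omitted, and the weight function is an assumption.

```python
import numpy as np

def fuse_weighted_mean(images, sigma=0.2):
    """Pixel-wise weighted mean of a multi-exposure stack (grayscale,
    values in [0, 1]).  Weights favour well-exposed pixels (near 0.5);
    this is only the 'mean' stage of a hybrid mean + detail method."""
    stack = np.stack(images).astype(np.float64)              # (K, H, W)
    w = np.exp(-((stack - 0.5) ** 2) / (2.0 * sigma ** 2))   # well-exposedness
    w /= w.sum(axis=0, keepdims=True) + 1e-12                # normalise per pixel
    return (w * stack).sum(axis=0)
```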
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A perturbed linear mixing model accounting for spectral variability.\n \n \n \n \n\n\n \n Thouvenin, P.; Dobigeon, N.; and Tourneret, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 814-818, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362496,\n  author = {P. Thouvenin and N. Dobigeon and J. Tourneret},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A perturbed linear mixing model accounting for spectral variability},\n  year = {2015},\n  pages = {814-818},\n  abstract = {Hyperspectral unmixing aims at determining the reference spectral signatures composing a hyperspectral image, their abundance fractions and their number. In practice, the spectral variability of the identified signatures induces significant abundance estimation errors. To address this issue, this paper introduces a new linear mixing model explicitly accounting for this phenomenon. In this setting, the extracted endmembers are interpreted as possibly corrupted versions of the true endmembers. The parameters of this model can be estimated using an optimization algorithm based on the alternating direction method of multipliers. The performance of the proposed unmixing method is evaluated on synthetic and real data.},\n  keywords = {geophysical image processing;hyperspectral imaging;optimisation;perturbation techniques;spectral analysis;perturbed linear mixing model;spectral variability;hyperspectral unmixing;spectral signatures;hyperspectral image;abundance fractions;abundance estimation errors;optimization algorithm;unmixing method;alternating direction method of multipliers;ADMM;Signal processing algorithms;Yttrium;Optimization;Hyperspectral imaging;Europe;Signal processing;Adaptation models;Hyperspectral imagery;linear unmixing;endmember variability;Alternating Direction Method of Multipliers (ADMM)},\n  doi = {10.1109/EUSIPCO.2015.7362496},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096265.pdf},\n}\n\n
\n
\n\n\n
\n Hyperspectral unmixing aims at determining the reference spectral signatures composing a hyperspectral image, their abundance fractions and their number. In practice, the spectral variability of the identified signatures induces significant abundance estimation errors. To address this issue, this paper introduces a new linear mixing model explicitly accounting for this phenomenon. In this setting, the extracted endmembers are interpreted as possibly corrupted versions of the true endmembers. The parameters of this model can be estimated using an optimization algorithm based on the alternating direction method of multipliers. The performance of the proposed unmixing method is evaluated on synthetic and real data.\n
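Written out, a perturbed linear mixing model of the kind described takes the following form (notation assumed here, with per-pixel abundances constrained to the simplex):

```latex
% y_n: pixel spectrum, M: nominal endmembers, dM_n: per-pixel perturbation,
% a_n: abundances, b_n: noise.
y_n = (M + \mathrm{d}M_n)\, a_n + b_n,
\qquad a_n \succeq 0, \quad \mathbf{1}^{\mathsf T} a_n = 1 .
```

The extracted endmembers correspond to the columns of M + dM_n, i.e. possibly corrupted versions of the true endmembers, and the model parameters are fitted with an ADMM-based optimization, as the abstract states.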
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stereoscopic video description for key-frame extraction in movie summarization.\n \n \n \n \n\n\n \n Mademlis, I.; Nikolaidis, N.; and Pitas, I.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 819-823, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"StereoscopicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362497,\n  author = {I. Mademlis and N. Nikolaidis and I. Pitas},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Stereoscopic video description for key-frame extraction in movie summarization},\n  year = {2015},\n  pages = {819-823},\n  abstract = {A novel, low-level video frame description method is proposed that is able to compactly capture informative image statistics from luminance, color and stereoscopic disparity video data, both in a global and in various local scales. Thus, scene texture, illumination and geometry properties may succinctly be contained within a single frame feature descriptor, which can subsequently be employed as a building block in any key-frame extraction scheme, e.g., shot frame clustering. The computed key-frames are subsequently used to derive a movie summary in the form of a video skim, which is suitably post-processed to reduce stereoscopic video defects that cause visual fatigue and are a by-product of the summarization.},\n  keywords = {feature extraction;video signal processing;stereoscopic video description;key-frame extraction;movie summarization;low-level video frame description method;informative image statistics;single frame feature descriptor;shot frame clustering;reduce stereoscopic video defects;visual fatigue;Feature extraction;Stereo image processing;Image color analysis;Motion pictures;Streaming media;Histograms;Three-dimensional displays;Video Summarization;Stereoscopic Video Description;Bag-of-Features},\n  doi = {10.1109/EUSIPCO.2015.7362497},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103547.pdf},\n}\n\n
\n
\n\n\n
\n A novel, low-level video frame description method is proposed that is able to compactly capture informative image statistics from luminance, color and stereoscopic disparity video data, both at a global scale and at various local scales. Thus, scene texture, illumination and geometry properties may succinctly be contained within a single frame feature descriptor, which can subsequently be employed as a building block in any key-frame extraction scheme, e.g., shot frame clustering. The computed key-frames are subsequently used to derive a movie summary in the form of a video skim, which is suitably post-processed to reduce stereoscopic video defects that cause visual fatigue and are a by-product of the summarization.\n
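A toy version of the clustering-based key-frame selection step is sketched below: given one descriptor per frame, cluster the descriptors and keep the frame nearest each cluster centre. The descriptor itself (the paper's contribution) is left abstract, and the plain k-means here is an assumption.

```python
import numpy as np

def key_frames(descriptors, k, iters=20, seed=0):
    """Shot-frame clustering sketch: k-means over per-frame descriptors,
    returning the index of the frame closest to each cluster centre."""
    rng = np.random.default_rng(seed)
    X = np.asarray(descriptors, dtype=np.float64)
    centres = X[rng.choice(len(X), size=k, replace=False)]
    for _ in range(iters):
        d = ((X[:, None, :] - centres[None, :, :]) ** 2).sum(-1)
        labels = d.argmin(axis=1)
        for j in range(k):
            if np.any(labels == j):
                centres[j] = X[labels == j].mean(axis=0)
    d = ((X[:, None, :] - centres[None, :, :]) ** 2).sum(-1)
    return [int(np.argmin(d[:, j])) for j in range(k)]
```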
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bayes classification for asynchronous event-based cameras.\n \n \n \n \n\n\n \n Fillatre, L.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 824-828, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BayesPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362498,\n  author = {L. Fillatre},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bayes classification for asynchronous event-based cameras},\n  year = {2015},\n  pages = {824-828},\n  abstract = {Asynchronous event-based cameras use time encoding to code the pixel intensity values. A time encoding of an input pattern generates a random stream of asynchronous events. An event is defined as a pair containing a timestamp and the variation sign of the input signal since the last emitted event. The goal of this paper is the recognition of the input pattern among a set of several known possibilities from the observation of the event stream. This paper proposes a statistical model of the random event stream based on the physical model of the event-based camera. It also calculates the optimal Bayes classifier which recognizes the input pattern. The numerical complexity of the classifier is rather low. The Bayes risk, which measures the performance of the classifier, is numerically evaluated on simulated data. It is compared to the mean number of events, which entails the power consumption of the camera, exploited to take the decision.},\n  keywords = {cameras;image coding;image recognition;Bayes classification;asynchronous event-based cameras;time encoding;pixel intensity values;asynchronous events;random stream;event-based camera;numerical complexity;optimal Bayes classifier;power consumption;Cameras;Sensors;Encoding;Numerical models;Europe;Signal processing;Neuromorphics;Time encoding;Statistical classification;Event-based camera;Bayes risk},\n  doi = {10.1109/EUSIPCO.2015.7362498},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104201.pdf},\n}\n\n
\n
\n\n\n
\n Asynchronous event-based cameras use time encoding to code the pixel intensity values. A time encoding of an input pattern generates a random stream of asynchronous events. An event is defined as a pair containing a timestamp and the variation sign of the input signal since the last emitted event. The goal of this paper is the recognition of the input pattern among a set of several known possibilities from the observation of the event stream. This paper proposes a statistical model of the random event stream based on the physical model of the event-based camera. It also calculates the optimal Bayes classifier which recognizes the input pattern. The numerical complexity of the classifier is rather low. The Bayes risk, which measures the performance of the classifier, is numerically evaluated on simulated data. It is compared to the mean number of events used to make the decision, which determines the power consumption of the camera.\n
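As an illustrative stand-in for the paper's physical event model, the sketch below classifies an observed event-count map with a Bayes rule under an assumed independent-Poisson likelihood per known pattern. The Poisson surrogate and all names are assumptions, not the authors' model.

```python
import numpy as np
from scipy.stats import poisson

def bayes_classify(event_counts, rates, priors):
    """Illustrative Bayes classifier: each known pattern k predicts a
    per-pixel mean event count rates[k]; the observed counts are scored
    under an (assumed) independent-Poisson likelihood plus log-prior."""
    log_post = []
    for lam, pi in zip(rates, priors):
        log_post.append(np.log(pi) + poisson.logpmf(event_counts, lam).sum())
    return int(np.argmax(log_post))   # maximum a posteriori pattern index
```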
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A comparison of thermal image descriptors for face analysis.\n \n \n \n \n\n\n \n Carrapico, R.; Mourão, A.; Magalhães, J.; and Cavaco, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 829-833, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362499,\n  author = {R. Carrapico and A. Mourão and J. Magalhães and S. Cavaco},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A comparison of thermal image descriptors for face analysis},\n  year = {2015},\n  pages = {829-833},\n  abstract = {Thermal imaging is a type of imaging that uses thermographic cameras to detect radiation in the infrared range of the electromagnetic spectrum. Thermal images are particularly well suited for face detection and recognition because of the low sensitivity to illumination changes, color skins, beards and other artifacts. In this paper, we take a fresh look at the problem of face analysis in the thermal domain. We consider several thermal image descriptors and assess their performance in two popular tasks: face recognition and facial expression recognition. The results have shown that face recognition can reach accuracy levels of 91% with Localized Binary Patterns. Also, despite the difficulty of facial expression detection, our experiments have revealed that Haar based features (FCTH - Fuzzy Color and Texture Histogram) offers the best results for some facial expressions.},\n  keywords = {emotion recognition;face recognition;image colour analysis;image texture;infrared imaging;fuzzy color-texture histogram;FCTH;Haar-based features;localized binary patterns;facial expression recognition;thermal domain;color skins;illumination changes;face recognition;face detection;electromagnetic spectrum;infrared range;radiation detection;thermographic cameras;thermal imaging;face analysis;thermal image descriptors;Face recognition;Face;Image color analysis;Feature extraction;Histograms;Image recognition;Lighting;Thermal images;face recognition;facial expressions;image descriptors},\n  doi = {10.1109/EUSIPCO.2015.7362499},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103673.pdf},\n}\n\n
\n
\n\n\n
\n Thermal imaging is a type of imaging that uses thermographic cameras to detect radiation in the infrared range of the electromagnetic spectrum. Thermal images are particularly well suited for face detection and recognition because of their low sensitivity to illumination changes, skin color, beards and other artifacts. In this paper, we take a fresh look at the problem of face analysis in the thermal domain. We consider several thermal image descriptors and assess their performance in two popular tasks: face recognition and facial expression recognition. The results have shown that face recognition can reach accuracy levels of 91% with Localized Binary Patterns. Also, despite the difficulty of facial expression detection, our experiments have revealed that Haar-based features (FCTH - Fuzzy Color and Texture Histogram) offer the best results for some facial expressions.\n
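Of the descriptors compared, the (local) binary-pattern family is easy to reproduce; a minimal 8-neighbour LBP histogram is sketched below in its generic textbook form, not tuned to thermal imagery.

```python
import numpy as np

def lbp_histogram(img):
    """Basic 8-neighbour Local Binary Patterns: threshold each pixel's ring
    of neighbours against the centre pixel and histogram the 256 codes."""
    img = np.asarray(img, dtype=np.float64)
    c = img[1:-1, 1:-1]
    shifts = [(-1, -1), (-1, 0), (-1, 1), (0, 1),
              (1, 1), (1, 0), (1, -1), (0, -1)]
    code = np.zeros(c.shape, dtype=np.int32)
    for bit, (dy, dx) in enumerate(shifts):
        n = img[1 + dy:img.shape[0] - 1 + dy, 1 + dx:img.shape[1] - 1 + dx]
        code |= (n >= c).astype(np.int32) << bit
    return np.bincount(code.ravel(), minlength=256) / code.size
```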
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SketchPrint: Physical object micro-structure identification using mobile phones.\n \n \n \n \n\n\n \n Diephuis, M.; Voloshynovskiy, S.; and Holotyak, T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 834-838, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SketchPrint:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362500,\n  author = {M. Diephuis and S. Voloshynovskiy and T. Holotyak},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {SketchPrint: Physical object micro-structure identification using mobile phones},\n  year = {2015},\n  pages = {834-838},\n  abstract = {This paper addresses the identification of physical objects based on their physical non-cloneable surface structures. These micro-structures are optically acquired using a hand held non-modified consumer mobile phone. Object identification is done with the SketchPrint descriptor, which combines fingerprint-like properties while having reasonable invariance to geometrical and lighting distortions due to its semi-local nature. Crucially, objects can be identified without any geometrical matching or final re-ranking procedure.},\n  keywords = {mobile handsets;object recognition;fingerprint like properties;SketchPrint descriptor;object identification;consumer mobile phone;hand held nonmodified mobile phone;physical noncloneable surface structure;mobile phones;physical object microstructure identification;Feature extraction;Indexes;Computer architecture;Probes;Europe;Signal processing;physical object identification;micro-structure images;semi-local descriptor;SketchPrint},\n  doi = {10.1109/EUSIPCO.2015.7362500},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104907.pdf},\n}\n\n
\n
\n\n\n
\n This paper addresses the identification of physical objects based on their physical non-cloneable surface structures. These micro-structures are optically acquired using a handheld, unmodified consumer mobile phone. Object identification is done with the SketchPrint descriptor, which combines fingerprint-like properties while having reasonable invariance to geometrical and lighting distortions due to its semi-local nature. Crucially, objects can be identified without any geometrical matching or final re-ranking procedure.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n MVDR broadband beamforming using polynomial matrix techniques.\n \n \n \n \n\n\n \n Weiss, S.; Bendoukha, S.; Alzin, A.; Coutts, F. K.; Proudler, I. K.; and Chambers, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 839-843, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MVDRPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362501,\n  author = {S. Weiss and S. Bendoukha and A. Alzin and F. K. Coutts and I. K. Proudler and J. Chambers},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {MVDR broadband beamforming using polynomial matrix techniques},\n  year = {2015},\n  pages = {839-843},\n  abstract = {This paper presents initial progress on formulating minimum variance distortionless response (MVDR) broadband beam-forming using a generalised sidelobe canceller (GSC) in the context of polynomial matrix techniques. The quiescent vector is defined as a broadband steering vector, and we propose a blocking matrix design obtained by paraunitary matrix completion. The polynomial approach decouples the spatial and temporal orders of the filters in the blocking matrix, and decouples the adaptive filter order from the construction of the blocking matrix. For off-broadside constraints the polynomial approach is simple, and more accurate and considerably less costly than a standard time domain broadband GSC.},\n  keywords = {adaptive filters;adaptive signal processing;array signal processing;polynomial matrices;time-domain analysis;standard time-domain broadband GSC;off-broadside constraint;adaptive filter order;blocking matrix construction;filter temporal order;filter spatial order;paraunitary matrix completion;blocking matrix design;broadband steering vector;quiescent vector;generalised sidelobe canceller;minimum variance distortionless response broadband beamforming;polynomial matrix technique;MVDR broadband beamforming;Broadband communication;Standards;Narrowband;Matrix decomposition;Array signal processing;Arrays},\n  doi = {10.1109/EUSIPCO.2015.7362501},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104975.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents initial progress on formulating minimum variance distortionless response (MVDR) broadband beamforming using a generalised sidelobe canceller (GSC) in the context of polynomial matrix techniques. The quiescent vector is defined as a broadband steering vector, and we propose a blocking matrix design obtained by paraunitary matrix completion. The polynomial approach decouples the spatial and temporal orders of the filters in the blocking matrix, and decouples the adaptive filter order from the construction of the blocking matrix. For off-broadside constraints the polynomial approach is simple, and more accurate and considerably less costly than a standard time-domain broadband GSC.\n
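For reference, the narrowband MVDR weight vector that the polynomial-matrix formulation generalises is computed per frequency bin as sketched below. This is the standard textbook expression, with diagonal loading added as an assumption for numerical robustness; the paper replaces the per-bin a and R with a broadband steering vector a(z) and a para-Hermitian R(z).

```python
import numpy as np

def mvdr_weights(R, a, diag_load=1e-3):
    """Classic narrowband MVDR: w = R^{-1} a / (a^H R^{-1} a),
    with light trace-scaled diagonal loading on the covariance R."""
    n = R.shape[0]
    Rl = R + diag_load * (np.trace(R).real / n) * np.eye(n)
    Ri_a = np.linalg.solve(Rl, a)          # R^{-1} a without explicit inverse
    return Ri_a / (a.conj() @ Ri_a)
```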
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multiple shift second order sequential best rotation algorithm for polynomial matrix EVD.\n \n \n \n \n\n\n \n Wang, Z.; McWhirter, J. G.; Corr, J.; and Weiss, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 844-848, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MultiplePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362502,\n  author = {Z. Wang and J. G. McWhirter and J. Corr and S. Weiss},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Multiple shift second order sequential best rotation algorithm for polynomial matrix EVD},\n  year = {2015},\n  pages = {844-848},\n  abstract = {In this paper, we present an improved version of the second order sequential best rotation algorithm (SBR2) for polynomial matrix eigenvalue decomposition of para-Hermitian matrices. The improved algorithm is entitled multiple shift SBR2 (MS-SBR2) which is developed based on the original SBR2 algorithm. It can achieve faster convergence than the original SBR2 algorithm by means of transferring more off-diagonal energy onto the diagonal at each iteration. Its convergence is proved and also demonstrated by means of a numerical example. Furthermore, simulation results are included to compare its convergence characteristics and computational complexity with the original SBR2, sequential matrix diagonalization (SMD) and multiple shift maximum element SMD algorithms.},\n  keywords = {eigenvalues and eigenfunctions;Hermitian matrices;polynomial matrices;signal processing;polynomial matrix EVD;multiple shift maximum element SMD algorithms;sequential matrix diagonalization;off-diagonal energy;MS-SBR2;multiple shift SBR2;para-Hermitian matrices;polynomial matrix eigenvalue decomposition;second order sequential best rotation algorithm;Signal processing algorithms;Jacobian matrices;Covariance matrices;Signal processing;Convergence;Matrix decomposition;Europe;Polynomial matrix eigenvalue decomposition;multiple shift SBR2},\n  doi = {10.1109/EUSIPCO.2015.7362502},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103803.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we present an improved version of the second order sequential best rotation algorithm (SBR2) for polynomial matrix eigenvalue decomposition of para-Hermitian matrices. The improved algorithm, termed multiple shift SBR2 (MS-SBR2), is developed from the original SBR2 algorithm. It achieves faster convergence than the original SBR2 algorithm by transferring more off-diagonal energy onto the diagonal at each iteration. Its convergence is proved and also demonstrated by means of a numerical example. Furthermore, simulation results are included to compare its convergence characteristics and computational complexity with the original SBR2, sequential matrix diagonalization (SMD) and multiple shift maximum element SMD algorithms.\n
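The decomposition being approximated can be stated compactly in standard PEVD notation (the tilde denotes the para-Hermitian conjugate):

```latex
% Polynomial EVD of a para-Hermitian R(z): H(z) paraunitary, D(z) diagonal.
R(z) \;\approx\; H(z)\, D(z)\, \tilde{H}(z),
\qquad \tilde{H}(z)\, H(z) = I,
\qquad D(z) = \operatorname{diag}\{ d_1(z), \ldots, d_M(z) \} .
```

SBR2-type algorithms build H(z) iteratively from elementary delay and rotation steps, transferring off-diagonal energy onto the diagonal; MS-SBR2 applies multiple such shifts per iteration, which is what speeds up convergence.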
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Row-shift corrected truncation of paraunitary matrices for PEVD algorithms.\n \n \n \n \n\n\n \n Corr, J.; Thompson, K.; Weiss, S.; Proudler, I. K.; and McWhirter, J. G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 849-853, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Row-shiftPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362503,\n  author = {J. Corr and K. Thompson and S. Weiss and I. K. Proudler and J. G. McWhirter},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Row-shift corrected truncation of paraunitary matrices for PEVD algorithms},\n  year = {2015},\n  pages = {849-853},\n  abstract = {In this paper, we show that the paraunitary (PU) matrices that arise from the polynomial eigenvalue decomposition (PEVD) of a parahermitian matrix are not unique. In particular, arbitrary shifts (delays) of polynomials in one row of a PU matrix yield another PU matrix that admits the same PEVD. To keep the order of such a PU matrix as low as possible, we propose a row-shift correction. Using the example of an iterative PEVD algorithm with previously proposed truncation of the PU matrix, we demonstrate that a considerable shortening of the PU order can be accomplished when using row-corrected truncation.},\n  keywords = {decomposition;eigenvalues and eigenfunctions;iterative methods;matrix algebra;polynomials;signal processing;row-shift corrected truncation;paraunitary matrix;polynomial eigenvalue decomposition;parahermitian matrix;PU matrix;iterative PEVD algorithm;signal processing;Matrix decomposition;Signal processing algorithms;Covariance matrices;Signal processing;Delays;Complexity theory;Approximation algorithms},\n  doi = {10.1109/EUSIPCO.2015.7362503},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103575.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we show that the paraunitary (PU) matrices that arise from the polynomial eigenvalue decomposition (PEVD) of a parahermitian matrix are not unique. In particular, arbitrary shifts (delays) of polynomials in one row of a PU matrix yield another PU matrix that admits the same PEVD. To keep the order of such a PU matrix as low as possible, we propose a row-shift correction. Using the example of an iterative PEVD algorithm with previously proposed truncation of the PU matrix, we demonstrate that a considerable shortening of the PU order can be accomplished when using row-corrected truncation.\n
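The row-shift ambiguity described above can be written as follows (notation assumed, using the D(z) = H(z) R(z) H̃(z) convention): delaying the m-th row of a paraunitary H(z) leaves both paraunitarity and the extracted diagonal intact,

```latex
H'(z) = \operatorname{diag}\{ z^{-\tau_1}, \ldots, z^{-\tau_M} \}\, H(z)
\;\Longrightarrow\;
H'(z)\, R(z)\, \tilde{H}'(z)
= \operatorname{diag}\{ z^{-\tau_m} \}\, D(z)\, \operatorname{diag}\{ z^{\tau_m} \}
= D(z),
```

since the diagonal delays cancel element-wise on the diagonal. The proposed correction chooses the shifts so that the truncated H'(z) has the lowest possible order.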
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust adaptive beamforming based on low-rank and cross-correlation techniques.\n \n \n \n \n\n\n \n Ruan, H.; and de Lamare , R. C.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 854-858, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362504,\n  author = {H. Ruan and R. C. {de Lamare}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Robust adaptive beamforming based on low-rank and cross-correlation techniques},\n  year = {2015},\n  pages = {854-858},\n  abstract = {This work presents a cost-effective low-rank technique for designing robust adaptive beamforming (RAB) algorithms. The proposed technique is based on low-rank modelling of the mismatch and exploitation of the cross-correlation between the received data and the output of the beamformer. We construct a linear system of equations which computes the steering vector mismatch based on prior information about the level of mismatch, and then we employ an orthogonal Krylov subspace based method to iteratively estimate the steering vector mismatch in a reduced-dimensional subspace, resulting in the proposed orthogonal Krylov subspace projection mismatch estimation (OKSPME) method. Simulation results show excellent performance of OKSPME in terms of the beamformer output signal-to-interference-plus-noise ratio (SINR) as compared to existing RAB algorithms.},\n  keywords = {adaptive signal processing;array signal processing;correlation theory;iterative methods;cost-effective low-rank technique;robust adaptive beamforming algorithm;RAB algorithm;cross-correlation exploitation;cross-correlation mismatch;steering vector mismatch;iterative estimation;reduced-dimensional subspace;orthogonal Krylov subspace projection mismatch estimation method;OKSPME method;beamformer output signal-to-interference-plus-noise ratio;beamformer output SINR;Arrays;Robustness;Signal to noise ratio;Signal processing algorithms;Linear systems;Covariance matrices;Array signal processing;robust adaptive beamforming;low-rank techniques;low complexity methods},\n  doi = {10.1109/EUSIPCO.2015.7362504},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100371.pdf},\n}\n\n
\n
\n\n\n
\n This work presents a cost-effective low-rank technique for designing robust adaptive beamforming (RAB) algorithms. The proposed technique is based on low-rank modelling of the mismatch and exploitation of the cross-correlation between the received data and the output of the beamformer. We construct a linear system of equations which computes the steering vector mismatch based on prior information about the level of mismatch, and then we employ an orthogonal Krylov subspace based method to iteratively estimate the steering vector mismatch in a reduced-dimensional subspace, resulting in the proposed orthogonal Krylov subspace projection mismatch estimation (OKSPME) method. Simulation results show excellent performance of OKSPME in terms of the beamformer output signal-to-interference-plus-noise ratio (SINR) as compared to existing RAB algorithms.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An adaptively focusing measurement design for compressed sensing based DOA estimation.\n \n \n \n \n\n\n \n Ibrahim, M.; Roemer, F.; and Del Galdo, G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 859-863, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362505,\n  author = {M. Ibrahim and F. Roemer and G. {Del Galdo}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An adaptively focusing measurement design for compressed sensing based DOA estimation},\n  year = {2015},\n  pages = {859-863},\n  abstract = {In this paper we propose an adaptive design strategy for the measurement matrix for applying Compressed Sensing (CS) to Direction Of Arrival (DOA) estimation with antenna arrays. Instead of choosing the coefficients of the compression matrix randomly, we propose a systematic design methodology for constructing a measurement matrix that focuses the array towards a specific area of interest and thereby achieves a superior DOA estimation performance. The focusing is performed in a sequential manner, i.e., we start with a uniform measurement design from which regions of interest can be extracted that the subsequent measurements then focus on. By continuously updating these target regions, gradual movement of the sources can also be tracked over time. Numerical results demonstrate that the focused measurements possess a superior SNR leading to significantly enhanced DOA estimates.},\n  keywords = {adaptive antenna arrays;array signal processing;compressed sensing;direction-of-arrival estimation;matrix algebra;adaptive focusing measurement design;compressed sensing based DOA estimation;direction of arrival estimation;measurement matrix adaptive design;antenna arrays;compression matrix random coefficient;Direction-of-arrival estimation;Estimation;Focusing;Antenna arrays;Biomedical measurement;Antenna measurements;Transmission line matrix methods;Compressive Sensing;DOA Estimation;Measurement Design},\n  doi = {10.1109/EUSIPCO.2015.7362505},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104637.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we propose an adaptive design strategy for the measurement matrix for applying Compressed Sensing (CS) to Direction Of Arrival (DOA) estimation with antenna arrays. Instead of choosing the coefficients of the compression matrix randomly, we propose a systematic design methodology for constructing a measurement matrix that focuses the array towards a specific area of interest and thereby achieves a superior DOA estimation performance. The focusing is performed in a sequential manner, i.e., we start with a uniform measurement design from which regions of interest can be extracted that the subsequent measurements then focus on. By continuously updating these target regions, gradual movement of the sources can also be tracked over time. Numerical results demonstrate that the focused measurements possess a superior SNR leading to significantly enhanced DOA estimates.\n
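The measurement model underlying such compressive DOA schemes can be summarised as below (generic notation, assumed here):

```latex
% x(t): N-element array snapshot, Phi: M x N measurement (compression)
% matrix, A(theta): array steering matrix, s(t): source signals.
y(t) = \Phi\, x(t), \qquad
x(t) = A(\theta)\, s(t) + n(t), \qquad
\Phi \in \mathbb{C}^{M \times N}, \; M \ll N .
```

The adaptive design replaces a random Φ with one whose rows concentrate array gain on the regions of interest extracted from earlier (initially uniform) measurements, which is what yields the SNR advantage reported above.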
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Corpus based reconstruction of speech degraded by wind noise.\n \n \n \n \n\n\n \n Nelke, C. M.; Naylor, P. A.; and Vary, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 864-868, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CorpusPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362506,\n  author = {C. M. Nelke and P. A. Naylor and P. Vary},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Corpus based reconstruction of speech degraded by wind noise},\n  year = {2015},\n  pages = {864-868},\n  abstract = {This contribution addresses the problem of enhancing a speech signal which is degraded by wind noise. The characteristic that wind noise signals are sparse in time and frequency is exploited in a way that only time-frequency regions that are determined as degraded are enhanced. In these regions of the noisy signal, a process is applied to reconstruct the clean speech data. This is realized by a separation of the noisy speech signal into an autoregressive filter representing the human vocal tract and its excitation signal. The clean filter coefficients of the former are estimated using a pre-trained codebook. A pitch cycle taken from clean speech is adapted to reconstruct the excitation of noisy speech segments.},\n  keywords = {acoustic noise;autoregressive processes;compressed sensing;filtering theory;signal denoising;signal reconstruction;speech enhancement;wind;speech signal enhancement;noisy speech segments;pitch cycle;pretrained codebook;clean filter coefficients;excitation signal;human vocal tract;autoregressive filter;noisy speech signal;speech data;wind noise signals;corpus based reconstruction;Speech;Noise measurement;Time-frequency analysis;Speech enhancement;Estimation;Speech coding;wind noise reduction;binary mask;speech enhancement;codebook;source-filter speech model},\n  doi = {10.1109/EUSIPCO.2015.7362506},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570093223.pdf},\n}\n\n
\n
\n\n\n
\n This contribution addresses the problem of enhancing a speech signal which is degraded by wind noise. The characteristic that wind noise signals are sparse in time and frequency is exploited in a way that only time-frequency regions that are determined as degraded are enhanced. In these regions of the noisy signal, a process is applied to reconstruct the clean speech data. This is realized by a separation of the noisy speech signal into an autoregressive filter representing the human vocal tract and its excitation signal. The clean filter coefficients of the former are estimated using a pre-trained codebook. A pitch cycle taken from clean speech is adapted to reconstruct the excitation of noisy speech segments.\n
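A minimal sketch of the source-filter reconstruction idea follows: repeat a clean pitch cycle at the target fundamental frequency to form the excitation, then shape it with all-pole coefficients drawn from a clean-speech codebook. Everything here (names, parameters) is illustrative; the codebook search and the time-frequency degradation mask are omitted.

```python
import numpy as np
from scipy.signal import lfilter

def reconstruct_frame(codebook_lpc, pitch_cycle, f0, fs, frame_len, gain):
    """Source-filter resynthesis of a degraded frame: a clean pitch cycle
    is resampled to the target period, tiled to form the excitation, then
    shaped by an all-pole filter whose coefficients come from a clean-speech
    codebook (codebook_lpc = [1, a1, ..., ap])."""
    period = int(round(fs / f0))
    cycle = np.interp(np.linspace(0, len(pitch_cycle) - 1, period),
                      np.arange(len(pitch_cycle)), pitch_cycle)
    excitation = np.tile(cycle, frame_len // period + 1)[:frame_len]
    return gain * lfilter([1.0], codebook_lpc, excitation)  # 1/A(z) synthesis
```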
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dialogue enhancement of stereo sound.\n \n \n \n \n\n\n \n Geiger, J. T.; Grosche, P.; and Parodi, Y. L.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 869-873, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DialoguePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362507,\n  author = {J. T. Geiger and P. Grosche and Y. L. Parodi},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Dialogue enhancement of stereo sound},\n  year = {2015},\n  pages = {869-873},\n  abstract = {Studies show that many people have difficulties in understanding dialogue in movies when watching TV, especially hard-of-hearing listeners or in adverse listening environments. In order to overcome this problem, we propose an efficient methodology to enhance the speech component of a stereo signal. The method is designed with low computational complexity in mind, and consists of first extracting a center channel from the stereo signal. Novel methods for speech enhancement and voice activity detection are proposed which exploit the stereo information. A speech enhancement filter is estimated based on the relationship between the extracted center channel and all other channels. Subjective and objective evaluations show that this method can successfully enhance intelligibility of the dialogue without affecting the overall sound quality negatively.},\n  keywords = {acoustic filters;acoustic signal detection;acoustic signal processing;computational complexity;hearing;speech enhancement;sound quality;speech enhancement filter;stereo information;voice activity detection;computational complexity;stereo signal;speech component;hard-of-hearing listeners;stereo sound;dialogue enhancement;Decision support systems;Europe;Signal processing;Yttrium;Conferences;Radio frequency;Speech enhancement;dialogue enhancement;voice activity detection;stereo enhancement;Wiener filter},\n  doi = {10.1109/EUSIPCO.2015.7362507},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096395.pdf},\n}\n\n
\n
\n\n\n
\n Studies show that many people have difficulties in understanding dialogue in movies when watching TV, especially hard-of-hearing listeners or in adverse listening environments. In order to overcome this problem, we propose an efficient methodology to enhance the speech component of a stereo signal. The method is designed with low computational complexity in mind, and consists of first extracting a center channel from the stereo signal. Novel methods for speech enhancement and voice activity detection are proposed which exploit the stereo information. A speech enhancement filter is estimated based on the relationship between the extracted center channel and all other channels. Subjective and objective evaluations show that this method can successfully enhance intelligibility of the dialogue without affecting the overall sound quality negatively.\n
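A toy version of the centre-channel extraction step is sketched below: per STFT bin, the L/R downmix is weighted by an inter-channel similarity measure, so centre-panned (typically dialogue) energy passes while wide content is attenuated. The mask definition is an assumption, not the paper's estimator.

```python
import numpy as np
from scipy.signal import stft, istft

def extract_center(left, right, fs, nperseg=1024):
    """Centre-channel extraction sketch: mask the mid downmix by a
    per-bin L/R similarity in [0, 1] (1 when L and R coincide)."""
    _, _, L = stft(left, fs, nperseg=nperseg)
    _, _, R = stft(right, fs, nperseg=nperseg)
    num = 2.0 * np.abs(L * np.conj(R))
    den = np.abs(L) ** 2 + np.abs(R) ** 2 + 1e-12
    mask = num / den
    _, c = istft(mask * 0.5 * (L + R), fs, nperseg=nperseg)
    return c
```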
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Noise robust exemplar matching with coupled dictionaries for single-channel speech enhancement.\n \n \n \n \n\n\n \n Yilmaz, E.; Baby, D.; and Van hamme, H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 874-878, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"NoisePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362508,\n  author = {E. Yilmaz and D. Baby and H. {Van hamme}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Noise robust exemplar matching with coupled dictionaries for single-channel speech enhancement},\n  year = {2015},\n  pages = {874-878},\n  abstract = {In this paper, we propose a single-channel speech enhancement system based on the noise robust exemplar matching (N-REM) framework using coupled dictionaries. N-REM approximates noisy speech segments as a sparse linear combination of speech and noise exemplars that are stored in multiple dictionaries based on their length and associated speech unit. The dictionaries providing the best approximation of the noisy mixtures are used to estimate the speech component. We further employ a coupled dictionary approach that performs the approximation in the lower dimensional mel domain to benefit from the reduced computational load and better generalization, and the enhancement in the short-time Fourier transform (STFT) domain for higher spectral resolution. The proposed enhancement system is shown to have superior performance compared to the exemplar-based sparse representations approach using fixed-length exemplars in a single overcomplete dictionary.},\n  keywords = {Fourier transforms;speech enhancement;noise robust exemplar matching;single-channel speech enhancement;noisy speech segments;sparse linear combination;short-time Fourier transform;exemplar-based sparse representations;Speech;Dictionaries;Signal to noise ratio;Speech enhancement;Noise measurement;Approximation methods;Europe;speech enhancement;exemplar matching;coupled dictionaries;non-negative sparse coding},\n  doi = {10.1109/EUSIPCO.2015.7362508},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102149.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a single-channel speech enhancement system based on the noise robust exemplar matching (N-REM) framework using coupled dictionaries. N-REM approximates noisy speech segments as a sparse linear combination of speech and noise exemplars that are stored in multiple dictionaries based on their length and associated speech unit. The dictionaries providing the best approximation of the noisy mixtures are used to estimate the speech component. We further employ a coupled dictionary approach that performs the approximation in the lower dimensional mel domain to benefit from the reduced computational load and better generalization, and the enhancement in the short-time Fourier transform (STFT) domain for higher spectral resolution. The proposed enhancement system is shown to have superior performance compared to the exemplar-based sparse representations approach using fixed-length exemplars in a single overcomplete dictionary.\n
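The non-negative sparse coding at the heart of exemplar-based enhancement can be sketched with standard multiplicative updates, as below. The dictionary layout and sparsity weight are assumptions, and the paper's coupled mel/STFT dictionaries and variable-length exemplars are reduced to a single matrix here.

```python
import numpy as np

def sparse_activations(y, D, n_iter=200, lam=0.1):
    """Non-negative sparse coding of a magnitude spectrogram y against a
    stacked dictionary D = [D_speech | D_noise]: multiplicative updates for
    min ||y - D x||_F^2 + lam * sum(x)  subject to  x >= 0."""
    x = np.full((D.shape[1], y.shape[1]), 0.1)
    for _ in range(n_iter):
        x *= (D.T @ y) / (D.T @ (D @ x) + lam + 1e-12)
    return x

# The speech estimate would then be D_speech @ x[:n_speech_atoms];
# a Wiener-style mask on the noisy STFT typically follows in a full system.
```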
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Restoration of instantaneous amplitude and phase of speech signal in noisy reverberant environments.\n \n \n \n \n\n\n \n Liu, Y.; Nower, N.; Yan, Y.; and Unoki, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 879-883, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RestorationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362509,\n  author = {Y. Liu and N. Nower and Y. Yan and M. Unoki},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Restoration of instantaneous amplitude and phase of speech signal in noisy reverberant environments},\n  year = {2015},\n  pages = {879-883},\n  abstract = {We have proved that restoring the instantaneous amplitude as well as instantaneous phase on Gammatone interbank plays a significant role for speech enhancement. However, it is still challenging topic with dereverberation since previously proposed scheme can only work in noisy environments. In this paper, we extend our previously proposed scheme to be general speech enhancement of removing the effects of noise and reverberation by restoring instantaneous amplitude and phase simultaneously. Objective and subjective experiments were conducted under various noisy reverberant conditions to evaluate the effectiveness of the extension of proposed scheme. The signal to error ratio (SER), correlation, PESQ, and SNR loss were used in objective evaluations. The normalized mean preference score was used in subjective evaluations. The results of both evaluations revealed that the proposed scheme could effectively improve quality and intelligibility of speech signals under noisy reverberant conditions.},\n  keywords = {channel bank filters;interference suppression;reverberation;signal restoration;speech enhancement;speech intelligibility;speech signal instantaneous amplitude restoration;speech signal instantaneous phase restoration;Gammatone filterbank;speech enhancement;dereverberation;noisy environment;reverberation effects removal;noise effects removal;signal to error ratio;SNR loss;correlation;normalized mean preference score;speech signal intelligibility improvement;speech signal quality improvement;noisy reverberant conditions;Noise measurement;Reverberation;Speech;Signal to noise ratio;Speech enhancement;Kalman filters;Europe;Instantaneous amplitude and phase;Kalman filter;Linear prediction;Gammatone filterbank},\n  doi = {10.1109/EUSIPCO.2015.7362509},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105219.pdf},\n}\n\n
\n
\n\n\n
\n We have shown that restoring the instantaneous amplitude as well as the instantaneous phase on a Gammatone filterbank plays a significant role in speech enhancement. However, dereverberation remains a challenging topic, since the previously proposed scheme only works in noisy environments. In this paper, we extend our previously proposed scheme to general speech enhancement that removes the effects of noise and reverberation by restoring instantaneous amplitude and phase simultaneously. Objective and subjective experiments were conducted under various noisy reverberant conditions to evaluate the effectiveness of the extended scheme. The signal to error ratio (SER), correlation, PESQ, and SNR loss were used in objective evaluations. The normalized mean preference score was used in subjective evaluations. The results of both evaluations revealed that the proposed scheme could effectively improve the quality and intelligibility of speech signals under noisy reverberant conditions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robustness improvement of ultrasound-based sensor systems for speech communication.\n \n \n \n \n\n\n \n Cvijanović, N.; Kechichian, P.; Janse, K.; and Kohlrausch, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 884-888, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RobustnessPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362510,\n  author = {N. Cvijanović and P. Kechichian and K. Janse and A. Kohlrausch},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Robustness improvement of ultrasound-based sensor systems for speech communication},\n  year = {2015},\n  pages = {884-888},\n  abstract = {In recent years, auxiliary sensors have been employed to improve the robustness of emerging hands-free speech communication systems based on air-conduction microphones, especially in low signal-to-noise-ratio environments. One such sensor, based on ultrasound, captures articulatory movement information during speech production and has been used in a voice activity detector and also shown to improve the performance of speech recognizers. However, studies thus far have tested such sensors in ideal scenarios where only relevant articulatory information was assumed to be present. Therefore, in this paper the robustness of such sensors in realistic scenarios is investigated. Challenges arising from non-articulatory movements and other environmental influences captured by ultrasound sensors are discussed and strategies for their detection presented. Finally, the proposed strategies are evaluated in an ultrasound-based voice activity detector.},\n  keywords = {microphones;speech recognition;ultrasonic devices;voice communication;speech communication;robustness improvement;ultrasound-based sensor systems;auxiliary sensors;air-conduction microphones;signal-to-noise-ratio environments;articulatory movement information;speech production;speech recognizers;articulatory information;ultrasound-based voice activity detector;Speech;Doppler shift;Ultrasonic imaging;Robustness;Speech processing;Microphones;Ultrasound;articulation;robustness;voice activity detection;Doppler shift},\n  doi = {10.1109/EUSIPCO.2015.7362510},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104237.pdf},\n}\n\n
\n
\n\n\n
\n In recent years, auxiliary sensors have been employed to improve the robustness of emerging hands-free speech communication systems based on air-conduction microphones, especially in low signal-to-noise-ratio environments. One such sensor, based on ultrasound, captures articulatory movement information during speech production and has been used in a voice activity detector and also shown to improve the performance of speech recognizers. However, studies thus far have tested such sensors in ideal scenarios where only relevant articulatory information was assumed to be present. Therefore, in this paper the robustness of such sensors in realistic scenarios is investigated. Challenges arising from non-articulatory movements and other environmental influences captured by ultrasound sensors are discussed and strategies for their detection presented. Finally, the proposed strategies are evaluated in an ultrasound-based voice activity detector.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributive estimation of frequency selective channels for massive MIMO systems.\n \n \n \n \n\n\n \n Zaib, A.; Masood, M.; Ghogho, M.; and Al-Naffouri, T. Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 889-893, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DistributivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362511,\n  author = {A. Zaib and M. Masood and M. Ghogho and T. Y. Al-Naffouri},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Distributive estimation of frequency selective channels for massive MIMO systems},\n  year = {2015},\n  pages = {889-893},\n  abstract = {We consider frequency selective channel estimation in the uplink of massive MIMO-OFDM systems, where our major concern is complexity. A low complexity distributed LMMSE algorithm is proposed that attains near optimal channel impulse response (CIR) estimates from noisy observations at receive antenna array. In proposed method, every antenna estimates the CIRs of its neighborhood followed by recursive sharing of estimates with immediate neighbors. At each step, every antenna calculates the weighted average of shared estimates which converges to near optimal LMMSE solution. The simulation results validate the near optimal performance of proposed algorithm in terms of mean square error (MSE).},\n  keywords = {antenna arrays;channel estimation;mean square error methods;MIMO communication;OFDM modulation;receiving antennas;recursive estimation;wireless channels;frequency selective channels distributive estimation;massive MIMO-OFDM system;low complexity distributed LMMSE algorithm;optimal channel impulse response;CIR;receive antenna array;recursive sharing;mean square error;MIMO;Estimation;Channel estimation;Arrays;Antenna arrays;Covariance matrices;Channel estimation;massive MIMO;Least squares;LMMSE;distributed estimation},\n  doi = {10.1109/EUSIPCO.2015.7362511},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104341.pdf},\n}\n\n
\n
\n\n\n
\n We consider frequency selective channel estimation in the uplink of massive MIMO-OFDM systems, where our major concern is complexity. A low complexity distributed LMMSE algorithm is proposed that attains near optimal channel impulse response (CIR) estimates from noisy observations at the receive antenna array. In the proposed method, every antenna estimates the CIRs of its neighborhood, followed by recursive sharing of estimates with immediate neighbors. At each step, every antenna calculates the weighted average of the shared estimates, which converges to the near optimal LMMSE solution. The simulation results validate the near optimal performance of the proposed algorithm in terms of mean square error (MSE).\n
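A simplified sketch of the local-estimate-then-share pattern described above: each antenna forms a regularised least-squares (LMMSE-style) CIR estimate from its own observation, then averages recursively with its immediate neighbours. Uniform averaging weights and a shared pilot matrix A are assumptions; the paper's algorithm uses weighted averaging chosen to approach the optimal LMMSE solution.

```python
import numpy as np

def distributed_lmmse(Y, A, sigma2, neighbours, n_share=5):
    """Y: list of per-antenna observation vectors; A: common pilot matrix;
    neighbours[r]: list of antennas adjacent to antenna r."""
    R = len(Y)
    G = np.linalg.inv(A.conj().T @ A + sigma2 * np.eye(A.shape[1]))
    h = [G @ (A.conj().T @ Y[r]) for r in range(R)]     # local estimates
    for _ in range(n_share):                            # recursive sharing
        h = [np.mean([h[j] for j in neighbours[r] + [r]], axis=0)
             for r in range(R)]
    return h
```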
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Likelihood-based blind separation of QAM signals in time-varying dual-polarized channels.\n \n \n \n\n\n \n Zhu, D.; Mathews, V. J.; and Detienne, D. H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 894-898, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362512,\n  author = {D. Zhu and V. J. Mathews and D. H. Detienne},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Likelihood-based blind separation of QAM signals in time-varying dual-polarized channels},\n  year = {2015},\n  pages = {894-898},\n  abstract = {This paper presents a new method for separating QAM signals in time-varying dual-polarized channels. The system applies an adaptive blind source separation (BSS) method based on the likelihood functions of the amplitude of the transmitted signals to recover the input signals and to track the time-varying polarization coefficients. The results demonstrate that the likelihood-based adaptive BSS method is able to recover the source signals of different modulation types for a wide range of input SNRs. The symbol error rate (SER) of estimated signals is close to the theoretical SER of different modulation types at lower SNRs. At high SNRs, the SERs are dominated by the source separation errors. The results also show that this algorithm tracks the time-varying polarization channels coefficients with small errors.},\n  keywords = {blind source separation;quadrature amplitude modulation;likelihood-based blind separation;QAM signals;time-varying dual-polarized channels;adaptive blind source separation;time-varying polarization coefficients;symbol error rate;source separation errors;time-varying polarization channels;Signal processing algorithms;Blind source separation;Quadrature amplitude modulation;Error analysis;Europe;Likelihood function;blind source separation;time-varying dual-polarization},\n  doi = {10.1109/EUSIPCO.2015.7362512},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
@InProceedings{7362513,
  author = {T. M. Vo and K. Amis and T. Chonavel and P. Siohan},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {BIT loading in MIMO-PLC systems with the presence of interference},
  year = {2015},
  pages = {899-903},
  abstract = {In broadband indoor power line communication (PLC) systems, multiple input multiple output (MIMO) techniques have been introduced to address the increasing demand for high data rates under the constraint of limited allocated bandwidth. Whereas the self inter-antenna interference can be dealt with on each subcarrier, both inter-carrier and inter-symbol interference can occur, yielding sub-optimal bit loading if not considered. In this paper, we extend to the MIMO case the low-complexity bit/power allocation algorithm, called Reduced Complexity Algorithm (RCA), that we previously applied to the SISO case. Based on the Greedy principle, the RCA takes the interference into account to optimize the bit loading. We consider two MIMO schemes: optimum eigen beamforming and spatial multiplexing. Simulation results show the efficiency of the RCA in terms of throughput and computation cost in both cases.},
  keywords = {array signal processing;bandwidth allocation;carrier transmission on power lines;indoor radio;intercarrier interference;intersymbol interference;MIMO communication;space division multiplexing;broadband indoor power line communication;MIMO-PLC system bit loading;multiple input multiple output technique;limited allocated bandwidth constraint;self interantenna interference;intersymbol interference;intercarrier interference;low-complexity bit algorithm;power allocation algorithm;reduced complexity algorithm;RCA;optimum eigen beamforming;spatial multiplexing;Interference;MIMO;Complexity theory;Loading;Throughput;Transmitting antennas;Resource management;Bit loading;MIMO;Interference;Power Line Communication;Greedy based approach},
  doi = {10.1109/EUSIPCO.2015.7362513},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100449.pdf},
}
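
Editor's note: the Greedy bit-loading principle the RCA builds on can be stated in a few lines: at every step, grant one extra bit to the subcarrier whose incremental power cost is smallest. A minimal interference-free SISO sketch; the SNR-gap power model gamma*(2**b - 1)/g_k and all parameter names are illustrative assumptions, not the RCA itself.

    import numpy as np

    def greedy_bit_loading(gains, total_power, gamma=1.0, max_bits=10):
        # gains: per-subcarrier channel SNR gains g_k; sending b bits on
        # subcarrier k costs roughly gamma * (2**b - 1) / g_k, so the
        # incremental cost of one more bit is gamma * 2**b / g_k.
        gains = np.asarray(gains, dtype=float)
        bits = np.zeros(len(gains), dtype=int)
        used = 0.0
        while True:
            inc = np.where(bits < max_bits, gamma * 2.0**bits / gains, np.inf)
            k = int(np.argmin(inc))
            if not np.isfinite(inc[k]) or used + inc[k] > total_power:
                break
            bits[k] += 1
            used += inc[k]
        return bits, used

    print(greedy_bit_loading([2.0, 1.0, 0.25], total_power=10.0))
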
@InProceedings{7362514,
  author = {O. Fresnedo and J. P. González-Coma and L. Castedo and J. García-Frías},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Analog joint source channel coding for MIMO broadcast channels},
  year = {2015},
  pages = {904-908},
  abstract = {We study the application of analog Joint Source Channel Coding (JSCC) techniques for the transmission of discrete-time, continuous-amplitude source information symbols over the Multiple-Input Multiple-Output (MIMO) Broadcast Channel (BC). Two channel access methods are proposed with different requirements regarding channel knowledge at transmission: Code Division Multiple Access (CDMA) and linear Minimum Mean Square Error (MMSE). The obtained results show that the CDMA scheme performs rather well when the channel responses are unknown at transmission, whereas the linear MMSE access approaches the Optimal Performance Theoretically Attainable (OPTA) when the channel information is also available at transmission.},
  keywords = {broadcast channels;broadcast communication;code division multiple access;combined source-channel coding;least mean squares methods;MIMO communication;wireless channels;optimal performance theoretically attainable results;channel access methods;CDMA scheme;OPTA;MMSE access approaches;minimum mean square error;code division multiple access;multiple-input multiple-output broadcast channel;continuous-amplitude source information symbols;JSCC techniques;MIMO broadcast channels;analog joint source channel coding;Multiaccess communication;MIMO;Base stations;Receivers;Distortion;Decoding;Europe;JSCC;BC;CDMA;MMSE Transceiver},
  doi = {10.1109/EUSIPCO.2015.7362514},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103369.pdf},
}
@InProceedings{7362515,
  author = {N. Nhan and P. Rostaing and K. Amis and L. Collin and E. Radoi},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Optimized MIMO symbol mapping to improve the turbo cliff region of iterative precoded MIMO detection},
  year = {2015},
  pages = {909-913},
  abstract = {In this paper we investigate the concatenation of the multiple input multiple output (MIMO) max-dmin linear precoder with an outer forward error correction (FEC) code assuming turbo detection at the receiver. A maximum squared Euclidean weight (MSEW) binary-to-MIMO symbol mapper is introduced in the precoding scheme. An extrinsic information transfer (EXIT) chart is used to analyze the turbo-cliff and error-floor of the proposed MIMO symbol mapper. Analysis and simulation results show significant improvement of the proposed MSEW symbol mapper in terms of error-rate performance.},
  keywords = {concatenated codes;error statistics;forward error correction;iterative methods;linear codes;MIMO communication;precoding;radio receivers;signal detection;turbo codes;optimized MIMO symbol mapping;iterative precoded MIMO detection turbo cliff region improvement;multiple input multiple output max-dmin linear precoder;forward error correction code;FEC code;turbo detection;maximum squared Euclidean weight;MSEW binary-to-MIMO symbol mapper;extrinsic information transfer chart;iterative receiver;MIMO;Signal to noise ratio;Receivers;Wires;MIMO linear precoder;turbo detection;iterative receiver;symbol mapping;EXIT chart},
  doi = {10.1109/EUSIPCO.2015.7362515},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097581.pdf},
}
@InProceedings{7362516,
  author = {M. Mandloi and V. Bhatia},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Multiple stage ant colony optimization algorithm for near-OPTD large-MIMO detection},
  year = {2015},
  pages = {914-918},
  abstract = {In this paper, we propose a multiple stage ant colony optimization (MSACO) algorithm for symbol vector detection in large multiple-input multiple-output (MIMO) systems. The proposed algorithm uses the minimum mean squared error (MMSE) solution as an initial solution in every stage, and produces a set of solutions by using ant colony optimization (ACO) based MIMO detection. Finally, the best solution from the generated solution set is selected using the maximum likelihood (ML) metric. Simulation results show that the proposed algorithm significantly outperforms the existing ACO algorithm and some of the other MIMO detection algorithms in terms of bit error rate (BER) performance and achieves near-ML performance. Furthermore, the BER performance of the proposed algorithm shifts towards single input single output (SISO) additive white Gaussian noise (AWGN) performance with an increase in the number of antennas, which adds to the importance of the MSACO algorithm for detection in large MIMO systems.},
  keywords = {ant colony optimisation;antenna arrays;AWGN;error statistics;least mean squares methods;maximum likelihood estimation;MIMO communication;near-optimal large-MIMO detection;multiple stage ant colony optimization;MSACO;symbol vector detection;multiple-input multiple-output systems;minimum mean squared error;MMSE;maximum likelihood metric;bit error rate;BER;single input single output;SISO;additive white Gaussian noise;AWGN;antenna arrays;Signal processing algorithms;MIMO;Bit error rate;Cities and towns;Detectors;Transmitting antennas;Signal processing;Ant colony optimization;multiple-input},
  doi = {10.1109/EUSIPCO.2015.7362516},
  issn = {2076-1465},
  month = {Aug},
}
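
Editor's note: the MMSE solution that seeds every MSACO stage is the standard linear detector below; the ACO search around it is omitted. A minimal sketch, with the function name and array shapes as assumptions.

    import numpy as np

    def mmse_detect(H, y, noise_var):
        # Linear MMSE estimate: x_hat = (H^H H + sigma^2 I)^{-1} H^H y,
        # for an Nr x Nt channel H and received vector y of length Nr.
        nt = H.shape[1]
        A = H.conj().T @ H + noise_var * np.eye(nt)
        return np.linalg.solve(A, H.conj().T @ y)

In the stages described by the abstract, this unquantized estimate would be sliced to the symbol alphabet and used as the starting point of the ant-colony search.
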
@InProceedings{7362517,
  author = {A. J. Al-Askery and C. C. Tsimenidis and S. Boussakta},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Fixed-point arithmetic detectors for massive MIMO-OFDM systems},
  year = {2015},
  pages = {919-923},
  abstract = {In this paper, the performance of massive multiple input multiple output (MIMO) systems is investigated using reduced detection implementations for MIMO detectors. The motivation for this paper is the need for a reduced complexity detector to be implemented as an optimum massive MIMO detector with low precision. We used different decomposition schemes to build the linear detector based on the IEEE 754 standard, in addition to user-defined precision for selected detectors. Simulations are used to demonstrate the behaviour of several matrix inversion schemes under reduced bit resolution. The numerical results demonstrate improved performance when using QRD and pivoted LDLT decomposition schemes at reduced precision.},
  keywords = {matrix decomposition;matrix inversion;MIMO communication;OFDM modulation;signal detection;LDLT decomposition scheme;QRD decomposition scheme;reduced bit resolution;matrix inversion scheme;user-defined precision;IEEE 754 standard;linear detector;optimum massive MIMO detector;reduced complexity detector;reduced detection implementation;massive multiple-input multiple-output systems;massive MIMO-OFDM systems;fixed-point arithmetic detectors;Detectors;MIMO;Matrix decomposition;Complexity theory;Receivers;Signal processing algorithms;Hardware;Massive MIMO;OFDM;Multipath Fading;Low Precision Detection;Fixed Point Representation;Linear Receivers},
  doi = {10.1109/EUSIPCO.2015.7362517},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095215.pdf},
}
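
Editor's note: the observation that QR-based solving degrades more gracefully than explicit normal-equations inversion at reduced precision can be reproduced with a toy experiment. The sketch below emulates low-precision storage with a float16 round trip (NumPy's linalg routines do not operate on float16 directly, so quantization is applied to the data only); everything here is illustrative, not the paper's fixed-point setup.

    import numpy as np

    def quantize(a):
        # Emulate low-precision storage with a float16 round trip.
        return a.astype(np.float16).astype(np.float64)

    rng = np.random.default_rng(0)
    H = rng.standard_normal((16, 8))
    x = rng.standard_normal(8)
    y = H @ x
    Hq, yq = quantize(H), quantize(y)

    # Normal equations square the condition number before inverting ...
    x_ne = np.linalg.inv(Hq.T @ Hq) @ Hq.T @ yq
    # ... while QR factorizes the (better conditioned) matrix itself.
    Q, R = np.linalg.qr(Hq)
    x_qr = np.linalg.solve(R, Q.T @ yq)

    print(np.linalg.norm(x - x_ne), np.linalg.norm(x - x_qr))
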
@InProceedings{7362518,
  author = {I. Gorynin and S. Derrode and E. Monfrini and W. Pieczynski},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Exact fast smoothing in switching models with application to stochastic volatility},
  year = {2015},
  pages = {924-928},
  abstract = {We consider the problem of statistical smoothing in nonlinear non-Gaussian systems. Our novel method relies on a Markov-switching model to operate recursively on series of noisy input data to produce an estimate of the underlying system state. We show through a set of experiments that our technique is efficient within the framework of the stochastic volatility model.},
  keywords = {Markov processes;smoothing methods;fast smoothing;switching models;statistical smoothing;nonlinear nonGaussian systems;Markov-switching model;stochastic volatility model;Hidden Markov models;Smoothing methods;Markov processes;Switches;Mathematical model;Probability density function;Signal processing algorithms;Nonlinear systems;Stochastic volatility;Bayesian smoother;Conditionally Gaussian linear state-space model;Smoothing in switching systems},
  doi = {10.1109/EUSIPCO.2015.7362518},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103749.pdf},
}
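
Editor's note: for readers unfamiliar with the target application, the canonical discrete-time stochastic volatility model that such smoothers are evaluated on takes only two lines. A simulation sketch follows; the parameter values are illustrative, and this is the generic SV model, not necessarily the exact variant used in the paper.

    import numpy as np

    def simulate_sv(T, mu=-1.0, phi=0.95, sigma=0.2, seed=0):
        # Log-volatility follows an AR(1); observations are scaled white noise:
        #   h_t = mu + phi * (h_{t-1} - mu) + sigma * eta_t
        #   y_t = exp(h_t / 2) * eps_t
        rng = np.random.default_rng(seed)
        h = np.empty(T)
        h[0] = mu
        for t in range(1, T):
            h[t] = mu + phi * (h[t-1] - mu) + sigma * rng.standard_normal()
        y = np.exp(h / 2) * rng.standard_normal(T)
        return y, h
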
@InProceedings{7362519,
  author = {S. Pei and C. Wen},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Legendre Ramanujan Sums transform},
  year = {2015},
  pages = {929-932},
  abstract = {In this paper, the Legendre Ramanujan Sums transform (LRST) is proposed and derived by applying the DFT to the complete generalized Legendre sequence (CGLS) matrices. The original matrix-based Ramanujan Sums transform (RST), obtained by truncating the Ramanujan Sums series, is non-orthogonal and lacks a fast algorithm; the proposed LRST is orthogonal and admits a fast algorithm of O(N log2 N) complexity. The LRST transform matrix is sparse and can be computed with only additions and multiplications, further improving efficiency. It is suitable for image compression and transform coding. The LRST is also useful for analyzing periodic signals, especially sequences whose periods are already known.},
  keywords = {data compression;discrete Fourier transforms;image coding;image sequences;sparse matrices;transform coding;legendre Ramanujan Sums transform;DFT;complete generalized Legendre sequence matrix;CGLS matrix;orthogonal property;sparse matrix;LRST transform matrix;image compression;transform coding;image sequence;Europe;Signal processing;Conferences;Generalized Legendre sequence;Ramanujan sum;Image transform coding},
  doi = {10.1109/EUSIPCO.2015.7362519},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570091895.pdf},
}
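
Editor's note: for reference, the Ramanujan sum underlying both the RST and the proposed LRST is c_q(n), the sum of exp(2*pi*i*a*n/q) over the residues a in 1..q coprime to q, and it is always an integer. A direct, unoptimized sketch:

    from math import gcd
    import cmath

    def ramanujan_sum(q, n):
        # c_q(n): sum of e^{2*pi*i*a*n/q} over 1 <= a <= q with gcd(a, q) = 1.
        s = sum(cmath.exp(2j * cmath.pi * a * n / q)
                for a in range(1, q + 1) if gcd(a, q) == 1)
        return round(s.real)  # imaginary parts cancel; the value is an integer

    print([ramanujan_sum(4, n) for n in range(8)])  # period 4: [2, 0, -2, 0, 2, 0, -2, 0]
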
@InProceedings{7362520,
  author = {R. A. Ando and L. T. Duarte and C. Jutten and R. Attux},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A blind source separation method for chemical sensor arrays based on a second order mixing model},
  year = {2015},
  pages = {933-937},
  abstract = {In this paper we propose a blind source separation method to process the data acquired by an array of ion-selective electrodes in order to measure the ionic activity of different ions in an aqueous solution. While this problem has already been studied in the past, the method presented differs from the ones previously analyzed by approximating the mixing function by a second-degree polynomial, and using a method based on the differential of the mutual information to adjust the parameter values. Experimental results, both with synthetic and real data, suggest that the algorithm proposed is more accurate than the other models in the literature.},
  keywords = {array signal processing;blind source separation;chemical sensors;sensor arrays;blind source separation method;chemical sensor array;second order mixing model;ion selective electrode;second degree polynomial;Decision support systems;Europe;Signal processing;Conferences;Blind source separation;chemical sensor arrays;ion-selective electrodes;quadratic mixing model},
  doi = {10.1109/EUSIPCO.2015.7362520},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104521.pdf},
}
@InProceedings{7362521,
  author = {K. Barbé and L. Lauwers and L. G. Fuentes},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Taylor-Fourier series analysis for fractional order systems},
  year = {2015},
  pages = {938-942},
  abstract = {Dynamical systems describing a physical process with a dominant diffusion phenomenon require a large-dimensional model due to their long memory. Without prior knowledge, it is however not straightforward to know whether one deals with a fractional order system or long memory effects. Since the parametric modeling of a fractional system is very involved, we tackle the question whether fractional insight can be gathered in a non-parametric way. In this paper we show that the classical Fourier basis leading to the Frequency Response Function (FRF) lacks fractional insight. Therefore, we introduce a Taylor-Fourier basis to obtain non-parametric insight into the fractional system. This analysis proposes a novel type of spectrum to visualize the spectral content of a fractional system: the Taylor-Fourier spectrum.},
  keywords = {Fourier series;frequency response;nonparametric statistics;spectral analysis;Taylor-Fourier series analysis;fractional order systems;dynamical systems;physical process;dominant diffusion phenomenon;large dimensional model;long memory effects;parametric modeling;classical Fourier basis;frequency response function;nonparametric insight;spectral content;Decision support systems;Europe;Signal processing;Conferences;Poles and zeros;Time-frequency analysis;Non-parametric modeling;dynamic systems;fractional order systems;Taylor-Fourier basis;theory of frames},
  doi = {10.1109/EUSIPCO.2015.7362521},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102515.pdf},
}
@InProceedings{7362522,
  author = {F. Barzideh and K. Skretting and K. Engan},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {The flexible signature dictionary},
  year = {2015},
  pages = {943-947},
  abstract = {Dictionary learning and sparse representation of signals and images have been hot topics for the past decade, aiming to find the sparsest representation for the signal(s) at hand. Typically, the dictionary learning process involves finding a large number of free variables, and the resulting dictionary in general does not have a specific structure. In this paper we use ideas from the Image Signature Dictionary and general overlapping frames and propose a flexible signature dictionary. We show that the resulting signatures capture the essence of the signal and can represent signals of their own type very well, as opposed to signals of other types.},
  keywords = {image representation;general overlapping frames;image signature dictionary;sparse image representation;sparse signal representation;dictionary learning process;flexible signature dictionary;Dictionaries;Training;Signal processing algorithms;Noise measurement;Encoding;Europe;Signal processing;dictionary learning;signature dictionary;sparse representation},
  doi = {10.1109/EUSIPCO.2015.7362522},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104751.pdf},
}
@InProceedings{7362523,
  author = {J. H. {de M. Goulart} and M. Boizard and R. Boyer and G. Favier and P. Comon},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Statistical efficiency of structured CPD estimation applied to Wiener-Hammerstein modeling},
  year = {2015},
  pages = {948-952},
  abstract = {The computation of a structured canonical polyadic decomposition (CPD) is useful to address several important modeling problems in real-world applications. In this paper, we consider the identification of a nonlinear system by means of a Wiener-Hammerstein model, assuming a high-order Volterra kernel of that system has been previously estimated. Such a kernel, viewed as a tensor, admits a CPD with banded circulant factors which comprise the model parameters. To estimate them, we formulate specialized estimators based on recently proposed algorithms for the computation of structured CPDs. Then, considering the presence of additive white Gaussian noise, we derive a closed-form expression for the Cramér-Rao bound (CRB) associated with this estimation problem. Finally, we assess the statistical performance of the proposed estimators via Monte Carlo simulations, by comparing their mean-square error with the CRB.},
  keywords = {AWGN;estimation theory;identification;Monte Carlo methods;tensors;Volterra equations;statistical efficiency;structured CPD estimation;Wiener-Hammerstein modeling;structured canonical polyadic decomposition;nonlinear system identification;high-order Volterra kernel;tensor;banded circulant factors;specialized estimators;additive white Gaussian noise;closed-form expression;Cramér-Rao bound;CRB;estimation problem;statistical performance assessment;Monte Carlo simulations;Tensile stress;Computational modeling;Kernel;Estimation;Yttrium;Signal processing algorithms;Europe;Tensor Decomposition;Structured CPD;Cramér-Rao bound;Wiener-Hammerstein model},
  doi = {10.1109/EUSIPCO.2015.7362523},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100853.pdf},
}
@InProceedings{7362524,
  author = {O. Karakuş and E. E. Kuruoğlu and M. A. Altinkaya},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Estimation of the nonlinearity degree for polynomial autoregressive processes with RJMCMC},
  year = {2015},
  pages = {953-957},
  abstract = {Despite the popularity of linear process models in signal and image processing, various real-life phenomena exhibit nonlinear characteristics. Compromising between the realistic but computationally heavy nonlinear models and the simplicity of linear estimation methods, linear-in-the-parameters nonlinear models such as polynomial autoregressive (PAR) models have been accessible analytical tools for modelling such phenomena. In this work, we aim to demonstrate the potential of Reversible Jump Markov Chain Monte Carlo (RJMCMC), a successful statistical tool for model dimension estimation, in nonlinear process identification. We explore the capability of RJMCMC in jumping not only between spaces with different dimensions, but also between different classes of models. In particular, we demonstrate the success of RJMCMC in sampling in linear and nonlinear spaces of varying dimensions for the estimation of PAR processes.},
  keywords = {autoregressive processes;image processing;Markov processes;Monte Carlo methods;signal processing;PAR process estimation;nonlinear space;linear space;nonlinear process identification;model dimension estimation;reversible jump Markov chain Monte Carlo;PAR model;polynomial autoregressive model;linear estimation method;nonlinear model;nonlinear characteristics;image processing;signal processing;linear process model;RJMCMC capability;polynomial autoregressive process;nonlinearity degree estimation;Mathematical model;Data models;Estimation;Computational modeling;Europe;Signal processing;Analytical models;Polynomial AR;Reversible Jump MCMC;Nonlinearity degree estimation},
  doi = {10.1109/EUSIPCO.2015.7362524},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095299.pdf},
}
@InProceedings{7362525,
  author = {P. M. Baggenstoss},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Derivative-augmented features as a dynamic model for time-series},
  year = {2015},
  pages = {958-962},
  abstract = {In the field of automatic speech recognition (ASR), it is common practice to augment features with time-derivatives, which we call derivative-augmented features (DAF). Although the method is effective for modeling the dynamic behavior of features and produces significantly lower classification error, it violates the assumption of conditional independence of the observations. The traditional approach is to ignore the problem (simply apply the mathematical approach that assumes independence). In this paper, we take an alternative approach in which we still use the same mathematical approach as before, but calculate a correction factor by integrating out the redundant dimensions. This makes it possible to compare and combine a DAF PDF and a non-DAF PDF. We conduct experiments to demonstrate the usefulness of the approach.},
  keywords = {correlation methods;hidden Markov models;signal classification;speech recognition;time series;derivative-augmented features;time-series;automatic speech recognition;ASR;time-derivatives;DAF;classification error;mathematical approach;correction factor calculation;redundant dimensions;hidden Markov model;HMM;Hidden Markov models;Markov processes;Europe;Signal processing;Indexes;Probability density function;Feature extraction;PDF estimation;feature derivatives;HMM},
  doi = {10.1109/EUSIPCO.2015.7362525},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103093.pdf},
}
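
Editor's note: the time-derivative augmentation referred to here is conventionally computed with the regression ("delta") formula over a window of +/-N frames. A minimal sketch of that standard computation, HTK-style; the edge replication and N = 2 are common defaults assumed here, not details taken from the paper.

    import numpy as np

    def delta(features, N=2):
        # d_t = sum_{n=1..N} n * (x_{t+n} - x_{t-n}) / (2 * sum_{n=1..N} n^2),
        # computed per feature dimension, with edge frames replicated.
        T = features.shape[0]
        denom = 2.0 * sum(n * n for n in range(1, N + 1))
        padded = np.pad(features, ((N, N), (0, 0)), mode='edge')
        d = np.zeros_like(features, dtype=float)
        for n in range(1, N + 1):
            d += n * (padded[N + n:T + N + n] - padded[N - n:T + N - n])
        return d / denom

A DAF vector is then the concatenation of the static features with delta (and often delta-delta) coefficients.
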
@InProceedings{7362526,
  author = {D. Belega and D. Petri and D. Dallet},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Analysis of the harmonics contribution on the three-point interpolated DFT frequency estimator},
  year = {2015},
  pages = {963-967},
  abstract = {In this paper, the contribution of harmonics to the frequency estimate obtained by the classical three-point interpolated Fourier method is investigated in the case when noisy and harmonically distorted complex sinusoids are analyzed. To this aim, the expressions for the frequency estimation error due to harmonics and the approximated combined variance of the frequency estimator due to both harmonics and wideband noise are derived. Using the obtained expressions, the contributions of each harmonic and of wideband noise to the frequency estimation error are then compared. The accuracies of the derived expressions are verified through computer simulations.},
  keywords = {discrete Fourier transforms;harmonic analysis;signal processing;harmonics contribution;three-point interpolated DFT frequency estimator;interpolated Fourier method;wideband noise;Harmonic analysis;Frequency estimation;Harmonic distortion;Discrete Fourier transforms;Wideband;Noise measurement;complex sinusoid;error and statistical analysis;frequency estimation;harmonics;interpolated Fourier method},
  doi = {10.1109/EUSIPCO.2015.7362526},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103451.pdf},
}
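
Editor's note: a representative member of the three-point interpolated-DFT family analyzed here is Jacobsen's estimator for a rectangular window: locate the peak bin k and correct it using the complex DFT values of its two neighbours. A sketch of this specific variant, which is an assumption; the estimator studied in the paper may differ in window and correction formula.

    import numpy as np

    def three_point_dft_freq(x, fs):
        # delta = -Re{(X[k+1] - X[k-1]) / (2 X[k] - X[k-1] - X[k+1])},
        # then f_hat = (k + delta) * fs / N  (rectangular window).
        X = np.fft.fft(x)
        N = len(x)
        k = int(np.argmax(np.abs(X)))
        Xm, X0, Xp = X[k - 1], X[k], X[(k + 1) % N]
        delta = -((Xp - Xm) / (2 * X0 - Xm - Xp)).real
        return (k + delta) * fs / N

    # complex sinusoid at 103.4 Hz, fs = 1000 Hz, N = 256
    fs, N = 1000.0, 256
    n = np.arange(N)
    x = np.exp(2j * np.pi * 103.4 * n / fs)
    print(three_point_dft_freq(x, fs))  # close to 103.4
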
@InProceedings{7362527,
  author = {A. Youcef and R. Diversi and E. Grivel},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Errors-in-variables identification of noisy moving average models},
  year = {2015},
  pages = {968-972},
  abstract = {In this paper, we propose to address the moving average (MA) parameter estimation issue based only on noisy observations and without any knowledge of the variance of the additive stationary white Gaussian measurement noise. For this purpose, the MA process is approximated by a high-order AR process and its parameters are estimated by using an errors-in-variables (EIV) approach, which also makes it possible to derive the variances of both the driving process and the additive white noise. The method is based on the Frisch scheme. One of the main difficulties in this case is to evaluate the minimal AR-process order that must be considered to have a {"}good{"} approximation of the MA process. To this end, we propose a way based on the K-means method. Simulation results of the proposed method are presented and compared to existing MA-parameter estimation approaches.},
  keywords = {approximation theory;autoregressive moving average processes;AWGN;measurement errors;measurement uncertainty;parameter estimation;signal processing;errors-in-variables identification;EIV approach;noisy moving average models;MA parameter estimation;noisy observations;additive stationary white Gaussian measurement noise;autoregressive processes;high-order AR process;Frisch scheme;MA process;K-means method;Noise measurement;Approximation methods;Correlation;Signal processing;Europe;Estimation;Mathematical model;Moving average model;autoregressive model;errors-in-variables (EIV);K-means classification},
  doi = {10.1109/EUSIPCO.2015.7362527},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103805.pdf},
}
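
Editor's note: the high-order AR approximation used here rests on the fact that an invertible MA(q) model has an AR(infinity) representation whose coefficients follow a simple recursion on the MA polynomial. A sketch of that recursion, purely illustrative; the paper estimates the AR model blindly from noisy data rather than from known MA coefficients.

    def ma_to_long_ar(b, p):
        # For x_t = e_t + b_1 e_{t-1} + ... + b_q e_{t-q} (invertible), the
        # AR(inf) form e_t = sum_k a_k x_{t-k} satisfies a_0 = 1 and, for k >= 1,
        # a_k = -(b_1 a_{k-1} + ... + b_{min(k,q)} a_{k-min(k,q)}).
        a = [1.0] + [0.0] * p
        for k in range(1, p + 1):
            a[k] = -sum(b[j - 1] * a[k - j] for j in range(1, min(k, len(b)) + 1))
        return a

    print(ma_to_long_ar([0.5], 5))  # geometric decay: [1.0, -0.5, 0.25, -0.125, ...]

The faster the coefficients decay, the lower the AR order needed for a "good" truncation, which is exactly the order-selection difficulty the abstract mentions.
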
@InProceedings{7362528,
  author = {J. Nordh},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Metropolis-Hastings improved particle smoother and marginalized models},
  year = {2015},
  pages = {973-977},
  abstract = {This paper combines the Metropolis-Hastings Improved Particle Smoother (MHIPS) with marginalized models. It demonstrates the effectiveness of the combination by looking at two examples: a degenerate model of a double integrator and a fifth-order mixed linear/nonlinear Gaussian (MLNLG) model. For the MLNLG model, two different methods are compared with the non-marginalized case; the first marginalizes the linear states only during the filtering, the second marginalizes during both the forward filtering and backward smoothing passes. The results demonstrate that marginalization not only improves the overall performance, but also increases the rate of improvement for each iteration of the MHIPS algorithm. It thus reduces the number of iterations required to beat the performance of a Forward-Filter Backward Simulator approach for the same model.},
  keywords = {Gaussian processes;smoothing methods;marginalized models;metropolis-hastings improved particle smoother;MHIPS;degenerate model;double integrator;fifth order mixed linear/nonlinear Gaussian model;MLNLG model;foward filtering;backward smoothing pass;forward-filter backward simulator approach;Smoothing methods;Trajectory;Kalman filters;Proposals;Computational modeling;Europe;Metropolis-Hasting Improved Particle Smoother;Rao-Blackwellized smoothing;Particle Smoothing;Particle Filter},
  doi = {10.1109/EUSIPCO.2015.7362528},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104589.pdf},
}
@InProceedings{7362529,
  author = {V. Tralli and A. Conti},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {MIMO systems outage capacity based on minors moments of Wishart matrices},
  year = {2015},
  pages = {978-982},
  abstract = {Complex Wishart matrices represent a class of random matrices exploited in a number of wireless communication problems. This paper analyzes the first- and second-order statistical moments of complex Wishart matrices' minors. This enables the derivation of new closed-form approximations for the outage capacity of multiple input multiple output (MIMO) systems operating in Rayleigh fading channels at any signal-to-noise ratio (SNR) regime and with any number of inputs and outputs. The derived expressions are compared with bounds known in the literature as well as with simulations. Results show the tightness of the proposed approximations to simulations for a broad range of MIMO settings.},
  keywords = {approximation theory;MIMO communication;Rayleigh channels;statistical analysis;MIMO systems outage capacity;Wishart matrices;random matrices;wireless communication problems;multiple input multiple output;signal-to-noise ratio;SNR;MIMO settings;MIMO;Approximation methods;Signal to noise ratio;Fading;Wireless communication;Indexes;Europe;MIMO systems;outage capacity;Wishart matrix;fading channels;statistical characterization},
  doi = {10.1109/EUSIPCO.2015.7362529},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104735.pdf},
}
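
Editor's note: the quantity approximated in closed form here can always be checked by Monte Carlo: draw i.i.d. Rayleigh channels, compute the instantaneous capacity log2 det(I + (SNR/Nt) H H^H), and read off the outage quantile. A sketch of that baseline; equal power allocation and the 10% outage level are assumptions.

    import numpy as np

    def outage_capacity_mc(nt, nr, snr, p_out=0.1, trials=20000, seed=0):
        # Empirical p_out-quantile of C = log2 det(I + (snr/nt) H H^H),
        # with H an nr x nt matrix of i.i.d. CN(0, 1) entries.
        rng = np.random.default_rng(seed)
        caps = np.empty(trials)
        for i in range(trials):
            H = (rng.standard_normal((nr, nt))
                 + 1j * rng.standard_normal((nr, nt))) / np.sqrt(2)
            caps[i] = np.log2(np.linalg.det(np.eye(nr)
                              + (snr / nt) * H @ H.conj().T).real)
        return np.quantile(caps, p_out)

    print(outage_capacity_mc(2, 2, snr=10.0))  # bits/s/Hz at 10% outage
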
@InProceedings{7362530,
  author = {A. Gnutti and F. Guerrini and R. Leonardi},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Representation of signals by local symmetry decomposition},
  year = {2015},
  pages = {983-987},
  abstract = {In this paper we propose a segmentation of finite support sequences based on the even/odd decomposition of a signal. The objective is to find a more compact representation of information. To this aim, the paper starts by generalizing the even/odd decomposition, concentrating the energy on either the even or the odd part by optimally placing the centre of symmetry. Local symmetry intervals are thus located. The sequence segmentation is further processed by applying an iterative growth on the candidate segments to remove any overlapping portions. Experimental results show that the set of segments can be more efficiently compressed with respect to the DCT transformation of the entire sequence, which corresponds to the near-optimal KLT transform of the data chosen for the experiment.},
  keywords = {discrete cosine transforms;signal representation;local symmetry decomposition;signals representation;finite support sequences;even/odd decomposition;DCT transformation;optimal KLT transform;Image segmentation;Europe;Feature extraction;Standards;Convolution;Lapping;Symmetry;1-D segmentation;signal decomposition;compact representation;compression},
  doi = {10.1109/EUSIPCO.2015.7362530},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104935.pdf},
}
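
Editor's note: the even/odd decomposition being generalized here splits a window around a chosen centre c into mirror-symmetric and antisymmetric parts; moving c shifts energy between the two. A minimal sketch; the exhaustive search over centres is an illustrative stand-in for the paper's optimal placement.

    import numpy as np

    def even_odd_about(x, c):
        # Even/odd parts of x on the largest window symmetric about index c:
        # even(n) = (x[c+n] + x[c-n]) / 2,  odd(n) = (x[c+n] - x[c-n]) / 2.
        x = np.asarray(x, dtype=float)
        r = min(c, len(x) - 1 - c)
        seg = x[c - r:c + r + 1]
        return 0.5 * (seg + seg[::-1]), 0.5 * (seg - seg[::-1])

    def best_even_centre(x):
        # Centre whose even part holds the largest fraction of its window's
        # energy (by Parseval, even and odd energies sum to the window energy).
        def frac(c):
            even, odd = even_odd_about(x, c)
            e = np.sum(even**2) + np.sum(odd**2)
            return np.sum(even**2) / e if e > 0 else 0.0
        return max(range(1, len(x) - 1), key=frac)
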
@InProceedings{7362531,
  author = {J. Brynolfsson and M. Hansson-Sandsten},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Parameter estimation of Gaussian functions using the scaled reassigned spectrogram},
  year = {2015},
  pages = {988-992},
  abstract = {In this paper we suggest an improved algorithm for estimation of parameters detailing Gaussian functions and expand it to handle linear combinations of Gaussian functions. Components in the signal are first detected in the spectrogram, which is calculated using a Gaussian window function. Scaled reassignment is then performed using a set of candidate scaling factors, and the local Renyi entropy is used to measure the concentration of each component under every candidate scaling factor. Exploiting the fact that a Gaussian function may be perfectly reassigned into one single point given the correct scaling, one may identify the parameters detailing the Gaussian function. We evaluate the algorithm on both simulated and real data.},
  keywords = {Gaussian processes;parameter estimation;signal processing;Renyi entropy;Gaussian window function;linear combinations;estimation algorithm;scaled reassigned spectrogram;parameter estimation;Spectrogram;Entropy;Time-frequency analysis;Shape;Signal processing algorithms;Mathematical model;Signal to noise ratio;Reassigned spectrogram;Gaussian functions;Parameter estimation},
  doi = {10.1109/EUSIPCO.2015.7362531},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104645.pdf},
}
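
Editor's note: the concentration measure used to rank the candidate scaling factors is the Rényi entropy of the reassigned spectrogram: lower entropy means energy packed into fewer time-frequency cells. A sketch of just that measure; alpha = 3 is the usual choice in TF analysis, and the reassignment step itself is not shown.

    import numpy as np

    def renyi_entropy(S, alpha=3):
        # H_alpha = log2(sum P**alpha) / (1 - alpha), with P the nonnegative
        # spectrogram normalized to unit mass; smaller means more concentrated.
        P = np.asarray(S, dtype=float)
        P = P / P.sum()
        return np.log2(np.sum(P ** alpha)) / (1 - alpha)

In the scheme the abstract describes, one would compute the reassigned spectrogram for each candidate scaling and keep the scaling that minimizes this entropy.
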
\n
\n\n\n
\n In this paper we suggest an improved algorithm for estimation of parameters detailing Gaussian functions and expand it to handle linear combinations of Gaussian functions. Components in the signal are first detected in the spectrogram, which is calculated using a Gaussian window function. Scaled reassignment is then performed using a set of candidate scaling factors and the local Renyi entropy is used to measure the concentration of each component using every candidate scaling factor. Exploiting the fact that a Gaussian function may be perfectly reassigned into one single point given the correct scaling, one may identify the parameters detailing the Gaussian function. We evaluate the algorithm on both simulated and real data.\n
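The concentration measure used here can be sketched briefly: a lower Rényi entropy of a normalized time-frequency distribution means a more concentrated component. The snippet below scans candidate Gaussian window scalings and keeps the spectrogram with the lowest entropy; it illustrates only the entropy-based selection, not the reassignment step itself, and the window and segment-length choices are our assumptions:

```python
import numpy as np
from scipy.signal import spectrogram

def renyi_entropy(tf, alpha=3.0):
    """Rényi entropy of a non-negative TF distribution; lower values
    indicate a more concentrated distribution."""
    p = tf / np.sum(tf)
    return np.log2(np.sum(p**alpha)) / (1.0 - alpha)

def most_concentrated_scale(x, fs, candidate_stds):
    """Return the candidate Gaussian window std (in samples) whose
    spectrogram has the lowest Rényi entropy."""
    best = None
    for std in candidate_stds:
        nperseg = int(8 * std)
        _, _, S = spectrogram(x, fs, window=('gaussian', std),
                              nperseg=nperseg, noverlap=nperseg // 2)
        h = renyi_entropy(S)
        if best is None or h < best[1]:
            best = (std, h)
    return best
```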
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Two-sided diagonalization of order-three tensors.\n \n \n \n \n\n\n \n Tichavský, P.; Phan, A. H.; and Cichocki, A. S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 993-997, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Two-sidedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362532,\n  author = {P. Tichavský and A. H. Phan and A. S. Cichocki},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Two-sided diagonalization of order-three tensors},\n  year = {2015},\n  pages = {993-997},\n  abstract = {This paper presents algorithms for two-sided diagonalization of order-three tensors. It is another expression for joint non-symmetric approximate diagonalization of a set of square matrices, say T1, ..., TM: we seek two non-orthogonal matrices A and B such that the products A Tm B^T are close to diagonal in a sense. The algorithms can be used for a block tensor decomposition and applied e.g. for tensor deconvolution and feature extraction using the convolutive model.},\n  keywords = {convolution;matrix decomposition;tensors;order-three tensor two-sided diagonalization;square matrix joint nonsymmetric approximate diagonalization;block tensor decomposition;convolutive model;Tensile stress;Signal processing algorithms;Matrix decomposition;Symmetric matrices;Approximation algorithms;Europe;Signal processing;Multilinear models;canonical polyadic decomposition;parallel factor analysis;block-term decomposition;joint matrix diagonalization},\n  doi = {10.1109/EUSIPCO.2015.7362532},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100893.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents algorithms for two-sided diagonalization of order-three tensors. It is another expression for joint non-symmetric approximate diagonalization of a set of square matrices, say T1, ..., TM: we seek two non-orthogonal matrices A and B such that the products A Tm B^T are close to diagonal in a sense. The algorithms can be used for a block tensor decomposition and applied e.g. for tensor deconvolution and feature extraction using the convolutive model.\n
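The objective behind this abstract can be stated compactly: drive the off-diagonal energy of every transformed matrix A Tm B^T toward zero. A small sketch of that cost function (the optimization algorithms themselves are the paper's contribution and are not reproduced here):

```python
import numpy as np

def offdiag_cost(A, B, Ts):
    """Sum of squared off-diagonal entries of A @ T @ B.T over all
    matrices T in Ts, i.e. the quantity a two-sided joint diagonalizer
    drives towards zero."""
    cost = 0.0
    for T in Ts:
        P = A @ T @ B.T
        cost += np.sum(P**2) - np.sum(np.diag(P)**2)
    return cost
```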
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Classifying autoregressive models using dissimilarity measures: A comparative study.\n \n \n \n \n\n\n \n Magnant, C.; Grivel, E.; Giremus, A.; Ratton, L.; and Joseph, B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 998-1002, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ClassifyingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362533,\n  author = {C. Magnant and E. Grivel and A. Giremus and L. Ratton and B. Joseph},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Classifying autoregressive models using dissimilarity measures: A comparative study},\n  year = {2015},\n  pages = {998-1002},\n  abstract = {Autoregressive (AR) models are used in various applications, from speech processing to radar signal analysis. In this paper, our purpose is to extract different model subsets from a set of two or more AR models. The approach operates with the following steps: first, matrices composed of dissimilarity measures between AR-model pairs are created. These can be based on the symmetric Itakura divergence, the symmetric Itakura-Saito divergence, the log-spectral distance or Jeffrey's divergence (JD), which corresponds to the symmetric version of the Kullback-Leibler divergence. These matrices are then transformed to get the same properties as correlation matrices. Eigenvalue decompositions are performed to get the number of AR-model subsets and estimates of their cardinalities. Finally, K-means is used for classification. A comparative study points out the relevance of the JD-based method. Illustrations with sea radar clutter are also provided.},\n  keywords = {autoregressive processes;eigenvalues and eigenfunctions;matrix decomposition;radar clutter;radar signal processing;signal classification;speech processing;autoregressive model classification;dissimilarity measurement;speech processing;radar signal analysis;AR-model pairs;symmetric Itakura-Saito divergence;log-spectral distance;Jeffrey divergence;JD;Kullback-Leibler divergence;correlation matrix;eigenvalue decomposition;signal classification;sea radar clutter;K-means;Eigenvalues and eigenfunctions;Matrix decomposition;Symmetric matrices;Europe;Signal processing;Analytical models;Correlation;Autoregressive model;Jeffrey's divergence;Itakura divergence;Itakura-Saito divergence;log-spectral distance;K-means;classification},\n  doi = {10.1109/EUSIPCO.2015.7362533},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103583.pdf},\n}\n\n
\n
\n\n\n
\n Autoregressive (AR) models are used in various applications, from speech processing to radar signal analysis. In this paper, our purpose is to extract different model subsets from a set of two or more AR models. The approach operates with the following steps: first, matrices composed of dissimilarity measures between AR-model pairs are created. These can be based on the symmetric Itakura divergence, the symmetric Itakura-Saito divergence, the log-spectral distance or Jeffrey's divergence (JD), which corresponds to the symmetric version of the Kullback-Leibler divergence. These matrices are then transformed to get the same properties as correlation matrices. Eigenvalue decompositions are performed to get the number of AR-model subsets and estimates of their cardinalities. Finally, K-means is used for classification. A comparative study points out the relevance of the JD-based method. Illustrations with sea radar clutter are also provided.\n
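As a hedged sketch of the pipeline (pairwise dissimilarity matrix over AR models, then clustering), here is a version using the log-spectral distance with a fixed number of clusters; the paper's eigenvalue-based estimation of the number of subsets and its Jeffrey's-divergence variant are not reproduced, and all function names are ours:

```python
import numpy as np
from scipy.signal import freqz
from sklearn.cluster import KMeans

def ar_psd(a, sigma2=1.0, nfft=256):
    """PSD of an AR model given its denominator polynomial a = [1, a1, ..., ap]."""
    _, h = freqz([1.0], a, worN=nfft)
    return sigma2 * np.abs(h)**2

def log_spectral_distance(a1, a2):
    d = 10.0 * np.log10(ar_psd(a1) / ar_psd(a2))
    return np.sqrt(np.mean(d**2))

def cluster_ar_models(models, n_clusters):
    """Build the pairwise dissimilarity matrix and run K-means on its rows."""
    n = len(models)
    D = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            D[i, j] = D[j, i] = log_spectral_distance(models[i], models[j])
    return KMeans(n_clusters=n_clusters, n_init=10).fit_predict(D)
```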
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bayesian estimation of the multifractality parameter for images via a closed-form Whittle likelihood.\n \n \n \n \n\n\n \n Combrexelle, S.; Wendt, H.; Tourneret, J. -.; Abry, P.; and McLaughlin, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1003-1007, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BayesianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362534,\n  author = {S. Combrexelle and H. Wendt and J. -. Tourneret and P. Abry and S. McLaughlin},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bayesian estimation of the multifractality parameter for images via a closed-form Whittle likelihood},\n  year = {2015},\n  pages = {1003-1007},\n  abstract = {Texture analysis is central in many image processing problems. It can be conducted by studying the local regularity fluctuations of image amplitudes, and multifractal analysis provides a theoretical and practical framework for such a characterization. Yet, due to the non-Gaussian nature and intricate dependence structure of multifractal models, accurate parameter estimation is challenging: standard estimators yield modest performance, and alternative (semi-)parametric estimators exhibit prohibitive computational cost for large images. The present contribution addresses these difficulties and proposes a Bayesian procedure for the estimation of the multifractality parameter c2 for images. It relies on a recently proposed semi-parametric model for the multivariate statistics of log-wavelet leaders and on a Whittle approximation that enables its numerical evaluation. The key result is a closed-form expression for the Whittle likelihood. Numerical simulations indicate the excellent performance of the method, significantly improving estimation performance over standard estimators and computational efficiency over previously proposed Bayesian estimators.},\n  keywords = {approximation theory;Bayes methods;Hankel transforms;image texture;maximum likelihood estimation;wavelet transforms;Bayesian estimation;multifractality parameter estimation;closed-form Whittle likelihood;texture analysis;image processing problems;local regularity fluctuations;image amplitudes;multifractal analysis;dependence structure;multifractal models;semi parametric model;multivariate statistics;log-wavelet leaders;Whittle approximation;numerical simulations;Hankel transform;Bayes methods;Fractals;Estimation;Approximation methods;Transforms;Numerical models;Computational modeling;Multifractal analysis;Bayesian estimation;Hankel transform;Whittle likelihood;Texture analysis},\n  doi = {10.1109/EUSIPCO.2015.7362534},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097467.pdf},\n}\n\n
\n
\n\n\n
\n Texture analysis is central in many image processing problems. It can be conducted by studying the local regularity fluctuations of image amplitudes, and multifractal analysis provides a theoretical and practical framework for such a characterization. Yet, due to the non-Gaussian nature and intricate dependence structure of multifractal models, accurate parameter estimation is challenging: standard estimators yield modest performance, and alternative (semi-)parametric estimators exhibit prohibitive computational cost for large images. The present contribution addresses these difficulties and proposes a Bayesian procedure for the estimation of the multifractality parameter c2 for images. It relies on a recently proposed semi-parametric model for the multivariate statistics of log-wavelet leaders and on a Whittle approximation that enables its numerical evaluation. The key result is a closed-form expression for the Whittle likelihood. Numerical simulations indicate the excellent performance of the method, significantly improving estimation performance over standard estimators and computational efficiency over previously proposed Bayesian estimators.\n
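For readers unfamiliar with the Whittle approximation mentioned here, the generic one-dimensional form is short: periodogram ordinates are treated as independent exponentials with mean equal to the model PSD. The paper's contribution is a closed form of this likelihood for 2-D log-wavelet leaders; the sketch below is only the textbook 1-D version, with `spectral_density` a user-supplied parametric PSD model:

```python
import numpy as np

def whittle_nll(x, spectral_density, theta):
    """Negative Whittle log-likelihood of a zero-mean stationary series x
    under a parametric PSD model S(f; theta), summed over the Fourier
    frequencies (DC excluded): sum of log S(f) + I(f)/S(f)."""
    n = len(x)
    I = np.abs(np.fft.rfft(x))**2 / n      # periodogram
    f = np.fft.rfftfreq(n)                 # frequencies in cycles/sample
    S = spectral_density(f[1:], theta)
    return np.sum(np.log(S) + I[1:] / S)
```

Minimizing `whittle_nll` over `theta` (e.g. with `scipy.optimize.minimize`) yields the Whittle estimator.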
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Crossing-tree partition functions.\n \n \n \n \n\n\n \n Decrouez, G.; and Amblard, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1008-1012, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Crossing-treePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362535,\n  author = {G. Decrouez and P. Amblard},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Crossing-tree partition functions},\n  year = {2015},\n  pages = {1008-1012},\n  abstract = {A new multifractal formalism based on the crossing-tree for H-sssi processes was recently introduced [1, 2]. The crossing-tree performs an ad-hoc decomposition of a signal based on its fluctuations, and thus represents a natural tool for the multifractal analysis of time series. The estimation of the Hausdorff spectrum happens in the context of a multifractal formalism, where the spectrum is obtained from a transform of a partition function. In this contribution, we introduce a new crossing-tree partition function, which differs from the original one presented in [2]. We show numerically that the new partition function improves the stability of the estimation in many cases, compared with the original crossing-tree partition function. Estimation is further compared with state-of-the-art techniques, including wavelet and wavelet leaders.},\n  keywords = {array signal processing;time series;trees (mathematics);crossing-tree partition function;multifractal formalism;H-sssi process;signal ad-hoc decomposition;time series multifractal analysis;Hausdorff spectrum estimation;Fractals;Estimation;Europe;Signal processing;Wavelet transforms;Wavelet analysis;Yttrium;H-sssi processes;crossing tree;multi-fractal formalism;adaptive decomposition;wavelets},\n  doi = {10.1109/EUSIPCO.2015.7362535},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570098733.pdf},\n}\n\n
\n
\n\n\n
\n A new multifractal formalism based on the crossing-tree for H-sssi processes was recently introduced [1, 2]. The crossing-tree performs an ad-hoc decomposition of a signal based on its fluctuations, and thus represents a natural tool for the multifractal analysis of time series. The estimation of the Hausdorff spectrum happens in the context of a multifractal formalism, where the spectrum is obtained from a transform of a partition function. In this contribution, we introduce a new crossing-tree partition function, which differs from the original one presented in [2]. We show numerically that the new partition function improves the stability of the estimation in many cases, compared with the original crossing-tree partition function. Estimation is further compared with state-of-the-art techniques, including wavelet and wavelet leaders.\n
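The crossing decomposition the abstract refers to can be sketched in simplified form: record the times at which the path has moved a fixed distance away from the level of its previous crossing, and form moments of the crossing durations. This is only a flat illustration; the actual crossing tree snaps levels to a dyadic grid and links crossings across scales into a tree:

```python
import numpy as np

def crossing_times(x, delta):
    """Indices where the path has moved a distance delta away from the
    level of its previous crossing (simplified: levels are not snapped
    to a dyadic grid as in the actual crossing tree)."""
    times, level = [0], x[0]
    for n in range(1, len(x)):
        if abs(x[n] - level) >= delta:
            times.append(n)
            level = x[n]
    return np.array(times)

def crossing_partition_function(x, delta, q):
    """q-th order moment of the crossing durations at scale delta."""
    durations = np.diff(crossing_times(x, delta))
    return np.mean(durations.astype(float)**q) if len(durations) else np.nan
```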
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Analysis and simulations of multifractal random walks.\n \n \n \n \n\n\n \n Schmitt, F. G.; and Huang, Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1013-1017, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnalysisPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362536,\n  author = {F. G. Schmitt and Y. Huang},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Analysis and simulations of multifractal random walks},\n  year = {2015},\n  pages = {1013-1017},\n  abstract = {Multifractal time series, characterized by a scale invariance and large fluctuations at all scales, are found in many fields of natural and applied sciences. Here we consider a quite general type of multifractal time series, called multifractal random walk, as non-stationary stochastic processes with intermittent stationary increments. We first quickly recall how such time series can be analyzed and characterized, using structure functions and arbitrary order Hilbert spectral analysis, and then we discuss the simulation approach. Here we review recent works on this topic. We provide a unification of the published works, and discuss how to choose parameters in stochastic simulations in order to simulate a multifractal series with desired properties. In the lognormal framework we provide a new h-μ plane expressing the scale invariant properties of these simulations.},\n  keywords = {log normal distribution;spectral analysis;stochastic processes;time series;multifractal random walk simulation;multifractal random walk analysis;multifractal time series;scale invariance;large fluctuations;nonstationary stochastic process;arbitrary order Hilbert spectral analysis;structure function;stochastic simulation;lognormal framework;h-μ plane;scale invariant properties;Fractals;Time series analysis;Stochastic processes;Spectral analysis;Europe;Analytical models;Scaling;Multifractal random walks;Intermittency;Stochastic modeling;Time series},\n  doi = {10.1109/EUSIPCO.2015.7362536},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104411.pdf},\n}\n\n
\n
\n\n\n
\n Multifractal time series, characterized by a scale invariance and large fluctuations at all scales, are found in many fields of natural and applied sciences. Here we consider a quite general type of multifractal time series, called multifractal random walk, as non-stationary stochastic processes with intermittent stationary increments. We first quickly recall how such time series can be analyzed and characterized, using structure functions and arbitrary order Hilbert spectral analysis, and then we discuss the simulation approach. Here we review recent works on this topic. We provide a unification of the published works, and discuss how to choose parameters in stochastic simulations in order to simulate a multifractal series with desired properties. In the lognormal framework we provide a new h-μ plane expressing the scale invariant properties of these simulations.\n
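The structure-function analysis recalled in this abstract is straightforward to sketch: estimate S_q(tau) = E|x(t+tau) - x(t)|^q and read the scaling exponents zeta(q) off log-log fits; a nonlinear, concave zeta(q) is the signature of multifractality. Function names below are ours:

```python
import numpy as np

def structure_functions(x, taus, qs):
    """S_q(tau) = mean |x(t+tau) - x(t)|^q over integer lags taus."""
    S = np.empty((len(qs), len(taus)))
    for j, tau in enumerate(taus):
        incr = np.abs(x[tau:] - x[:-tau])
        for i, q in enumerate(qs):
            S[i, j] = np.mean(incr**q)
    return S

def scaling_exponents(x, taus, qs):
    """Least-squares slopes zeta(q) of log S_q(tau) versus log tau."""
    S = structure_functions(x, taus, qs)
    logt = np.log(taus)
    return np.array([np.polyfit(logt, np.log(S[i]), 1)[0]
                     for i in range(len(qs))])
```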
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Effectiveness of multiscale fractal dimension for improvement of frame classification rate.\n \n \n \n \n\n\n \n Zaki, M.; Shah, N. J.; and Patil, H. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1018-1022, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"EffectivenessPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362537,\n  author = {M. Zaki and N. J. Shah and H. A. Patil},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Effectiveness of multiscale fractal dimension for improvement of frame classification rate},\n  year = {2015},\n  pages = {1018-1022},\n  abstract = {We propose to use multiscale fractal dimension (FD)-based features for the phoneme classification task at the frame level. During speech production, turbulence is created and hence vortices (generated due to the presence of separated airflow) may travel along the vocal tract and excite vocal tract resonators. This turbulence and, in effect, the embedded features of different phoneme classes can be captured by the invariant property of multiscale FD. To capture complementary information, feature-level fusion of the proposed feature with state-of-the-art Mel Frequency Cepstral Coefficients (MFCC) is attempted and found to be effective. In particular, single-hidden layer neural nets were trained to compute the frame classification rate. The proposed feature reduced the error rate by over 1.6 % relative to MFCC features on the TIMIT database. This is supported by a significant reduction in % EER (i.e., 0.327 % to 4.795 %).},\n  keywords = {cepstral analysis;feature extraction;neural nets;sensor fusion;signal classification;speech processing;multiscale fractal dimension;frame classification rate improvement;multiscale FD-based features;phoneme classification;speech production;turbulence;vocal tract resonators;invariant multiscale FD;feature-level fusion;Mel frequency cepstral coefficients;single-hidden layer neural nets;error rate reduction;MFCC features;TIMIT database;Fractals;Speech;Mel frequency cepstral coefficient;Production;Databases;Neural networks;Europe;fractal dimension;multiscale analysis;phoneme-based frame classification;nonlinearity},\n  doi = {10.1109/EUSIPCO.2015.7362537},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099291.pdf},\n}\n\n
\n
\n\n\n
\n We propose to use multiscale fractal dimension (FD)-based features for the phoneme classification task at the frame level. During speech production, turbulence is created and hence vortices (generated due to the presence of separated airflow) may travel along the vocal tract and excite vocal tract resonators. This turbulence and, in effect, the embedded features of different phoneme classes can be captured by the invariant property of multiscale FD. To capture complementary information, feature-level fusion of the proposed feature with state-of-the-art Mel Frequency Cepstral Coefficients (MFCC) is attempted and found to be effective. In particular, single-hidden layer neural nets were trained to compute the frame classification rate. The proposed feature reduced the error rate by over 1.6 % relative to MFCC features on the TIMIT database. This is supported by a significant reduction in % EER (i.e., 0.327 % to 4.795 %).\n
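The abstract does not specify the multiscale FD estimator; as one concrete example of a frame-level fractal-dimension feature, here is Higuchi's classic method (our choice for illustration, which may differ from the features actually used in the paper):

```python
import numpy as np

def higuchi_fd(x, kmax=8):
    """Higuchi fractal dimension of a 1-D frame: curve lengths L(k) at
    decimation steps k scale as k**(-D); D is the fitted slope."""
    n = len(x)
    lk = []
    for k in range(1, kmax + 1):
        lengths = []
        for m in range(k):
            idx = np.arange(m, n, k)
            if len(idx) < 2:
                continue
            dist = np.sum(np.abs(np.diff(x[idx])))
            # Higuchi normalisation of the decimated curve length
            lengths.append(dist * (n - 1) / ((len(idx) - 1) * k) / k)
        lk.append(np.mean(lengths))
    # slope of log L(k) vs log(1/k) estimates the dimension D
    return np.polyfit(np.log(1.0 / np.arange(1, kmax + 1)), np.log(lk), 1)[0]
```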
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed clustering algorithms in group-based ad hoc networks.\n \n \n \n \n\n\n \n Massin, R.; Le Martret, C. J.; and Ciblat, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1023-1027, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362538,\n  author = {R. Massin and C. J. {Le Martret} and P. Ciblat},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed clustering algorithms in group-based ad hoc networks},\n  year = {2015},\n  pages = {1023-1027},\n  abstract = {For dense ad hoc networks, clustering is an appropriate strategy for improving scalability. Moreover, dense networks such as public safety or military networks are also structured through a hierarchical organization via operational groups. This organization usually impacts both the mobility of nodes, which move in groups, and the data flow, since the traffic is mainly intra-group rather than inter-group. In this work, we extend two distributed clustering algorithms, GDMAC and VOTE, by taking into account the group structure. Our simulations of dense ad hoc networks show that our extensions lead to a lower end-to-end communication delay and offer better stability under mobility.},\n  keywords = {ad hoc networks;distributed clustering algorithms;group-based ad hoc networks;dense networks;public safety;military networks;GDMAC;VOTE;end-to-end communication;Clustering algorithms;Ad hoc networks;Signal processing algorithms;Stability analysis;Protocols;Measurement;Europe;Ad hoc network;distributed clustering;operational group},\n  doi = {10.1109/EUSIPCO.2015.7362538},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095355.pdf},\n}\n\n
\n
\n\n\n
\n For dense ad hoc networks, clustering is an appropriate strategy for improving scalability. Moreover, dense networks such as public safety or military networks are also structured through a hierarchical organization via operational groups. This organization usually impacts both the mobility of nodes, which move in groups, and the data flow, since the traffic is mainly intra-group rather than inter-group. In this work, we extend two distributed clustering algorithms, GDMAC and VOTE, by taking into account the group structure. Our simulations of dense ad hoc networks show that our extensions lead to a lower end-to-end communication delay and offer better stability under mobility.\n
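To make the group-aware idea tangible, here is a toy one-shot clusterhead election in which nodes prefer heads from their own operational group. This is purely illustrative, under our own simplifying assumptions (distinct weights, static topology); it is not the GDMAC or VOTE protocol:

```python
def elect_clusterheads(nodes, neighbors, weight, group):
    """Toy one-shot election: nodes whose weight is a local maximum become
    clusterheads; every other node joins a neighboring head, preferring
    heads from its own operational group."""
    heads = {u for u in nodes
             if all(weight[v] < weight[u] for v in neighbors[u])}
    affiliation = {}
    for u in nodes:
        cand = [v for v in neighbors[u] if v in heads]
        if u in heads or not cand:
            affiliation[u] = u          # head, or orphan becomes its own head
        else:
            # same-group heads win; ties broken by the heaviest head
            affiliation[u] = max(cand, key=lambda v: (group[v] == group[u],
                                                      weight[v]))
    return heads, affiliation
```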
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On channel selection for energy-constrained rateless-coded D2D communications.\n \n \n \n \n\n\n \n Maghsudi, S.; and Stańczak, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1028-1032, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362539,\n  author = {S. Maghsudi and S. Stańczak},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {On channel selection for energy-constrained rateless-coded D2D communications},\n  year = {2015},\n  pages = {1028-1032},\n  abstract = {We consider a distributed channel selection problem for device-to-device (D2D) communications underlaying conventional cellular networks. In our model, underlaying devices exploit the possibly-idle licensed cellular spectrum in order to establish direct communications links, and transmit using rateless coding under energy constraint. While the quality of each channel is assumed to be stochastic, the availability is non-stochastic (adversarial). Moreover, cellular channels are idle only for some finite time. As acquiring prior information about channel quality and/or availability yields excessive cost, we assume that D2D devices do not possess any prior information about channels. Device pairs face the problem of selecting a suitable channel so that a successful data delivery under the energy constraint is guaranteed. We model this problem as a multi-armed bandit game with mortal arms, and provide an algorithmic solution.},\n  keywords = {cellular radio;channel coding;game theory;radio links;wireless channels;energy-constrained rateless-coded D2D communication;distributed channel selection problem;device-to-device communication;cellular network;possibly-idle licensed cellular spectrum;direct communications link;cellular channel;data delivery;multiarmed bandit game;Encoding;Games;Random variables;Transmitters;Receivers;Europe;Signal processing},\n  doi = {10.1109/EUSIPCO.2015.7362539},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096627.pdf},\n}\n\n
\n
\n\n\n
\n We consider a distributed channel selection problem for device-to-device (D2D) communications underlaying conventional cellular networks. In our model, underlaying devices exploit the possibly-idle licensed cellular spectrum in order to establish direct communications links, and transmit using rateless coding under energy constraint. While the quality of each channel is assumed to be stochastic, the availability is non-stochastic (adversarial). Moreover, cellular channels are idle only for some finite time. As acquiring prior information about channel quality and/or availability yields excessive cost, we assume that D2D devices do not possess any prior information about channels. Device pairs face the problem of selecting a suitable channel so that a successful data delivery under the energy constraint is guaranteed. We model this problem as a multi-armed bandit game with mortal arms, and provide an algorithmic solution.\n
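A toy stand-in for the bandit formulation in this abstract is an epsilon-greedy policy over "mortal" channels with known expiry times; the paper's actual algorithm and its guarantees are not reproduced, and all names here are ours:

```python
import random

def select_channel(stats, expiry, t, epsilon=0.1):
    """Epsilon-greedy pick among channels still alive at time t.
    stats[c] = [pulls, mean_reward]; expiry[c] is when the licensed
    channel stops being idle (the arm 'dies')."""
    alive = [c for c in expiry if expiry[c] > t]
    if not alive:
        return None
    if random.random() < epsilon:
        return random.choice(alive)
    # unexplored channels get priority via an infinite score
    return max(alive, key=lambda c: stats[c][1] if stats[c][0] else float('inf'))

def update(stats, c, reward):
    """Running-mean update after observing one reward on channel c."""
    stats[c][0] += 1
    stats[c][1] += (reward - stats[c][1]) / stats[c][0]
```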
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Power modulation: Application to inter-cell interference coordination.\n \n \n \n \n\n\n \n Varma, V. S.; Lasaulce, S.; Zhang, C.; and Visoz, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1033-1037, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PowerPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362540,\n  author = {V. S. Varma and S. Lasaulce and C. Zhang and R. Visoz},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Power modulation: Application to inter-cell interference coordination},\n  year = {2015},\n  pages = {1033-1037},\n  abstract = {In this work, a novel technique which allows every transmitter in an interference network to have global channel state information (CSI) is proposed. The key feature of the proposed technique is that each transmitter acquires global CSI purely through the available feedback channel (i.e., a feedback of the received signal power). In the first step of the proposed technique, each transmitter uses several observations provided by the feedback channel to learn the channel gains perceived by its intended receiver. Secondly, this information is quantized, modulated, and transmitted to the other transmitters through the power levels used by the transmitters; the latter are indirectly observed through the received signal power. Hence, the interference is used as an implicit communication channel through which local CSI is exchanged. Once global CSI is acquired, it can be used to optimize any utility function which depends on it.},\n  keywords = {feedback;interference (signal);modulation;radio receivers;radio transmitters;telecommunication channels;power modulation;inter-cell interference coordination;transmitter;interference network;global channel state information;CSI;feedback channel;received signal power;implicit communication channel;utility function;Transmitters;Interference;Receivers;Signal to noise ratio;Estimation;Training},\n  doi = {10.1109/EUSIPCO.2015.7362540},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097375.pdf},\n}\n\n
\n
\n\n\n
\n In this work, a novel technique which allows every transmitter in an interference network to have global channel state information (CSI) is proposed. The key feature of the proposed technique is that each transmitter acquires global CSI purely through the available feedback channel (i.e., a feedback of the received signal power). In the first step of the proposed technique, each transmitter uses several observations provided by the feedback channel to learn the channel gains perceived by its intended receiver. Secondly, this information is quantized, modulated, and transmitted to the other transmitters through the power levels used by the transmitters; the latter are indirectly observed through the received signal power. Hence, the interference is used as an implicit communication channel through which local CSI is exchanged. Once global CSI is acquired, it can be used to optimize any utility function which depends on it.\n
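The core idea, encoding quantized channel gains into transmit power levels that other transmitters read back off the received power, can be sketched in a few lines. The four-level alphabet and the base-4 digit mapping below are our assumptions for illustration, not the paper's design:

```python
import numpy as np

POWER_LEVELS = np.array([0.25, 0.5, 0.75, 1.0])   # hypothetical alphabet (W)

def encode_gain(g, n_symbols=4):
    """Quantize a gain g in [0, 1) to base-4 digits, one power level
    (one transmission slot) per digit."""
    digits, frac = [], g
    for _ in range(n_symbols):
        frac *= len(POWER_LEVELS)
        d = min(int(frac), len(POWER_LEVELS) - 1)
        digits.append(d)
        frac -= d
    return POWER_LEVELS[digits]

def decode_gain(received_powers, link_gain):
    """Recover the digits from p = link_gain * tx_power by nearest-level
    matching, then rebuild the quantized gain."""
    tx = np.asarray(received_powers) / link_gain
    digits = [int(np.argmin(np.abs(POWER_LEVELS - p))) for p in tx]
    g = 0.0
    for d in reversed(digits):
        g = (g + d) / len(POWER_LEVELS)
    return g
```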
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Overhead-aware distributed CSI selection in the MIMO interference channel.\n \n \n \n \n\n\n \n Mochaourab, R.; Brandt, R.; Ghauch, H.; and Bengtsson, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1038-1042, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Overhead-awarePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362541,\n  author = {R. Mochaourab and R. Brandt and H. Ghauch and M. Bengtsson},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Overhead-aware distributed CSI selection in the MIMO interference channel},\n  year = {2015},\n  pages = {1038-1042},\n  abstract = {We consider a MIMO interference channel in which the transmitters and receivers operate in frequency-division duplex mode. In this setting, interference management through coordinated transceiver design necessitates channel state information at the transmitters (CSI-T). The acquisition of CSI-T is done through feedback from the receivers, which entails a loss in degrees of freedom due to training and feedback. This loss increases with the amount of CSI-T. In this work, after formulating an overhead model for CSI acquisition at the transmitters, we propose a distributed mechanism to find for each transmitter a subset of the complete CSI, which is used to perform interference management. The mechanism is based on many-to-many stable matching. We prove the existence of a stable matching and exploit an algorithm to reach it. Simulation results show performance improvement compared to full and minimal CSI-T.},\n  keywords = {MIMO communication;radio transceivers;radiofrequency interference;telecommunication network management;wireless channels;overhead-aware distributed CSI selection;MIMO interference channel;receiver;frequency-division duplex mode;interference management;coordinated transceiver design;channel state information at the transmitter;CSI-T;distributed mechanism;many-to-many stable matching;Receivers;Training;Radio transmitters;Interference;Channel estimation;Signal processing},\n  doi = {10.1109/EUSIPCO.2015.7362541},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103793.pdf},\n}\n\n
\n
\n\n\n
\n We consider a MIMO interference channel in which the transmitters and receivers operate in frequency-division duplex mode. In this setting, interference management through coordinated transceiver design necessitates channel state information at the transmitters (CSI-T). The acquisition of CSI-T is done through feedback from the receivers, which entails a loss in degrees of freedom due to training and feedback. This loss increases with the amount of CSI-T. In this work, after formulating an overhead model for CSI acquisition at the transmitters, we propose a distributed mechanism to find for each transmitter a subset of the complete CSI, which is used to perform interference management. The mechanism is based on many-to-many stable matching. We prove the existence of a stable matching and exploit an algorithm to reach it. Simulation results show performance improvement compared to full and minimal CSI-T.\n
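For orientation, a generic many-to-many deferred-acceptance sketch with quotas on both sides is given below; the paper's matching market (preferences built from overhead-aware utilities) and its exact algorithm may differ, and complete preference lists are assumed:

```python
def many_to_many_match(tx_prefs, rx_prefs, tx_quota, rx_quota):
    """Transmitter-proposing deferred acceptance with quotas on both
    sides; returns a pairwise-stable set of (tx, rx) links."""
    matches = set()
    proposals = {t: iter(p) for t, p in tx_prefs.items()}
    free = list(tx_prefs)
    while free:
        t = free.pop()
        if sum(1 for m in matches if m[0] == t) >= tx_quota[t]:
            continue
        r = next(proposals[t], None)
        if r is None:
            continue                      # t has exhausted its list
        held = [m for m in matches if m[1] == r]
        if len(held) < rx_quota[r]:
            matches.add((t, r))
        else:
            worst = max(held, key=lambda m: rx_prefs[r].index(m[0]))
            if rx_prefs[r].index(t) < rx_prefs[r].index(worst[0]):
                matches.discard(worst)
                matches.add((t, r))
                free.append(worst[0])     # displaced tx resumes proposing
        free.append(t)                    # t may still have spare quota
    return matches
```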
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A computationally efficient implementation of fictitious play in a distributed setting.\n \n \n \n \n\n\n \n Swenson, B.; Kar, S.; and Xavier, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1043-1047, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362542,\n  author = {B. Swenson and S. Kar and J. Xavier},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A computationally efficient implementation of fictitious play in a distributed setting},\n  year = {2015},\n  pages = {1043-1047},\n  abstract = {The paper deals with distributed learning of Nash equilibria in games with a large number of players. The classical fictitious play (FP) algorithm is impractical in large games due to demanding communication requirements and high computational complexity. A variant of FP is presented that aims to mitigate both issues. Complexity is mitigated by use of a computationally efficient Monte-Carlo based best response rule. Demanding communication problems are mitigated by implementing the algorithm in a network-based distributed setting, in which player-to-player communication is restricted to local subsets of neighboring players as determined by a (possibly sparse, but connected) preassigned communication graph. Results are demonstrated via a simulation example.},\n  keywords = {computational complexity;game theory;graph theory;learning (artificial intelligence);Monte Carlo methods;distributed learning;Nash equilibria;classical fictitious play algorithm;communication requirements;computational complexity;computationally efficient Monte-Carlo based best response rule;network-based distributed setting;player-to-player communication;preassigned communication graph;Games;Signal processing algorithms;Heuristic algorithms;Computational complexity;Europe;Signal processing;Games;Distributed Learning;Fictitious Play;Nash Equilibrium},\n  doi = {10.1109/EUSIPCO.2015.7362542},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103827.pdf},\n}\n\n
\n
\n\n\n
\n The paper deals with distributed learning of Nash equilibria in games with a large number of players. The classical fictitious play (FP) algorithm is impractical in large games due to demanding communication requirements and high computational complexity. A variant of FP is presented that aims to mitigate both issues. Complexity is mitigated by use of a computationally efficient Monte-Carlo based best response rule. Demanding communication problems are mitigated by implementing the algorithm in a network-based distributed setting, in which player-to-player communication is restricted to local subsets of neighboring players as determined by a (possibly sparse, but connected) preassigned communication graph. Results are demonstrated via a simulation example.\n
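The Monte-Carlo best-response rule mentioned in this abstract is easy to sketch: sample opponent action profiles from their empirical mixed strategies and pick the own action with the highest average utility. The networked, gossip-style part of the algorithm is not reproduced, and the interface below is our assumption:

```python
import numpy as np

def mc_best_response(actions, empirical, utility, n_samples=100, rng=None):
    """Monte-Carlo best response for fictitious play.  `empirical` maps
    each opponent to a probability vector over its action indices;
    `utility(a, profile)` scores own action a against a sampled profile."""
    rng = rng or np.random.default_rng()
    profiles = [{j: rng.choice(len(p), p=p) for j, p in empirical.items()}
                for _ in range(n_samples)]
    avg = [np.mean([utility(a, prof) for prof in profiles]) for a in actions]
    return actions[int(np.argmax(avg))]
```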
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Binaural coherent-to-diffuse-ratio estimation for dereverberation using an ITD model.\n \n \n \n \n\n\n \n Zheng, C.; Schwarz, A.; Kellermann, W.; and Li, X.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1048-1052, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BinauralPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362543,\n  author = {C. Zheng and A. Schwarz and W. Kellermann and X. Li},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Binaural coherent-to-diffuse-ratio estimation for dereverberation using an ITD model},\n  year = {2015},\n  pages = {1048-1052},\n  abstract = {Most previously proposed dual-channel coherent-to-diffuse-ratio (CDR) estimators are based on a free-field model. When used for binaural signals, e.g., for dereverberation in binaural hearing aids, their performance may degrade due to the influence of the head, even when the direction-of-arrival of the desired speaker is exactly known. In this paper, the head shadowing effect is taken into account for CDR estimation by using a simplified model for the frequency-dependent interaural time difference and a model for the binaural coherence of the diffuse noise field. Evaluation of CDR-based dereverberation with measured binaural impulse responses indicates that the proposed binaural CDR estimators can improve PESQ scores.},\n  keywords = {estimation theory;reverberation;speech intelligibility;speech processing;transient response;binaural coherent-to-diffuse-ratio estimation;dereverberation;ITD model;dual-channel coherent-to-diffuse ratio estimator;CDR estimator;free-field model;binaural signal;binaural hearing aid;direction-of-arrival estimation;head shadowing effect;frequency-dependent interaural time difference;binaural coherence model;binaural impulse response;PESQ score;speech quality;speech intelligibility;Coherence;Microphones;Speech;Signal processing;Ear;Spatial coherence;Europe;Binaural speech dereverberation;interaural time difference;coherent-to-diffuse-ratio},\n  doi = {10.1109/EUSIPCO.2015.7362543},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103151.pdf},\n}\n\n
\n
\n\n\n
\n Most previously proposed dual-channel coherent-to-diffuse-ratio (CDR) estimators are based on a free-field model. When used for binaural signals, e.g., for dereverberation in binaural hearing aids, their performance may degrade due to the influence of the head, even when the direction-of-arrival of the desired speaker is exactly known. In this paper, the head shadowing effect is taken into account for CDR estimation by using a simplified model for the frequency-dependent interaural time difference and a model for the binaural coherence of the diffuse noise field. Evaluation of CDR-based dereverberation with measured binaural impulse responses indicates that the proposed binaural CDR estimators can improve PESQ scores.\n
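For orientation, the free-field baseline that this paper improves on follows from the coherence mixing relation Gamma_x = (CDR * Gamma_s + Gamma_n)/(CDR + 1), solved for CDR. The paper's contribution is to replace the plane-wave Gamma_s and sinc-shaped Gamma_n with head-aware ITD and binaural-coherence models; the sketch below is only the free-field version, with window lengths and estimator details chosen by us:

```python
import numpy as np
from scipy.signal import csd, welch

def cdr_free_field(x1, x2, fs, d, tdoa, c=343.0, nperseg=512):
    """Free-field CDR per frequency: CDR = (Gn - Gx) / (Gx - Gs), with
    Gs = exp(j*2*pi*f*tdoa) for a plane wave with known TDOA and
    Gn = sinc(2*f*d/c) for an ideal diffuse field (mic spacing d)."""
    f, Pxy = csd(x1, x2, fs=fs, nperseg=nperseg)
    _, Pxx = welch(x1, fs=fs, nperseg=nperseg)
    _, Pyy = welch(x2, fs=fs, nperseg=nperseg)
    Gx = Pxy / np.sqrt(Pxx * Pyy)          # estimated complex coherence
    Gs = np.exp(2j * np.pi * f * tdoa)     # coherent (direct) component
    Gn = np.sinc(2 * f * d / c)            # diffuse component
    return f, np.maximum(np.real((Gn - Gx) / (Gx - Gs)), 0.0)
```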
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n High-frequency tonal components restoration in low-bitrate audio coding using multiple spectral translations.\n \n \n \n\n\n \n Samaali, I.; Mahé, G.; and Alouane, M. T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1053-1057, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362544,\n  author = {I. Samaali and G. Mahé and M. T. Alouane},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {High-frequency tonal components restoration in low-bitrate audio coding using multiple spectral translations},\n  year = {2015},\n  pages = {1053-1057},\n  abstract = {At reduced bitrates, the audio compression affects high frequency tonal components of signals, which results in a roughness phenomenon. Audio coders are limited in the reconstruction of the high-frequency spectrum mainly because of the potential unpredictability of the structure of the latter, as well as imprecise indicators of tonal-to-noise ratio. We propose a technique for high-frequency tone restoration, based on the correction of the tonal positions in the decoded signal, using a small set of information transmitted through an auxiliary channel at a very low bit-rate (typically < 2 kbps). The proposed approach is evaluated using objective measures of perceptual roughness. The experimental results with HE-AAC coding at 16 kbps exhibit an efficient preservation of the harmonicity and a significant improvement of the audio quality.},\n  keywords = {audio coding;channel coding;data compression;decoding;spectral analysis;multiple spectral translations;low-bitrate audio coding;high-frequency tonal component restoration;audio compression;high-frequency spectrum reconstruction;high-frequency tone restoration;signal decoding;auxiliary channel;information transmission;very low bit-rate;perceptual roughness;HE-AAC coding;harmonicity preservation;audio quality improvement;Decoding;Encoding;Harmonic analysis;Bit rate;Europe;Bandwidth},\n  doi = {10.1109/EUSIPCO.2015.7362544},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n At reduced bitrates, the audio compression affects high frequency tonal components of signals, which results in a roughness phenomenon. Audio coders are limited in the reconstruction of the high-frequency spectrum mainly because of the potential unpredictability of the structure of the latter, as well as imprecise indicators of tonal-to-noise ratio. We propose a technique for high-frequency tone restoration, based on the correction of the tonal positions in the decoded signal, using a small set of information transmitted through an auxiliary channel at a very low bit-rate (typically < 2 kbps). The proposed approach is evaluated using objective measures of perceptual roughness. The experimental results with HE-AAC coding at 16 kbps exhibit an efficient preservation of the harmonicity and a significant improvement of the audio quality.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bayesian suppression of memoryless nonlinear audio distortion.\n \n \n \n \n\n\n \n Carvalho, H. T.; Ávila, F. R.; and Biscainho, L. W. P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1058-1062, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BayesianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362545,\n  author = {H. T. Carvalho and F. R. Ávila and L. W. P. Biscainho},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bayesian suppression of memoryless nonlinear audio distortion},\n  year = {2015},\n  pages = {1058-1062},\n  abstract = {Even if nonlinear distortion may be deliberately applied to audio signals for esthetic or technical reasons, it is common to hear annoying defects in accidentally saturated or amateurishly processed audio - which calls for some means to automatically undo the impairment. This paper proposes an algorithm to blindly identify the nonlinear distortion suffered by an audio signal and reconstruct its original form. Designed to deal with memoryless impairments, the model adopted for the nonlinear distortion is a curve composed of an invertible sequence of linear segments, capable of following the typical shape of compressed audio, and whose parameters are easily interpretable and thus constrainable. The solution builds on the posterior statistical distribution of the curve parameters given the degraded signal, and yields perceptually impressive results for real signals distorted by arbitrary curves.},\n  keywords = {audio signal processing;Bayes methods;compressed sensing;nonlinear distortion;signal reconstruction;statistical distributions;Bayesian suppression;memoryless nonlinear audio distortion;audio signals;nonlinear distortion;linear segments;compressed audio;statistical distribution;Nonlinear distortion;Signal processing algorithms;Europe;Shape;White noise;Nonlinear distortion;Bayesian signal processing;blind system identification;audio processing},\n  doi = {10.1109/EUSIPCO.2015.7362545},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103841.pdf},\n}\n\n
\n
\n\n\n
\n Even if nonlinear distortion may be deliberately applied to audio signals for esthetic or technical reasons, it is common to hear annoying defects in accidentally saturated or amateurishly processed audio - which calls for some means to automatically undo the impairment. This paper proposes an algorithm to blindly identify the nonlinear distortion suffered by an audio signal and reconstruct its original form. Designed to deal with memoryless impairments, the model adopted for the nonlinear distortion is a curve composed of an invertible sequence of linear segments, capable of following the typical shape of compressed audio, and whose parameters are easily interpretable and thus constrainable. The solution builds on the posterior statistical distribution of the curve parameters given the degraded signal, and yields perceptually impressive results for real signals distorted by arbitrary curves.\n
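The inversion step is transparent once the piecewise-linear curve is known: an increasing, invertible curve through a set of knots is undone by swapping the roles of input and output in linear interpolation. The paper's actual contribution is the blind Bayesian estimation of that curve; in the sketch below the knots are assumed known and are hypothetical:

```python
import numpy as np

def undo_piecewise_linear(y, knots_x, knots_y):
    """Invert a known increasing piecewise-linear memoryless distortion
    by interpolating with the knot roles swapped."""
    return np.interp(y, knots_y, knots_x)

# example: a three-segment soft saturation (hypothetical knots)
knots_x = np.array([-1.0, -0.5, 0.5, 1.0])
knots_y = np.array([-0.7, -0.45, 0.45, 0.7])
x = np.linspace(-1.0, 1.0, 5)
y = np.interp(x, knots_x, knots_y)                   # apply distortion
x_rec = undo_piecewise_linear(y, knots_x, knots_y)   # x_rec == x
```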
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An extended reverberation decay tail metric as a measure of perceived late reverberation.\n \n \n \n \n\n\n \n Javed, H. A.; and Naylor, P. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1063-1067, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362546,\n  author = {H. A. Javed and P. A. Naylor},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An extended reverberation decay tail metric as a measure of perceived late reverberation},\n  year = {2015},\n  pages = {1063-1067},\n  abstract = {In this paper the development and evaluation of an extended Reverberation Decay Tail (RDT) metric is described. The signal-based metric predicts the perceived impact of reverberation on speech, by identifying and characterising energy decay characteristics in the signal Bark spectrum. In comparison with a previous metric, the new metric is extended to operate on wideband speech and incorporates an improved perceptual model and decay curve detection scheme. Furthermore, contributions of this work include experimental testing and validation of the metric on reverberant speech. The tests conducted show positive correlation with objective measures such as C50 as well as with subjective listening test scores. Potential applications of the measure include use as an evaluation tool for dereverberation research.},\n  keywords = {reverberation;signal detection;speech processing;extended reverberation decay tail metric;perceived late reverberation measurement;RDT metric;signal-based metric;speech reverberation;energy decay characteristics;signal Bark spectrum;improved perceptual model;decay curve detection scheme;Reverberation;Speech;Europe;Signal processing;Wideband;Distortion measurement;RDT;late reverberation;perceptual reverberation;speech quality measure},\n  doi = {10.1109/EUSIPCO.2015.7362546},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105067.pdf},\n}\n\n
\n
\n\n\n
\n In this paper the development and evaluation of an extended Reverberation Decay Tail (RDT) metric is described. The signal-based metric predicts the perceived impact of reverberation on speech, by identifying and characterising energy decay characteristics in the signal Bark spectrum. In comparison with a previous metric, the new metric is extended to operate on wideband speech and incorporates an improved perceptual model and decay curve detection scheme. Furthermore, contributions of this work include experimental testing and validation of the metric on reverberant speech. The tests conducted show positive correlation with objective measures such as C50 as well as with subjective listening test scores. Potential applications of the measure include use as an evaluation tool for dereverberation research.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A linearization system for parametric array loudspeakers using the parallel cascade Volterra filter.\n \n \n \n \n\n\n \n Hatano, Y.; Shi, C.; Kinoshita, S.; and Kajikawa, Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1068-1072, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362547,\n  author = {Y. Hatano and C. Shi and S. Kinoshita and Y. Kajikawa},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A linearization system for parametric array loudspeakers using the parallel cascade Volterra filter},\n  year = {2015},\n  pages = {1068-1072},\n  abstract = {The parametric array loudspeaker (PAL) is well known for its ability to radiate a narrow sound beam from a relatively small ultrasonic emitter. Nonlinear distortions commonly occur in the self-demodulated sound of the PAL. Based on the Volterra filter modeling the self-demodulation process of the PAL, a linearization system can be developed for the PAL. However, the computational complexity of the Volterra filter increases dramatically with the tap length. In this paper, the parallel cascade structure is adopted to implement the Volterra filter. The experiment results demonstrate that the computational complexity of the Volterra filter is significantly reduced by using the parallel cascade structure, and based on such an implementation of the Volterra filter, the performance of the linearization system is not compromised.},\n  keywords = {computational complexity;linearisation techniques;loudspeakers;nonlinear filters;ultrasonic equipment;parametric array loudspeakers;linearization system;parallel cascade Volterra filter;narrow sound beam;ultrasonic emitter;nonlinear distortions;self-demodulated sound;computational complexity;parallel cascade structure;Kernel;Eigenvalues and eigenfunctions;Computational complexity;Harmonic distortion;Acoustics;Computational modeling;Volterra filter;Parallel cascade structure;Parametric array loudspeaker;Nonlinear distortion},\n  doi = {10.1109/EUSIPCO.2015.7362547},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101439.pdf},\n}\n\n
\n
\n\n\n
\n The parametric array loudspeaker (PAL) is well known for its ability to radiate a narrow sound beam from a relatively small ultrasonic emitter. Nonlinear distortions commonly occur in the self-demodulated sound of the PAL. Based on the Volterra filter modeling the self-demodulation process of the PAL, a linearization system can be developed for the PAL. However, the computational complexity of the Volterra filter increases dramatically with the tap length. In this paper, the parallel cascade structure is adopted to implement the Volterra filter. The experiment results demonstrate that the computational complexity of the Volterra filter is significantly reduced by using the parallel cascade structure, and based on such an implementation of the Volterra filter, the performance of the linearization system is not compromised.\n
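The parallel-cascade idea for a quadratic (second-order) Volterra term can be sketched directly: eigendecompose the symmetric kernel so the O(N^2) quadratic form becomes a bank of linear FIR filters followed by squaring, truncatable to the dominant branches. This illustrates the structure only, not the PAL self-demodulation model or the kernel identification:

```python
import numpy as np

def volterra2_direct(x, H2):
    """Direct quadratic Volterra term: y[n] = frame^T H2 frame, O(N^2)."""
    N = H2.shape[0]
    xp = np.concatenate([np.zeros(N - 1), x])
    y = np.empty(len(x))
    for n in range(len(x)):
        frame = xp[n:n + N][::-1]          # [x[n], x[n-1], ..., x[n-N+1]]
        y[n] = frame @ H2 @ frame
    return y

def volterra2_parallel_cascade(x, H2, rank=None):
    """Same output via H2 = sum_i lam_i h_i h_i^T: each branch is a
    linear FIR filter followed by a squarer; keeping only the strongest
    `rank` branches gives a cheap low-rank approximation."""
    lam, V = np.linalg.eigh(H2)            # H2 assumed symmetric
    order = np.argsort(-np.abs(lam))[:rank]
    y = np.zeros(len(x))
    for i in order:
        branch = np.convolve(x, V[:, i])[:len(x)]
        y += lam[i] * branch**2
    return y
```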
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A generalized method for the derivation of non-linear state-space models from circuit schematics.\n \n \n \n \n\n\n \n Holters, M.; and Zölzer, U.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1073-1077, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362548,\n  author = {M. Holters and U. Zölzer},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A generalized method for the derivation of non-linear state-space models from circuit schematics},\n  year = {2015},\n  pages = {1073-1077},\n  abstract = {Digital emulation of analog circuits for musical audio processing, like synthesizers, guitar effect pedals, or vintage amplifiers, is an ongoing research topic. David Yeh proposed to use the nodal DK method to derive a non-linear state-space system from a circuit schematic in a very systematic way. However, this approach has some drawbacks and limitations, especially with respect to the modeling of individual circuit elements. Therefore, in this paper, we present an alternative that is more flexible than the nodal DK method and hopefully allows for easier integration of almost arbitrary element models. This flexibility and generality in our opinion outweighs the relatively small cost associated with it in terms of increased matrix sizes. We therefore believe the proposed method to be a useful tool for circuit simulation.},\n  keywords = {audio signal processing;matrix algebra;state-space methods;current-source nonlinear state-space model derivation;circuit schematics;analog circuit digital emulation;musical audio processing;nodal DK method;arbitrary element model;matrix sizes;Mathematical model;Operational amplifiers;Europe;Integrated circuit modeling;Capacitors;Analog circuits;circuit analysis;circuit simulation;virtual analog modeling;state-space model},\n  doi = {10.1109/EUSIPCO.2015.7362548},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103545.pdf},\n}\n\n
\n
\n\n\n
\n Digital emulation of analog circuits for musical audio processing, like synthesizers, guitar effect pedals, or vintage amplifiers, is an ongoing research topic. David Yeh proposed to use the nodal DK method to derive a non-linear state-space system from a circuit schematic in a very systematic way. However, this approach has some drawbacks and limitations, especially with respect to the modeling of individual circuit elements. Therefore, in this paper, we present an alternative that is more flexible than the nodal DK method and hopefully allows for easier integration of almost arbitrary element models. This flexibility and generality in our opinion outweighs the relatively small cost associated with it in terms of increased matrix sizes. We therefore believe the proposed method to be a useful tool for circuit simulation.\n
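As a minimal example of the kind of non-linear state-space model such derivations produce, consider the textbook one-capacitor diode clipper, C dv/dt = (u - v)/R - 2*Is*sinh(v/Vt); backward-Euler discretization gives one implicit scalar equation per sample, solved here by Newton iteration. This is a hand-derived illustration with generic component values, not the paper's automated procedure:

```python
import numpy as np

def diode_clipper(u, fs, R=2.2e3, C=10e-9, Is=1e-12, Vt=25.85e-3):
    """Backward-Euler simulation of C*dv/dt = (u - v)/R - 2*Is*sinh(v/Vt);
    u is the input voltage sequence, v the capacitor (output) voltage."""
    u = np.asarray(u, dtype=float)
    T, v = 1.0 / fs, 0.0
    out = np.empty_like(u)
    for n, un in enumerate(u):
        x = v                                   # Newton initial guess
        for _ in range(20):
            g = C * (x - v) / T - (un - x) / R + 2 * Is * np.sinh(x / Vt)
            dg = C / T + 1.0 / R + 2 * Is / Vt * np.cosh(x / Vt)
            step = g / dg
            x -= step
            if abs(step) < 1e-9:
                break
        v = x
        out[n] = v
    return out
```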
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multichannel online speech dereverberation under noisy environments.\n \n \n \n \n\n\n \n Togami, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1078-1082, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MultichannelPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362549,\n  author = {M. Togami},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Multichannel online speech dereverberation under noisy environments},\n  year = {2015},\n  pages = {1078-1082},\n  abstract = {In this paper, we propose a novel online speech dereverberation method with multichannel microphone input signals for noisy environments. Unlike conventional dereverberation methods, which optimize the dereverberation filter using noisy microphone input signals, the proposed method optimizes the dereverberation filter using noiseless microphone input signals, so as to achieve a good dereverberation filter under noisy environments. Noiseless microphone input signals are estimated by multichannel Wiener filtering, which can be interpreted as a combination of multichannel beamforming and time-varying single-channel Wiener filtering. In multichannel Wiener filtering, residual reverberation which cannot be reduced by the time-invariant dereverberation filter is also reduced. The parameters are updated online using the expectation-maximization algorithm. Experimental results show that the proposed method can reduce reverberation and background noise effectively in an online manner even when the microphone input signals are observed under noisy environments.},\n  keywords = {acoustic noise;acoustic signal processing;expectation-maximisation algorithm;microphones;optimisation;reverberation;signal denoising;speech processing;time-varying filters;Wiener filters;multichannel online speech dereverberation;noisy environments;multichannel microphone input signals;dereverberation filter optimization;noiseless microphone input signals;multichannel Wiener filtering;multichannel beamforming;time-varying single-channel Wiener filtering;parameter optimization;expectation-maximization algorithm;reverberation reduction;background noise reduction;Speech;Microphones;Noise measurement;Chlorine;Reverberation;Noise reduction;Optimization;Dereverberation;noise robustness;local Gaussian modeling;multichannel Wiener filtering;EM algorithm},\n  doi = {10.1109/EUSIPCO.2015.7362549},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104467.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a novel online speech dereverberation method for multichannel microphone input signals in noisy environments. Unlike conventional dereverberation methods, which optimize the dereverberation filter on noisy microphone input signals, the proposed method optimizes the dereverberation filter on noiseless microphone input signals so as to achieve a good dereverberation filter under noisy conditions. Noiseless microphone input signals are estimated by multichannel Wiener filtering, which can be interpreted as a combination of multichannel beamforming and time-varying single-channel Wiener filtering. In multichannel Wiener filtering, residual reverberation that cannot be reduced by the time-invariant dereverberation filter is also reduced. The parameters are optimized with the expectation-maximization algorithm in an online manner. Experimental results show that the proposed method can reduce reverberation and background noise effectively in an online manner, even when the microphone input signals are observed in noisy environments.\n
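A per-frequency-bin sketch of the multichannel Wiener filter decomposition mentioned in the abstract (Python, with assumed toy covariances and steering vector; the paper's online EM dereverberation itself is not reproduced):

    import numpy as np

    rng = np.random.default_rng(0)
    M = 4                                     # microphones
    d = np.exp(-2j * np.pi * rng.random(M))   # toy steering vector (unit modulus)
    Phi_s = np.outer(d, d.conj())             # rank-1 speech covariance
    Phi_n = 0.1 * np.eye(M)                   # noise covariance

    # MVDR beamformer: minimize noise power subject to w^H d = 1
    Phi_n_inv = np.linalg.inv(Phi_n)
    w_mvdr = Phi_n_inv @ d / (d.conj() @ Phi_n_inv @ d)

    # single-channel Wiener postfilter gain at the beamformer output
    ps = np.real(w_mvdr.conj() @ Phi_s @ w_mvdr)   # speech power after MVDR
    pn = np.real(w_mvdr.conj() @ Phi_n @ w_mvdr)   # residual noise power
    w_mwf = (ps / (ps + pn)) * w_mvdr              # MWF = MVDR + Wiener postfilter

    x = d * 0.8 + 0.2 * (rng.standard_normal(M) + 1j * rng.standard_normal(M))
    s_hat = w_mwf.conj() @ x                       # enhanced STFT coefficient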
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Recognize and separate approach for speech denoising using nonnegative matrix factorization.\n \n \n \n \n\n\n \n Sohrab, F.; and Erdogan, H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1083-1087, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RecognizePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362550,\n  author = {F. Sohrab and H. Erdogan},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Recognize and separate approach for speech denoising using nonnegative matrix factorization},\n  year = {2015},\n  pages = {1083-1087},\n  abstract = {This paper proposes a novel approach for denoising single-channel noisy speech signals. A speech dictionary and multiple noise dictionaries are trained using nonnegative matrix factorization (NMF). After observing the mixed signal, first the type of noise in the mixed signal is identified. The magnitude spectrogram of the noisy signal is decomposed using NMF with the concatenated trained dictionaries of noise and speech. Our results indicate that recognizing the noise type from the mixed signal and using the corresponding specific noise dictionary provides better results than using a general noise dictionary in the NMF approach. We also compare our algorithm with other state-of-the-art denoising methods and show that it has better performance than the competitors in most cases.},\n  keywords = {matrix decomposition;signal denoising;source separation;speech processing;speech recognition;denoising methods;magnitude spectrogram;noise dictionaries;speech dictionary;single-channel noisy speech signals;NMF;nonnegative matrix factorization;speech denoising;Speech;Dictionaries;Noise reduction;Training;Signal to noise ratio;Speech processing;Noise measurement;Speech enhancement;single channel denoising;nonnegative matrix factorization},\n  doi = {10.1109/EUSIPCO.2015.7362550},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096429.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a novel approach for denoising single-channel noisy speech signals. A speech dictionary and multiple noise dictionaries are trained using nonnegative matrix factorization (NMF). After observing the mixed signal, the type of noise it contains is first identified. The magnitude spectrogram of the noisy signal is then decomposed using NMF with the concatenated trained dictionaries of noise and speech. Our results indicate that recognizing the noise type from the mixed signal and using the corresponding specific noise dictionary provides better results than using a general noise dictionary in the NMF approach. We also compare our algorithm with other state-of-the-art denoising methods and show that it performs better than the competitors in most cases.\n
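A sketch of the separation step in Python. The dictionaries below are random stand-ins for pretrained ones, and the Wiener-style mask at the end is a common reconstruction choice, not necessarily the paper's exact one:

    import numpy as np

    rng = np.random.default_rng(1)
    F, T, Ks, Kn = 257, 100, 20, 10
    Ws = np.abs(rng.standard_normal((F, Ks)))   # pretrained speech dictionary
    Wn = np.abs(rng.standard_normal((F, Kn)))   # pretrained noise dictionary
    V = np.abs(rng.standard_normal((F, T)))     # noisy magnitude spectrogram

    W = np.hstack([Ws, Wn])                     # concatenated, kept fixed
    H = np.abs(rng.standard_normal((Ks + Kn, T)))
    eps = 1e-12
    for _ in range(100):                        # KL-divergence NMF updates on H only
        WH = W @ H + eps
        H *= (W.T @ (V / WH)) / (W.T @ np.ones_like(V) + eps)

    Vs, Vn = Ws @ H[:Ks], Wn @ H[Ks:]
    speech_est = V * Vs / (Vs + Vn + eps)       # soft (Wiener-like) mask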
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A source separation evaluation method in object-based spatial audio.\n \n \n \n \n\n\n \n Liu, Q.; Wang, W.; Jackson, P. J. B.; and Cox, T. J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1088-1092, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362551,\n  author = {Q. Liu and W. Wang and P. J. B. Jackson and T. J. Cox},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A source separation evaluation method in object-based spatial audio},\n  year = {2015},\n  pages = {1088-1092},\n  abstract = {Representing a complex acoustic scene with audio objects is desirable but challenging in object-based spatial audio production and reproduction, especially when concurrent sound signals are present in the scene. Source separation (SS) provides a potentially useful and enabling tool for audio object extraction. These extracted objects are often remixed to reconstruct a sound field in the reproduction stage. A suitable SS method is expected to produce audio objects that ultimately deliver high quality audio after remix. The performance of these SS algorithms therefore needs to be evaluated in this context. Existing metrics for SS performance evaluation, however, do not take into account the essential sound field reconstruction process. To address this problem, here we propose a new SS evaluation method which employs a remixing strategy similar to the panning law, and provides a framework to incorporate the conventional SS metrics. We have tested our proposed method on real-room recordings processed with four SS methods, including two state-of-the-art blind source separation (BSS) methods and two classic beamforming algorithms. The evaluation results based on three conventional SS metrics are analysed.},\n  keywords = {acoustic signal processing;array signal processing;audio signal processing;blind source separation;signal reconstruction;source separation evaluation method;complex acoustic scene;object-based spatial audio production;concurrent sound signals;audio object extraction;SS algorithms;sound field reconstruction process;remixing strategy;panning law;real-room recordings;blind source separation;BSS methods;SS evaluation method;beamforming algorithms;SS metrics;Microphones;Measurement;Signal processing algorithms;Array signal processing;Arrays;Magnetic heads;Spatial audio;audio objects;blind source separation;beamforming;evaluation},\n  doi = {10.1109/EUSIPCO.2015.7362551},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570098689.pdf},\n}\n\n
\n
\n\n\n
\n Representing a complex acoustic scene with audio objects is desirable but challenging in object-based spatial audio production and reproduction, especially when concurrent sound signals are present in the scene. Source separation (SS) provides a potentially useful and enabling tool for audio object extraction. These extracted objects are often remixed to reconstruct a sound field in the reproduction stage. A suitable SS method is expected to produce audio objects that ultimately deliver high quality audio after remix. The performance of these SS algorithms therefore needs to be evaluated in this context. Existing metrics for SS performance evaluation, however, do not take into account the essential sound field reconstruction process. To address this problem, here we propose a new SS evaluation method which employs a remixing strategy similar to the panning law, and provides a framework to incorporate the conventional SS metrics. We have tested our proposed method on real-room recordings processed with four SS methods, including two state-of-the-art blind source separation (BSS) methods and two classic beamforming algorithms. The evaluation results based on three conventional SS metrics are analysed.\n
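A sketch of the remix-then-score idea in Python. The constant-power pan and plain SDR used here are assumptions standing in for the paper's remixing strategy and metric framework:

    import numpy as np

    def pan(src, theta):                        # constant-power pan, theta in [0, pi/2]
        return np.vstack([np.cos(theta) * src, np.sin(theta) * src])

    def sdr(ref, est):                          # simple signal-to-distortion ratio, dB
        return 10 * np.log10(np.sum(ref**2) / np.sum((ref - est)**2))

    rng = np.random.default_rng(9)
    s1, s2 = rng.standard_normal(8000), rng.standard_normal(8000)
    s1_hat = s1 + 0.05 * rng.standard_normal(8000)   # imperfectly separated objects
    s2_hat = s2 + 0.05 * rng.standard_normal(8000)

    mix_ref = pan(s1, 0.2) + pan(s2, 1.2)            # reference remix
    mix_est = pan(s1_hat, 0.2) + pan(s2_hat, 1.2)    # remix of separated objects
    print("remix SDR (dB):", sdr(mix_ref, mix_est))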
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Using enhanced F0-trajectories for multiple speaker detection in audio monitoring scenarios.\n \n \n \n \n\n\n \n Cornaggia-Urrigshardt, A.; and Kurth, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1093-1097, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"UsingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362552,\n  author = {A. Cornaggia-Urrigshardt and F. Kurth},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Using enhanced F0-trajectories for multiple speaker detection in audio monitoring scenarios},\n  year = {2015},\n  pages = {1093-1097},\n  abstract = {We propose to use enhanced F0-trajectories, which are extracted using shift-autocorrelation (shift-ACF), for multiple speaker detection in audio monitoring scenarios. After introducing spectral shift-ACF features, their performance in a multiple FO-extraction in the presence of different noise types is estimated for synthetic signal scenarios. Afterwards, at novel method for F0-supertrajectory extraction is proposeds and evaluated for multiple speaker detection in the presence of background noises that typically occur in audio monitoring. It turns out that due to their improved sharpness in representing harmonic components, spectral shift-ACF features outperform classical features in many cases.},\n  keywords = {correlation methods;feature extraction;speaker recognition;spectral analysis;enhanced F0-trajectories;multiple speaker detection;audio monitoring scenario;shift-autocorrelation;spectral shift-ACF feature;multiple F0-extraction;synthetic signal scenario;F0-supertrajectory extraction;background noises;harmonic component representation;Feature extraction;Speech;Harmonic analysis;Noise measurement;Gaussian noise;Monitoring;Robustness;Multiple Speaker Detection;Audio Monitoring;FO-Trajectories;Shift-ACF},\n  doi = {10.1109/EUSIPCO.2015.7362552},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570092869.pdf},\n}\n\n
\n
\n\n\n
\n We propose to use enhanced F0-trajectories, extracted using shift-autocorrelation (shift-ACF), for multiple speaker detection in audio monitoring scenarios. After introducing spectral shift-ACF features, their performance in multiple F0-extraction in the presence of different noise types is estimated for synthetic signal scenarios. Afterwards, a novel method for F0-supertrajectory extraction is proposed and evaluated for multiple speaker detection in the presence of background noises that typically occur in audio monitoring. It turns out that, due to their improved sharpness in representing harmonic components, spectral shift-ACF features outperform classical features in many cases.\n
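For orientation, a plain autocorrelation F0-tracker in Python; the shift-ACF sharpens exactly this kind of estimate, and its precise definition is not reproduced here:

    import numpy as np

    def f0_track(x, fs, frame=1024, hop=256, fmin=60.0, fmax=400.0):
        """Frame-wise F0 trajectory from the autocorrelation peak."""
        lags = np.arange(int(fs / fmax), int(fs / fmin))
        f0s = []
        for start in range(0, len(x) - frame, hop):
            seg = x[start:start + frame] * np.hanning(frame)
            acf = np.correlate(seg, seg, mode='full')[frame - 1:]  # lags >= 0
            lag = lags[np.argmax(acf[lags])]
            f0s.append(fs / lag)
        return np.array(f0s)

    fs = 16000
    t = np.arange(fs) / fs
    print(f0_track(np.sin(2 * np.pi * 220 * t), fs)[:5])   # ~220 Hz throughout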
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Separation matrix optimization using associative memory model for blind source separation.\n \n \n \n \n\n\n \n Omachi, M.; Ogawa, T.; Kobayashi, T.; Fujieda, M.; and Katagiri, K.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1098-1102, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SeparationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362553,\n  author = {M. Omachi and T. Ogawa and T. Kobayashi and M. Fujieda and K. Katagiri},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Separation matrix optimization using associative memory model for blind source separation},\n  year = {2015},\n  pages = {1098-1102},\n  abstract = {A source signal is estimated using an associative memory model (AMM) and used for separation matrix optimization in linear blind source separation (BSS) to yield high quality and less distorted speech. Linear-filtering-based BSS, such as independent vector analysis (IVA), has been shown to be effective in sound source separation while avoiding non-linear signal distortion. This technique, however, requires several assumptions of sound sources being independent and generated from non-Gaussian distribution. We propose a method for estimating a linear separation matrix without any assumptions about the sources by repeating the following two steps: estimating non-distorted reference signals by using an AMM and optimizing the separation matrix to minimize an error between the estimated signal and reference signal. Experimental comparisons carried out in simultaneous speech separation suggest that the proposed method can reduce the residual distortion caused by IVA.},\n  keywords = {blind source separation;filtering theory;matrix algebra;optimisation;speech processing;vectors;linear blind source separation;separation matrix optimization;associative memory model;source signal;high quality speech;less distorted speech;linear-filtering-based BSS;independent vector analysis;IVA;sound source separation;nonGaussian distribution;linear separation matrix;nondistorted reference signals;estimated signal;Distortion;Speech;Optimization;Yttrium;Convolution;Training;Blind source separation;convolutional neural network;denoising autoencoder associative memory model linear filtering blind},\n  doi = {10.1109/EUSIPCO.2015.7362553},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096599.pdf},\n}\n\n
\n
\n\n\n
\n A source signal is estimated using an associative memory model (AMM) and used for separation matrix optimization in linear blind source separation (BSS) to yield high-quality, less distorted speech. Linear-filtering-based BSS, such as independent vector analysis (IVA), has been shown to be effective in sound source separation while avoiding non-linear signal distortion. This technique, however, requires several assumptions, namely that the sound sources are independent and generated from a non-Gaussian distribution. We propose a method for estimating a linear separation matrix without any assumptions about the sources by repeating the following two steps: estimating non-distorted reference signals using an AMM, and optimizing the separation matrix to minimize the error between the estimated signal and the reference signal. Experimental comparisons carried out on simultaneous speech separation suggest that the proposed method can reduce the residual distortion caused by IVA.\n
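A sketch of the two-step loop in Python. A simple shrinkage denoiser plays the role of the AMM here, purely as a hypothetical stand-in; the least-squares update is the part the abstract describes:

    import numpy as np

    rng = np.random.default_rng(2)
    S = rng.laplace(size=(2, 5000))                 # two sparse sources
    A = rng.standard_normal((2, 2))                 # unknown mixing matrix
    X = A @ S                                       # observed mixtures
    W = np.eye(2)                                   # separation matrix
    for _ in range(20):
        Y = W @ X
        # step 1: reference estimation (shrinkage as a stand-in for the AMM)
        R = np.sign(Y) * np.maximum(np.abs(Y) - 0.1, 0.0)
        # step 2: least-squares update, W = argmin ||R - W X||_F^2
        W = R @ X.T @ np.linalg.inv(X @ X.T)
        W /= np.linalg.norm(W, axis=1, keepdims=True)   # fix the scale ambiguity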
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Coexistence of G.fast and VDSL in FTTDP and FTTC deployments.\n \n \n \n \n\n\n \n Strobel, R.; and Utschick, W.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1103-1107, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CoexistencePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362554,\n  author = {R. Strobel and W. Utschick},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Coexistence of G.fast and VDSL in FTTDP and FTTC deployments},\n  year = {2015},\n  pages = {1103-1107},\n  abstract = {Hybrid copper/fiber networks bridge the gap between the fiber link and the customer by using copper wires over the last meters. This solution combines energy efficiency and low cost of the copper network with higher fiber data rates. ITU recently finished the G.fast standard for high speed data transmission on copper wires for this application. Coexistence with legacy VDSL2 systems is an important topic for the introduction of the new technology, as the systems share a significant part of the frequency spectrum. This paper investigates the performance of G.fast coexisting with VDSL2. Methods for decentralized spectrum optimization and protection of legacy services are presented.},\n  keywords = {digital subscriber lines;optical fibre subscriber loops;optimisation;spectrum optimization;frequency spectrum;VDSL2 systems;data transmission;ITU;fiber data rates;copper wires;fiber link;fiber networks;copper networks;FTTC;fiber to the curb-architecture;FTTDP;fiber to the distribution point;G.fast;Crosstalk;Signal to noise ratio;Optimization;Loading;Europe;spectrum optimization;alien crosstalk;iterative water-filling;FTTdp;coexistence},\n  doi = {10.1109/EUSIPCO.2015.7362554},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103537.pdf},\n}\n\n
\n
\n\n\n
\n Hybrid copper/fiber networks bridge the gap between the fiber link and the customer by using copper wires over the last meters. This solution combines the energy efficiency and low cost of the copper network with higher fiber data rates. The ITU recently finished the G.fast standard for high-speed data transmission on copper wires for this application. Coexistence with legacy VDSL2 systems is an important topic for the introduction of the new technology, as the systems share a significant part of the frequency spectrum. This paper investigates the performance of G.fast coexisting with VDSL2. Methods for decentralized spectrum optimization and protection of legacy services are presented.\n
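For background, a textbook water-filling allocation in Python. The paper's decentralized, crosstalk-aware optimization is more involved; this only illustrates the basic building block behind iterative water-filling:

    import numpy as np

    def waterfill(g, budget):
        """Powers p >= 0 maximizing sum(log2(1 + g*p)) with sum(p) == budget."""
        order = np.argsort(g)[::-1]               # strongest tones first
        gs = g[order]
        for k in range(len(g), 0, -1):            # try k active tones
            mu = (budget + np.sum(1.0 / gs[:k])) / k      # water level
            p = mu - 1.0 / gs[:k]
            if p[-1] >= 0:                        # weakest active tone still feasible
                powers = np.zeros_like(g)
                powers[order[:k]] = p
                return powers
        return np.zeros_like(g)

    # usage: channel-to-noise ratios per tone, unit power budget
    print(waterfill(np.array([5.0, 1.0, 0.2, 2.0]), budget=1.0))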
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Step-adaptive approximate least squares.\n \n \n \n \n\n\n \n Lunglmayr, M.; Unterrieder, C.; and Huemer, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1108-1112, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Step-adaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362555,\n  author = {M. Lunglmayr and C. Unterrieder and M. Huemer},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Step-adaptive approximate least squares},\n  year = {2015},\n  pages = {1108-1112},\n  abstract = {Recently, we proposed approximate least squares (ALS), a low complexity approach to solve the linear least squares problem. In this work we present the step-adaptive linear least squares (SALS) algorithm, an extension of the ALS approach that significantly reduces its approximation error. We theoretically motivate the extension of the algorithm, and introduce a low complexity implementation scheme. Our performance simulations exhibit that SALS features a practically negligible error compared to the exact LS solution that is achieved with only a marginal complexity increase compared to ALS. This performance gain is achieved with about the same low computational complexity as the original ALS approach.},\n  keywords = {computational complexity;least squares approximations;signal processing;step-adaptive approximate least square;step-adaptive linear least square algorithm;SALS algorithm;approximation error reduction;low complexity implementation scheme;computational complexity;signal processing;Least squares approximations;Complexity theory;Signal processing;Signal processing algorithms;Eigenvalues and eigenfunctions;Approximation algorithms;least squares;approximation;iterative algorithm;complexity;approximate least squares},\n  doi = {10.1109/EUSIPCO.2015.7362555},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570094291.pdf},\n}\n\n
\n
\n\n\n
\n Recently, we proposed approximate least squares (ALS), a low-complexity approach for solving the linear least squares problem. In this work we present the step-adaptive linear least squares (SALS) algorithm, an extension of the ALS approach that significantly reduces its approximation error. We theoretically motivate the extension of the algorithm and introduce a low-complexity implementation scheme. Our performance simulations show that SALS achieves a practically negligible error compared to the exact LS solution, with only a marginal complexity increase over ALS; the performance gain thus comes at about the same low computational complexity as the original ALS approach.\n
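A generic Python illustration of shrinking the step size over iterative LS updates. The exact ALS/SALS recursions are in the paper; this shows only the underlying idea:

    import numpy as np

    rng = np.random.default_rng(3)
    A = rng.standard_normal((100, 8))
    x_true = rng.standard_normal(8)
    y = A @ x_true + 0.01 * rng.standard_normal(100)

    x = np.zeros(8)
    mu = 1.0 / np.linalg.norm(A, 2)**2        # stable base step size
    for k in range(200):
        step = mu / (1 + 0.05 * k)            # step-adaptive schedule (assumed form)
        x = x + step * A.T @ (y - A @ x)      # gradient step on ||y - A x||^2

    err = np.linalg.norm(x - np.linalg.lstsq(A, y, rcond=None)[0])
    print("distance to exact LS solution:", err)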
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Pipelined design of an instantaneous frequency estimation-based time-frequency optimal filter.\n \n \n \n \n\n\n \n Ivanović, V. N.; Jovanovski, S.; and Radović, N.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1113-1117, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PipelinedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362556,\n  author = {V. N. Ivanović and S. Jovanovski and N. Radović},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Pipelined design of an instantaneous frequency estimation-based time-frequency optimal filter},\n  year = {2015},\n  pages = {1113-1117},\n  abstract = {Pipelined signal adaptive hardware design of an optimal time-frequency (TF) filter has been presented. It is based on the real-time results of TF analysis and on the TF analysis-based instantaneous frequency (IF) estimation. The implemented pipelining technique allows the filter to overlap in execution unconditional steps performing in neighboring TF instants and, therefore, to significantly enhance time performance. The improvement in execution time corresponding to the one clock cycle by a TF point (i.e. even 50% in some TF points) is achieved. The design is tested on multicomponent signals and compared with the other possible IF estimation-based TF filter's designs.},\n  keywords = {frequency estimation;signal processing;time-frequency analysis;Wigner distribution;instantaneous frequency estimation-based time-frequency;pipelined signal adaptive hardware design;optimal time-frequency filter;multicomponent signals;Estimation;Gabor filters;Logic gates;Hardware;Pipeline processing;Frequency estimation;Cross-terms free Wigner distribution;Hardware design;Optimal filter;Pipelining},\n  doi = {10.1109/EUSIPCO.2015.7362556},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099323.pdf},\n}\n\n
\n
\n\n\n
\n A pipelined, signal-adaptive hardware design of an optimal time-frequency (TF) filter is presented. It is based on the real-time results of TF analysis and on TF analysis-based instantaneous frequency (IF) estimation. The implemented pipelining technique allows the filter to overlap the execution of unconditional steps performed at neighboring TF instants and, therefore, to significantly enhance time performance. An improvement in execution time of one clock cycle per TF point (i.e., up to 50% at some TF points) is achieved. The design is tested on multicomponent signals and compared with other possible IF estimation-based TF filter designs.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fixed-point implementation of Lattice Wave Digital Filter: Comparison and error analysis.\n \n \n \n \n\n\n \n Volkova, A.; and Hilaire, T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1118-1122, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Fixed-pointPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362557,\n  author = {A. Volkova and T. Hilaire},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Fixed-point implementation of Lattice Wave Digital Filter: Comparison and error analysis},\n  year = {2015},\n  pages = {1118-1122},\n  abstract = {A consistent analysis of the filter design along with its further implementation in fixed-point arithmetic requires a large amount of work, and this process differs from one filter representation to another. For the unifying purposes of such flow, a Specialized Implicit Form (SIF) had been proposed in [1]. Various sensitivity and stability measures have been adapted to it along with an a priori error analysis (quantization of the coefficients and output error). In this paper a conversion algorithm for the widely used Lattice Wave Digital Filters (LWDF) to the SIF is presented, along with a finite precision error analysis. It allows to compare fairly LWDF to other structures, like direct forms and state-space. This is illustrated with a numerical example.},\n  keywords = {error analysis;fixed point arithmetic;lattice filters;wave digital filters;fixed-point implementation;lattice wave digital filter;specialized implicit form;a priori error analysis;LWDF;SIF;finite precision error analysis;Transfer functions;Quantization (signal);Signal processing algorithms;Roundoff errors;Digital filters;Lattices;Filter implementation;Lattice Wave Digital Filters;error analysis;fixed-point arithmetic},\n  doi = {10.1109/EUSIPCO.2015.7362557},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105129.pdf},\n}\n\n
\n
\n\n\n
\n A consistent analysis of a filter design together with its implementation in fixed-point arithmetic requires a large amount of work, and this process differs from one filter representation to another. To unify this flow, a Specialized Implicit Form (SIF) was proposed in [1]. Various sensitivity and stability measures have been adapted to it, along with an a priori error analysis (quantization of the coefficients and output error). In this paper, a conversion algorithm from the widely used Lattice Wave Digital Filters (LWDF) to the SIF is presented, along with a finite-precision error analysis. This allows a fair comparison of the LWDF with other structures, such as direct forms and state-space realizations. The approach is illustrated with a numerical example.\n
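A quick numeric illustration of coefficient quantization error in Python, on a generic direct-form filter; the paper's SIF-based analysis handles LWDF structures analytically rather than by simulation:

    import numpy as np
    from scipy.signal import butter, lfilter

    def quantize(c, frac_bits=12):
        """Round coefficients to a fixed-point grid with frac_bits fractional bits."""
        return np.round(c * 2**frac_bits) / 2**frac_bits

    b, a = butter(5, 0.2)                     # reference 5th-order lowpass
    bq, aq = quantize(b), quantize(a)         # fixed-point coefficients

    x = np.random.default_rng(4).standard_normal(10000)
    err = lfilter(b, a, x) - lfilter(bq, aq, x)
    print("output error RMS:", np.sqrt(np.mean(err**2)))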
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Minimized roundoff noise and pole sensitivity subject to L2-Scaling constraints for IIR filters.\n \n \n \n\n\n \n Hinamoto, Y.; and Doi, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1123-1127, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362558,\n  author = {Y. Hinamoto and A. Doi},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Minimized roundoff noise and pole sensitivity subject to L2-Scaling constraints for IIR filters},\n  year = {2015},\n  pages = {1123-1127},\n  abstract = {This paper investigates the minimization problem of weighted roundoff noise and pole sensitivity subject to l2-scaling constraints for state-space digital filters. A new measure for evaluating roundoff noise and pole sensitivity is proposed, and an efficient technique for minimizing this measure is developed. It is shown that the problem can be converted into an unconstrained optimization problem by using linear-algebraic techniques. The unconstrained optimization problem at hand is then solved iteratively by employing an efficient quasi-Newton algorithm with closed-form formulas for key gradient evaluation. Finally a numerical example is presented to demonstrate the validity and effectiveness of the proposed technique.},\n  keywords = {gradient methods;IIR filters;minimisation;minimized roundoff noise;pole sensitivity;IIR filters;weighted roundoff noise;state-space digital filters;unconstrained optimization problem;linear-algebraic techniques;quasiNewton algorithm;key gradient evaluation;Sensitivity;Europe;Signal processing;Noise measurement;Optimization;Transfer functions;Minimization},\n  doi = {10.1109/EUSIPCO.2015.7362558},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n This paper investigates the minimization problem of weighted roundoff noise and pole sensitivity subject to l2-scaling constraints for state-space digital filters. A new measure for evaluating roundoff noise and pole sensitivity is proposed, and an efficient technique for minimizing this measure is developed. It is shown that the problem can be converted into an unconstrained optimization problem by using linear-algebraic techniques. The unconstrained optimization problem at hand is then solved iteratively by employing an efficient quasi-Newton algorithm with closed-form formulas for key gradient evaluation. Finally, a numerical example is presented to demonstrate the validity and effectiveness of the proposed technique.\n
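For orientation, a generic quasi-Newton minimization with a closed-form gradient in Python/SciPy. The objective here is an arbitrary smooth stand-in, not the paper's roundoff-noise/pole-sensitivity measure:

    import numpy as np
    from scipy.optimize import minimize

    def objective(p):
        return np.sum(p**4) + np.sum((p - 1)**2)   # stand-in smooth measure

    def gradient(p):
        return 4 * p**3 + 2 * (p - 1)              # closed-form gradient

    res = minimize(objective, x0=np.zeros(6), jac=gradient, method='BFGS')
    print(res.x, res.fun)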
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unsupervised feature selection method for improved human gait recognition.\n \n \n \n \n\n\n \n Rida, I.; Maadeed, S. A.; and Bouridane, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1128-1132, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"UnsupervisedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362559,\n  author = {I. Rida and S. A. Maadeed and A. Bouridane},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Unsupervised feature selection method for improved human gait recognition},\n  year = {2015},\n  pages = {1128-1132},\n  abstract = {Gait recognition is an emerging biometric technology which aims to identify people purely through the analysis of the way they walk. The technology has attracted interest as a method of identification because it is non-invasiveness since it does not require the subject's cooperation. However, {"}covariates{"} which include clothing, carrying conditions, and other intra-class variations affect the recognition performances. This paper proposes an unsupervised feature selection method which is able to select most relevant discriminative features for human recognition to alleviate the impact of covariates so as to improve the recognition performances. The proposed method has been evaluated using CASIA Gait Database (Dataset B) and the experimental results demonstrate that the proposed technique achieves 85.43 % of correct recognition.},\n  keywords = {biometrics (access control);gait analysis;gesture recognition;unsupervised feature selection method;human gait recognition;biometric technology;covariates;recognition performances;recognition performances;CASIA gait database;Computational modeling;Gait recognition;Entropy;Clothing;Feature extraction;Dynamics;Training;Biometrics;gait;model free;feature selection;entropy},\n  doi = {10.1109/EUSIPCO.2015.7362559},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103019.pdf},\n}\n\n
\n
\n\n\n
\n Gait recognition is an emerging biometric technology which aims to identify people purely through the analysis of the way they walk. The technology has attracted interest as a method of identification because it is non-invasive and does not require the subject's cooperation. However, "covariates", which include clothing, carrying conditions, and other intra-class variations, affect the recognition performance. This paper proposes an unsupervised feature selection method which is able to select the most relevant discriminative features for human recognition, alleviating the impact of covariates so as to improve the recognition performance. The proposed method has been evaluated using the CASIA Gait Database (Dataset B) and the experimental results demonstrate that the proposed technique achieves 85.43% correct recognition.\n
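One plausible entropy-based selection step, sketched in Python; this is an assumed reading of the general idea, not the paper's exact criterion:

    import numpy as np

    def feature_entropy(col, bins=16):
        """Shannon entropy (bits) of a feature's value histogram."""
        counts, _ = np.histogram(col, bins=bins)
        p = counts[counts > 0] / counts.sum()
        return -np.sum(p * np.log2(p))

    rng = np.random.default_rng(5)
    informative = rng.standard_normal((200, 5))               # spread-out values
    degenerate = (rng.random((200, 3)) < 0.02).astype(float)  # almost constant
    Xf = np.hstack([informative, degenerate])

    scores = np.array([feature_entropy(Xf[:, j]) for j in range(Xf.shape[1])])
    keep = np.argsort(scores)[::-1][:5]     # keep the 5 highest-entropy features
    print(keep)                             # the informative columns rank first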
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Rejection-based classification for action recognition using a spatio-temporal dictionary.\n \n \n \n \n\n\n \n Tim, S. C. W.; Rombaut, M.; and Pellerin, D.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1133-1137, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Rejection-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362560,\n  author = {S. C. W. Tim and M. Rombaut and D. Pellerin},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Rejection-based classification for action recognition using a spatio-temporal dictionary},\n  year = {2015},\n  pages = {1133-1137},\n  abstract = {This paper presents a method for human action recognition in videos which learns a dictionary whose atoms are spatio-temporal patches. We use these gray-level spatio-temporal patches to learn motion patterns inside the videos. This method also relies on a part-based human detector in order to segment and narrow down several interesting regions inside the videos without a need for bounding boxes annotations. We show that the utilization of these parts improves the classification performance. We introduce a rejection-based classification method which is based on a Support Vector Machine. This method has been tested on UCF sports action dataset with good results.},\n  keywords = {image classification;motion estimation;object detection;support vector machines;rejection-based classification;spatio-temporal dictionary;human action recognition;gray-level spatio-temporal patches;motion patterns;part-based human detector;support vector machine;Videos;Dictionaries;Yttrium;Detectors;Support vector machines;Europe;Dictionary Learning;Action Recognition;Classification;Videos;Spatio-temporal patches},\n  doi = {10.1109/EUSIPCO.2015.7362560},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097533.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a method for human action recognition in videos which learns a dictionary whose atoms are spatio-temporal patches. We use these gray-level spatio-temporal patches to learn motion patterns inside the videos. The method also relies on a part-based human detector in order to segment and narrow down several interesting regions inside the videos without the need for bounding box annotations. We show that the utilization of these parts improves the classification performance. We introduce a rejection-based classification method based on a Support Vector Machine. The method has been tested on the UCF sports action dataset with good results.\n
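A minimal sketch of SVM classification with a rejection option in Python/scikit-learn. Thresholding the decision value is an assumed mechanism; the paper's exact rejection rule may differ:

    import numpy as np
    from sklearn.svm import SVC

    rng = np.random.default_rng(6)
    X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(3, 1, (50, 2))])
    y = np.array([0] * 50 + [1] * 50)
    clf = SVC(kernel='rbf').fit(X, y)

    def predict_with_reject(clf, X, tau=0.5):
        """Label samples, rejecting those too close to the decision boundary."""
        scores = clf.decision_function(X)        # signed margin (binary case)
        labels = (scores > 0).astype(int)
        labels[np.abs(scores) < tau] = -1        # -1 marks a rejection
        return labels

    print(predict_with_reject(clf, rng.normal(1.5, 1, (5, 2))))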
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Lidar-based gait analysis in people tracking and 4D visualization.\n \n \n \n \n\n\n \n Benedek, C.; Nagy, B.; Gálai, B.; and Jankó, Z.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1138-1142, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Lidar-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362561,\n  author = {C. Benedek and B. Nagy and B. Gálai and Z. Jankó},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Lidar-based gait analysis in people tracking and 4D visualization},\n  year = {2015},\n  pages = {1138-1142},\n  abstract = {In this paper we introduce a new approach on gait analysis based on data streams of a Rotating Multi Beam (RMB) Lidar sensor. The gait descriptors for training and recognition are observed and extracted in realistic outdoor surveillance scenarios, where multiple pedestrians walk concurrently in the field of interest, while occlusions or background noise may affects the observation. The proposed algorithms are embedded into an integrated 4D vision and visualization system. Gait features are exploited in two different components of the workflow. First, in the tracking step the collected characteristic gait parameters support as biometric descriptors the re-identification of people, who temporarily leave the field of interest, and re-appear later. Second, in the visualization module, we display moving avatar models which follow in real time the trajectories of the observed pedestrians with synchronized leg movements. The proposed approach is experimentally demonstrated in eight multi-target scenes.},\n  keywords = {gait analysis;image reconstruction;object tracking;optical radar;pedestrians;multi-target scenes;moving avatar models;people re-identification;biometric descriptors;tracking step;gait features;visualization system;integrated 4D vision;pedestrians;gait descriptors;RMB lidar sensor;rotating multi beam lidar sensor;data streams;gait analysis;Three-dimensional displays;Laser radar;Trajectory;Feature extraction;Surveillance;Legged locomotion;Europe;Lidar;gait recognition;4D reconstruction},\n  doi = {10.1109/EUSIPCO.2015.7362561},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100943.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we introduce a new approach to gait analysis based on the data streams of a Rotating Multi-Beam (RMB) Lidar sensor. The gait descriptors for training and recognition are observed and extracted in realistic outdoor surveillance scenarios, where multiple pedestrians walk concurrently in the field of interest, while occlusions or background noise may affect the observation. The proposed algorithms are embedded into an integrated 4D vision and visualization system. Gait features are exploited in two different components of the workflow. First, in the tracking step, the collected characteristic gait parameters serve as biometric descriptors supporting the re-identification of people who temporarily leave the field of interest and re-appear later. Second, in the visualization module, we display moving avatar models which follow in real time the trajectories of the observed pedestrians with synchronized leg movements. The proposed approach is experimentally demonstrated in eight multi-target scenes.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hands, face and joints for multi-modal human-action temporal segmentation and recognition.\n \n \n \n \n\n\n \n Seddik, B.; Gazzah, S.; and Ben Amara, N. E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1143-1147, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Hands,Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362562,\n  author = {B. Seddik and S. Gazzah and N. E. {Ben Amara}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Hands, face and joints for multi-modal human-action temporal segmentation and recognition},\n  year = {2015},\n  pages = {1143-1147},\n  abstract = {We present in this paper a new approach for human-action extraction and recognition in a multi-modal context. Our solution contains two modules. The first one applies temporal action segmentation by combining a heuristic analysis with augmented-joint description and SVM classification. The second one aims for a frame-wise action recognition using skeletal, RGB and depth modalities coupled with a label-grouping strategy in the decision level. Our contribution consists of (1) a selective concatenation of features extracted from the different modalities, (2) the introduction of features relative to the face region in addition to the hands, and (3) the applied multilevel frames-grouping strategy. Our experiments carried on the Chalearn gesture challenge 2014 dataset have proved the effectiveness of our approach within the literature.},\n  keywords = {gesture recognition;heuristic programming;support vector machines;multimodal human-action temporal segmentation;multimodal human-action temporal recognition;heuristic analysis;augmented-joint description;SVM classification;Feature extraction;Support vector machines;Face;Streaming media;Context;Europe;Signal processing;human action recognition;temporal segmentation;Chalearn gesture challenge;Kinect;SVM},\n  doi = {10.1109/EUSIPCO.2015.7362562},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104835.pdf},\n}\n\n
\n
\n\n\n
\n We present in this paper a new approach for human-action extraction and recognition in a multi-modal context. Our solution contains two modules. The first one applies temporal action segmentation by combining a heuristic analysis with augmented-joint description and SVM classification. The second one aims at frame-wise action recognition using skeletal, RGB, and depth modalities coupled with a label-grouping strategy at the decision level. Our contribution consists of (1) a selective concatenation of features extracted from the different modalities, (2) the introduction of features relative to the face region in addition to the hands, and (3) the applied multilevel frame-grouping strategy. Our experiments, carried out on the ChaLearn gesture challenge 2014 dataset, demonstrate the effectiveness of our approach relative to the literature.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The 2D factor analysis and its application to face recognition with a single sample per person.\n \n \n \n \n\n\n \n Machado, A. M. C.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1148-1152, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362563,\n  author = {A. M. C. Machado},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {The 2D factor analysis and its application to face recognition with a single sample per person},\n  year = {2015},\n  pages = {1148-1152},\n  abstract = {In this paper, a novel theoretical model of data reduction and multivariate analysis is proposed. The Two-dimensional Factor Analysis is an extension of classical factor analysis in which the images are treated as matrices instead of being converted to unidimensional vectors. By maximally representing the correlation among the pixels, it is able to capture meaningful information about the spatial relationships of the elements in a two-dimensional signal. The method is illustrated in the problem of face recognition with superior results when compared to other approaches based on principal component analysis. Experiments using public databases under different pose and illumination conditions show that the proposed method is significantly more effective than the two-dimensional principal component analysis while dealing with samples composed by a single image per person.},\n  keywords = {face recognition;principal component analysis;2D factor analysis;face recognition;single sample per person;theoretical model;data reduction;multivariate analysis;classical factor analysis;unidimensional vectors;two-dimensional signal;principal component analysis;illumination conditions;pose conditions;two-dimensional principal component analysis;Principal component analysis;Face recognition;Loading;Databases;Yttrium;Correlation;Training;Face recognition;factor analysis;principal component analysis;data reduction},\n  doi = {10.1109/EUSIPCO.2015.7362563},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103225.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, a novel theoretical model for data reduction and multivariate analysis is proposed. Two-dimensional Factor Analysis is an extension of classical factor analysis in which the images are treated as matrices instead of being converted to one-dimensional vectors. By maximally representing the correlation among the pixels, it is able to capture meaningful information about the spatial relationships of the elements in a two-dimensional signal. The method is illustrated on the problem of face recognition, with superior results compared to other approaches based on principal component analysis. Experiments using public databases under different pose and illumination conditions show that the proposed method is significantly more effective than two-dimensional principal component analysis when dealing with samples composed of a single image per person.\n
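For comparison, a Python sketch of the 2DPCA baseline referenced above: images stay matrices and the image covariance is built column-wise. The proposed 2D factor analysis extends this matrix treatment with a factor model, which is not reproduced here:

    import numpy as np

    rng = np.random.default_rng(7)
    imgs = rng.standard_normal((100, 32, 24))        # 100 images of size 32x24
    mean = imgs.mean(axis=0)

    G = np.zeros((24, 24))                           # image covariance matrix
    for A in imgs:
        D = A - mean
        G += D.T @ D / len(imgs)

    w, V = np.linalg.eigh(G)                         # eigenvalues ascending
    proj = V[:, -5:]                                 # top-5 eigenvectors
    features = imgs @ proj                           # (100, 32, 5) feature matrices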
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 3D sound field analysis using circular higher-order microphone array.\n \n \n \n \n\n\n \n Chen, H.; Abhayapala, T. D.; and Zhang, W.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1153-1157, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"3DPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362564,\n  author = {H. Chen and T. D. Abhayapala and W. Zhang},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {3D sound field analysis using circular higher-order microphone array},\n  year = {2015},\n  pages = {1153-1157},\n  abstract = {This paper proposes the theory and design of circular higher-order microphone arrays for 3D sound field analysis using spherical harmonics. Through employing the spherical harmonic translation theorem, the local spatial sound fields recorded by each higher-order microphone placed in the circular arrays are combined to form the sound field information of a large global spherical region. The proposed design reduces the number of the required sampling points and the geometrical complexity of microphone arrays. We develop a two-step method to calculate sound field coefficients using the proposed array structure, i) analytically combine local sound field coefficients on each circular array and ii) solve for global sound field coefficients using data from the first step. Simulation and experimental results show that the proposed array is capable of acquiring the full 3D sound field information over a relatively large spherical region with decent accuracy and computational simplicity.},\n  keywords = {acoustic field;computational complexity;harmonics;microphone arrays;3D sound field analysis;circular higher-order microphone array;spherical harmonics;sound field information;geometrical complexity;two-step method;Arrays;Microphones;Harmonic analysis;Three-dimensional displays;Europe;Computational modeling;Microphone arrays;higher-order microphones;spherical harmonics},\n  doi = {10.1109/EUSIPCO.2015.7362564},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103997.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes the theory and design of circular higher-order microphone arrays for 3D sound field analysis using spherical harmonics. Through employing the spherical harmonic translation theorem, the local spatial sound fields recorded by each higher-order microphone placed in the circular arrays are combined to form the sound field information of a large global spherical region. The proposed design reduces the number of the required sampling points and the geometrical complexity of microphone arrays. We develop a two-step method to calculate sound field coefficients using the proposed array structure, i) analytically combine local sound field coefficients on each circular array and ii) solve for global sound field coefficients using data from the first step. Simulation and experimental results show that the proposed array is capable of acquiring the full 3D sound field information over a relatively large spherical region with decent accuracy and computational simplicity.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive RF stealth beamforming for frequency diverse array radar.\n \n \n \n \n\n\n \n Wang, W.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1158-1161, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362565,\n  author = {W. Wang},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive RF stealth beamforming for frequency diverse array radar},\n  year = {2015},\n  pages = {1158-1161},\n  abstract = {This paper proposes an adaptive radio frequency (RF) stealth beamforming for frequency diverse array (FDA) radar using spoiled frequency increments. Since active radars are highly visible to intercept receivers, traditional high-gain phased-array antenna beam is replaced by a series of low-gain FDA beam with nonlinear frequency increments to reduce the system visibility, and it achieves the same performance as the original high-gain by jointly exploiting the spoiled beams. Equivalently the detection performance is not degraded. Numerical simulation results verify the proposed method.},\n  keywords = {adaptive signal processing;array signal processing;numerical analysis;radar detection;radar receivers;radar signal processing;adaptive RF stealth beamforming;frequency diverse array radar;adaptive radiofrequency stealth beamforming;high-gain phasedarray antenna beam;low-gain FDA beam;nonlinear frequency increment;numerical simulation;Radar;Arrays;Array signal processing;Gain;OFDM;Receivers;Radio frequency;RF stealth;frequency diverse array;transmit beamforming;low probability of intercept},\n  doi = {10.1109/EUSIPCO.2015.7362565},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103653.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes adaptive radio frequency (RF) stealth beamforming for frequency diverse array (FDA) radar using spoiled frequency increments. Since active radars are highly visible to intercept receivers, the traditional high-gain phased-array antenna beam is replaced by a series of low-gain FDA beams with nonlinear frequency increments to reduce the system's visibility; by jointly exploiting the spoiled beams, the scheme achieves the same performance as the original high-gain beam. Equivalently, the detection performance is not degraded. Numerical simulation results verify the proposed method.\n
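A sketch of the basic FDA beampattern with per-element frequency increments, in Python, using the standard first-order narrowband approximation; the parameter values are assumptions, and the adaptive spoiling itself is not reproduced:

    import numpy as np

    c, f0, M = 3e8, 10e9, 16                   # assumed X-band, 16 elements
    d = c / f0 / 2                             # half-wavelength spacing
    delta_f = 30e3 * np.arange(M)              # linear frequency increments

    def array_factor(theta, r):
        """Range-angle-dependent FDA pattern (first-order approximation)."""
        m = np.arange(M)
        phase = 2 * np.pi * (delta_f * r / c - f0 * m * d * np.sin(theta) / c)
        return np.abs(np.exp(1j * phase).sum()) / M

    print(array_factor(np.deg2rad(10), 20e3))  # pattern at 10 degrees, 20 km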
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust adaptive method for speech signal waveform estimation using microphone array.\n \n \n \n \n\n\n \n Ivanenkov, A. S.; and Rodionov, A. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1162-1166, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362566,\n  author = {A. S. Ivanenkov and A. A. Rodionov},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Robust adaptive method for speech signal waveform estimation using microphone array},\n  year = {2015},\n  pages = {1162-1166},\n  abstract = {This paper considers the scenario when a mix of signals from multiple acoustic sources is received by a microphone array. The problem is to estimate the waveform of the source of interest located in the near field of the array. The considered problem can arise in many applications such as video conferencing, acoustic room surveillance and others, when it is necessary to capture human speech against acoustic interferences. To solve this problem, an adaptive algorithm is independently applied to each narrow band of the received signal. Based on the model of interference including rank-deficient correlation matrix, a new method of robust adaptive processing is proposed. The results of numerical simulation and experiment demonstrating the robustness of the proposed method to imperfections in desired signal spatial model and to the finite sample size effect are presented.},\n  keywords = {acoustic radiators;array signal processing;correlation methods;microphone arrays;speech processing;finite sample size effect;signal spatial model;robust adaptive processing;rank-deficient correlation matrix;adaptive algorithm;acoustic interferences;multiple acoustic sources;microphone array;speech signal waveform estimation;robust adaptive method;Interference;Microphones;Arrays;Correlation;Signal processing algorithms;Estimation;Acoustics;microphone arrays;robust adaptive beamforming;maximum likelihood estimation},\n  doi = {10.1109/EUSIPCO.2015.7362566},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104695.pdf},\n}\n\n
\n
\n\n\n
\n This paper considers the scenario in which a mix of signals from multiple acoustic sources is received by a microphone array. The problem is to estimate the waveform of the source of interest located in the near field of the array. This problem can arise in many applications, such as video conferencing and acoustic room surveillance, where it is necessary to capture human speech against acoustic interference. To solve this problem, an adaptive algorithm is independently applied to each narrow band of the received signal. Based on an interference model that includes a rank-deficient correlation matrix, a new method of robust adaptive processing is proposed. The results of numerical simulations and an experiment demonstrating the robustness of the proposed method to imperfections in the desired signal's spatial model and to finite-sample-size effects are presented.\n
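For orientation, a standard robustified MVDR solve with diagonal loading in Python; this is a common robustness device against finite-sample covariance errors, assumed here as an illustration rather than the paper's rank-deficient interference model:

    import numpy as np

    rng = np.random.default_rng(8)
    M, N = 8, 50                                    # sensors, snapshots
    d = np.exp(-1j * np.pi * np.arange(M) * np.sin(0.3))   # assumed steering vector

    X = rng.standard_normal((M, N)) + 1j * rng.standard_normal((M, N))
    R = X @ X.conj().T / N                          # sample covariance (few snapshots)
    Rl = R + 0.1 * np.trace(R).real / M * np.eye(M) # diagonal loading

    w = np.linalg.solve(Rl, d)
    w /= d.conj() @ w                               # distortionless constraint w^H d = 1
    s_hat = w.conj() @ X                            # beamformer output per snapshot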
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Carrier frequency and direction of arrival estimation with nested sub-nyquist sensor array receiver.\n \n \n \n \n\n\n \n Kumar, A. A.; Razul, S. G.; and See, C. S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1167-1171, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CarrierPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362567,\n  author = {A. A. Kumar and S. G. Razul and C. S. See},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Carrier frequency and direction of arrival estimation with nested sub-nyquist sensor array receiver},\n  year = {2015},\n  pages = {1167-1171},\n  abstract = {Carrier frequency and its corresponding direction of arrival (DOA) estimation, at sub-Nyquist sampling rates of narrowband (bandwidth not exceeding B Hz) sources is considered in this paper. We assume M physical sensors arranged in a two dimensional nested sensor array configuration and propose to modify the receiver architecture by inserting an additional delay channel to only the dense sensor array. An efficient subspace based estimation algorithm to estimate the carrier frequencies and their DOAs is also presented. With this proposed approach we show that a minimum ADC sampling frequency of B Hz is sufficient and O(\\M/4]2) carrier frequencies and their DOAs can be estimated despite all the carrier frequencies exactly aliased to the same frequency. Furthermore, simulations indicate that when used for spectrum estimation, in addition to carrier frequencies and their DOA estimation, it shows better performance compared to an existing approach using the same M element uniform two dimensional sensor array.},\n  keywords = {array signal processing;channel estimation;cognitive radio;direction-of-arrival estimation;frequency estimation;radio receivers;signal sampling;spectrum estimation;efficient subspace based estimation algorithm;dense sensor array;two dimensional nested sensor array;subNyquist sampling;nested subNyquist sensor array receiver;direction of arrival estimation;carrier frequency estimation;Arrays;Direction-of-arrival estimation;Frequency estimation;Receivers;Estimation;Manifolds;Bandwidth;Direction-of-arrival;sub-Nyquist sampling;Spectrum estimation;nested sensor array},\n  doi = {10.1109/EUSIPCO.2015.7362567},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104003.pdf},\n}\n\n
\n
\n\n\n
\n Carrier frequency and its corresponding direction of arrival (DOA) estimation, at sub-Nyquist sampling rates, of narrowband sources (bandwidth not exceeding B Hz) is considered in this paper. We assume M physical sensors arranged in a two-dimensional nested sensor array configuration and propose to modify the receiver architecture by inserting an additional delay channel into only the dense sensor array. An efficient subspace-based estimation algorithm to estimate the carrier frequencies and their DOAs is also presented. With the proposed approach we show that a minimum ADC sampling frequency of B Hz is sufficient and that O(⌊M/4⌋²) carrier frequencies and their DOAs can be estimated even when all the carrier frequencies are aliased to exactly the same frequency. Furthermore, simulations indicate that, when used for spectrum estimation in addition to carrier frequency and DOA estimation, the approach shows better performance than an existing approach using the same M-element uniform two-dimensional sensor array.\n
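The aliasing relation such a receiver must cope with, in a short Python check; the array geometry and the subspace algorithm themselves are not reproduced, and the frequencies below are assumed examples:

    import numpy as np

    def alias(f, fs):
        """Apparent frequency of a carrier at f when sampled at rate fs."""
        k = np.round(f / fs)          # nearest multiple of fs
        return np.abs(f - k * fs)     # folded into [0, fs/2]

    fs = 100e6                        # 100 MHz ADC (the B Hz rate in the paper)
    for f in [230e6, 470e6, 530e6]:
        print(f / 1e6, "MHz aliases to", alias(f, fs) / 1e6, "MHz")
    # all three land on 30 MHz: distinct carriers, identical alias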
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A revisit of the unique polar representation of the Vector-valued Hyperanalytic Signal.\n \n \n \n \n\n\n \n Huang, B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1172-1176, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362568,\n  author = {B. Huang},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A revisit of the unique polar representation of the Vector-valued Hyperanalytic Signal},\n  year = {2015},\n  pages = {1172-1176},\n  abstract = {In this paper, we extend the classic analytic signal to the Vector-valued Hyperanalytic Signal (VHaS) that is denoted to distinguish from the multivariate hypercomplex data. The 2d-Dimensional (2d-D) VHaS, S(t) : [0,1] → C2d, is defined by a complexification of two d-D Vector-valued Hypercomplex Signals (VHcS), S(t) := G(t)e0 + HC2ded[G](t) ed, where HC2ded and ei represent the Hilbert transform and the ith unit axis, and G(t) ϵ Cd, ei G C2d. Inspired by the unique polar form of a classic analytic signal and the one of a 4-D VHaS proposed in the work of Huang and Kunoth (2014), we provide a theoretical explanation of the unique polar representation of a 6-D or 8-D VHaS by replacing the quaternion with octonion, which further implies the possible extension for d-D VHaS with d> 8. Moreover, the derived continuous VHcS envelope and phase from the polar form lead to a unified definition of the time-frequency-amplitude spectrum of the given VHcS G(t).},\n  keywords = {Hilbert transforms;signal processing;polar representation;vector valued hyperanalytic signal;multivariate hypercomplex data;Hilbert transform;quaternion replacement;octonion;Quaternions;Signal processing;Europe;Transforms;Arrays;Brain modeling;vector-valued hypercomplex signal;vector-valued hyperanalytic signal;quaternionic signal;oc-tonionic signal;unique polar representation;time-frequency-amplitude spectrum},\n  doi = {10.1109/EUSIPCO.2015.7362568},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105109.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we extend the classic analytic signal to the Vector-valued Hyperanalytic Signal (VHaS), so named to distinguish it from multivariate hypercomplex data. The 2d-Dimensional (2d-D) VHaS, S(t): [0,1] → C^{2d}, is defined by a complexification of two d-D Vector-valued Hypercomplex Signals (VHcS), S(t) := G(t)e_0 + H_{C^{2d},e_d}[G](t)e_d, where H_{C^{2d},e_d} and e_i denote the Hilbert transform and the i-th unit axis, and G(t) ∈ C^d, e_i ∈ C^{2d}. Inspired by the unique polar form of a classic analytic signal and that of a 4-D VHaS proposed in the work of Huang and Kunoth (2014), we provide a theoretical explanation of the unique polar representation of a 6-D or 8-D VHaS by replacing the quaternion with the octonion, which further implies a possible extension to d-D VHaS with d > 8. Moreover, the continuous VHcS envelope and phase derived from the polar form lead to a unified definition of the time-frequency-amplitude spectrum of the given VHcS G(t).\n
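For the classic scalar analytic signal that the paper generalizes, the unique polar representation is the familiar envelope/phase decomposition via the Hilbert transform. A minimal scalar sketch for intuition (the vector-valued quaternion/octonion constructions of the paper are not reproduced; the AM test tone is an illustrative assumption):

```python
import numpy as np
from scipy.signal import hilbert

# Classic 1-D case: s(t) = g(t) + j*H[g](t) = a(t) * exp(j*phi(t)).
fs = 1000.0
t = np.arange(0, 1.0, 1 / fs)
g = (1 + 0.5 * np.cos(2 * np.pi * 2 * t)) * np.cos(2 * np.pi * 50 * t)  # AM tone

s = hilbert(g)                                  # analytic signal
envelope = np.abs(s)                            # a(t): instantaneous amplitude
phase = np.unwrap(np.angle(s))                  # phi(t): instantaneous phase
inst_freq = np.diff(phase) * fs / (2 * np.pi)   # ≈ 50 Hz away from the edges
print(round(float(inst_freq[100:-100].mean()), 2))
```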
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed multichannel adaptive filtering.\n \n \n \n \n\n\n \n Almeida Neto, F. G.; Nascimento, V. H.; and de Paula, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1177-1181, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362569,\n  author = {F. G. {Almeida Neto} and V. H. Nascimento and A. {de Paula}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed multichannel adaptive filtering},\n  year = {2015},\n  pages = {1177-1181},\n  abstract = {A new distributed multichannel technique is proposed for networks in which a different set of parameters is estimated by each node. The technique is proposed for non fully-connect topologies, so that nodes must store data and re-transmit information to other network elements. To reduce the amount of terms stored by each node, pre-computation of the data required by other elements is performed before the data sharing. The proposed method is adequate for implementation in networks with a large number of nodes, for which straightforward implementations would be prohibitive in terms of cost and memory.},\n  keywords = {adaptive filters;parameter estimation;distributed multichannel adaptive filtering;parameter estimation;data storage;data sharing;multichannel LMS;non fully-connect topologies;network elements;Network topology;Topology;Estimation;Least squares approximations;Memory management;Delays;Europe;Distributed adaptive filtering;distributed multichannel adaptive filtering;multichannel LMS},\n  doi = {10.1109/EUSIPCO.2015.7362569},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096057.pdf},\n}\n\n
\n
\n\n\n
\n A new distributed multichannel technique is proposed for networks in which a different set of parameters is estimated by each node. The technique is designed for non-fully-connected topologies, so nodes must store data and re-transmit information to other network elements. To reduce the number of terms stored by each node, the data required by other elements is pre-computed before data sharing. The proposed method is suitable for implementation in networks with a large number of nodes, for which straightforward implementations would be prohibitive in terms of cost and memory.\n
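The building block behind the per-node estimation is multichannel LMS. A minimal single-node multichannel LMS sketch (the distributed storage, pre-computation, and retransmission machinery of the paper is not shown; dimensions, step size, and the synthetic data are illustrative assumptions):

```python
import numpy as np

def multichannel_lms(X, d, mu=0.05):
    """Multichannel LMS: X has shape (T, C), one column per input channel;
    d is the desired signal. Returns the final weights and error signal."""
    w = np.zeros(X.shape[1])
    err = np.empty(len(d))
    for t in range(len(d)):
        e = d[t] - w @ X[t]      # a priori error
        w = w + mu * e * X[t]    # stochastic-gradient update
        err[t] = e
    return w, err

rng = np.random.default_rng(1)
X = rng.standard_normal((5000, 4))               # 4 channels
w_true = np.array([0.5, -1.0, 0.25, 2.0])
d = X @ w_true + 0.01 * rng.standard_normal(5000)
w_hat, _ = multichannel_lms(X, d)
print(np.round(w_hat, 3))                        # ≈ w_true
```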
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Online estimation of wind turbine blade deflection with UWB signals.\n \n \n \n \n\n\n \n Jensen, T. L.; Jakobsen, M. L.; Østergaard, J.; Nielsen, J. K.; Byskov, C.; Bæk, P.; and Jensen, S. H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1182-1186, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OnlinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362570,\n  author = {T. L. Jensen and M. L. Jakobsen and J. Østergaard and J. K. Nielsen and C. Byskov and P. Bæk and S. H. Jensen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Online estimation of wind turbine blade deflection with UWB signals},\n  year = {2015},\n  pages = {1182-1186},\n  abstract = {In this paper we use ultra-wideband (UWB) signals for the localization of blade tips on wind turbines. Our approach is to acquire two separate distances to each tip via time-delay estimation, and each tip is then localized by triangulation. We derive an approximate maximum a posteriori (MAP) delay estimator exploiting i) contextual prior information and ii) a direct-path approximation. The resulting deflection estimation algorithm is computationally feasible for online usage. Simulation studies are conducted to assess the overall triangulation uncertainty and it is observed that negative correlation between the two distance estimates is detrimental for the tip localization accuracy. Measurement data acquired in an anechoic chamber is used to confirm that the UWB-hardware complies with the desired/relevant ranging accuracy. Finally, measurement data obtained from a static test bench is used to demonstrate that the approximate MAP-based localization algorithm is able to outperform standard methods.},\n  keywords = {anechoic chambers (electromagnetic);blades;delay estimation;maximum likelihood estimation;ultra wideband communication;wind turbines;ultrawideband signals;UWB signals;online estimation;wind turbine blade deflection;blade tips;time-delay estimation;maximum a posteriori delay estimator;MAP delay estimator;contextual prior information;direct-path approximation;deflection estimation;triangulation uncertainty;tip localization accuracy;anechoic chamber;UWB hardware;Blades;Wind turbines;Delays;Estimation;Approximation methods;Approximation algorithms;Signal processing algorithms;Wind turbines;blade deflection;localization;UWB signals;time-delay estimation;MAP estimation},\n  doi = {10.1109/EUSIPCO.2015.7362570},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102585.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we use ultra-wideband (UWB) signals for the localization of blade tips on wind turbines. Our approach is to acquire two separate distances to each tip via time-delay estimation; each tip is then localized by triangulation. We derive an approximate maximum a posteriori (MAP) delay estimator exploiting i) contextual prior information and ii) a direct-path approximation. The resulting deflection estimation algorithm is computationally feasible for online usage. Simulation studies are conducted to assess the overall triangulation uncertainty, and it is observed that negative correlation between the two distance estimates is detrimental to the tip localization accuracy. Measurement data acquired in an anechoic chamber is used to confirm that the UWB hardware complies with the required ranging accuracy. Finally, measurement data obtained from a static test bench is used to demonstrate that the approximate MAP-based localization algorithm is able to outperform standard methods.\n
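Once the two per-tip distances are available from time-delay estimation, the tip position follows from intersecting two circles centered at the antenna positions. A minimal 2-D triangulation sketch (the MAP delay estimator itself is not shown; the anchor geometry and error magnitudes are illustrative assumptions):

```python
import numpy as np

def triangulate(p1, p2, r1, r2):
    """Intersect the circles |x - p1| = r1 and |x - p2| = r2 in 2-D."""
    p1, p2 = np.asarray(p1, float), np.asarray(p2, float)
    d = np.linalg.norm(p2 - p1)
    a = (r1**2 - r2**2 + d**2) / (2 * d)       # along-baseline coordinate
    h = np.sqrt(max(r1**2 - a**2, 0.0))        # perpendicular offset
    base = p1 + a * (p2 - p1) / d
    perp = np.array([-(p2 - p1)[1], (p2 - p1)[0]]) / d
    return base + h * perp, base - h * perp

# Two anchors near the blade root, tip roughly 40 m out (illustrative):
p1, p2 = [0.0, 0.0], [2.0, 0.0]
tip = np.array([40.0, 3.0])
r1 = np.linalg.norm(tip - p1) + 0.02           # few-cm ranging errors
r2 = np.linalg.norm(tip - p2) - 0.02
print(triangulate(p1, p2, r1, r2))             # one solution lies near the tip
```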
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generalized targets detection and counting in dense wireless sensors networks.\n \n \n \n \n\n\n \n Jellali, Z.; Atallah, L. N.; and Cherif, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1187-1191, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"GeneralizedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362571,\n  author = {Z. Jellali and L. N. Atallah and S. Cherif},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Generalized targets detection and counting in dense wireless sensors networks},\n  year = {2015},\n  pages = {1187-1191},\n  abstract = {This paper applies the Compressed Sensing (CS) the targets detection in small scale dense Wireless Sensors Networks (WSN). The monitored area is partitioned into cells, each equipped by one sensor. The CS application aims to locate targets from a reduced subset of sensors measurements. A generalized version of a recently proposed Greedy Matching Pursuit algorithm (GMP), designed for point events joint detection and counting, is derived, which is denoted by gGMP. This generalization enables the identification of several active cells at each iteration. Also, an optimized deterministic sensors subset selection scheme, based on the maximum energy is envisaged and shown to outperform the random choice scheme.},\n  keywords = {approximation theory;compressed sensing;greedy algorithms;object detection;set theory;wireless sensor networks;generalized targets detection;generalized targets counting;dense wireless sensors networks;WSN;compressed sensing;sensors measurements;subset reduction;greedy matching pursuit algorithm;active cells;subset selection scheme;optimized deterministic sensors;gGMP;Sensors;Wireless sensor networks;Monitoring;Matching pursuit algorithms;Signal processing algorithms;Object detection;Energy measurement;Dense Wireless Sensors Networks;rare targets detection;Compressed Sensing},\n  doi = {10.1109/EUSIPCO.2015.7362571},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104793.pdf},\n}\n\n
\n
\n\n\n
\n This paper applies Compressed Sensing (CS) to target detection in small-scale dense Wireless Sensor Networks (WSN). The monitored area is partitioned into cells, each equipped with one sensor. The CS application aims to locate targets from a reduced subset of sensor measurements. A generalized version of a recently proposed Greedy Matching Pursuit algorithm (GMP), designed for joint detection and counting of point events, is derived and denoted gGMP. This generalization enables the identification of several active cells at each iteration. Also, an optimized deterministic sensor subset selection scheme based on maximum energy is proposed and shown to outperform the random choice scheme.\n
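The GMP family follows the standard greedy template: correlate the sensing matrix with the residual, keep the best-matching columns, and re-fit. A generic orthogonal-matching-pursuit-style sketch that, like gGMP, selects several atoms per iteration (the Boolean/counting model and the paper's exact selection rule are not reproduced; all sizes are illustrative assumptions):

```python
import numpy as np

def greedy_pursuit(A, y, n_iter=2, per_iter=2):
    """Greedy sparse recovery: pick `per_iter` columns of A best matching
    the residual per iteration, then least-squares re-fit on the support."""
    residual, support = y.copy(), []
    for _ in range(n_iter):
        corr = np.abs(A.T @ residual)
        corr[support] = -np.inf                        # do not reselect
        support += list(np.argsort(corr)[-per_iter:])
        x_s, *_ = np.linalg.lstsq(A[:, support], y, rcond=None)
        residual = y - A[:, support] @ x_s
    x = np.zeros(A.shape[1])
    x[support] = x_s
    return x

rng = np.random.default_rng(2)
A = rng.standard_normal((40, 120)) / np.sqrt(40)   # 40 measurements, 120 cells
x_true = np.zeros(120)
x_true[[5, 17, 80, 99]] = 1.0                      # 4 occupied cells
y = A @ x_true + 0.01 * rng.standard_normal(40)
x_hat = greedy_pursuit(A, y)
print(np.nonzero(x_hat > 0.5)[0])                  # typically [5, 17, 80, 99]
```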
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Applications of large empirical spatio-temporal covariance matrix in multipath channels detection.\n \n \n \n \n\n\n \n Pham, G. T.; and Loubaton, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1192-1196, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ApplicationsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362572,\n  author = {G. T. Pham and P. Loubaton},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Applications of large empirical spatio-temporal covariance matrix in multipath channels detection},\n  year = {2015},\n  pages = {1192-1196},\n  abstract = {This paper addresses the detection of a single signal in a multipath propagation channel using a sensors array in the case where the number of sensors M and the number of observations N are large and of the same order of magnitude and where the number of paths P is much smaller than M and N. In contrast with the single path context, the GLRT test cannot be implemented, and we evaluate the behaviour of tests based on the largest eigenvalues of the empirical spatio-temporal covariance matrix. Using a technical result showing that the largest singular values of low rank deterministic pertubation of certain Gaussian block-Hankel large random matrices behave as if the entries of the latter random matrices were independent identically distributed, we obtain a clear understanding of the advantages of the use of the spatial-temporal covariance matrix.},\n  keywords = {antenna arrays;array signal processing;covariance matrices;Gaussian processes;multipath channels;radiowave propagation;signal detection;singular value decomposition;multiantenna detection;Gaussian block-Hankel large random matrices;low rank deterministic perturbation;singular values;sensors array;single signal detection;multipath propagation channel detection;empirical spatio-temporal covariance matrix;Covariance matrices;Eigenvalues and eigenfunctions;Yttrium;Arrays;Context;Chlorine;Europe},\n  doi = {10.1109/EUSIPCO.2015.7362572},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104545.pdf},\n}\n\n
\n
\n\n\n
\n This paper addresses the detection of a single signal in a multipath propagation channel using a sensor array in the case where the number of sensors M and the number of observations N are large and of the same order of magnitude, and where the number of paths P is much smaller than M and N. In contrast with the single-path context, the GLRT test cannot be implemented, and we evaluate the behaviour of tests based on the largest eigenvalues of the empirical spatio-temporal covariance matrix. Using a technical result showing that the largest singular values of low-rank deterministic perturbations of certain Gaussian block-Hankel large random matrices behave as if the entries of the latter random matrices were independent and identically distributed, we obtain a clear understanding of the advantages of using the spatio-temporal covariance matrix.\n
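A rough numerical illustration of the test statistic: stack L consecutive snapshots into tall vectors, form their empirical covariance, and compare its largest eigenvalue under noise-only and signal-present data. A minimal sketch (the stacking below is one plausible spatio-temporal construction, and the steering vector and source are illustrative assumptions; the paper's random-matrix threshold analysis is not reproduced):

```python
import numpy as np

def spatio_temporal_cov(Y, L):
    """Stack L consecutive snapshots into tall vectors and form their
    empirical covariance (one plausible spatio-temporal construction)."""
    M, N = Y.shape
    Z = np.vstack([Y[:, l:N - L + 1 + l] for l in range(L)])  # (M*L, N-L+1)
    return Z @ Z.conj().T / Z.shape[1]

rng = np.random.default_rng(3)
M, N, L = 8, 200, 4
noise = (rng.standard_normal((M, N)) + 1j * rng.standard_normal((M, N))) / np.sqrt(2)
a = np.exp(2j * np.pi * 0.2 * np.arange(M))[:, None]   # illustrative steering vector
s = np.exp(2j * np.pi * 0.05 * np.arange(N))[None, :]  # narrowband source
for Y, label in [(noise, "noise only"), (0.5 * a * s + noise, "signal present")]:
    lam_max = np.linalg.eigvalsh(spatio_temporal_cov(Y, L))[-1]
    print(label, round(float(lam_max.real), 2))  # largest eigenvalue rises with a signal
```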
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improvement of robustness to change of positive elements in boolean compressive sensing.\n \n \n \n \n\n\n \n Kawaguchi, Y.; Osa, T.; Nagano, H.; and Togami, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1197-1201, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ImprovementPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362573,\n  author = {Y. Kawaguchi and T. Osa and H. Nagano and M. Togami},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Improvement of robustness to change of positive elements in boolean compressive sensing},\n  year = {2015},\n  pages = {1197-1201},\n  abstract = {A new boolean compressive sensing method for solving the group-testing problem is proposed. The conventional method has the problem that the estimation performance is degraded in the case that positive elements change in the middle of tests because the results of the tests before a change-point are inconsistent with those of the tests after the change-point. To solve the problem, the proposed method detects the latest change-point of positive elements, and it finds positive elements by using only the results of the tests after the change-point. To detect the change-point, the proposed method makes use of the fact that the distribution of the results depends on the number of positive elements. Experimental simulation indicates that the proposed method outperforms the conventional method on the condition that positive elements change in the middle of tests.},\n  keywords = {Boolean algebra;compressed sensing;Boolean compressive sensing;group-testing problem;estimation performance;change-point detection;Testing;Compressed sensing;Estimation;Minimization;Mathematical model;Noise measurement;Europe;group testing;compressive sensing;change-point detection},\n  doi = {10.1109/EUSIPCO.2015.7362573},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104315.pdf},\n}\n\n
\n
\n\n\n
\n A new Boolean compressive sensing method for solving the group-testing problem is proposed. The conventional method suffers degraded estimation performance when the positive elements change in the middle of the tests, because the results of the tests before a change-point are inconsistent with those after it. To solve this problem, the proposed method detects the latest change-point of the positive elements and finds the positive elements using only the results of the tests after the change-point. To detect the change-point, the proposed method exploits the fact that the distribution of the results depends on the number of positive elements. Simulation experiments indicate that the proposed method outperforms the conventional method when positive elements change in the middle of the tests.\n
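The change-point idea can be illustrated generically: pooled test outcomes are Bernoulli with a rate that depends on how many elements are positive, so a change in the positive set appears as a change in that rate. A minimal maximum-likelihood split-point sketch for a Bernoulli sequence (an illustration of the principle only, not the paper's detector; rates and the change time are assumptions):

```python
import numpy as np

def bernoulli_loglik(y):
    """Log-likelihood of i.i.d. Bernoulli data at its ML rate."""
    n, k = len(y), int(y.sum())
    if k in (0, n):
        return 0.0
    p = k / n
    return k * np.log(p) + (n - k) * np.log(1 - p)

def detect_changepoint(y):
    """Pick the split maximizing the two-segment Bernoulli likelihood."""
    splits = range(1, len(y))
    lls = [bernoulli_loglik(y[:t]) + bernoulli_loglik(y[t:]) for t in splits]
    return 1 + int(np.argmax(lls))

rng = np.random.default_rng(4)
# Outcome rate jumps when the set of positive elements changes at t = 60:
y = np.concatenate([rng.random(60) < 0.2, rng.random(40) < 0.6]).astype(int)
print(detect_changepoint(y))   # ≈ 60; only tests after this point are kept
```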
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n TDE sign based homing algorithm for sound source tracking using a Y-shaped microphone array.\n \n \n \n \n\n\n \n Sreejith, T. M.; Joshin, P. K.; Harshavardhan, S.; and Sreenivas, T. V.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1202-1206, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"TDEPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362574,\n  author = {T. M. Sreejith and P. K. Joshin and S. Harshavardhan and T. V. Sreenivas},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {TDE sign based homing algorithm for sound source tracking using a Y-shaped microphone array},\n  year = {2015},\n  pages = {1202-1206},\n  abstract = {Sound source localization, tracking and homing in play an important role in human-robot interaction. The frequent tracking of a moving acoustic source is computationally expensive for any real time application. In this paper, we propose a simple algorithm based on the sign of time delay of arrival estimation (STDE). Also a special Y-array microphone is designed which is well suited for real time tracking and homing in application. Compared to the other conventional planar microphone arrays, Y-array has the unique property of proximity detection of the acoustic source. The STDE algorithm along with the Y-array is used to estimate the gross source region successively for real time robotic homing in applications. Experiments conducted in ane-choic and varechoic enclosures show that the proposed algorithm works accurately in low and moderate reverb conditions of upto RT60<;800ms.},\n  keywords = {acoustic signal processing;array signal processing;microphone arrays;TDE sign based homing algorithm;sound source tracking;Y-shaped microphone array;sound source localization;human-robot interaction;acoustic source frequent tracking;time delay of arrival estimation;Y-array microphone;acoustic source proximity detection;Microphones;Robots;Arrays;Signal processing algorithms;Acoustics;Estimation;Europe;real-time homing in;STDE;gross source region;voronoi region},\n  doi = {10.1109/EUSIPCO.2015.7362574},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103731.pdf},\n}\n\n
\n
\n\n\n
\n Sound source localization, tracking, and homing play an important role in human-robot interaction. Frequent tracking of a moving acoustic source is computationally expensive for any real-time application. In this paper, we propose a simple algorithm based on the sign of the time delay of arrival estimate (STDE). A special Y-array microphone configuration is also designed which is well suited for real-time tracking and homing applications. Compared to other conventional planar microphone arrays, the Y-array has the unique property of proximity detection of the acoustic source. The STDE algorithm together with the Y-array is used to successively estimate the gross source region for real-time robotic homing applications. Experiments conducted in anechoic and varechoic enclosures show that the proposed algorithm works accurately in low and moderate reverberation conditions of up to RT60 < 800 ms.\n
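The key primitive is only the sign of the inter-microphone delay: which side of the pair the cross-correlation peak falls on is enough to choose a homing direction, without precise delay estimation. A minimal sign-of-TDOA sketch for one microphone pair (the Y-array geometry and Voronoi-region logic are not modeled; the delay and noise level are illustrative assumptions):

```python
import numpy as np

def tdoa_sign(x1, x2, max_lag):
    """Sign of the delay of x2 relative to x1 from the cross-correlation
    peak: +1 means the source is closer to mic 1, -1 closer to mic 2."""
    lags = np.arange(-max_lag, max_lag + 1)
    cc = [np.dot(x1[max_lag:-max_lag], np.roll(x2, -l)[max_lag:-max_lag])
          for l in lags]
    return int(np.sign(lags[int(np.argmax(cc))]))

rng = np.random.default_rng(5)
s = rng.standard_normal(4000)
delay = 7                                   # source reaches mic 2 seven samples later
x1 = s + 0.05 * rng.standard_normal(4000)
x2 = np.roll(s, delay) + 0.05 * rng.standard_normal(4000)
print(tdoa_sign(x1, x2, max_lag=20))        # +1: home in toward microphone 1
```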
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast implementation of iterative adaptive approach for wideband unambiguous radar detection.\n \n \n \n \n\n\n \n Petrov, N.; and Le Chevalier, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1207-1211, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"FastPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362575,\n  author = {N. Petrov and F. {Le Chevalier}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Fast implementation of iterative adaptive approach for wideband unambiguous radar detection},\n  year = {2015},\n  pages = {1207-1211},\n  abstract = {Wideband radars are sensors of new generation used for target detection and classification. Detection of moving targets with wideband radar faces range migration phenomenon which is used to resolve velocity ambiguities in low pulse repetition frequency mode. The resolution is equivalent to a bi-dimensional spectrum estimation problem with non-uniform sampling, while the ability to resolve velocity ambiguity depends on spectral resolution of the method used. Recently Iterative Adaptive Approach (IAA) was shown an attractive solution of this problem. Nevertheless straightforward implementation of IAA for wideband signal suffers from high computational requirements. In this paper fast implementation of IAA for wideband data is proposed and studied with numerical simulations. Proposed solution decreases computational cost by an order of magnitude for realistic data sizes.},\n  keywords = {numerical analysis;object detection;radar detection;wideband unambiguous radar detection;iterative adaptive approach;target classification;moving target detection;range migration phenomenon;bi-dimensional spectrum estimation problem;nonuniform sampling;wideband signal;numerical simulations;Wideband;Covariance matrices;Interpolation;Radar detection;Indexes;Doppler effect;Iterative Adaptive Approach (IAA);wideband radar;target detection;target migration},\n  doi = {10.1109/EUSIPCO.2015.7362575},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102633.pdf},\n}\n\n
\n
\n\n\n
\n Wideband radars are a new generation of sensors used for target detection and classification. Detection of moving targets with wideband radar faces the range migration phenomenon, which can be exploited to resolve velocity ambiguities in low pulse repetition frequency mode. The resolution is equivalent to a bi-dimensional spectrum estimation problem with non-uniform sampling, while the ability to resolve velocity ambiguity depends on the spectral resolution of the method used. Recently the Iterative Adaptive Approach (IAA) was shown to be an attractive solution to this problem. Nevertheless, a straightforward implementation of IAA for wideband signals suffers from high computational requirements. In this paper a fast implementation of IAA for wideband data is proposed and studied with numerical simulations. The proposed solution decreases the computational cost by an order of magnitude for realistic data sizes.\n
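IAA alternates between a power estimate on a frequency grid and the covariance matrix that estimate implies. A minimal 1-D, single-snapshot IAA sketch (the textbook iteration, not the paper's fast wideband variant; grid size, frequencies, and noise are illustrative assumptions):

```python
import numpy as np

def iaa(y, grid, n_iter=10):
    """Single-snapshot IAA on a grid of normalized frequencies."""
    N, K = len(y), len(grid)
    A = np.exp(2j * np.pi * np.outer(np.arange(N), grid))  # steering matrix
    p = np.abs(A.conj().T @ y / N) ** 2                    # periodogram init
    for _ in range(n_iter):
        R = (A * p) @ A.conj().T + 1e-6 * np.eye(N)        # R = A diag(p) A^H
        Ri_y = np.linalg.solve(R, y)
        Ri_A = np.linalg.solve(R, A)
        num = A.conj().T @ Ri_y                            # a_k^H R^-1 y
        den = np.real(np.sum(A.conj() * Ri_A, axis=0))     # a_k^H R^-1 a_k
        p = np.abs(num / den) ** 2
    return p

rng = np.random.default_rng(6)
n = np.arange(32)
y = (np.exp(2j * np.pi * 0.11 * n) + 0.5 * np.exp(2j * np.pi * 0.23 * n)
     + 0.05 * rng.standard_normal(32))
grid = np.linspace(0.0, 0.5, 501)
p = iaa(y, grid)
print(round(float(grid[int(np.argmax(p))]), 3))  # ≈ 0.11; weaker peak near 0.23
```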
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Compensating power amplifier distortion in cognitive radio systems with adaptive interacting multiple model.\n \n \n \n\n\n \n Ben Mabrouk, M.; Grivel, E.; Magnant, C.; Ferré, G.; and Deltimple, N.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1212-1216, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362576,\n  author = {M. {Ben Mabrouk} and E. Grivel and C. Magnant and G. Ferré and N. Deltimple},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Compensating power amplifier distortion in cognitive radio systems with adaptive interacting multiple model},\n  year = {2015},\n  pages = {1212-1216},\n  abstract = {This work aims at improving the power amplifier (PA) efficiency in uplink OFDM-based cognitive radio (CR) communications. Unlike the traditional approaches, we suggest transmitting a non-linearily ampliied signal without any il-tering and addressing the OFDM sample estimation from the distorted signal at the receiver. The proposed post-distortion and detection technique is based on a Volterra model for the PA and the channel. As the transmission can switch from one sub-band to another, the CR-PA behavior varies over time and the Volterra kernels can be constant or suddenly change. Therefore, an interactive multiple model (IMM) combining extended Kalman filters is considered. The transition probability matrix, which plays a key role in the IMM, is also sequentially estimated. The resulting uplink system has various advantages: it learns from the observations and a part of the computational load is exported to the receiver, which is not battery driven unlike the mobile terminal.},\n  keywords = {cognitive radio;distortion;Kalman filters;nonlinear filters;power amplifiers;Volterra series;power amplifier distortion compensation;cognitive radio;adaptive interacting multiple model;OFDM;nonlinearily ampliied signal;postdistortion technique;detection technique;Volterra model;interactive multiple model;extended Kalman filter;transition probability matrix;Estimation;OFDM;Kernel;Signal processing algorithms;Distortion;Receivers;Europe;Power amplifier;digital post/pre-distortion;cognitive radio;Volterra modeling;interacting multiple model;transition probability matrix estimation},\n  doi = {10.1109/EUSIPCO.2015.7362576},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n This work aims at improving the power amplifier (PA) efficiency in uplink OFDM-based cognitive radio (CR) communications. Unlike traditional approaches, we suggest transmitting a non-linearly amplified signal without any filtering and addressing the OFDM sample estimation from the distorted signal at the receiver. The proposed post-distortion and detection technique is based on a Volterra model for the PA and the channel. As the transmission can switch from one sub-band to another, the CR-PA behavior varies over time and the Volterra kernels can be constant or change suddenly. Therefore, an interacting multiple model (IMM) combining extended Kalman filters is considered. The transition probability matrix, which plays a key role in the IMM, is also sequentially estimated. The resulting uplink system has various advantages: it learns from the observations, and part of the computational load is exported to the receiver, which, unlike the mobile terminal, is not battery driven.\n
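Volterra PA models are commonly restricted to a memory polynomial, whose kernels can then be identified by plain least squares. A minimal memory-polynomial identification sketch (a stand-in for the full Volterra model; the IMM/EKF tracking of time-varying kernels is not shown, and the synthetic PA and orders are illustrative assumptions):

```python
import numpy as np

def mp_basis(x, K=3, M=2):
    """Memory-polynomial regressors x[n-m] * |x[n-m]|^(k-1)
    for odd orders k = 1, 3, ..., K and memory lags m = 0..M."""
    cols = []
    for m in range(M + 1):
        xm = np.roll(x, m)
        xm[:m] = 0
        for k in range(1, K + 1, 2):
            cols.append(xm * np.abs(xm) ** (k - 1))
    return np.column_stack(cols)

rng = np.random.default_rng(7)
x = (rng.standard_normal(4000) + 1j * rng.standard_normal(4000)) / np.sqrt(2)
# Synthetic mildly nonlinear PA with one tap of memory (assumed ground truth;
# the wraparound sample at n = 0 is negligible over 4000 samples):
y = x - 0.1 * x * np.abs(x) ** 2 + 0.05 * np.roll(x, 1)

Phi = mp_basis(x)
h, *_ = np.linalg.lstsq(Phi, y, rcond=None)   # identified kernels
print(np.round(h, 3))                         # ≈ [1, -0.1, 0.05, 0, 0, 0]
```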
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Cognitive radio networks based on opportunistic beamforming with quantized feedback.\n \n \n \n \n\n\n \n Massaoudi, A.; Sellami, N.; and Siala, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1217-1221, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CognitivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362577,\n  author = {A. Massaoudi and N. Sellami and M. Siala},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Cognitive radio networks based on opportunistic beamforming with quantized feedback},\n  year = {2015},\n  pages = {1217-1221},\n  abstract = {In this paper, we consider an opportunistic beamforming scheduling scheme of secondary users (SUs) which can share the spectrum with a primary user (PU) in an underlay cognitive radio network. In the scheduling process, the cognitive base station (CBS) having multi-antennas, generates orthogonal beams which insure the minimum interference to the PU. Then, each SU feeds back its maximum signal to interference and noise ratio (SINR) and the corresponding beam index to the CBS. The CBS selects the users having the largest SINRs for transmission. The aim of our work is to study the effect of SINR feedback quantization on the throughput of the secondary system. To do this, we derive an accurate statistical characterization of ordered beams SINR and then we derive the closed-form expression of the system throughput with SINR feedback quantization based on Lloyd-Max algorithm.},\n  keywords = {array signal processing;cognitive radio;interference (signal);quantisation (signal);scheduling;Lloyd-Max algorithm;closed-form expression;SINR;signal to interference and noise ratio;orthogonal beams;multiantennas;cognitive base station;scheduling process;primary user;secondary users;opportunistic beamforming scheduling;quantized feedback;cognitive radio networks;Interference;Signal to noise ratio;Quantization (signal);Throughput;Cognitive radio;Array signal processing;Indexes;Cognitive radio;opportunistic beamforming;feedback quantization;Lloyd-Max quantizer},\n  doi = {10.1109/EUSIPCO.2015.7362577},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104495.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we consider an opportunistic beamforming scheduling scheme for secondary users (SUs) which can share the spectrum with a primary user (PU) in an underlay cognitive radio network. In the scheduling process, the cognitive base station (CBS), having multiple antennas, generates orthogonal beams which ensure minimum interference to the PU. Then, each SU feeds back its maximum signal-to-interference-and-noise ratio (SINR) and the corresponding beam index to the CBS. The CBS selects the users having the largest SINRs for transmission. The aim of our work is to study the effect of SINR feedback quantization on the throughput of the secondary system. To do this, we derive an accurate statistical characterization of the ordered beam SINRs and then derive a closed-form expression for the system throughput with SINR feedback quantization based on the Lloyd-Max algorithm.\n
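The feedback quantizer is designed with the Lloyd-Max algorithm, which alternates nearest-level partitioning with conditional-mean codebook updates. A minimal Lloyd-Max sketch trained on an empirical sample (the gamma training distribution is an illustrative stand-in for the ordered-beam SINR law derived in the paper):

```python
import numpy as np

def lloyd_max(samples, n_levels=4, n_iter=50):
    """Lloyd-Max scalar quantizer: alternate nearest-level assignment
    (optimal thresholds) with conditional-mean codebook updates."""
    levels = np.quantile(samples, np.linspace(0.1, 0.9, n_levels))  # init
    for _ in range(n_iter):
        edges = (levels[:-1] + levels[1:]) / 2        # midpoint thresholds
        idx = np.digitize(samples, edges)             # partition cells
        levels = np.array([samples[idx == j].mean() if np.any(idx == j)
                           else levels[j] for j in range(n_levels)])
    return levels, edges

rng = np.random.default_rng(8)
sinr = rng.gamma(shape=3.0, scale=2.0, size=20000)    # stand-in SINR sample
levels, edges = lloyd_max(sinr)
print(np.round(levels, 2), np.round(edges, 2))        # 2-bit feedback codebook
```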
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Low-feedback cooperative opportunistic transmission for dynamic licensed shared access.\n \n \n \n \n\n\n \n Ntougias, K.; Taramas, N.; and Papadias, C. B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1222-1226, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Low-feedbackPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362578,\n  author = {K. Ntougias and N. Taramas and C. B. Papadias},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Low-feedback cooperative opportunistic transmission for dynamic licensed shared access},\n  year = {2015},\n  pages = {1222-1226},\n  abstract = {In order to meet the exponentially growing capacity demands of future mobile radio communication systems, the synergy of spectrum sharing methods, multi-antenna transmission schemes, small-cell offloading, and cooperative communication techniques is suggested. In this context, the mitigation of harmful interference, the provision of quality of service (QoS) guarantees, and the minimization of backhaul and channel state information (CSI) overhead are key challenges that have to be addressed. In this paper, we study the performance of a Licensed Shared Access (LSA) system comprised of a macro-cell sector (incumbent operator) and three partially overlapping small cells (licensee operator) placed within that sector. The small cells utilize a new low-feedback cooperative opportunistic beamforming (OBF) with proportional fair scheduling (PFS) transmission scheme to ensure that the proposed system is able to reach the mentioned goals above. Simulation results show that this system attains a substantial fraction of the available sum-rate capacity with minimal feedback.},\n  keywords = {cooperative communication;feedback;mobile radio;quality of service;telecommunication channels;telecommunication scheduling;low-feedback cooperative opportunistic transmission;dynamic licensed shared access;mobile radio communication systems;spectrum sharing methods;multiantenna transmission schemes;cooperative communication techniques;interference;quality of service;QoS;channel state information;CSI;Licensed Shared Access;operative opportunistic beamforming;proportional fair scheduling;Interference;Signal to noise ratio;Quality of service;Mobile communication;Array signal processing;Europe;Licensed Shared Access (LSA);Quality of Service (QoS);Opportunistic Beamforming (OBF);Proportional Fair Scheduling (PFS);Channel State Information (CSI)},\n  doi = {10.1109/EUSIPCO.2015.7362578},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105117.pdf},\n}\n\n
\n
\n\n\n
\n In order to meet the exponentially growing capacity demands of future mobile radio communication systems, the synergy of spectrum sharing methods, multi-antenna transmission schemes, small-cell offloading, and cooperative communication techniques is suggested. In this context, the mitigation of harmful interference, the provision of quality of service (QoS) guarantees, and the minimization of backhaul and channel state information (CSI) overhead are key challenges that have to be addressed. In this paper, we study the performance of a Licensed Shared Access (LSA) system comprising a macro-cell sector (incumbent operator) and three partially overlapping small cells (licensee operator) placed within that sector. The small cells utilize a new low-feedback cooperative opportunistic beamforming (OBF) with proportional fair scheduling (PFS) transmission scheme to ensure that the proposed system is able to reach the goals mentioned above. Simulation results show that this system attains a substantial fraction of the available sum-rate capacity with minimal feedback.\n
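Proportional fair scheduling serves, in each slot, the user maximizing the ratio of instantaneous rate to exponentially averaged throughput. A minimal PFS sketch (the beamforming and LSA layers are not modeled; the rate distributions and averaging constant are illustrative assumptions):

```python
import numpy as np

rng = np.random.default_rng(9)
n_users, n_slots, beta = 4, 10000, 0.01
mean_rate = np.array([1.0, 2.0, 4.0, 8.0])   # heterogeneous average channels
avg = np.full(n_users, 1e-3)                 # EWMA throughput per user
served = np.zeros(n_users)

for _ in range(n_slots):
    r = rng.exponential(mean_rate)           # instantaneous rates this slot
    u = int(np.argmax(r / avg))              # proportional fair metric
    served[u] += r[u]
    avg *= 1 - beta                          # decay everyone ...
    avg[u] += beta * r[u]                    # ... and credit the served user

print(np.round(served / n_slots, 2))         # per-user throughput share
```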
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An enhanced spectrum sensing algorithm with maximum ratio combination of spectral correlation.\n \n \n \n \n\n\n \n Thanh Nguyen, T.; Kreul, T.; and Kaiser, T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1227-1230, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362579,\n  author = {T. {Thanh Nguyen} and T. Kreul and T. Kaiser},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An enhanced spectrum sensing algorithm with maximum ratio combination of spectral correlation},\n  year = {2015},\n  pages = {1227-1230},\n  abstract = {In cognitive radio networks, the task of spectrum sensing is required to be reliable at low signal-to-noise ratios (SNRs). Spectral correlation is an effective approach to satisfy the requirement. The algorithms based on statistic spectral correlation profiles are a good method as shown in some previous works. In this paper, we propose an algorithm with maximum ratio combination for the profiles to enhance the method. We construct a formula of statistic test and describe an implementation for our algorithm in practice. Extensive simulations are carried out to verify the performance of algorithms. As a result, the proposed algorithm outperforms the existing algorithms with a neglectful cost of additional complexity.},\n  keywords = {cognitive radio;radio spectrum management;enhanced spectrum sensing algorithm;maximum ratio combination;spectral correlation;cognitive radio networks;low signal-to-noise ratios;Signal processing algorithms;Correlation;Sensors;Binary phase shift keying;Complexity theory;Performance gain},\n  doi = {10.1109/EUSIPCO.2015.7362579},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570088285.pdf},\n}\n\n
\n
\n\n\n
\n In cognitive radio networks, spectrum sensing is required to be reliable at low signal-to-noise ratios (SNRs). Spectral correlation is an effective approach to satisfying this requirement, and algorithms based on statistical spectral correlation profiles have been shown to work well in previous works. In this paper, we propose an algorithm with maximum ratio combination of the profiles to enhance the method. We construct a test statistic and describe a practical implementation of our algorithm. Extensive simulations are carried out to verify the performance of the algorithms. As a result, the proposed algorithm outperforms the existing algorithms at a negligible cost in additional complexity.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An efficient policy for D2D communications and energy harvesting in cognitive radios: Go Bayesian!.\n \n \n \n \n\n\n \n Darak, S. J.; Zhang, H.; Palicot, J.; and Moy, C.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1231-1235, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362580,\n  author = {S. J. Darak and H. Zhang and J. Palicot and C. Moy},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An efficient policy for D2D communications and energy harvesting in cognitive radios: Go Bayesian!},\n  year = {2015},\n  pages = {1231-1235},\n  abstract = {Recently, there has been a surge of interests in paradigms such as device-to-device (D2D) communications and radio frequency energy harvesting (RFEH) to improve the spectrum as well as energy efficiencies of next-generation decentralized cognitive radio networks. However, little attention has been paid to the dual but competing task of subband selection of any desired bandwidth in D2D mode (i.e., opportunistic vacant spectrum access) and RFEH mode as well as need to minimize the subband switching cost (SSC) for an efficient implementation. Taking these factors into account, a new D2D-RFEH policy is proposed. It consists of: 1)Bayesian approach based Tunable Thompson Sampling (TTS) algorithm to learn subband statistics, 2) Subband access scheme employing TTS algorithm for minimizing collisions among the secondary users, and 3) Mode selection scheme. The simulation results, complexity and SSC analysis validate the superiority of the proposed policy over the policies employing frequentist approach based learning algorithms.},\n  keywords = {Bayes methods;cognitive radio;energy harvesting;mobile communication;next generation networks;radio spectrum management;telecommunication power management;device-to-device communications;D2D communications;cognitive radios;radio frequency energy harvesting;RFEH;next-generation networks;subband switching cost;SSC;Bayesian approach;tunable Thompson sampling algorithm;TTS algorithm;Signal processing algorithms;Bandwidth;Algorithm design and analysis;Radio frequency;Bayes methods;Europe;Signal processing;Device to device communications;Radio frequency energy harvesting;Thompson Sampling},\n  doi = {10.1109/EUSIPCO.2015.7362580},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103051.pdf},\n}\n\n
\n
\n\n\n
\n Recently, there has been a surge of interest in paradigms such as device-to-device (D2D) communications and radio frequency energy harvesting (RFEH) to improve the spectrum and energy efficiencies of next-generation decentralized cognitive radio networks. However, little attention has been paid to the dual but competing task of subband selection of any desired bandwidth in D2D mode (i.e., opportunistic vacant spectrum access) and RFEH mode, as well as the need to minimize the subband switching cost (SSC) for an efficient implementation. Taking these factors into account, a new D2D-RFEH policy is proposed. It consists of: 1) a Bayesian Tunable Thompson Sampling (TTS) algorithm to learn subband statistics, 2) a subband access scheme employing the TTS algorithm to minimize collisions among the secondary users, and 3) a mode selection scheme. The simulation results, complexity analysis, and SSC analysis validate the superiority of the proposed policy over policies employing frequentist learning algorithms.\n
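Thompson sampling keeps a Beta posterior per subband and plays, each slot, the subband with the largest posterior draw. A minimal Bernoulli Thompson sampling sketch (the paper's tunable variant, switching-cost handling, and mode selection are not reproduced; vacancy probabilities are illustrative assumptions):

```python
import numpy as np

rng = np.random.default_rng(10)
p_free = np.array([0.2, 0.5, 0.8, 0.35])   # subband vacancy probabilities
alpha = np.ones(4)                         # Beta posterior parameters ...
beta = np.ones(4)                          # ... starting from Beta(1, 1)
picks = np.zeros(4, dtype=int)

for _ in range(5000):
    theta = rng.beta(alpha, beta)          # one posterior draw per subband
    k = int(np.argmax(theta))              # play the best sampled subband
    reward = rng.random() < p_free[k]      # 1 if the subband was vacant
    alpha[k] += reward
    beta[k] += 1 - reward
    picks[k] += 1

print(picks)   # concentrates on subband 2, the most often vacant one
```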
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The ensemble Kalman filter and its relations to other nonlinear filters.\n \n \n \n \n\n\n \n Roth, M.; Fritsche, C.; Hendeby, G.; and Gustafsson, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1236-1240, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362581,\n  author = {M. Roth and C. Fritsche and G. Hendeby and F. Gustafsson},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {The ensemble Kalman filter and its relations to other nonlinear filters},\n  year = {2015},\n  pages = {1236-1240},\n  abstract = {The Ensemble Kalman filter (EnKF) is a standard algorithm in oceanography and meteorology, where it has got thousands of citations. It is in these communities appreciated since it scales much better with state dimension n than the standard Kalman filter (KF). In short, the EnKF propagates ensembles with N state realizations instead of mean values and covariance matrices and thereby avoids the computational and storage burden of working on n × n matrices. Perhaps surprising, very little attention has been devoted to the EnKF in the signal processing community. In an attempt to change this, we present the EnKF in a Kalman filtering context. Furthermore, its application to nonlinear problems is compared to sigma point Kalman filters and the particle filter, so as to reveal new insights and improvements for high-dimensional filtering algorithms in general. A simulation example shows the EnKF performance in a space debris tracking application.},\n  keywords = {Kalman filters;nonlinear filters;space debris;tracking;ensemble Kalman filter;nonlinear filter;signal processing;high-dimensional filtering algorithms;space debris tracking;Kalman filters;Signal processing algorithms;Covariance matrices;Europe;Linear systems;Nonlinear systems;Kalman filter;ensemble Kalman filter;sigma point Kalman filter;UKF;particle filter},\n  doi = {10.1109/EUSIPCO.2015.7362581},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103581.pdf},\n}\n\n
\n
\n\n\n
\n The Ensemble Kalman filter (EnKF) is a standard algorithm in oceanography and meteorology, where it has received thousands of citations. It is appreciated in these communities because it scales much better with state dimension n than the standard Kalman filter (KF). In short, the EnKF propagates ensembles with N state realizations instead of mean values and covariance matrices, and thereby avoids the computational and storage burden of working on n × n matrices. Perhaps surprisingly, very little attention has been devoted to the EnKF in the signal processing community. In an attempt to change this, we present the EnKF in a Kalman filtering context. Furthermore, its application to nonlinear problems is compared to sigma-point Kalman filters and the particle filter, so as to reveal new insights and improvements for high-dimensional filtering algorithms in general. A simulation example shows the EnKF performance in a space debris tracking application.\n
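The EnKF update fits in a few lines: propagate an ensemble, build cross- and innovation covariances from ensemble statistics, and shift each member toward a perturbed observation. A minimal stochastic-EnKF sketch on a linear-Gaussian toy problem, where a standard KF would agree (all dimensions and noise levels are illustrative assumptions):

```python
import numpy as np

rng = np.random.default_rng(11)
n, m, N = 4, 2, 100                        # state dim, obs dim, ensemble size
F = 0.95 * np.eye(n)                       # linear dynamics (toy)
H = np.eye(m, n)                           # observe the first two states
Q, R = 0.01 * np.eye(n), 0.1 * np.eye(m)

x_true = rng.standard_normal(n)
ens = rng.standard_normal((N, n))          # initial ensemble, rows = members

for _ in range(50):
    x_true = F @ x_true + rng.multivariate_normal(np.zeros(n), Q)
    y = H @ x_true + rng.multivariate_normal(np.zeros(m), R)
    # Forecast: propagate every ensemble member.
    ens = ens @ F.T + rng.multivariate_normal(np.zeros(n), Q, size=N)
    # Analysis: gain from ensemble statistics; no n-by-n covariance needed.
    X = ens - ens.mean(axis=0)
    Yp = ens @ H.T
    Y = Yp - Yp.mean(axis=0)
    K = (X.T @ Y / (N - 1)) @ np.linalg.inv(Y.T @ Y / (N - 1) + R)
    y_pert = y + rng.multivariate_normal(np.zeros(m), R, size=N)
    ens = ens + (y_pert - Yp) @ K.T        # shift members toward the data

print(np.round(ens.mean(axis=0), 2), np.round(x_true, 2))
```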
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Particle filtering for Bayesian parameter estimation in a high dimensional state space model.\n \n \n \n \n\n\n \n Míguez, J.; Crisan, D.; and Mariño, I. P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1241-1245, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ParticlePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362582,\n  author = {J. Míguez and D. Crisan and I. P. Mariño},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Particle filtering for Bayesian parameter estimation in a high dimensional state space model},\n  year = {2015},\n  pages = {1241-1245},\n  abstract = {Researchers in some of the most active fields of science, including, e.g., geophysics or systems biology, have to deal with very-large-scale stochastic dynamic models of real world phenomena for which conventional prediction and estimation methods are not well suited. In this paper, we investigate the application of a novel nested particle filtering scheme for joint Bayesian parameter estimation and tracking of the dynamic variables in a high dimensional state space model-namely a stochastic version of the two-scale Lorenz 96 chaotic system, commonly used as a benchmark model in meteorology and climate science. We provide theoretical guarantees on the algorithm performance, including uniform convergence rates for the approximation of posterior probability density functions of the fixed model parameters.},\n  keywords = {approximation theory;Bayes methods;parameter estimation;particle filtering (numerical methods);probability;Bayesian parameter estimation method;high dimensional state space model;stochastic dynamic model;nested particle filtering scheme;stochastic version;two-scale Lorenz 96 chaotic system;benchmark model;climate science;posterior probability density functions;approximation theory;Mathematical model;Approximation methods;Stochastic processes;Approximation algorithms;Signal processing algorithms;Bayes methods;Computational modeling;Particle filtering;data assimilation;Bayesian parameter estimation;convergence analysis;kernel density estimation},\n  doi = {10.1109/EUSIPCO.2015.7362582},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105013.pdf},\n}\n\n
\n
\n\n\n
\n Researchers in some of the most active fields of science, including, e.g., geophysics or systems biology, have to deal with very-large-scale stochastic dynamic models of real-world phenomena for which conventional prediction and estimation methods are not well suited. In this paper, we investigate the application of a novel nested particle filtering scheme for joint Bayesian parameter estimation and tracking of the dynamic variables in a high dimensional state space model, namely a stochastic version of the two-scale Lorenz 96 chaotic system, commonly used as a benchmark model in meteorology and climate science. We provide theoretical guarantees on the algorithm performance, including uniform convergence rates for the approximation of posterior probability density functions of the fixed model parameters.\n
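The inner layer of such a nested scheme is a standard bootstrap particle filter. A minimal bootstrap PF sketch on a common scalar nonlinear benchmark (the nesting over static parameters and the Lorenz 96 dynamics are not reproduced; the toy model and sizes are illustrative assumptions):

```python
import numpy as np

rng = np.random.default_rng(12)
T, N = 100, 500

def f(x, t):
    """Common scalar nonlinear benchmark dynamics (illustrative stand-in)."""
    return 0.5 * x + 25 * x / (1 + x**2) + 8 * np.cos(1.2 * t)

# Simulate the hidden state and noisy quadratic observations:
x, xs, ys = 0.0, [], []
for t in range(T):
    x = f(x, t) + np.sqrt(10) * rng.standard_normal()
    xs.append(x)
    ys.append(x**2 / 20 + rng.standard_normal())

# Bootstrap particle filter: propagate, weight, estimate, resample.
particles = 2.0 * rng.standard_normal(N)
est = []
for t in range(T):
    particles = f(particles, t) + np.sqrt(10) * rng.standard_normal(N)
    logw = -0.5 * (ys[t] - particles**2 / 20) ** 2     # unit-variance obs noise
    w = np.exp(logw - logw.max())
    w /= w.sum()
    est.append(float(np.dot(w, particles)))
    particles = particles[rng.choice(N, N, p=w)]

print(round(float(np.mean((np.array(est) - np.array(xs)) ** 2)), 2))  # tracking MSE
```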
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sequential Monte Carlo sampling for systems with fractional Gaussian processes.\n \n \n \n \n\n\n \n Urteaga, I.; Bugallo, M. F.; and Djurić, P. M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1246-1250, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SequentialPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362583,\n  author = {I. Urteaga and M. F. Bugallo and P. M. Djurić},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Sequential Monte Carlo sampling for systems with fractional Gaussian processes},\n  year = {2015},\n  pages = {1246-1250},\n  abstract = {In the past decades, Sequential Monte Carlo (SMC) sampling has proven to be a method of choice in many applications where the dynamics of the studied system are described by nonlinear equations and/or non-Gaussian noises. In this paper, we study the application of SMC sampling to nonlinear state-space models where the state is a fractional Gaussian process. These processes are characterized by long-memory properties (i.e., long-range dependence) and are observed in many fields including physics, hydrology and econometrics. We propose an SMC method for tracking the dynamic longmemory latent states, accompanied by a model selection procedure when the Hurst parameter is unknown. We demonstrate the performance of the proposed approach on simulated time-series with nonlinear observations.},\n  keywords = {Gaussian processes;Monte Carlo methods;nonlinear equations;particle filtering (numerical methods);state-space methods;sequential Monte Carlo;SMC method;fractional Gaussian processes;nonlinear equations;nonGaussian noises;nonlinear state-space models;longmemory latent states;Hurst parameter;Gaussian processes;Monte Carlo methods;Europe;Signal processing;Data models;Mathematical model;Sequential Monte Carlo;particle filtering;fractional Gaussian process;state-space models},\n  doi = {10.1109/EUSIPCO.2015.7362583},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105233.pdf},\n}\n\n
\n
\n\n\n
\n In the past decades, Sequential Monte Carlo (SMC) sampling has proven to be a method of choice in many applications where the dynamics of the studied system are described by nonlinear equations and/or non-Gaussian noises. In this paper, we study the application of SMC sampling to nonlinear state-space models where the state is a fractional Gaussian process. These processes are characterized by long-memory properties (i.e., long-range dependence) and are observed in many fields including physics, hydrology and econometrics. We propose an SMC method for tracking the dynamic long-memory latent states, accompanied by a model selection procedure for when the Hurst parameter is unknown. We demonstrate the performance of the proposed approach on simulated time-series with nonlinear observations.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A sequential Monte Carlo approximation of the HISP filter.\n \n \n \n \n\n\n \n Houssineau, J.; Clark, D. E.; and Del Moral, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1251-1255, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362584,\n  author = {J. Houssineau and D. E. Clark and P. {Del Moral}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A sequential Monte Carlo approximation of the HISP filter},\n  year = {2015},\n  pages = {1251-1255},\n  abstract = {A formulation of the hypothesised filter for independent stochastic populations (hisp) is proposed, based on the concept of association measure, which is a measure on the set of observation histories. Using this formulation, a particle approximation is introduced at the level of the association measure for handling the exponential growth in the number of underlying hypotheses. This approximation is combined with a sequential Monte Carlo implementation for the underlying single-object distributions to form a mixed particle association model. Finally, the performance of this approach is compared against a Kalman filter implementation on simulated data based on a finite-resolution sensor.},\n  keywords = {Kalman filters;Monte Carlo methods;sequential Monte Carlo approximation;HISP filter;hypothesised filter;independent stochastic populations;particle approximation;Kalman filter;finite-resolution sensor;Sociology;Statistics;Indexes;Approximation methods;Atmospheric measurements;Particle measurements;Generators;Multi-object filtering;finite-resolution sensor},\n  doi = {10.1109/EUSIPCO.2015.7362584},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105507.pdf},\n}\n\n
\n
\n\n\n
\n A formulation of the hypothesised filter for independent stochastic populations (HISP) is proposed, based on the concept of an association measure, which is a measure on the set of observation histories. Using this formulation, a particle approximation is introduced at the level of the association measure to handle the exponential growth in the number of underlying hypotheses. This approximation is combined with a sequential Monte Carlo implementation of the underlying single-object distributions to form a mixed particle association model. Finally, the performance of this approach is compared against a Kalman filter implementation on simulated data based on a finite-resolution sensor.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Speaker localization and separation using incremental distributed expectation-maximization.\n \n \n \n \n\n\n \n Dorfan, Y.; Cherkassky, D.; and Gannot, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1256-1260, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SpeakerPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362585,\n  author = {Y. Dorfan and D. Cherkassky and S. Gannot},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Speaker localization and separation using incremental distributed expectation-maximization},\n  year = {2015},\n  pages = {1256-1260},\n  abstract = {A network of microphone pairs is utilized for the joint task of localizing and separating multiple concurrent speakers. The recently presented incremental distributed expectation-maximization (IDEM) is addressing the first task, namely detection and localization. Here we extend this algorithm to address the second task, namely blindly separating the speech sources. We show that the proposed algorithm, denoted distributed algorithm for localization and separation (DALAS), is capable of separating speakers in reverberant enclosure without a priori information on their number and locations. In the first stage of the proposed algorithm, the IDEM algorithm is applied for blindly detecting the active sources and to estimate their locations. In the second stage, the location estimates are utilized for selecting the most useful node of microphones for the subsequent separation stage. Separation is finally obtained by utilizing the hidden variables of the IDEM algorithm to construct masks for each source in the relevant node.},\n  keywords = {blind source separation;expectation-maximisation algorithm;microphone arrays;signal detection;speaker recognition;speaker localization;incremental distributed expectation-maximization;microphone pairs;concurrent speakers;speech sources;denoted distributed algorithm;DALAS;reverberant enclosure;IDEM algorithm;subsequent separation stage;Signal processing algorithms;Microphones;Europe;Source separation;Frequency-domain analysis;Speech;Wireless acoustic sensor network;blind source separation;incremental estimate-maximize},\n  doi = {10.1109/EUSIPCO.2015.7362585},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096447.pdf},\n}\n\n
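The final step above turns the EM hidden variables into per-source time-frequency masks. A minimal sketch of that masking idea, assuming a hypothetical posterior array p_src (this is the generic soft-mask recipe, not the DALAS implementation):

import numpy as np

def apply_soft_masks(stft_mix, p_src):
    # stft_mix : complex array (freq, time), mixture STFT at one node
    # p_src    : float array (n_src, freq, time), per-source posterior
    #            probabilities (e.g., EM "hidden variables") that sum
    #            to 1 over sources in every TF bin.
    # Returns one masked STFT per source.
    masks = p_src / np.maximum(p_src.sum(axis=0, keepdims=True), 1e-12)
    return masks * stft_mix[None, :, :]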
Relaxation of rank-1 spatial constraint in overdetermined blind source separation. Kitamura, D.; Ono, N.; Sawada, H.; Kameoka, H.; and Saruwatari, H. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1261-1265, Aug 2015.
@InProceedings{7362586,
  author = {D. Kitamura and N. Ono and H. Sawada and H. Kameoka and H. Saruwatari},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Relaxation of rank-1 spatial constraint in overdetermined blind source separation},
  year = {2015},
  pages = {1261-1265},
  abstract = {In this paper, we propose a new algorithm for overdetermined blind source separation (BSS), which enables us to achieve good separation performance even for signals recorded in a reverberant environment. The proposed algorithm utilizes extra observations (channels) in overdetermined BSS to estimate both direct and reverberant components of each source. This approach can relax the rank-1 spatial constraint, which corresponds to the assumption of a linear time-invariant mixing system. To confirm the efficacy of the proposed algorithm, we apply the relaxation of the rank-1 spatial constraint to conventional BSS techniques. The experimental results show that the proposed algorithm can avoid the degradation of separation performance for reverberant signals in some cases.},
  keywords = {blind source separation;matrix decomposition;rank-1 spatial constraint relaxation;overdetermined blind source separation;BSS;direct component estimation;reverberant component estimation;linear time-invariant mixing system;nonnegative matrix factorization;Signal processing algorithms;Spectrogram;Principal component analysis;Covariance matrices;Blind source separation;Time-frequency analysis;Reverberation;Blind source separation;overdetermined;nonnegative matrix factorization;rank-1 spatial constraint},
  doi = {10.1109/EUSIPCO.2015.7362586},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101079.pdf},
}
Features for speaker localization in multichannel bilateral hearing aids. Thiemann, J.; Doclo, S.; and van de Par, S. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1266-1270, Aug 2015.
@InProceedings{7362587,
  author = {J. Thiemann and S. Doclo and S. {van de Par}},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Features for speaker localization in multichannel bilateral hearing aids},
  year = {2015},
  pages = {1266-1270},
  abstract = {Modern hearing aids often contain multiple microphones to enable the use of spatial filtering techniques for signal enhancement. To steer the spatial filtering algorithm it is necessary to localize sources of interest, which can be intelligently achieved using computational auditory scene analysis (CASA). In this article, we describe a CASA system using a binaural auditory processing model that has been extended to six channels to allow reliable localization in both azimuth and elevation, thus also distinguishing between front and back. The features used to estimate the direction are one level difference and five inter-microphone time differences of arrival (TDOA). Initial experiments are presented that show the localization errors that can be expected with this set of features on a typical multichannel hearing aid in anechoic conditions with diffuse noise.},
  keywords = {hearing aids;spatial filters;speaker recognition;time-of-arrival estimation;diffuse noise;anechoic conditions;multichannel hearing aid;intermicrophone time difference-of-arrival;binaural auditory processing model;CASA system;computational auditory scene analysis;signal enhancement;spatial filtering techniques;multichannel bilateral hearing aids;speaker localization;Azimuth;Microphones;Signal to noise ratio;Computational modeling;Auditory system;Training;Estimation;Computational Auditory Scene Analysis;Localization;Multichannel Hearing Aids},
  doi = {10.1109/EUSIPCO.2015.7362587},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103347.pdf},
}
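Inter-microphone TDOA features of this kind are commonly obtained with generalized cross-correlation; below is a minimal GCC-PHAT estimator in Python. This is a standard method stated here as an assumption, not necessarily the paper's exact front end:

import numpy as np

def gcc_phat_tdoa(x, y, fs, max_tau=None):
    # Estimate the TDOA (in seconds) between 1-D signals x and y
    # using the phase transform (PHAT) weighting.
    n = len(x) + len(y)
    X, Y = np.fft.rfft(x, n), np.fft.rfft(y, n)
    cross = X * np.conj(Y)
    cc = np.fft.irfft(cross / np.maximum(np.abs(cross), 1e-12), n)
    max_shift = n // 2 if max_tau is None else min(int(fs * max_tau), n // 2)
    cc = np.concatenate((cc[-max_shift:], cc[:max_shift + 1]))
    return (np.argmax(np.abs(cc)) - max_shift) / fs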
Multi-room speech activity detection using a distributed microphone network in domestic environments. Giannoulis, P.; Brutti, A.; Matassoni, M.; Abad, A.; Katsamanis, A.; Matos, M.; Potamianos, G.; and Maragos, P. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1271-1275, Aug 2015.
@InProceedings{7362588,
  author = {P. Giannoulis and A. Brutti and M. Matassoni and A. Abad and A. Katsamanis and M. Matos and G. Potamianos and P. Maragos},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Multi-room speech activity detection using a distributed microphone network in domestic environments},
  year = {2015},
  pages = {1271-1275},
  abstract = {Domestic environments are particularly challenging for distant speech recognition: reverberation, background noise and interfering sources, as well as the propagation of acoustic events across adjacent rooms, critically degrade the performance of standard speech processing algorithms. In this application scenario, a crucial task is the detection and localization of speech events generated by users within the various rooms. A specific challenge of multi-room environments is the inter-room interference that negatively affects speech activity detectors. In this paper, we present and compare different solutions for the multi-room speech activity detection task. The combination of a model-based room-independent speech activity detection module with a room-dependent inside/outside classification stage, based on specific features, provides satisfactory performance. The proposed methods are evaluated on a multi-room, multi-channel corpus, where spoken commands and other typical acoustic events occur in different rooms.},
  keywords = {microphones;speech recognition;multiroom speech activity detection;distributed microphone network;domestic environment;speech recognition;reverberation;background noise;acoustic event propagation;standard speech processing algorithm;speech event localization;interroom interference;model-based room-independent speech activity detection module;room-dependent inside-outside classification stage;Speech;Microphones;Smart homes;Reverberation;Signal to noise ratio;Speech recognition;Speech activity detection;smart homes;microphone arrays},
  doi = {10.1109/EUSIPCO.2015.7362588},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103509.pdf},
}
A directional noise suppressor with an adjustable constant beamwidth for multichannel signal enhancement. Sugiyama, A.; and Miyahara, R. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1276-1280, Aug 2015.
@InProceedings{7362589,
  author = {A. Sugiyama and R. Miyahara},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A directional noise suppressor with an adjustable constant beamwidth for multichannel signal enhancement},
  year = {2015},
  pages = {1276-1280},
  abstract = {This paper proposes a directional noise suppressor with an adjustable constant beamwidth for multichannel signal enhancement. A directional gain based on inter-channel phase difference is combined with a spectral gain commonly used in noise suppressors (NS). The beamwidth can be specified as passband edges of the directional gain. In order to implement frequency-independent constant beamwidth, frequency-proportionate band-edge phase differences are determined for the passband. Stereo perception is preserved by weighting stereo input with the common directional and spectral gain. Evaluation with signals recorded by a commercial PC demonstrates that the signal-to-noise ratio improvement and the PESQ score for the enhanced signal are equally improved in two channels by 26.1 dB and 0.2 over a conventional NS. ILD difference between the input and the output is small when the target-signal dominates the input signal.},
  keywords = {array signal processing;directional noise suppressor;adjustable constant beamwidth;multichannel signal enhancement;inter-channel phase difference;noise suppressors;spectral gain;passband edges;frequency-proportionate band-edge phase differences;stereo perception;PESQ score;Speech;Passband;Microphones;Gain;Error analysis;Discrete Fourier transforms;Noise;Multichannel;Beamformer;Noise suppressor;Phase difference;Directional gain;Constant beamwidth},
  doi = {10.1109/EUSIPCO.2015.7362589},
  issn = {2076-1465},
  month = {Aug},
}
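To make the "frequency-proportionate band-edge phase differences" concrete, here is a minimal sketch of a directional gain derived from the inter-channel phase difference of a stereo STFT. The geometry (phase difference 2*pi*f*tau, with tau = d*sin(theta)/c) is standard; the hard threshold and the gain floor are illustrative assumptions, not the paper's parametrisation:

import numpy as np

def directional_gain(stft_l, stft_r, freqs, mic_dist, beam_deg,
                     c=343.0, floor=0.1):
    # stft_l, stft_r : complex arrays (freq, time), left/right STFTs
    # freqs          : bin center frequencies in Hz, shape (freq,)
    # A TF bin is passed when its phase difference lies inside band
    # edges that grow in proportion to frequency (constant beamwidth).
    dphi = np.angle(stft_l * np.conj(stft_r))       # observed phase diff
    tau_edge = mic_dist * np.sin(np.deg2rad(beam_deg / 2)) / c
    edge = 2 * np.pi * freqs[:, None] * tau_edge    # proportional to f
    return np.where(np.abs(dphi) <= edge, 1.0, floor)

Applying the same gain to both channels, as the paper does with its common directional and spectral gain, preserves the inter-channel level difference and hence the stereo image.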
Drum transcription using partially fixed non-negative matrix factorization. Wu, C.; and Lerch, A. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1281-1285, Aug 2015.
@InProceedings{7362590,
  author = {C. Wu and A. Lerch},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Drum transcription using partially fixed non-negative matrix factorization},
  year = {2015},
  pages = {1281-1285},
  abstract = {In this paper, a drum transcription algorithm using partially fixed non-negative matrix factorization is presented. The proposed method allows users to identify percussive events in complex mixtures with a minimal training set. The algorithm decomposes the music signal into two parts: percussive part with pre-defined drum templates and harmonic part with undefined entries. The harmonic part is able to adapt to the music content, allowing the algorithm to work in polyphonic mixtures. Drum event times can be simply picked from the percussive activation matrix with onset detection. The system is efficient and robust even with a minimal training set. The recognition rates for the ENST dataset vary from 56.7 to 78.9% for three percussive instruments extracted from polyphonic music.},
  keywords = {audio signal processing;learning (artificial intelligence);matrix decomposition;music;musical instruments;pattern recognition;partially fixed nonnegative matrix factorization;drum transcription algorithm;percussive events;minimal training set;music signal decomposition;harmonic part;percussive part;music content;polyphonic mixture;Matrix decomposition;Dictionaries;Training;High definition video;Multiple signal classification;Harmonic analysis;Music;NMF;MIR;Drum Transcription;Automatic Music Transcription},
  doi = {10.1109/EUSIPCO.2015.7362590},
  issn = {2076-1465},
  month = {Aug},
}
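A minimal sketch of the partially fixed factorization described above: the drum-template columns of the dictionary are held constant while the harmonic columns and all activations adapt. Euclidean multiplicative updates are assumed here for brevity; the paper's exact cost function and settings may differ:

import numpy as np

def partially_fixed_nmf(V, W_drums, n_harm=10, n_iter=100, rng=None):
    # V       : nonnegative magnitude spectrogram (freq, time)
    # W_drums : pre-learned drum templates (freq, n_drums), kept fixed
    # Only the harmonic dictionary and the activations H are updated.
    rng = rng or np.random.default_rng(0)
    F, T = V.shape
    n_drums = W_drums.shape[1]
    W = np.hstack([W_drums, rng.random((F, n_harm))])
    H = rng.random((n_drums + n_harm, T))
    eps = 1e-12
    for _ in range(n_iter):
        H *= (W.T @ V) / (W.T @ W @ H + eps)         # activation update
        upd = (V @ H.T) / (W @ (H @ H.T) + eps)      # dictionary update
        upd[:, :n_drums] = 1.0                       # freeze drum templates
        W *= upd
    return W, H   # drum events: onset-pick the rows H[:n_drums]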
Timbral modeling for music artist recognition using i-vectors. Eghbal-zadeh, H.; Schedl, M.; and Widmer, G. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1286-1290, Aug 2015.
@InProceedings{7362591,
  author = {H. Eghbal-zadeh and M. Schedl and G. Widmer},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Timbral modeling for music artist recognition using i-vectors},
  year = {2015},
  pages = {1286-1290},
  abstract = {Music artist (i.e., singer) recognition is a challenging task in Music Information Retrieval (MIR). The presence of different musical instruments, the diversity of music genres and singing techniques make the retrieval of artist-relevant information from a song difficult. Many authors tried to address this problem by using complex features or hybrid systems. In this paper, we propose new song-level timbre-related features that are built from frame-level MFCCs via so-called i-vectors. We report artist recognition results with multiple classifiers such as K-nearest neighbor, Discriminant Analysis and Naive Bayes using these new features. Our approach yields considerable improvements and outperforms existing methods. We could achieve an 84.31% accuracy using MFCC features on a 20-class artist recognition task.},
  keywords = {speech recognition;timbral modeling;music artist recognition;music information retrieval;complex features;hybrid systems;song-level timbre-related features;i-vectors;K-nearest neighbor discriminant analysis;Naive Bayes;MFCC features;artist recognition task;Feature extraction;Computational modeling;Mel frequency cepstral coefficient;Music;Training;Europe;Signal processing;music artist recognition;timbral modeling;song-level features;i-vectors;mfcc},
  doi = {10.1109/EUSIPCO.2015.7362591},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096365.pdf},
}
Keyword spotting in singing with duration-modeled HMMs. Kruspe, A. M. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1291-1295, Aug 2015.
@InProceedings{7362592,
  author = {A. M. Kruspe},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Keyword spotting in singing with duration-modeled HMMs},
  year = {2015},
  pages = {1291-1295},
  abstract = {Keyword spotting in speech is a very well-researched problem, but there are almost no approaches for singing. Most speech-based approaches cannot be applied easily to singing because the phoneme durations in singing vary a lot more than in speech, especially the vowel durations. To represent expected phoneme durations, several duration modeling techniques have been developed over the years in the field of ASR. To the best of our knowledge, these approaches have not been used for keyword spotting yet. In this paper, we present a new approach for keyword spotting in singing. We first extract various features (MFCC, TRAP, PLP, RASTA-PLP) and generate phoneme posteriograms from these features. We then perform keyword spotting on these posteriograms using keyword-filler HMMs and test two different duration modeling techniques on these HMMs: Explicit-duration modeling and Post-processor duration modeling. We evaluate our approach on a small singing data set without accompaniment.},
  keywords = {hidden Markov models;speech processing;keyword spotting;singing;duration-modeled HMM;phoneme durations;explicit-duration modeling;post-processor duration modeling;Hidden Markov models;Speech;Viterbi algorithm;Limiting;Feature extraction;Computational modeling;Europe;Keyword spotting;Spoken term detection;Singing;Explicit-Duration HMM;Keyword-Filler HMM},
  doi = {10.1109/EUSIPCO.2015.7362592},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097447.pdf},
}
Music boundary detection using neural networks on spectrograms and self-similarity lag matrices. Grill, T.; and Schluter, J. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1296-1300, Aug 2015.
@InProceedings{7362593,
  author = {T. Grill and J. Schluter},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Music boundary detection using neural networks on spectrograms and self-similarity lag matrices},
  year = {2015},
  pages = {1296-1300},
  abstract = {The first step of understanding the structure of a music piece is to segment it into formative parts. A recently successful method for finding segment boundaries employs a Convolutional Neural Network (CNN) trained on spectrogram excerpts. While setting a new state of the art, it often misses boundaries defined by non-local musical cues, such as segment repetitions. To account for this, we propose a refined variant of self-similarity lag matrices representing long-term relationships. We then demonstrate different ways of fusing this feature with spectrogram excerpts within a CNN, resulting in a boundary recognition performance superior to the previous state of the art. We assume that the integration of more features in a similar fashion would improve the performance even further.},
  keywords = {acoustic signal detection;music;neural nets;music boundary detection;self-similarity lag matrices;music piece segmentation;segment boundaries;convolutional neural network;spectrogram excerpts;nonlocal musical cues;segment repetitions;boundary recognition performance;Spectrogram;Context;Convolution;Neural networks;Europe;Kernel;Music information retrieval;Acoustic signal processing;Feedforward neural networks},
  doi = {10.1109/EUSIPCO.2015.7362593},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104539.pdf},
}
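A plain self-similarity lag matrix, the starting point of the refined variant proposed above, is easy to compute from a feature sequence. A minimal sketch using cosine similarity (the paper's specific refinements are not reproduced here):

import numpy as np

def self_similarity_lag_matrix(feats, max_lag):
    # feats : array (time, dim) of per-frame features
    # Entry (t, l) holds the cosine similarity between frame t and
    # frame t - (l+1), exposing long-range repetitions as vertical
    # structures that a CNN can pick up as a second input.
    X = feats / np.maximum(np.linalg.norm(feats, axis=1, keepdims=True), 1e-12)
    T = X.shape[0]
    S = np.zeros((T, max_lag))
    for lag in range(1, max_lag + 1):
        S[lag:, lag - 1] = np.sum(X[lag:] * X[:-lag], axis=1)
    return S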
Polyphonic pitch detection by matching spectral and autocorrelation peaks. Kraft, S.; and Zölzer, U. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1301-1305, Aug 2015.
@InProceedings{7362594,
  author = {S. Kraft and U. Zölzer},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Polyphonic pitch detection by matching spectral and autocorrelation peaks},
  year = {2015},
  pages = {1301-1305},
  abstract = {This paper describes a polyphonic multi-pitch detector which selects peaks as pitch candidates in both the spectrum and a multi-channel generalised autocorrelation. A final pitch is detected if a peak in the spectrum has a corresponding peak within the same semitone range in at least one of the autocorrelation channels. The autocorrelation is calculated in octave bands and all pre-processing steps like filtering, whitening and non-linear distortion are applied exclusively in the frequency domain for maximum flexibility in the parametrisation and high computational efficiency. An evaluation with common data sets yields good detection accuracies comparable to state of the art algorithms.},
  keywords = {audio signal processing;correlation methods;filtering theory;music;signal detection;polyphonic pitch detection;spectral-autocorrelation peak matching;polyphonic multipitch detector;multichannel generalised autocorrelation;autocorrelation channel;octave band;signal filtering;signal whitening;nonlinear distortion;Correlation;Indexes;Harmonic analysis;Signal processing algorithms;Europe;Algorithm design and analysis;polyphonic pitch detection;music information retrieval;autocorrelation;spectral processing},
  doi = {10.1109/EUSIPCO.2015.7362594},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096781.pdf},
}
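The core matching rule, accepting a spectral pitch candidate only when an autocorrelation peak falls within the same semitone range, is easy to state in code. A minimal sketch with an assumed half-semitone tolerance (inputs are candidate frequencies in Hz; names and tolerance are ours):

import numpy as np

def match_peaks(spec_freqs, acf_freqs, tol_semitones=0.5):
    # Keep spectral candidates confirmed by at least one
    # autocorrelation-derived frequency within the tolerance.
    acf = np.asarray(acf_freqs, dtype=float)   # assumed > 0
    confirmed = []
    for f in spec_freqs:
        dist = np.abs(12 * np.log2(acf / f))   # distance in semitones
        if np.any(dist <= tol_semitones):
            confirmed.append(f)
    return confirmed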
Towards zero-configuration condition monitoring based on dictionary learning. Martin-del-Campo, S.; and Sandin, F. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1306-1310, Aug 2015.
@InProceedings{7362595,
  author = {S. Martin-del-Campo and F. Sandin},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Towards zero-configuration condition monitoring based on dictionary learning},
  year = {2015},
  pages = {1306-1310},
  abstract = {Condition-based predictive maintenance can significantly improve overall equipment effectiveness provided that appropriate monitoring methods are used. Online condition monitoring systems are customized to each type of machine and need to be reconfigured when conditions change, which is costly and requires expert knowledge. Basic feature extraction methods limited to signal distribution functions and spectra are commonly used, making it difficult to automatically analyze and compare machine conditions. In this paper, we investigate the possibility to automate the condition monitoring process by continuously learning a dictionary of optimized shift-invariant feature vectors using a well-known sparse approximation method. We study how the feature vectors learned from a vibration signal evolve over time when a fault develops within a ball bearing of a rotating machine. We quantify the adaptation rate of learned features and find that this quantity changes significantly in the transitions between normal and faulty states of operation of the ball bearing.},
  keywords = {approximation theory;condition monitoring;signal processing;zero-configuration condition monitoring;dictionary learning;condition-based predictive maintenance;online condition monitoring systems;shift-invariant feature vectors;sparse approximation method;Dictionaries;Vibrations;Condition monitoring;Monitoring;Signal processing;Rotating machines;Europe;Condition monitoring;feature extraction;dictionary learning;sparse representation;bearings},
  doi = {10.1109/EUSIPCO.2015.7362595},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096691.pdf},
}
Fast Jacobi algorithm for non-orthogonal joint diagonalization of non-symmetric third-order tensors. Maurandi, V.; and Moreau, E. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1311-1315, Aug 2015.
@InProceedings{7362596,
  author = {V. Maurandi and E. Moreau},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Fast Jacobi algorithm for non-orthogonal joint diagonalization of non-symmetric third-order tensors},
  year = {2015},
  pages = {1311-1315},
  abstract = {We consider the problem of non-orthogonal joint diagonalization of a set of non-symmetric real-valued third-order tensors. This appears in many signal processing problems and it is instrumental in source separation. We propose a new Jacobi-like algorithm based on an LU decomposition of the so-called diagonalizing matrices. The parameter estimation is done entirely analytically following a strategy based on a classical inverse criterion and a fully decoupled estimation. One important point is that the diagonalization is directly done on the set of third-order tensors and not on their unfolded version. Computer simulations illustrate the overall good performances of the proposed algorithm.},
  keywords = {Jacobian matrices;signal processing;tensors;fast Jacobi algorithm;nonorthogonal joint diagonalization;nonsymmetric real-valued third-order tensors;signal processing problems;source separation;diagonalizing matrices;parameters estimation;inverse criterion;fully decoupled estimation;diagonalization;third-order tensors;computer simulations;Tensile stress;Matrix decomposition;Jacobian matrices;Signal processing algorithms;Europe;Parameter estimation;Blind Source Separation;Independent Component Analysis;Joint Diagonalization;Third-Order Tensors},
  doi = {10.1109/EUSIPCO.2015.7362596},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097407.pdf},
}
A fast algorithm for joint eigenvalue decomposition of real matrices. André, R.; Trainini, T.; Luciani, X.; and Moreau, E. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1316-1320, Aug 2015.
@InProceedings{7362597,
  author = {R. André and T. Trainini and X. Luciani and E. Moreau},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A fast algorithm for joint eigenvalue decomposition of real matrices},
  year = {2015},
  pages = {1316-1320},
  abstract = {We introduce an original algorithm to perform the joint eigenvalue decomposition of a set of real matrices. The proposed algorithm is iterative but does not resort to any sweeping procedure such as classical Jacobi approaches. Instead we use a first order approximation of the inverse of the matrix of eigenvectors and at each iteration the whole matrix of eigenvectors is updated. This algorithm is called Joint eigenvalue Decomposition using Taylor Expansion and has been designed in order to decrease the overall numerical complexity of the procedure (which is a trade-off between the number of iterations and the cost of each iteration) while keeping the same level of performances. Numerical comparisons with reference algorithms show that this goal is achieved.},
  keywords = {eigenvalues and eigenfunctions;iterative methods;Jacobian matrices;real matrices;Jacobi approaches;eigenvectors;joint eigenvalue decomposition;Taylor expansion;numerical complexity;iterations;Signal processing algorithms;Matrix decomposition;Complexity theory;Eigenvalues and eigenfunctions;Jacobian matrices;Signal to noise ratio;Estimation;Joint eigenvalue decomposition;joint diagonalization;canonical polyadic decomposition;ICA},
  doi = {10.1109/EUSIPCO.2015.7362597},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099233.pdf},
}
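The first-order approximation of the inverse that the algorithm exploits can be written compactly. In LaTeX, with E_k denoting the small multiplicative update of the eigenvector-matrix estimate B_k (our notation, a paraphrase of the stated idea):

% If the eigenvector matrix is updated as B_{k+1} = (I + E_k) B_k
% with a small E_k, its inverse never has to be recomputed exactly:
\[
  B_{k+1}^{-1} = B_k^{-1} (I + E_k)^{-1} \approx B_k^{-1} (I - E_k),
  \qquad \|E_k\| \ll 1 ,
\]
% a first-order Taylor expansion that trades a matrix inversion per
% iteration for a single matrix product.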
Canonical polyadic tensor decomposition in the presence of non Gaussian noise. Farias, R. C.; and Comon, P. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1321-1325, Aug 2015.
@InProceedings{7362598,
  author = {R. C. Farias and P. Comon},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Canonical polyadic tensor decomposition in the presence of non Gaussian noise},
  year = {2015},
  pages = {1321-1325},
  abstract = {In this paper we describe an estimator for the canonical polyadic (CP) tensor model using order statistics of the residuals. The estimator minimizes in an iterative and alternating fashion a dispersion function given by the weighted ranked absolute residuals. Specific choices of the weights lead to either equivalent or approximate versions of the least squares estimator, least absolute deviation estimator or least trimmed squares estimators. For different noise distributions, we present simulations comparing the performance of the proposed algorithm with the standard least squares estimator. The simulated performance is equivalent in the Gaussian noise case and superior when the noise is distributed according to the Laplacian or Cauchy distributions.},
  keywords = {Gaussian noise;iterative methods;least mean squares methods;signal processing;tensors;canonical polyadic tensor decomposition;nonGaussian noise;dispersion function;least squares estimator;least absolute deviation estimator;least trimmed squares estimators;Laplacian distributions;Cauchy distributions;Yttrium;Estimation;Least squares approximations;Robustness;Data models;Arrays;Tensile stress;Tensor decomposition;order statistics;non Gaussian noise;robust estimation},
  doi = {10.1109/EUSIPCO.2015.7362598},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101111.pdf},
}
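In our notation (an assumption, not the paper's), the dispersion function of weighted ranked absolute residuals reads:

% Let |r(\theta)|_{(1)} \le \dots \le |r(\theta)|_{(n)} be the order
% statistics of the absolute residuals of the CP model with
% parameters \theta, and w_i fixed nonnegative weights:
\[
  D(\theta) = \sum_{i=1}^{n} w_i \, \bigl| r(\theta) \bigr|_{(i)} .
\]
% Uniform weights w_i = 1 give the least absolute deviation
% estimator; zeroing the weights of the largest order statistics
% gives a least-trimmed-squares-like estimator, as the abstract notes.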
Variational blind source separation toolbox and its application to hyperspectral image data. Tichý, O.; and Šmídl, V. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1326-1330, Aug 2015.
@InProceedings{7362599,
  author = {O. Tichý and V. {Šmídl}},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Variational blind source separation toolbox and its application to hyperspectral image data},
  year = {2015},
  pages = {1326-1330},
  abstract = {The task of blind source separation (BSS) is to decompose sources that are observed only via their linear combination with unknown weights. The separation is possible when additional assumptions on the initial sources are given. Different assumptions yield different separation algorithms. Since we are primarily concerned with noisy observations, we follow the Variational Bayes approach and define noise properties and assumptions on the sources by prior probability distributions. Due to properties of the Variational Bayes algorithm, the resulting inference algorithm is very similar for many different source assumptions. This allows us to build a modular toolbox, where it is easy to code different assumptions as different modules. By using different modules, we obtain different BSS algorithms. The potential of this open-source toolbox is demonstrated on separation of hyperspectral image data. The MATLAB implementation of the toolbox is available for download.},
  keywords = {Bayes methods;blind source separation;decomposition;geophysical image processing;hyperspectral imaging;image coding;inference mechanisms;mathematics computing;probability;variational techniques;variational blind source separation toolbox;hyperspectral image data separation;BSS;decomposition;variational Bayes approach;probability distribution;inference algorithm;code;open-source toolbox;MATLAB implementation;Hyperspectral imaging;Signal processing algorithms;Blind source separation;Mathematical model;Europe;Inference algorithms;Blind Source Separation;Variational;Bayes Method;Sparse Prior;Hyperspectral Image},
  doi = {10.1109/EUSIPCO.2015.7362599},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104439.pdf},
}
Fine landmark-based synchronization of ad-hoc microphone arrays. Hon, T.; Wang, L.; Reiss, J. D.; and Cavallaro, A. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1331-1335, Aug 2015.
@InProceedings{7362600,
  author = {T. Hon and L. Wang and J. D. Reiss and A. Cavallaro},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Fine landmark-based synchronization of ad-hoc microphone arrays},
  year = {2015},
  pages = {1331-1335},
  abstract = {We use audio fingerprinting to solve the synchronization problem between multiple recordings from an ad-hoc array consisting of randomly placed wireless microphones or handheld smartphones. Synchronization is crucial when employing conventional microphone array techniques such as beam-forming and source localization. We propose a fine audio landmark fingerprinting method that detects the time difference of arrivals (TDOAs) of multiple sources in the acoustic environment. By estimating the maximum and minimum TDOAs, the proposed method can accurately calculate the unknown time offset between a pair of microphone recordings. Experimental results demonstrate that the proposed method significantly improves the synchronization accuracy of conventional audio fingerprinting methods and achieves comparable performance to the generalized cross-correlation method.},
  keywords = {audio recording;direction-of-arrival estimation;microphone arrays;smart phones;synchronisation;ad hoc microphone arrays;fine landmark-based synchronization;synchronization problem;randomly placed wireless microphones;handheld smartphones;audio landmark fingerprinting method;time difference of arrivals;TDOA;multiple sources;acoustic environment;microphone recordings;Synchronization;Feature extraction;Microphone arrays;Time-frequency analysis;Signal processing algorithms;Array signal processing;Synchronization;audio fingerprinting;microphone array},
  doi = {10.1109/EUSIPCO.2015.7362600},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096423.pdf},
}
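One way to read the max/min-TDOA step: each landmark time difference between two unsynchronized recordings is the clock offset plus a source-dependent TDOA, so if the active sources span the array symmetrically, the offset is the midpoint of the observed extremes. A sketch of that reasoning in Python (our reconstruction under that symmetry assumption, not the authors' code):

import numpy as np

def offset_from_landmark_diffs(diffs):
    # diffs : observed landmark time differences (seconds) between a
    #         pair of recordings, each equal to offset + tdoa(source).
    # If the source TDOAs range over [-t_max, +t_max], the midpoint of
    # the observed extremes estimates the clock offset.
    d = np.asarray(diffs, dtype=float)
    return 0.5 * (d.max() + d.min())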
Array calibration using array response interpolation and parametric modeling. Yang, B.; McKelvey, T.; Viberg, M.; and Xu, G. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1336-1340, Aug 2015.
@InProceedings{7362601,
  author = {B. Yang and T. McKelvey and M. Viberg and G. Xu},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Array calibration using array response interpolation and parametric modeling},
  year = {2015},
  pages = {1336-1340},
  abstract = {High-performance array applications often require an accurate array response model. A common way to achieve this is by array calibration which involves measuring the response for a finite number of given source directions and employing interpolation. This paper considers the array calibration problem by combining interpolation techniques and parametric modeling. The idea is to model the array response as a product of a mutual coupling matrix, an ideal array response vector (derived from the geometry of antenna array) and an angle-dependent correction vector. Since the major effects are captured by the physical model and the mutual coupling matrix, the correction vector will be a smoother function of angle as compared to direct interpolation of the measured array response. In numerical experiments of a real antenna array, the method is found to improve the performance of the array calibration significantly.},
  keywords = {antenna arrays;calibration;interpolation;matrix algebra;vectors;array response interpolation technique;parametric modeling;array calibration problem;mutual coupling matrix;geometry;antenna array;angle-dependent correction vector;Arrays;Mutual coupling;Interpolation;Antenna arrays;Finite element analysis;Antenna measurements;Calibration;Array calibration;array response interpolation;correction vector;parametric modeling},
  doi = {10.1109/EUSIPCO.2015.7362601},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103779.pdf},
}
Design of a sparse planar array for optimized 3D medical ultrasound imaging. Sciallero, C.; and Trucco, A. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1341-1345, Aug 2015.
@InProceedings{7362602,
  author = {C. Sciallero and A. Trucco},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Design of a sparse planar array for optimized 3D medical ultrasound imaging},
  year = {2015},
  pages = {1341-1345},
  abstract = {Two-dimensional apertures provide fully electronic scanning for 3D medical ultrasound imaging. The design of planar arrays with a limited number of active elements yielding real-time 3D high-quality imaging, for all beam scanning orientations, is one of the current challenges. In this paper, an innovative transmission/reception solution, that involves a compact fully sampled 256-element array as transmitter and an optimized 256-element sparse array as receiver, able to fulfill all the previous requirements, is proposed. The sparse array is made up of a thinned version of the transmitter dense array (i.e., a proper subset of elements is used both to transmit and receive) surrounded by an annular sparse off-the-grid array. Both the positions and the weights of the sparse array are jointly optimized by minimizing a novel cost function by means of simulated annealing algorithm. The proposed solution is well-suited for real-time 3D imaging over 360° of azimuth and ±40° of inclination.},
  keywords = {biomedical imaging;minimisation;receivers;simulated annealing;transmitters;sparse planar array design;optimized 3D medical ultrasound imaging;two-dimensional apertures;fully electronic scanning;real-time 3D high-quality imaging;beam scanning orientation;sparse array;receiver;transmitter dense array;annular sparse off-the-grid array;cost function minimization;simulated annealing algorithm;Arrays;Three-dimensional displays;Transmitters;Receivers;Optimization;Imaging;Azimuth;3D medical ultrasound imaging;planar array;sparse array;stochastic optimization;ultrasound signal processing},
  doi = {10.1109/EUSIPCO.2015.7362602},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105167.pdf},
}
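The joint optimization of element positions and weights uses simulated annealing; a generic annealing loop in Python, where cost and neighbor stand in for the paper's novel cost function and its move set (both are assumptions here, not the published design):

import numpy as np

def simulated_annealing(cost, init, neighbor, t0=1.0, alpha=0.995,
                        n_iter=20000, rng=None):
    # Generic simulated-annealing minimiser: accept worse designs with
    # probability exp(-delta/T) and cool T geometrically.
    rng = rng or np.random.default_rng(0)
    x, fx, t = init, cost(init), t0
    best_x, best_f = x, fx
    for _ in range(n_iter):
        y = neighbor(x, rng)           # random local perturbation
        fy = cost(y)
        if fy < fx or rng.random() < np.exp(-(fy - fx) / max(t, 1e-12)):
            x, fx = y, fy
            if fx < best_f:
                best_x, best_f = x, fx
        t *= alpha                     # geometric cooling schedule
    return best_x, best_f

For the array-design problem, a state would bundle the off-the-grid element positions and apodization weights, and the cost would penalise side-lobe levels of the simulated beam pattern over the scan range.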
Dual-function radar-communications using phase-rotational invariance. Hassanien, A.; Amin, M. G.; Zhang, Y. D.; and Ahmad, F. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1346-1350, Aug 2015.
@InProceedings{7362603,
  author = {A. Hassanien and M. G. Amin and Y. D. Zhang and F. Ahmad},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Dual-function radar-communications using phase-rotational invariance},
  year = {2015},
  pages = {1346-1350},
  abstract = {In this paper, we develop a new technique for dual-function radar-communications in a transmit multi-sensor array where information embedding is achieved using phase-rotational invariance. A sequence of Q bits is first mapped into a dictionary of $2^Q$ phase rotations. Then, one pair of transmit orthogonal waveforms is used in tandem with $2^Q$ pairs of transmit beamforming weight vectors for embedding a certain entry of the phase-rotation dictionary during each radar pulse. The same pair of waveforms is used during all pulses while the pair of transmit beamforming weight vectors changes from pulse to pulse based on which entry of the phase-rotation dictionary is embedded. During each pulse, the receiver detects the embedded phase rotation and employs it to decipher the transmitted bit sequence. The proposed information embedding technique is angle-dependent and, therefore, the communication process is inherently secure against interception from directions other than the desired communication direction. The performance of the proposed technique is investigated in terms of the bit error rate (BER).},
  keywords = {error statistics;radar applications;radar receivers;radio receivers;sensor arrays;sensor fusion;BER;bit error rate;transmitted bit sequence;embedded phase rotation;radio receiver;radar pulse;phase-rotation dictionary;transmit beamforming weight vectors;transmit orthogonal waveforms;$2^Q$ phase rotations;Q bits sequence;information embedding;multisensor array;phase rotational invariance;dual function radar communications;Radar;Receivers;Array signal processing;Dictionaries;Arrays;Radar signal processing;Radar antennas;Dual-function radar-communications;phase-rotational invariance;communication security;bit error rate},
  doi = {10.1109/EUSIPCO.2015.7362603},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105049.pdf},
}
\n
\n\n\n
\n In this paper, we develop a new technique for dual-function radar-communications in a transmit multi-sensor array where information embedding is achieved using phase-rotational invariance. A sequence of Q bits is first mapped into a dictionary of 2^Q phase rotations. Then, one pair of transmit orthogonal waveforms is used in tandem with 2^Q pairs of transmit beamforming weight vectors for embedding a certain entry of the phase-rotation dictionary during each radar pulse. The same pair of waveforms is used during all pulses, while the pair of transmit beamforming weight vectors changes from pulse to pulse based on which entry of the phase-rotation dictionary is embedded. During each pulse, the receiver detects the embedded phase rotation and employs it to decipher the transmitted bit sequence. The proposed information embedding technique is angle-dependent and, therefore, the communication process is inherently secure against interception from directions other than the desired communication direction. The performance of the proposed technique is investigated in terms of the bit error rate (BER).\n
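The bit-to-phase mapping at the heart of this scheme fits in a few lines; the sketch below shows only the dictionary construction, embedding, and nearest-entry detection for Q = 3 bits under small phase noise, with the orthogonal waveforms and beamformers abstracted away as assumptions.

```python
# Q bits index one of 2**Q unit-modulus phase rotations; the receiver
# recovers the bits by finding the nearest dictionary entry.
import numpy as np

Q = 3
dictionary = np.exp(1j * 2 * np.pi * np.arange(2**Q) / 2**Q)  # 2**Q rotations

def embed(bits):
    idx = int("".join(map(str, bits)), 2)        # Q bits -> dictionary index
    return dictionary[idx]

def detect(phase_rotation):
    idx = int(np.argmin(np.abs(dictionary - phase_rotation)))  # nearest entry
    return [int(b) for b in format(idx, f"0{Q}b")]

rng = np.random.default_rng(1)
bits = rng.integers(0, 2, Q).tolist()
rx = embed(bits) * np.exp(1j * rng.normal(0, 0.05))  # small phase noise
assert detect(rx) == bits
print("sent", bits, "-> recovered", detect(rx))
```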
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n ISAR image formation with a combined empirical mode decomposition and time frequency representation.\n \n \n \n \n\n\n \n Ahmed, B. A. H.; Cexus, J.; and Toumi, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1351-1355, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ISARPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362604,\n  author = {B. A. H. Ahmed and J. Cexus and A. Toumi},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {ISAR image formation with a combined empirical mode decomposition and time frequency representation},\n  year = {2015},\n  pages = {1351-1355},\n  abstract = {In this paper, a method for Inverse Synthetic Aperture Radar (ISAR) image formation based on the use of the Complex Empirical Mode Decomposition (CEMD) is proposed. The CEMD [1] which based on the Empirical Mode Decomposition (EMD) is used in conjunction with a Time-Frequency Representation (TFR) to estimate a 3-D time-range-Doppler Cubic image, which we can use to effectively extract a sequence of ISAR 2-D range-Doppler images. The potential of the proposed method to construct ISAR image is illustrated by simulations results performed on synthetic data and compared to 2-D Fourier Transform and TFR methods. The simulation results indicate that this method can provide ISAR images with a good resolution. These results demonstrate the potential application of the proposed method for ISAR image formation.},\n  keywords = {Doppler radar;radar imaging;synthetic aperture radar;2D range-Doppler images;3D time-range-Doppler cubic image;TFR;time-frequency representation;CEMD;complex empirical mode decomposition;combined empirical mode decomposition;ISAR image formation;inverse synthetic aperture radar;Time-frequency analysis;Signal processing algorithms;Fourier transforms;Doppler effect;Empirical mode decomposition;Image resolution;Radar imaging;Inverse Synthetic Aperture Radar;Image formation;Complex Empirical Mode Decomposition;Time-Frequency Representation},\n  doi = {10.1109/EUSIPCO.2015.7362604},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104353.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, a method for Inverse Synthetic Aperture Radar (ISAR) image formation based on the Complex Empirical Mode Decomposition (CEMD) is proposed. The CEMD [1], which is based on the Empirical Mode Decomposition (EMD), is used in conjunction with a Time-Frequency Representation (TFR) to estimate a 3-D time-range-Doppler cubic image, from which a sequence of 2-D ISAR range-Doppler images can be effectively extracted. The potential of the proposed method to construct ISAR images is illustrated by simulation results on synthetic data and compared to 2-D Fourier Transform and TFR methods. The simulation results indicate that this method can provide ISAR images with good resolution. These results demonstrate the potential application of the proposed method for ISAR image formation.\n
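For context, the 2-D Fourier Transform baseline that the method is compared against can be reproduced in a few lines on simulated point scatterers; the scatterer placement below is an arbitrary assumption, and the proposed CEMD+TFR processing is not reproduced here.

```python
# Classical range-Doppler baseline: a 2-D FFT over fast time (range) and
# slow time (pulses) focuses simulated point scatterers into image peaks.
import numpy as np

n_range, n_pulses = 128, 64
data = np.zeros((n_pulses, n_range), complex)
rng_bins, dop_bins = [30, 80], [10, -15]         # toy scatterer locations
for rb, db in zip(rng_bins, dop_bins):
    fast = np.exp(2j * np.pi * rb * np.arange(n_range) / n_range)
    slow = np.exp(2j * np.pi * db * np.arange(n_pulses) / n_pulses)
    data += np.outer(slow, fast)

image = np.fft.fftshift(np.abs(np.fft.fft2(data)))   # range-Doppler image
peak = np.unravel_index(image.argmax(), image.shape)
print("brightest pixel (Doppler bin, range bin):", peak)
```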
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-group multicast beamforming for simultaneous wireless information and power transfer.\n \n \n \n \n\n\n \n Demir, Ö. T.; and Tuncer, T. E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1356-1360, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-groupPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362605,\n  author = {Ö. T. Demir and T. E. Tuncer},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-group multicast beamforming for simultaneous wireless information and power transfer},\n  year = {2015},\n  pages = {1356-1360},\n  abstract = {In this paper, simultaneous wireless information and power transfer (SWIPT) concept is introduced for multi group multicast beamforming. Each user has a single antenna and a power splitter which divides the radio frequency (RF) signal into two for both information decoding and energy harvesting. The aim is to minimize the total transmission power at the base station while satisfying both signal-to-interference-plus-noise-ratio (SINR) and harvested power constraints at each user. Unlike unicast and certain broadcast scenarios, semidefinite relaxation (SDR) is not tight and global optimum solution cannot be found for this problem. We propose an iterative algorithm where a convex optimization problem is solved at each iteration. Both perfect and imperfect channel state information (CSI) at the base station are considered. Simulation results show that the proposed solution is very close to the SDR lower bound and a few number of iterations are enough for the algorithm convergence.},\n  keywords = {array signal processing;concave programming;decoding;energy harvesting;iterative methods;radiofrequency power transmission;multigroup multicast beamforming;simultaneous wireless information-power transfer;SWIPT concept;power splitter;antenna;radiofrequency signal;RF signal;information decoding;energy harvesting;total transmission power minimization;signal-to-interference-plus-noise-ratio;harvested power constraints;unicast scenario;broadcast scenario;semidefinite relaxation;iterative algorithm;convex optimization problem;imperfect channel state information;perfect CSI;SDR lower bound;Array signal processing;Receivers;Optimized production technology;Wireless communication;Minimization;Quality of service;Decoding;Multicast beamforming;wireless power transfer;convex optimization;alternating minimization},\n  doi = {10.1109/EUSIPCO.2015.7362605},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104911.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, the simultaneous wireless information and power transfer (SWIPT) concept is introduced for multi-group multicast beamforming. Each user has a single antenna and a power splitter, which divides the received radio frequency (RF) signal into two parts, one for information decoding and one for energy harvesting. The aim is to minimize the total transmission power at the base station while satisfying both signal-to-interference-plus-noise-ratio (SINR) and harvested power constraints at each user. Unlike unicast and certain broadcast scenarios, semidefinite relaxation (SDR) is not tight and a global optimum solution cannot be found for this problem. We propose an iterative algorithm in which a convex optimization problem is solved at each iteration. Both perfect and imperfect channel state information (CSI) at the base station are considered. Simulation results show that the proposed solution is very close to the SDR lower bound and that a few iterations are enough for the algorithm to converge.\n
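A hedged sketch of the semidefinite relaxation that serves as the lower bound here, written with cvxpy for a simplified single-group, perfect-CSI case (so the SINR constraint reduces to an SNR constraint): the channel realizations, thresholds, and power-splitting ratio are placeholder assumptions, and the paper's multi-group formulation and iterative convex algorithm are not reproduced.

```python
# Minimize transmit power subject to per-user SNR and harvested-power
# constraints after relaxing W = w w^H to a PSD matrix (SDR).
import numpy as np
import cvxpy as cp

rng = np.random.default_rng(0)
n_tx, n_users = 4, 3
H = (rng.normal(size=(n_users, n_tx))
     + 1j * rng.normal(size=(n_users, n_tx))) / np.sqrt(2)
sigma2, gamma, e_min, rho = 1.0, 2.0, 0.5, 0.5   # noise, SNR, harvest, split

W = cp.Variable((n_tx, n_tx), hermitian=True)
constraints = [W >> 0]
for h in H:
    p_rx = cp.real(h.conj() @ W @ h)             # received power h^H W h
    constraints += [rho * p_rx >= gamma * sigma2,    # decoding branch
                    (1 - rho) * p_rx >= e_min]       # harvesting branch
problem = cp.Problem(cp.Minimize(cp.real(cp.trace(W))), constraints)
problem.solve()
print("SDR lower bound on transmit power:", problem.value)
```

If the optimal W happens to be rank one, its principal eigenvector is an optimal beamformer; otherwise the bound is not attained, which is exactly the gap the paper's iterative algorithm targets.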
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Joint optimization of transmit and relay beamformer for single group multicast transmission.\n \n \n \n \n\n\n \n Demir, O. T.; and Tuncer, T. E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1361-1365, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"JointPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362606,\n  author = {O. T. Demir and T. E. Tuncer},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Joint optimization of transmit and relay beamformer for single group multicast transmission},\n  year = {2015},\n  pages = {1361-1365},\n  abstract = {In this paper, single group multicasting is considered for the cooperative relay network. Two-phase transmission is performed and amplify-and-forward relay protocol is used. In the first phase, the base station broadcasts the common signal to the single antenna relays by employing beamforming. Each relay multiplies its received signal by a complex weight and retransmits it to the single antenna users. The aim is to find the beamformer weights at the base station and the relays jointly to minimize the total transmitted power while satisfying signal-to-noise ratio constraint for each user. Nonconvex joint problem is firstly relaxed and then converted to an equivalent biconvex problem by using exact penalty approach. The equivalent problem is solved iteratively using alternating minimization. To the best of our knowledge, this is the first work that considers joint beamforming at the base station and the relays for single group multicasting scenario.},\n  keywords = {amplify and forward communication;array signal processing;concave programming;convex programming;cooperative communication;multicast communication;protocols;relay networks (telecommunication);single group multicasting;cooperative relay network;two-phase transmission;joint beamforming;alternating minimization;exact penalty approach;equivalent biconvex problem;nonconvex joint problem;signal-to-noise ratio constraint;beamformer weights;single antenna relays;base station;amplify-and-forward relay protocol;Base stations;Array signal processing;Relay networks (telecommunications);Antennas;Signal to noise ratio;Quality of service;Multicast beamforming;distributed beam-forming;convex optimization;exact penalty function},\n  doi = {10.1109/EUSIPCO.2015.7362606},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104931.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, single-group multicasting is considered for a cooperative relay network. Two-phase transmission is performed and an amplify-and-forward relay protocol is used. In the first phase, the base station broadcasts the common signal to the single-antenna relays by employing beamforming. Each relay multiplies its received signal by a complex weight and retransmits it to the single-antenna users. The aim is to find the beamformer weights at the base station and the relays jointly so as to minimize the total transmitted power while satisfying a signal-to-noise ratio constraint for each user. The nonconvex joint problem is first relaxed and then converted to an equivalent biconvex problem by using an exact penalty approach. The equivalent problem is solved iteratively using alternating minimization. To the best of our knowledge, this is the first work that considers joint beamforming at the base station and the relays for the single-group multicasting scenario.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparse support recovery for DOA estimation in the presence of mutual coupling.\n \n \n \n \n\n\n \n Elbir, A. M.; and Tuncer, T. E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1366-1370, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SparsePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362607,\n  author = {A. M. Elbir and T. E. Tuncer},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Sparse support recovery for DOA estimation in the presence of mutual coupling},\n  year = {2015},\n  pages = {1366-1370},\n  abstract = {Direction-of-arrival (DOA) estimation in the presence of mutual coupling and coherent signals is a hard task for arbitrary sensor arrays including uniform circular array (UCA). While the coherent sources can be resolved using spatial smoothing algorithms for uniform linear and rectangular arrays, it cannot be applied to UCA. In this paper, a new technique is proposed for DOA estimation in UCA using a single snapshot. Joint-sparse recovery algorithm is proposed where the source signal spatial directions and coupling coefficients are embedded into a joint-sparse signal. A dictionary is defined according to restricted isometry and compressed sensing is employed for both DOA and coupling coefficient estimation. It is shown that the proposed method performs better than the alternative sparse recovery techniques.},\n  keywords = {compressed sensing;direction-of-arrival estimation;sparse support recovery;DOA estimation;mutual coupling;direction-of-arrival estimation;arbitrary sensor arrays;uniform circular array;coherent sources;spatial smoothing algorithms;joint-sparse recovery algorithm;compressed sensing;Direction-of-arrival estimation;Estimation;Couplings;Antenna arrays;Dictionaries;Matching pursuit algorithms;Signal processing algorithms;Compressed sensing;Joint-sparse recovery;Mutual coupling;Multipath;DOA estimation;Uniform circular array},\n  doi = {10.1109/EUSIPCO.2015.7362607},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105169.pdf},\n}\n\n
\n
\n\n\n
\n Direction-of-arrival (DOA) estimation in the presence of mutual coupling and coherent signals is a hard task for arbitrary sensor arrays, including the uniform circular array (UCA). While coherent sources can be resolved using spatial smoothing algorithms for uniform linear and rectangular arrays, spatial smoothing cannot be applied to the UCA. In this paper, a new technique is proposed for DOA estimation in the UCA using a single snapshot. A joint-sparse recovery algorithm is proposed in which the source signal spatial directions and the coupling coefficients are embedded into a joint-sparse signal. A dictionary is defined in accordance with the restricted isometry property, and compressed sensing is employed for both DOA and coupling coefficient estimation. It is shown that the proposed method performs better than alternative sparse recovery techniques.\n
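As an illustration of the grid-based sparse recovery ingredient, the sketch below runs orthogonal matching pursuit over a uniform-linear-array steering dictionary from a single snapshot; the mutual-coupling augmentation that is the paper's actual contribution is omitted, and the array, grid, and source parameters are assumptions.

```python
# Single-snapshot sparse DOA: greedily pick the dictionary atom most
# correlated with the residual, then least-squares re-fit on the support.
import numpy as np

rng = np.random.default_rng(1)

def steering(m, theta_deg):
    theta = np.deg2rad(np.atleast_1d(theta_deg))
    return np.exp(1j * np.pi * np.arange(m)[:, None] * np.sin(theta))

m = 16
grid = np.arange(-60.0, 61.0, 1.0)               # DOA grid in degrees
A = steering(m, grid)                            # steering dictionary
x = steering(m, [-20.0, 35.0]) @ np.array([1.0, 0.8])   # single snapshot
x += 0.05 * (rng.normal(size=m) + 1j * rng.normal(size=m))

residual, support = x.copy(), []
for _ in range(2):                               # number of sources assumed known
    k = int(np.argmax(np.abs(A.conj().T @ residual)))
    support.append(k)
    coef, *_ = np.linalg.lstsq(A[:, support], x, rcond=None)
    residual = x - A[:, support] @ coef          # LS re-fit on the support
print("estimated DOAs (deg):", sorted(grid[support]))
```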
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Antenna array design for music in the presence of spatially distributed sources.\n \n \n \n \n\n\n \n Xiong, W.; Picheral, J.; and Marcos, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1371-1375, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AntennaPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362608,\n  author = {W. Xiong and J. Picheral and S. Marcos},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Antenna array design for music in the presence of spatially distributed sources},\n  year = {2015},\n  pages = {1371-1375},\n  abstract = {In this paper, the impact of array geometry on the direction of arrival (DOA) estimation of spatially distributed sources impinging on a sensor array is considered. Taking into account the coherently distributed source model proposed in [1], we establish closed-form expressions of the MUSIC-based DOA estimation error as functions of the positions of the array sensors in the presence of model errors due to the angular dispersion of the signal sources. The impact of the array geometry is studied and particular array designs are proposed to make DOA estimation more robust to source dispersion. The analytical results are validated by numerical simulations.},\n  keywords = {antenna arrays;direction-of-arrival estimation;sensor arrays;antenna array design;spatially distributed sources;direction of arrival estimation;DOA estimation;coherently distributed source model;MUSIC-based DOA estimation error;array sensors;signal sources;numerical simulations;Arrays;Direction-of-arrival estimation;Geometry;Estimation error;Multiple signal classification;Dispersion;array signal processing;distributed sources;array geometry;performance;MUSIC},\n  doi = {10.1109/EUSIPCO.2015.7362608},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103009.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, the impact of array geometry on the direction of arrival (DOA) estimation of spatially distributed sources impinging on a sensor array is considered. Taking into account the coherently distributed source model proposed in [1], we establish closed-form expressions of the MUSIC-based DOA estimation error as functions of the positions of the array sensors in the presence of model errors due to the angular dispersion of the signal sources. The impact of the array geometry is studied and particular array designs are proposed to make DOA estimation more robust to source dispersion. The analytical results are validated by numerical simulations.\n
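For readers wanting a concrete reference point, a compact point-source MUSIC implementation on a uniform linear array is sketched below; the paper's closed-form error expressions for distributed sources are not reproduced, and all scenario parameters here are assumptions.

```python
# Standard MUSIC: project scan steering vectors onto the noise subspace of
# the sample covariance and peak-pick the resulting pseudospectrum.
import numpy as np

rng = np.random.default_rng(2)
m, n_snap = 10, 200
doas = np.deg2rad([-10.0, 25.0])
A = np.exp(1j * np.pi * np.arange(m)[:, None] * np.sin(doas))
S = rng.normal(size=(2, n_snap)) + 1j * rng.normal(size=(2, n_snap))
N = 0.1 * (rng.normal(size=(m, n_snap)) + 1j * rng.normal(size=(m, n_snap)))
X = A @ S + N

R = X @ X.conj().T / n_snap                      # sample covariance
_, V = np.linalg.eigh(R)                         # ascending eigenvalues
En = V[:, :-2]                                   # noise subspace (2 sources)
scan = np.deg2rad(np.linspace(-90, 90, 721))
a = np.exp(1j * np.pi * np.arange(m)[:, None] * np.sin(scan))
p = 1.0 / np.linalg.norm(En.conj().T @ a, axis=0) ** 2   # pseudospectrum
loc = (p[1:-1] > p[:-2]) & (p[1:-1] > p[2:])     # local maxima
peaks = scan[1:-1][loc][np.argsort(p[1:-1][loc])[-2:]]
print("MUSIC estimates (deg):", np.sort(np.rad2deg(peaks)))
```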
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Wide-band source localization based on spatio-temporal structure of data.\n \n \n \n \n\n\n \n Fossati, C.; Bourennane, S.; and Villemin, G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1376-1380, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Wide-bandPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362609,\n  author = {C. Fossati and S. Bourennane and G. Villemin},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Wide-band source localization based on spatio-temporal structure of data},\n  year = {2015},\n  pages = {1376-1380},\n  abstract = {In this paper, a new subspace method based on spatio-temporal structure of data is presented for estimation of directions-of-arrival (DOAs) of sources impinging on an array of sensors. Firstly, the signals received on the different sensors are processed independently sensor by sensor to estimate the Times Of Arrival (TOAs) of multipaths. Then, the obtained TOAs are post-processed to estimate the DOA of each ray path. Simulation results show that the performance of the proposed method is similar to those of HR methods, with an advantage in its ability to cope with the situation where the number of multipaths is much larger than that of antenna sensors, which arises in many practical situations.},\n  keywords = {array signal processing;direction-of-arrival estimation;time-of-arrival estimation;subspace method;sensor array;HR method;ray path;multipath TOA estimation;multipath time-of-arrival estimation;DOA estimation;direction-of-arrival estimation;data spatiotemporal structure;wideband source localization;Covariance matrices;Direction-of-arrival estimation;Estimation;Sensor arrays;Smoothing methods;Localization;High Resolution;Time of arrival;ray path association},\n  doi = {10.1109/EUSIPCO.2015.7362609},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104739.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, a new subspace method based on the spatio-temporal structure of the data is presented for the estimation of the directions of arrival (DOAs) of sources impinging on an array of sensors. First, the signals received on the different sensors are processed independently, sensor by sensor, to estimate the Times of Arrival (TOAs) of the multipaths. Then, the obtained TOAs are post-processed to estimate the DOA of each ray path. Simulation results show that the performance of the proposed method is similar to that of high-resolution (HR) methods, with the advantage that it can cope with the situation, arising in many practical settings, where the number of multipaths is much larger than the number of antenna sensors.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n New insights into time synchronization of MIMO systems with interference.\n \n \n \n \n\n\n \n Hiltunen, S.; Chevalier, P.; and Loubaton, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1381-1385, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"NewPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362610,\n  author = {S. Hiltunen and P. Chevalier and P. Loubaton},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {New insights into time synchronization of MIMO systems with interference},\n  year = {2015},\n  pages = {1381-1385},\n  abstract = {This paper concerns time synchronization of MIMO systems. The current most powerful receiver is a generalized likelihood ratio test (GLRT) receiver, and assumes unknown, stationary, circular, and spatially colored Gaussian noise. However, this receiver is more complex than its non-GLRT counterparts, which, unfortunately, do not perform as well in most cases. As the complexity is an important issue for practical implementations and may be prohibitive for a large number of antennas, the purpose of this paper is to propose several ways of decreasing the complexity of the GLRT receiver while keeping its performance. Simplifications of the GLRT receiver and optimization of parameters, jointly with new low-complexity receivers, are introduced. The performance of the new receivers is analyzed and compared with that of the GLRT receiver, enlightening the practical interest of these receivers.},\n  keywords = {antenna arrays;Gaussian noise;MIMO communication;radio receivers;radiofrequency interference;synchronisation;MIMO system interference;time synchronization;generalized likelihood ratio test receiver;GLRT receiver complexity reduction;unknown noise;stationary noise;circular noise;spatially colored Gaussian noise;antennas;parameter optimization;low-complexity receiver;Synchronization;MIMO;Interference;Complexity theory;Fading;Receiving antennas;Time Synchronization;MIMO;Single Carrier;GLRT;Interference},\n  doi = {10.1109/EUSIPCO.2015.7362610},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103597.pdf},\n}\n\n
\n
\n\n\n
\n This paper concerns time synchronization of MIMO systems. The most powerful receiver currently available is a generalized likelihood ratio test (GLRT) receiver, which assumes unknown, stationary, circular, and spatially colored Gaussian noise. However, this receiver is more complex than its non-GLRT counterparts, which, unfortunately, do not perform as well in most cases. As complexity is an important issue for practical implementations and may be prohibitive for a large number of antennas, the purpose of this paper is to propose several ways of decreasing the complexity of the GLRT receiver while preserving its performance. Simplifications of the GLRT receiver and optimization of its parameters, together with new low-complexity receivers, are introduced. The performance of the new receivers is analyzed and compared with that of the GLRT receiver, highlighting the practical interest of these receivers.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed signal subspace estimation based on local generalized eigenvector matrix inversion.\n \n \n \n \n\n\n \n Hassani, A.; Bertrand, A.; and Moonen, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1386-1390, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362611,\n  author = {A. Hassani and A. Bertrand and M. Moonen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed signal subspace estimation based on local generalized eigenvector matrix inversion},\n  year = {2015},\n  pages = {1386-1390},\n  abstract = {Many array-processing algorithms or applications require the estimation of a target signal subspace, e.g., for source localization or for signal enhancement. In wireless sensor networks, the straightforward estimation of a network-wide signal subspace would require a centralization of all the sensor signals to compute network-wide covariance matrices. In this paper, we present a distributed algorithm for network-wide signal subspace estimation in which such data centralization is avoided. The algorithm relies on a generalized eigenvalue decomposition (GEVD), which allows to estimate a target signal subspace in spatially correlated noise. We show that the network-wide signal subspace can be found from the inversion of the matrices containing the generalized eigenvectors of a pair of reduced-dimension sensor signal covariance matrices at each node. The resulting distributed algorithm reduces the per-node communication and computational cost, while converging to the centralized solution. Numerical simulations reveal a faster convergence speed compared to a previously proposed algorithm.},\n  keywords = {array signal processing;covariance matrices;eigenvalues and eigenfunctions;matrix decomposition;matrix inversion;wireless sensor networks;distributed signal subspace estimation;local generalized eigenvector matrix inversion;array-processing algorithm;wireless sensor network;network-wide covariance matrices;data centralization;network-wide signal subspace estimation;generalized eigenvalue decomposition;reduced-dimension sensor signal covariance matrices;Estimation;Covariance matrices;Wireless sensor networks;Signal processing algorithms;Europe;Signal processing;Wireless communication;Wireless sensor network (WSN);distributed estimation;signal subspace estimation;generalized eigenvalue decomposition (GEVD)},\n  doi = {10.1109/EUSIPCO.2015.7362611},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104441.pdf},\n}\n\n
\n
\n\n\n
\n Many array-processing algorithms and applications require the estimation of a target signal subspace, e.g., for source localization or for signal enhancement. In wireless sensor networks, the straightforward estimation of a network-wide signal subspace would require centralizing all the sensor signals to compute network-wide covariance matrices. In this paper, we present a distributed algorithm for network-wide signal subspace estimation in which such data centralization is avoided. The algorithm relies on a generalized eigenvalue decomposition (GEVD), which makes it possible to estimate a target signal subspace in spatially correlated noise. We show that the network-wide signal subspace can be found from the inversion of the matrices containing the generalized eigenvectors of a pair of reduced-dimension sensor signal covariance matrices at each node. The resulting distributed algorithm reduces the per-node communication and computational cost while converging to the centralized solution. Numerical simulations reveal a faster convergence speed compared to a previously proposed algorithm.\n
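The single-node GEVD step that the distributed algorithm builds on can be sketched with SciPy as follows; the "inversion of the generalized eigenvector matrix" mentioned in the abstract appears as inv(V.T). The mixing model, dimensions, and noise level are assumptions, and the network-wide fusion step is not shown.

```python
# GEVD of a (signal-plus-noise, noise-only) covariance pair: the columns of
# inv(V.T) associated with the largest generalized eigenvalues span the
# target signal subspace.
import numpy as np
from scipy.linalg import eigh

rng = np.random.default_rng(3)
m, d, n_snap = 8, 2, 5000                        # sensors, subspace dim, samples
A = rng.normal(size=(m, d))                      # unknown mixing matrix
Ryy = np.cov(A @ rng.normal(size=(d, n_snap))
             + 0.5 * rng.normal(size=(m, n_snap)))   # signal-plus-noise
Rnn = np.cov(0.5 * rng.normal(size=(m, n_snap)))     # noise-only

w, V = eigh(Ryy, Rnn)                            # Ryy V = Rnn V diag(w), ascending
X_hat = np.linalg.inv(V.T)[:, -d:]               # signal subspace estimate

def largest_principal_angle(U1, U2):
    q1, _ = np.linalg.qr(U1)
    q2, _ = np.linalg.qr(U2)
    s = np.linalg.svd(q1.T @ q2, compute_uv=False)
    return np.degrees(np.arccos(np.clip(s.min(), -1.0, 1.0)))

print(f"angle to true subspace: {largest_principal_angle(X_hat, A):.2f} deg")
```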
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Timing mismatch compensation in TI-ADCS using Bayesian approach.\n \n \n \n \n\n\n \n Araghi, H.; Akhaee, M. A.; and Amini, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1391-1395, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"TimingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362612,\n  author = {H. Araghi and M. A. Akhaee and A. Amini},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Timing mismatch compensation in TI-ADCS using Bayesian approach},\n  year = {2015},\n  pages = {1391-1395},\n  abstract = {A TI-ADC is a circuitry to achieve high sampling rates by passing the signal and its shifted versions through a number of parallel ADCs with lower sampling rates. When the time shifts between the C channels of a TI-ADC are properly tuned, the aggregate of the obtained samples is equivalent to that of a single ADC with C-times the sampling rate. However, the performance of a TI-ADC can be seriously degraded under interchannel timing mismatch. As this non-ideality cannot be avoided in practice, we need to first estimate the mismatch value, and then, compensate it. In this paper, by adopting a stochastic bandlimited signal model we study the signal recovery problem from the samples of a TI-ADC affected by timing mismatch and jitter. In particular, we derive the Bayesian model and implement the minimum mean square error (MMSE) estimator. The latter is achieved by means of Gibbs sampling technique.},\n  keywords = {analogue-digital conversion;Bayes methods;least mean squares methods;Markov processes;Monte Carlo methods;timing jitter;timing mismatch compensation;TI-ADCS circuitry;Bayesian approach;parallel ADC;interchannel timing mismatch;stochastic bandlimited signal model;signal recovery problem;timing jitter;minimum mean square error estimator;MMSE estimator;Gibbs sampling technique;time-interleaved analog-digital converters;Timing;Jitter;Estimation;Bayes methods;Random variables;Europe;Bandlimited signals;Bayesian modeling;Gibbs sampling;Multichannel sampling;Timing mismatch},\n  doi = {10.1109/EUSIPCO.2015.7362612},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104941.pdf},\n}\n\n
\n
\n\n\n
\n A TI-ADC is a circuit that achieves high sampling rates by passing the signal and its shifted versions through a number of parallel ADCs with lower sampling rates. When the time shifts between the C channels of a TI-ADC are properly tuned, the aggregate of the obtained samples is equivalent to that of a single ADC with C times the sampling rate. However, the performance of a TI-ADC can be seriously degraded by interchannel timing mismatch. As this non-ideality cannot be avoided in practice, we need to first estimate the mismatch value and then compensate for it. In this paper, by adopting a stochastic bandlimited signal model, we study the signal recovery problem from the samples of a TI-ADC affected by timing mismatch and jitter. In particular, we derive the Bayesian model and implement the minimum mean square error (MMSE) estimator. The latter is computed by means of the Gibbs sampling technique.\n
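The impairment itself is easy to reproduce: in a two-channel TI-ADC, a timing skew on one sub-ADC creates an image tone at fs/2 - f0 in the interleaved output, which is what any compensation scheme must suppress. The sketch below only simulates the mismatch spur (with assumed tone and skew values); the paper's Bayesian/Gibbs estimator is not reproduced.

```python
# Simulate a 2-channel TI-ADC with timing skew on the odd-sample channel
# and measure the resulting image spur in the output spectrum.
import numpy as np

fs, n = 1.0, 4096
f0 = 0.11 * fs                                   # input tone frequency
skew = 0.02 / fs                                 # timing error, channel 1
t = np.arange(n) / fs
t_skewed = t.copy()
t_skewed[1::2] += skew                           # odd samples taken late
x = np.sin(2 * np.pi * f0 * t_skewed)            # interleaved TI-ADC output

spec = 20 * np.log10(np.abs(np.fft.rfft(x * np.hanning(n))) + 1e-12)
spec -= spec.max()                               # normalize to the carrier
freqs = np.fft.rfftfreq(n, 1 / fs)
spur = np.argmin(np.abs(freqs - (fs / 2 - f0)))  # image tone location
print(f"mismatch spur near {freqs[spur]:.3f}*fs: {spec[spur]:.1f} dBc")
```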
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Channel shortening for enhancing passive UHF RFID performance.\n \n \n \n\n\n \n Ben Jabeur, T.; and Kadri, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1396-1400, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362613,\n  author = {T. {Ben Jabeur} and A. Kadri},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Channel shortening for enhancing passive UHF RFID performance},\n  year = {2015},\n  pages = {1396-1400},\n  abstract = {This paper proposes using a channel shortening equalizer (CSE) to improve the performance of passive ultrahigh frequency radio frequency identification (UHF RFID) systems. In UHF RFID systems, the reader interrogates RF tags by transmitting continuous wave (CW) signals that power up the internal integrated circuitry of the tags that in turn, backscatter these signals to the reader after embedding their unique information. The overall performance of passive UHF RFID systems depends heavily on the power level of the signal impinged on the tag which is a function of the multipath channel environment in which the reader and the tags are deployed. In this paper, a channel shortening equalizer with a new constraint that exploits the knowledge of the propagation channel and the nature of the CW signal is proposed to boost the power of the impinged signal on the tag. The results show that using the proposed equalizer enhances the power level significantly which results in better performance.},\n  keywords = {equalisers;multipath channels;radiofrequency identification;radiowave propagation;signal detection;wireless channels;passive UHF RFID system performance enhancement;channel shortening equalizer;passive ultrahigh frequency radio frequency identification system;continuous wave signal transmission;RF tags;CW signal transmission;internal integrated circuitry power up;signal backscattering;multipath channel environment;propagation channel;Equalizers;Passive RFID tags;Transmitting antennas;Indexes;Fading;Radio frequency;Passive UHF RFID system;Channel shortening equalizer;SIMO system},\n  doi = {10.1109/EUSIPCO.2015.7362613},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n This paper proposes using a channel shortening equalizer (CSE) to improve the performance of passive ultrahigh frequency radio frequency identification (UHF RFID) systems. In UHF RFID systems, the reader interrogates RF tags by transmitting continuous wave (CW) signals that power up the internal integrated circuitry of the tags, which, in turn, backscatter these signals to the reader after embedding their unique information. The overall performance of passive UHF RFID systems depends heavily on the power level of the signal impinging on the tag, which is a function of the multipath channel environment in which the reader and the tags are deployed. In this paper, a channel shortening equalizer with a new constraint that exploits knowledge of the propagation channel and the nature of the CW signal is proposed to boost the power of the signal impinging on the tag. The results show that using the proposed equalizer enhances the power level significantly, which results in better performance.\n
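As background for the constrained equalizer proposed here, the sketch below implements the classical maximum-shortening-SNR (MSSNR) design: choose the taps that maximize the effective channel's energy inside a short window relative to the energy outside it, via a generalized eigenproblem. The channel, tap count, window, and delay are assumptions, and the paper's CW-specific constraint is omitted.

```python
# MSSNR channel shortening: maximize w^T A w / w^T B w, where A and B are
# the in-window and out-of-window energies of the effective channel C @ w.
import numpy as np
from scipy.linalg import eigh

rng = np.random.default_rng(4)
h = rng.normal(size=12) * 0.8 ** np.arange(12)   # long multipath channel
Lh, Lw, win, delay = len(h), 8, 3, 2             # channel/taps/window/delay

C = np.zeros((Lh + Lw - 1, Lw))
for i in range(Lw):
    C[i:i + Lh, i] = h                           # convolution matrix: c = C @ w
inside = slice(delay, delay + win)
A = C[inside].T @ C[inside]                      # energy inside the window
mask = np.ones(Lh + Lw - 1, bool)
mask[inside] = False
B = C[mask].T @ C[mask]                          # energy outside the window

_, V = eigh(A, B)                                # generalized EVD, ascending
w = V[:, -1]                                     # max shortening-SNR taps
c = C @ w
print("in/out energy ratio:", (c[inside]**2).sum() / (c[mask]**2).sum())
```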
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n MT-based artificial hypothesis generation for unsupervised discriminative language modeling.\n \n \n \n \n\n\n \n Dikici, E.; and Saraçlar, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1401-1405, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MT-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362614,\n  author = {E. Dikici and M. Saraçlar},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {MT-based artificial hypothesis generation for unsupervised discriminative language modeling},\n  year = {2015},\n  pages = {1401-1405},\n  abstract = {Discriminative language modeling (DLM) is used as a postprocessing step to correct automatic speech recognition (ASR) errors. Traditional DLM training requires a large number of ASR N-best lists together with their reference transcriptions. It is possible to incorporate additional text data into training via artificial hypothesis generation through confusion modeling. A weighted finite-state transducer (WFST) or a machine translation (MT) system can be used to generate the artificial hypotheses. When the reference transcriptions are not available, training can be done in an unsupervised way via a target output selection scheme. In this paper we adapt the MT-based artificial hypothesis generation approach to un-supervised discriminative language modeling, and compare it with the WFST-based setting. We achieve improvements in word error rate of up to 0.7% over the generative baseline, which is significant at p <; 0.001.},\n  keywords = {finite state machines;language translation;speech recognition;unsupervised discriminative language modeling;automatic speech recognition errors;ASR errors;DLM training;confusion modeling;weighted finite-state transducer;WFST;machine translation system;MT system;target output selection scheme;MT-based artificial hypothesis generation approach;Training;Adaptation models;Data models;Europe;Signal processing;Speech;Manuals;Discriminative language model;confusion model;machine translation;unsupervised training},\n  doi = {10.1109/EUSIPCO.2015.7362614},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105111.pdf},\n}\n\n
\n
\n\n\n
\n Discriminative language modeling (DLM) is used as a post-processing step to correct automatic speech recognition (ASR) errors. Traditional DLM training requires a large number of ASR N-best lists together with their reference transcriptions. It is possible to incorporate additional text data into training via artificial hypothesis generation through confusion modeling. A weighted finite-state transducer (WFST) or a machine translation (MT) system can be used to generate the artificial hypotheses. When the reference transcriptions are not available, training can be done in an unsupervised way via a target output selection scheme. In this paper we adapt the MT-based artificial hypothesis generation approach to unsupervised discriminative language modeling and compare it with the WFST-based setting. We achieve improvements in word error rate of up to 0.7% over the generative baseline, which is significant at p < 0.001.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unsupervised approach to extract summary keywords in meeting domain.\n \n \n \n \n\n\n \n Bokaetf, M. H.; Sameti, H.; and Liu, Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1406-1410, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"UnsupervisedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362615,\n  author = {M. H. Bokaetf and H. Sameti and Y. Liu},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Unsupervised approach to extract summary keywords in meeting domain},\n  year = {2015},\n  pages = {1406-1410},\n  abstract = {Summary keywords are words that are used in the reference extracted summary, therefore can be used to discriminate between summary sentences from non-summary ones. Finding these words is important for the extractive summarization algorithms that measure the importance of a sentence based on the importance of its constituent words. This paper is focused on extracting summary keywords in the multi-party meeting domain. We test previously proposed keyword extraction algorithms and evaluate their performance to determine summary keywords. We also propose a new approach which uses discourse information to find local important keywords and show that it outperforms all the previous methods. We evaluate our proposed approach on the standard AMI meeting corpus according to the reference extracted summary prepared in this corpus.},\n  keywords = {information retrieval;unsupervised learning;unsupervised learning approach;summary keyword extraction;extractive summarization algorithms;multiparty meeting domain;discourse information;AMI meeting corpus;Signal processing algorithms;Feature extraction;Europe;Signal processing;Market research;Speech;Standards;meeting segmentation;funtion segmentation;unsupervised algorithm;summary keyword extraction},\n  doi = {10.1109/EUSIPCO.2015.7362615},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104351.pdf},\n}\n\n
\n
\n\n\n
\n Summary keywords are words that are used in the reference extracted summary and can therefore be used to discriminate summary sentences from non-summary ones. Finding these words is important for extractive summarization algorithms that measure the importance of a sentence based on the importance of its constituent words. This paper focuses on extracting summary keywords in the multi-party meeting domain. We test previously proposed keyword extraction algorithms and evaluate their performance at determining summary keywords. We also propose a new approach that uses discourse information to find locally important keywords and show that it outperforms all the previous methods. We evaluate the proposed approach on the standard AMI meeting corpus, using the reference extracted summaries prepared for this corpus.\n
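A generic TF-IDF keyword baseline, from the broad family of unsupervised extractors such a study would compare against, fits in a few lines; the toy "meeting" sentences below are invented, and the paper's discourse-based method is not reproduced.

```python
# TF-IDF scoring: words frequent in one document but rare across the
# collection get high scores and are returned as keywords.
from collections import Counter
import math

docs = [
    "we discuss the project budget and the budget deadline",
    "the design meeting covered interface design and user tests",
    "budget approval requires a final design document",
]
tokenized = [d.split() for d in docs]
df = Counter(w for doc in tokenized for w in set(doc))   # document frequency
for i, doc in enumerate(tokenized):
    tf = Counter(doc)
    scores = {w: tf[w] * math.log(len(docs) / df[w]) for w in tf}
    top = sorted(scores, key=scores.get, reverse=True)[:2]
    print(f"doc {i} keywords: {top}")
```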
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Feature extraction using pre-trained convolutive bottleneck nets for dysarthric speech recognition.\n \n \n \n \n\n\n \n Takashima, Y.; Nakashika, T.; Takiguchi, T.; and Ariki, Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1411-1415, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"FeaturePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362616,\n  author = {Y. Takashima and T. Nakashika and T. Takiguchi and Y. Ariki},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Feature extraction using pre-trained convolutive bottleneck nets for dysarthric speech recognition},\n  year = {2015},\n  pages = {1411-1415},\n  abstract = {In this paper, we investigate the recognition of speech uttered by a person with an articulation disorder resulting from athetoid cerebral palsy based on a robust feature extraction method using pre-trained convolutive bottleneck networks (CBN). Generally speaking, the amount of speech data obtained from a person with an articulation disorder is limited because their burden is large due to strain on the speech muscles. Therefore, a trained CBN tends toward overfitting for a small corpus of training data. In our previous work, the experimental results showed speech recognition using features extracted from CBNs outperformed conventional features. However, the recognition accuracy strongly depends on the initial values of the convolution kernels. To prevent overfitting in the networks, we introduce in this paper a pre-training technique using a convolutional restricted Boltzmann machine (CRBM). Through word-recognition experiments, we confirmed its superiority in comparison to convolutional networks without pre-training.},\n  keywords = {feature extraction;speech recognition;convolutional networks;convolutional restricted Boltzmann machine;speech muscles;convolutive bottleneck networks;athetoid cerebral palsy;articulation disorder;speech recognition;feature extraction;Feature extraction;Convolution;Speech;Speech recognition;Europe;Kernel;Articulation disorders;feature extraction;convolutional neural networks;bottleneck feature;convolutional restricted Boltzmann machine},\n  doi = {10.1109/EUSIPCO.2015.7362616},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097315.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we investigate the recognition of speech uttered by a person with an articulation disorder resulting from athetoid cerebral palsy, based on a robust feature extraction method using pre-trained convolutive bottleneck networks (CBN). Generally speaking, the amount of speech data obtainable from a person with an articulation disorder is limited, because speaking places a large burden on them due to strain on the speech muscles. Therefore, a trained CBN tends to overfit a small corpus of training data. In our previous work, experimental results showed that speech recognition using features extracted from CBNs outperformed that using conventional features. However, the recognition accuracy strongly depends on the initial values of the convolution kernels. To prevent overfitting in the networks, we introduce in this paper a pre-training technique using a convolutional restricted Boltzmann machine (CRBM). Through word-recognition experiments, we confirmed its superiority over convolutional networks without pre-training.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Evaluation of PNCC and extended spectral subtraction methods for robust speech recognition.\n \n \n \n \n\n\n \n Fux, T.; and Jouvet, D.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1416-1420, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"EvaluationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362617,\n  author = {T. Fux and D. Jouvet},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Evaluation of PNCC and extended spectral subtraction methods for robust speech recognition},\n  year = {2015},\n  pages = {1416-1420},\n  abstract = {This paper evaluates the robustness of different approaches for speech recognition with respect to signal-to-noise ratio (SNR), to signal level and to presence of non-speech data before and after utterances to be recognized. Three types of noise robust features are considered: Power Normalized Cepstral Coefficients (PNCC), Mel-Frequency Cepstral Coefficients (MFCC) after applying an extended spectral subtraction method, and Sphinx embedded denoising features from recent sphinx versions. Although removing C0 in MFCC-based features leads to a slight decrease in speech recognition performance, it makes the speech recognition system independent on the speech signal level. With multi-condition training, the three sets of noise-robust features lead to a rather similar behavior of performance with respect to SNR and presence of non-speech data. Overall, best performance is achieved with the extended spectral subtraction approach. Also, the performance of the PNCC features appears to be dependent on the initialization of the normalization factor.},\n  keywords = {cepstral analysis;speech recognition;extended spectral subtraction methods;robust speech recognition;signal-to-noise ratio;SNR;nonspeech data;noise robust features;power normalized cepstral coefficients;Mel-frequency cepstral coefficients;MFCC;Sphinx embedded denoising features;sphinx versions;multicondition training;PNCC features;Speech;Noise measurement;Mel frequency cepstral coefficient;Speech recognition;Training;Signal to noise ratio;Hidden Markov models;Speech recognition;Speech level robustness;Noise robustness;Spectral subtraction;PNCC},\n  doi = {10.1109/EUSIPCO.2015.7362617},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104069.pdf},\n}\n\n
\n
\n\n\n
\n This paper evaluates the robustness of different approaches for speech recognition with respect to signal-to-noise ratio (SNR), signal level, and the presence of non-speech data before and after the utterances to be recognized. Three types of noise-robust features are considered: Power Normalized Cepstral Coefficients (PNCC), Mel-Frequency Cepstral Coefficients (MFCC) after applying an extended spectral subtraction method, and the embedded denoising features from recent Sphinx versions. Although removing C0 from the MFCC-based features leads to a slight decrease in speech recognition performance, it makes the speech recognition system independent of the speech signal level. With multi-condition training, the three sets of noise-robust features lead to rather similar behavior with respect to SNR and the presence of non-speech data. Overall, the best performance is achieved with the extended spectral subtraction approach. Also, the performance of the PNCC features appears to depend on the initialization of the normalization factor.\n
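A minimal magnitude spectral-subtraction front end is sketched below to make the family of methods concrete; it estimates the noise magnitude from assumed leading noise-only frames and keeps the noisy phase. The paper's extended variant goes beyond this basic form.

```python
# Basic spectral subtraction: subtract an average noise magnitude from each
# frame's magnitude spectrum, floor the result, and overlap-add resynthesize.
import numpy as np

def spectral_subtraction(x, n_noise_frames=10, frame=256, hop=128, beta=0.02):
    win = np.hanning(frame)
    frames = np.stack([x[i:i + frame] * win
                       for i in range(0, len(x) - frame, hop)])
    X = np.fft.rfft(frames, axis=1)
    noise_mag = np.abs(X[:n_noise_frames]).mean(axis=0)      # noise estimate
    mag = np.maximum(np.abs(X) - noise_mag, beta * noise_mag)  # spectral floor
    Y = mag * np.exp(1j * np.angle(X))                       # keep noisy phase
    y = np.zeros(len(x))
    for k, f in enumerate(np.fft.irfft(Y, n=frame, axis=1)):
        y[k * hop:k * hop + frame] += f                      # overlap-add
    return y

rng = np.random.default_rng(5)
clean = np.sin(2 * np.pi * 0.05 * np.arange(8000))
noisy = np.concatenate([np.zeros(1500), clean]) + 0.3 * rng.normal(size=9500)
enhanced = spectral_subtraction(noisy)
print("noise-region RMS before/after:",
      np.std(noisy[:1000]).round(3), np.std(enhanced[:1000]).round(3))
```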
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A useful feature-engineering approach for a LVCSR system based on CD-DNN-HMM algorithm.\n \n \n \n \n\n\n \n Lee, S. J.; Kang, B. O.; Chung, H.; and Park, J. G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1421-1425, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362618,\n  author = {S. J. Lee and B. O. Kang and H. Chung and J. G. Park},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A useful feature-engineering approach for a LVCSR system based on CD-DNN-HMM algorithm},\n  year = {2015},\n  pages = {1421-1425},\n  abstract = {In this paper, we propose a useful feature-engineering approach for Context-Dependent Deep-Neural-Network Hidden-Markov-Model (CD-DNN-HMM) based Large-Vocabulary-Continuous-Speech-Recognition (LVCSR) systems. The speech recognition performance of a LVCSR system is improved from two feature-engineering perspectives. The first performance improvement is achieved by adopting the intra/inter-frame feature subsets when the Gaussian-Mixture-Model (GMM) HMMs for the HMM state-level alignment are built. And the second performance gain is then followed with the additional features augmenting the front-end of the DNN. We evaluate the effectiveness of our feature-engineering approach under a series of Korean speech recognition tasks (isolated single-syllable recognition with a medium-sized speech corpus and conversational speech recognition with a large-sized database) using the Kaldi speech recognition toolkit. The results show that the proposed feature-engineering approach outperforms the traditional Mel Frequency Cepstral Coefficient (MFCCs) GMM + Mel-frequency filter-bank output DNN method.},\n  keywords = {feature extraction;hidden Markov models;neural nets;speech recognition;feature-engineering approach;context-dependent deep-neural-network hidden-Markov-model;large-vocabulary-continuous-speech-recognition systems;CD-DNN-HMM LVCSR systems;intra-inter-frame feature subsets;Gaussian-mixture-model HMM;GMM HMM;HMM state-level alignment;Korean speech recognition tasks;Kaldi speech recognition toolkit;Speech recognition;Speech;Feature extraction;Hidden Markov models;Entropy;Harmonic analysis;Acoustics;Feature extraction;feature engineering;speech recognition;deep learning;deep neural network},\n  doi = {10.1109/EUSIPCO.2015.7362618},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102385.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a useful feature-engineering approach for Context-Dependent Deep-Neural-Network Hidden-Markov-Model (CD-DNN-HMM) based Large-Vocabulary Continuous-Speech-Recognition (LVCSR) systems. The speech recognition performance of an LVCSR system is improved from two feature-engineering perspectives. The first performance improvement is achieved by adopting intra/inter-frame feature subsets when the Gaussian-Mixture-Model (GMM) HMMs for the HMM state-level alignment are built. The second performance gain then follows from additional features augmenting the front-end of the DNN. We evaluate the effectiveness of our feature-engineering approach on a series of Korean speech recognition tasks (isolated single-syllable recognition with a medium-sized speech corpus and conversational speech recognition with a large-sized database) using the Kaldi speech recognition toolkit. The results show that the proposed feature-engineering approach outperforms the traditional Mel-Frequency Cepstral Coefficient (MFCC) GMM + Mel-frequency filter-bank output DNN method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Arabic speaker emotion classification using rhythm metrics and neural networks.\n \n \n \n \n\n\n \n Mefiah, A.; Alotaibi, Y. A.; and Selouani, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1426-1430, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ArabicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362619,\n  author = {A. Mefiah and Y. A. Alotaibi and S. Selouani},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Arabic speaker emotion classification using rhythm metrics and neural networks},\n  year = {2015},\n  pages = {1426-1430},\n  abstract = {In this paper, rhythm metrics are calculated and used to classify five Arabic speech emotions; namely, neutral, sad, happy, surprised, and angry. Eight speakers (four male and four female) simulated the five emotions in their speech by speaking three selected sentences two times each. A human perception test was conducted using nine listeners (male and female). The results of a neural network-based automatic emotion recognition system using rhythm metrics were similar to the human perception test results, although less accurate. Anger was the most recognized speaker emotion and happiness was the least. One of our findings is that the emotions of male speakers are easier to recognize than those of female speakers. In addition, we found that the neural networks and rhythm metrics can be used for speaker emotion recognition using speech signals, but only when the dataset size is large enough.},\n  keywords = {emotion recognition;natural language processing;neural nets;signal classification;speaker recognition;Arabic speaker emotion classification;rhythm metrics;Arabic speech emotions classification;neutral;sad;happy;surprised;angry;human perception test;neural network-based automatic emotion recognition system;female speakers;speaker emotion recognition;speech signals;Speech;Rhythm;Measurement;Speech processing;Speech recognition;Emotion recognition;Feature extraction;Emotion;Arabic;corpus;classification},\n  doi = {10.1109/EUSIPCO.2015.7362619},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104855.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, rhythm metrics are calculated and used to classify five Arabic speech emotions, namely neutral, sad, happy, surprised, and angry. Eight speakers (four male and four female) simulated the five emotions in their speech by speaking three selected sentences twice each. A human perception test was conducted using nine listeners (male and female). The results of a neural network-based automatic emotion recognition system using rhythm metrics were similar to the human perception test results, although less accurate. Anger was the most readily recognized speaker emotion and happiness the least. One of our findings is that the emotions of male speakers are easier to recognize than those of female speakers. In addition, we found that neural networks and rhythm metrics can be used for speaker emotion recognition from speech signals, but only when the dataset is large enough.\n
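Rhythm metrics of this kind are typically computed from labeled vocalic and consonantal interval durations; the sketch below computes two standard ones, %V and deltaC, on invented toy durations (the abstract does not specify the paper's exact metric set).

```python
# %V: proportion of total duration that is vocalic.
# deltaC: standard deviation of consonantal interval durations.
import numpy as np

vowel_intervals = np.array([0.11, 0.09, 0.14, 0.08])          # seconds (toy)
consonant_intervals = np.array([0.07, 0.12, 0.05, 0.09, 0.06])

percent_v = 100 * vowel_intervals.sum() / (vowel_intervals.sum()
                                           + consonant_intervals.sum())
delta_c = consonant_intervals.std()
print(f"%V = {percent_v:.1f}, deltaC = {delta_c * 1000:.1f} ms")
```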
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Non-binary LDPC coded OFDM in impulsive power line channels.\n \n \n \n \n\n\n \n Al-Rubaye, G. A.; Tsimenidis, C. C.; and Johnston, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1431-1435, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Non-binaryPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362620,\n  author = {G. A. Al-Rubaye and C. C. Tsimenidis and M. Johnston},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Non-binary LDPC coded OFDM in impulsive power line channels},\n  year = {2015},\n  pages = {1431-1435},\n  abstract = {In this paper, we propose Irregular Non-Binary Low Density Parity Check (IR-NB-LDPC) codes with Signed Log Fast Fourier Transform (SL-FFT) decoding algorithm to overcome the harsh environment of power-line communication (PLC) channels. Their performance is compared with Irregular Binary LDPC (IR-B-LDPC) codes using the Sum Product algorithm (SPA). The sparse parity check matrix H of both codes are constructed using progressive edge growth (PEG) algorithm with a novel initialization of the apriori log likelihood ratios (LLR) of each decoder to mitigate the highly impulsive noise in PLC channels. Numerical performance results obtained via simulations show that the proposed system at bit error rate of 10-4 achieves a coding gain of more than 21 dB compared to uncoded system and more than 6 dB compared to IR-B-LDPC codes for the same block length in bits and rates, however, this is accomplished with higher decoding complexity.},\n  keywords = {carrier transmission on power lines;decoding;Fourier transforms;OFDM modulation;parity check codes;telecommunication channels;sparse parity check matrix;IR-NB-LDPC codes;IR-B-LDPC codes;progressive edge growth algorithm;sum product algorithm;irregular binary LDPC codes;power-line communication channels;signed log fast fourier transform decoding algorithm;nonbinary low density parity check codes;impulsive power line channels;nonbinary LDPC CODED OFDM;Parity check codes;Decoding;Signal processing algorithms;Bit error rate;Complexity theory;Yttrium;Europe;Non-binary LDPC;binary-LDPC;log likelihood ratios;power-line communication channels;OFDM},\n  doi = {10.1109/EUSIPCO.2015.7362620},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096775.pdf},\n}\n\n
@InProceedings{7362621,
  author = {R. Torrea-Duran and P. Tsiaflakis and L. Vandendorpe and M. Moonen},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {An algorithm for cross-layer subcarrier and power allocation in cellular networks},
  year = {2015},
  pages = {1436-1440},
  abstract = {Inter-cell interference is a major challenge in multi-user multi-carrier cellular networks, especially for cells with overlapping coverage. Several subcarrier and power allocation algorithms have been developed to deal with this problem. However, they focus on maximizing data rates using only physical layer information, disregarding upper layer information like the queue backlogs. Assigning subcarriers to the users based only on physical layer information like the channel conditions maximizes data rates, but may lead to network instability. To tackle this problem, we propose a cross-layer subcarrier and power allocation algorithm that uses physical layer information to reduce inter-cell interference and upper layer information to stabilize the network. Furthermore, our approach achieves a larger rate region than the baseline approach by protecting users in neighboring cells.},
  keywords = {cellular radio;multiuser channels;optimisation;queueing theory;radiofrequency interference;network instability;channel conditions;queue backlogs;physical layer information;data rate maximization;overlapping coverage;intercell interference reduction;multiuser multicarrier cellular networks;power allocation algorithm;cross-layer subcarrier algorithm;Resource management;Interference;Base stations;Aggregates;Signal processing algorithms;Physical layer;Heuristic algorithms;Cross-layer;network stability;inter-cell interference},
  doi = {10.1109/EUSIPCO.2015.7362621},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104331.pdf},
}
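A toy sketch of the cross-layer idea in the entry above: weight each user's achievable per-subcarrier rate by its queue backlog before assigning subcarriers, so that upper-layer state influences the physical-layer allocation. The greedy assignment rule and all numbers are our illustration, not the authors' algorithm.

# Toy cross-layer subcarrier assignment: weight rates by queue backlogs
# (illustrative greedy rule, not the algorithm from the paper).
import numpy as np

rng = np.random.default_rng(1)
n_users, n_subcarriers = 3, 16
snr = rng.exponential(scale=10.0, size=(n_users, n_subcarriers))  # per-user channel quality
rate = np.log2(1.0 + snr)                                         # achievable rate per subcarrier
backlog = np.array([50.0, 5.0, 20.0])                             # queued bits per user (made up)

# Assign each subcarrier to the user maximizing backlog-weighted rate, so
# congested users are served even on mediocre channels (stability over raw rate).
assignment = np.argmax(backlog[:, None] * rate, axis=0)
for k in range(n_users):
    print(f"user {k}: subcarriers {np.flatnonzero(assignment == k)}")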
@InProceedings{7362622,
  author = {M. Kashef and M. Ismail and M. Abdallah and K. Qaraqe and E. Serpedin},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Power allocation for maximizing energy efficiency of mixed RF/VLC wireless networks},
  year = {2015},
  pages = {1441-1445},
  abstract = {Developing energy efficient wireless communication networks has become crucial due to the associated environmental and financial benefits. Visible light communication (VLC) has emerged as a promising candidate for achieving energy efficient wireless communication. Integrating VLC with heterogeneous wireless networks has improved the achievable data rates of mobile users. In this paper, we investigate the energy efficiency benefits of employing VLC in a heterogeneous wireless environment. We formulate the problem of power allocation for energy efficiency maximization of a heterogeneous network composed of a VLC system and a radio frequency (RF) communication system. Then, we investigate the impacts of the system parameters on the energy efficiency of the mixed RF/VLC heterogeneous network.},
  keywords = {free-space optical communication;radio networks;telecommunication power management;radio frequency communication system;heterogeneous network;visible light communication;mixed RF/VLC wireless networks;energy efficiency;power allocation;Reliability;Wireless networks;Benchmark testing},
  doi = {10.1109/EUSIPCO.2015.7362622},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099329.pdf},
}
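The abstract above states the energy-efficiency maximization problem without giving the solution method; a standard tool for such rate-over-power ratios is Dinkelbach's algorithm, sketched here for a single toy link with made-up channel gain, circuit power and power budget. It only illustrates the fractional-programming structure of the problem, not the paper's RF/VLC system model.

# Dinkelbach-style sketch for maximizing energy efficiency EE(p) = R(p) / (p0 + p)
# on a single toy link; the paper's system model is richer.
import numpy as np
from scipy.optimize import minimize_scalar

g, p0, p_max = 4.0, 0.5, 10.0           # channel gain, circuit power, power budget (made up)
rate = lambda p: np.log2(1.0 + g * p)

lam = 0.0
for _ in range(30):                      # Dinkelbach iterations
    # maximize rate(p) - lam * (p0 + p) over 0 <= p <= p_max
    res = minimize_scalar(lambda p: -(rate(p) - lam * (p0 + p)),
                          bounds=(0.0, p_max), method="bounded")
    p_star = res.x
    lam = rate(p_star) / (p0 + p_star)   # updated energy-efficiency estimate
print(f"optimal power ~ {p_star:.3f}, energy efficiency ~ {lam:.3f} bit/s/Hz per watt")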
@InProceedings{7362623,
  author = {R. Mitra and V. Bhatia},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A kernel based technique for MSER equalisation for non-linear channels},
  year = {2015},
  pages = {1446-1450},
  abstract = {Adaptive channel equalisation is a signal processing technique to mitigate inter-symbol interference (ISI) in a time dispersive channel. To this end, the use of the least mean squares (LMS) algorithm and its variants is widespread, since they minimise the minimum mean squared error (MMSE) criterion by online stochastic gradient algorithms and asymptotically tend to the optimal Wiener solution for linearly separable channels. The kernel least mean squares (KLMS) algorithm and its variants extend these MMSE-based algorithms to non-linear channels. However, as has been pointed out in the literature, the minimum bit/symbol error rate (MBER/MSER) criterion is a better choice for adapting an equaliser compared to the traditional approaches based on the MMSE criterion. In this paper, we propose a novel equaliser that is inspired by the recently proposed MSER adaptation by Gong et al., using the kernel trick for non-linear channel equalisation.},
  keywords = {adaptive equalisers;dispersive channels;error statistics;gradient methods;interference suppression;intersymbol interference;least mean squares methods;signal processing;stochastic processes;MSER equalisation;adaptive channel equalisation;signal processing technique;intersymbol interference mitigation;ISI mitigation;time dispersive channel;least mean square algorithm;LMS algorithm;minimum mean squared error criteria;MMSE criteria;online stochastic gradient algorithm;optimal Wiener solution;kernel least mean square algorithm;KLMS algorithm;minimum bit error rate;MBER;nonlinear channel equalisation;minimum symbol error rate;Signal processing algorithms;Kernel;Signal to noise ratio;Least squares approximations;Error analysis;Convergence;Minimum symbol-error rate criterion;kernel trick;non-linear equalisation},
  doi = {10.1109/EUSIPCO.2015.7362623},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096641.pdf},
}
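For context, the sketch below implements the bare MMSE-trained kernel LMS baseline that the entry above builds on, with a Gaussian kernel and a growing dictionary; the proposed MSER-driven update is not reproduced. The toy nonlinear channel and all parameters are invented.

# Bare-bones kernel LMS (Gaussian kernel) for a toy nonlinear channel.
import numpy as np

rng = np.random.default_rng(2)
N, L, eta, gamma = 2000, 3, 0.2, 1.0     # samples, input window, step size, kernel width
s = rng.choice([-1.0, 1.0], size=N)      # BPSK symbols
ch = np.convolve(s, [1.0, 0.5], mode="same")
r = ch - 0.2 * ch**3 + 0.05 * rng.normal(size=N)   # nonlinear channel plus noise

centers, alphas = [], []
def predict(x):
    if not centers:
        return 0.0
    C = np.asarray(centers)
    k = np.exp(-gamma * np.sum((C - x) ** 2, axis=1))   # Gaussian kernel evaluations
    return float(np.dot(alphas, k))

errs = []
for n in range(L, N):
    x = r[n - L:n]                       # received window
    e = s[n - 1] - predict(x)            # desired = (delayed) transmitted symbol
    centers.append(x.copy()); alphas.append(eta * e)    # KLMS: grow the dictionary
    errs.append(e ** 2)
print("final MSE:", np.mean(errs[-200:]))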
@InProceedings{7362624,
  author = {J. Verdyck and P. Tsiaflakis and M. Moonen},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Unequal error protection in rate adaptive spectrum management for digital subscriber line systems},
  year = {2015},
  pages = {1451-1455},
  abstract = {Crosstalk between different lines in a cable bundle is the major source of performance degradation in DSL systems. Spectrum coordination techniques have been shown to substantially alleviate the crosstalk problem. The equal level of error protection that these techniques provide can, however, be excessive for some applications, and many applications with diverse error protection requirements may share the same connection. In this paper, two novel rate adaptive spectrum management algorithms are presented that enable a different level of error protection for different applications. The algorithms are generalizations of the globally optimal OSB and the locally optimal DSB algorithms to systems that incorporate unequal error protection. Through simulation, it is shown that unequal error protection can lead to significant performance gains.},
  keywords = {digital subscriber lines;error correction;radio spectrum management;crosstalk;performance degradation;DSL systems;spectrum coordination techniques;rate adaptive spectrum management algorithms;globally optimal OSB algorithms;locally optimal DSB algorithms;unequal error protection;Modems;Error correction codes;DSL;Optimization;Radio spectrum management;Complexity theory;Linear programming;DSL;DSM;Unequal error protection},
  doi = {10.1109/EUSIPCO.2015.7362624},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104415.pdf},
}
@InProceedings{7362625,
  author = {L. Blanco and M. Nájar},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Relay subset selection in cognitive networks with imperfect CSI and individual power constraints},
  year = {2015},
  pages = {1456-1460},
  abstract = {This paper considers the relay subset selection problem in an underlay cognitive network in which two secondary users communicate assisted by a set of N potential relays. More specifically, this paper deals with the joint problem of choosing the best subset of L secondary relays and their corresponding weights which maximize the Signal-to-Interference-plus-Noise Ratio (SINR) at the secondary user receiver, subject to per-relay power constraints and interference power constraints at the primary user. This is a combinatorial problem with a high computational burden. Nevertheless, we propose a sub-optimal technique, based on a convex relaxation of the problem, which achieves near-optimal performance with reduced complexity. Contrary to other approaches in the literature, the secondary relays are not restricted to cooperating at full power.},
  keywords = {cognitive radio;combinatorial mathematics;communication complexity;convex programming;radiofrequency interference;relaxation theory;relay networks (telecommunication);imperfect CSI;individual power constraint;relay subset selection problem;underlay cognitive network;secondary relay;signal-to-interference-plus-noise ratio;SINR;secondary user receiver;interference power constraint;per-relay power constraint;combinatorial problem;suboptimal technique;convex relaxation;Relays;Interference;Signal to noise ratio;Quality of service;Europe;Receivers;Multiple relay selection},
  doi = {10.1109/EUSIPCO.2015.7362625},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104691.pdf},
}
@InProceedings{7362626,
  author = {S. Sparrer and R. F. H. Fischer},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Soft-feedback OMP for the recovery of discrete-valued sparse signals},
  year = {2015},
  pages = {1461-1465},
  abstract = {In Compressed Sensing, a real-valued sparse vector has to be reconstructed from an underdetermined system of linear equations. However, in many applications of digital communications the elements of the unknown sparse vector are drawn from a finite set. The standard reconstruction algorithms of Compressed Sensing do not take this knowledge into account, hence, enhanced algorithms are required to achieve optimum performance. In this paper, we propose a new approach for the reconstruction of discrete-valued sparse signals. On the one hand, the algorithm is tailored to the discrete nature of the signal. On the other hand, reliability information is utilized within the successive reconstruction procedure. Via numerical simulations it is shown that the proposed variant of the Orthogonal Matching Pursuit clearly outperforms the well-known standard versions.},
  keywords = {compressed sensing;iterative methods;reliability;signal reconstruction;time-frequency analysis;discrete-valued sparse signal recovery;soft-feedback OMP;compressed sensing;real-valued sparse vector;linear equations;digital communication applications;standard reconstruction algorithms;discrete-valued sparse signal reconstruction;information reliability;numerical simulation;orthogonal matching pursuit;Signal processing algorithms;Interference;Correlation;Compressed sensing;Decoding;Estimation;Reliability},
  doi = {10.1109/EUSIPCO.2015.7362626},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570085681.pdf},
}
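A minimal reference point for the entry above: standard Orthogonal Matching Pursuit, followed by a naive hard decision that snaps each recovered coefficient to the nearest symbol of a finite alphabet. The paper's soft-feedback mechanism is more refined; this sketch only shows where alphabet knowledge enters.

# Standard OMP plus a final hard decision onto a finite alphabet
# (a naive stand-in for the paper's soft-feedback variant).
import numpy as np

rng = np.random.default_rng(3)
m, n, k = 40, 100, 5
A = rng.normal(size=(m, n)) / np.sqrt(m)          # measurement matrix
alphabet = np.array([-1.0, 1.0])                  # finite symbol set
x = np.zeros(n); idx = rng.choice(n, k, replace=False)
x[idx] = rng.choice(alphabet, size=k)
y = A @ x + 0.01 * rng.normal(size=m)

residual, support = y.copy(), []
for _ in range(k):
    j = int(np.argmax(np.abs(A.T @ residual)))    # most correlated atom
    support.append(j)
    xs, *_ = np.linalg.lstsq(A[:, support], y, rcond=None)
    residual = y - A[:, support] @ xs

x_hat = np.zeros(n)
# hard decision: snap each recovered coefficient to the nearest alphabet symbol
x_hat[support] = alphabet[np.argmin(np.abs(xs[:, None] - alphabet[None, :]), axis=1)]
print("support recovered:", sorted(support) == sorted(idx.tolist()))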
@InProceedings{7362627,
  author = {M. Ayedi and N. Sellami and M. Siala},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Throughput and energy optimization of a cooperative ARQ scheme using analog network coding},
  year = {2015},
  pages = {1466-1470},
  abstract = {In this paper, we propose to improve a Cooperative Automatic Repeat reQuest (C-ARQ) scheme through the optimization of the distribution of the average relay energy cost in a two-source, one-destination wireless system. We consider a BPSK modulated transmission over Rayleigh fading channels. We propose to use specific relay and destination treatment for the different possible cases: both received packets are error free, only one received packet is detected as erroneous, or both received packets are detected as erroneous at the destination. We derive semi-analytical bounds on the throughput and the average relay energy. We find the optimal relay energy distribution which maximizes the throughput by using a dichotomy search. Numerical results confirm the accuracy of the derived closed-form expressions and show the throughput improvement due to the relay energy optimization compared to the conventional cooperative ARQ scheme.},
  keywords = {automatic repeat request;channel coding;cooperative communication;network coding;phase shift keying;Rayleigh channels;relay networks (telecommunication);search problems;telecommunication power management;cooperative ARQ scheme;energy optimization;analog network coding;throughput optimization;cooperative automatic repeat request scheme;average relay energy cost distribution optimization;wireless system;BPSK modulated transmission;Rayleigh fading channel;destination treatment;relay treatment;received packet detection;throughput semianalytical bound;dichotomy search;Europe;Signal processing;Erbium;Yttrium;Silicon;Cooperative ARQ communication;Analog Network Coding;throughput and energy optimization},
  doi = {10.1109/EUSIPCO.2015.7362627},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103719.pdf},
}
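A sketch of the dichotomy search mentioned in the abstract above, applied to a made-up unimodal throughput curve over the relay energy split; the paper's semi-analytical throughput expression is not reproduced.

# Dichotomy (bisection) search for the energy split that maximizes a
# unimodal throughput curve; the throughput model below is a placeholder.
import numpy as np

def throughput(rho):                     # rho = fraction of relay energy on link 1
    return np.log2(1 + 8 * rho) + np.log2(1 + 8 * (1 - rho))

lo, hi, eps = 0.0, 1.0, 1e-6
while hi - lo > eps:
    mid = 0.5 * (lo + hi)
    # bisect on the sign of the (numerical) slope of the unimodal objective
    if throughput(mid + eps) > throughput(mid - eps):
        lo = mid
    else:
        hi = mid
rho = 0.5 * (lo + hi)
print(f"best split rho ~ {rho:.4f}, throughput ~ {throughput(rho):.4f}")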
@InProceedings{7362628,
  author = {J. Stumper and A. Rosich},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Online estimation of a time-varying delay based on a univariate cross-ambiguity function analysis},
  year = {2015},
  pages = {1471-1475},
  abstract = {This paper presents a method to estimate a linearly time-varying delay between two continuous signals. The joint estimation of time delay and Doppler shift by analyzing the cross-ambiguity function is state of the art; however, this method has high computational demands, as it relies on a bi-variate search. It is shown that, by using previous estimation results to initialize the analysis, a similar result can be obtained with a univariate search, as it is then sufficient to search for the variation of the delay since the last measurement. Perfect tracking is obtained with proper initialization, and in the case of incorrect initialization, convergence can be guaranteed and even influenced with a tuning parameter. A theoretical analysis and a numerical example illustrate the performance of the proposed method.},
  keywords = {convergence;Doppler shift;parameter estimation;signal processing;linear time-varying delay online estimation;univariate cross-ambiguity function analysis;Doppler shift estimation;bivariate search;convergence;tuning parameter;continuous signals;Doppler shift;Convergence;Signal processing;Estimation;Europe;Delay estimation;time-varying delay estimation;Doppler shift;wideband cross-ambiguity function},
  doi = {10.1109/EUSIPCO.2015.7362628},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104717.pdf},
}
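A discrete-lag toy version of the univariate idea from the entry above: given a correct previous delay estimate, each new block only requires a search over a small window of lags around that estimate. The signal model, block length and window size are illustrative.

# Track a slowly growing delay by local cross-correlation around the
# previous estimate (discrete-lag toy version of the univariate search).
import numpy as np

rng = np.random.default_rng(4)
block = 256
x = rng.normal(size=20 * block)                    # wideband source signal
true_delay = lambda b: 5 + b                       # delay grows by one sample per block (toy)

est = 5                                            # assume a correct initial estimate
for b in range(1, 8):
    d = true_delay(b)
    r_seg = x[b * block - d : b * block - d + block]          # received block, delayed by d
    ref = lambda l: x[b * block - l : b * block - l + block]  # reference at candidate lag l
    lags = list(range(est - 3, est + 4))           # univariate search near previous estimate
    corr = [float(np.dot(r_seg, ref(l))) for l in lags]
    est = lags[int(np.argmax(corr))]
    print(f"block {b}: true delay {d}, estimate {est}")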
@InProceedings{7362629,
  author = {Y. Doweck and A. Amar and I. Cohen},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Parameter estimation of harmonic linear chirps},
  year = {2015},
  pages = {1476-1480},
  abstract = {We address the problem of estimating the initial frequency and frequency rate of a linear chirp with harmonic components given time samples of the observed signal. As an alternative to the maximum likelihood estimator, which requires an exhaustive search in the initial frequency-frequency rate space, we present a two-step estimation method. First, the signal is separated into its harmonic components. Then, the two parameters of the fundamental component are jointly estimated using a least squares approach given the estimated time-varying phase of each separated component. This method is compared to the maximum likelihood estimator and to a modified high-order ambiguity function based method. Simulation results and a real data example demonstrate the performance of the proposed method. In particular, it is shown that the estimates achieve the Cramer-Rao lower bound at high signal-to-noise ratio and that the two-step method outperforms the high-order ambiguity function based method.},
  keywords = {harmonic analysis;least squares approximations;parameter estimation;signal processing;high signal-to-noise ratio;Cramer-Rao lower bound;modified high-order ambiguity function based method;time-varying phase estimation;least squares approach;initial frequency-frequency rate space;maximum likelihood estimator;harmonic linear chirps;parameter estimation;Harmonic analysis;Maximum likelihood estimation;Chirp;Frequency estimation;Europe;Maximum likelihood estimation;harmonic chirps;Cramer-Rao lower bound},
  doi = {10.1109/EUSIPCO.2015.7362629},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570087825.pdf},
}
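The second step of the method, least-squares estimation of the fundamental parameters from an estimated time-varying phase, can be sketched as follows for a noiseless single-component chirp; the harmonic separation step is omitted.

# Least-squares fit of an unwrapped quadratic phase to estimate the initial
# frequency f0 and chirp rate c of a mono-component linear chirp.
import numpy as np

fs, T = 1000.0, 1.0
t = np.arange(0, T, 1 / fs)
f0, c = 50.0, 30.0                       # initial frequency (Hz), chirp rate (Hz/s)
z = np.exp(2j * np.pi * (f0 * t + 0.5 * c * t ** 2))   # complex linear chirp

phi = np.unwrap(np.angle(z))             # estimated time-varying phase
# model: phi(t) = 2*pi*f0*t + pi*c*t^2 + phi0, linear in (f0, c, phi0)
A = np.column_stack([2 * np.pi * t, np.pi * t ** 2, np.ones_like(t)])
f0_hat, c_hat, _ = np.linalg.lstsq(A, phi, rcond=None)[0]
print(f"f0 ~ {f0_hat:.2f} Hz, chirp rate ~ {c_hat:.2f} Hz/s")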
@InProceedings{7362630,
  author = {A. Napolitano},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Asymptotic normality of cyclic autocorrelation estimate with estimated cycle frequency},
  year = {2015},
  pages = {1481-1485},
  abstract = {For an almost-cyclostationary signal, mean-square consistent and asymptotically complex normal estimators of the cyclic statistics exist, provided that the signal has finite or practically finite memory and the cycle frequency is perfectly known. In this paper, conditions are derived to obtain a mean-square consistent and asymptotically complex normal estimator of the cyclic autocorrelation function with estimated cycle frequency. For this purpose, a new lemma on conditioned cumulants of complex-valued random variables is derived. As an example of application, the problem of detecting a rapidly moving source emitting a cyclostationary signal is addressed, and the case of a low Earth orbit satellite is considered.},
  keywords = {correlation methods;estimation theory;asymptotic normality;cyclic autocorrelation estimation;estimated cycle frequency;almost cyclostationary signal;mean square consistent estimator;asymptotically complex normal estimator;cyclic statistics;finite memory signal;conditioned cumulants;complex valued random variable;low earth orbit satellite;Frequency estimation;Correlation;Random variables;Frequency modulation;Yttrium;Europe;Cyclostationarity;Asymptotic Normality;Doppler effect},
  doi = {10.1109/EUSIPCO.2015.7362630},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096055.pdf},
}
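For reference, a direct estimator of the cyclic autocorrelation, here in the common form R_x^alpha(tau) = (1/N) sum_n x[n+tau] x*[n] e^{-j 2 pi alpha n} (conventions vary), evaluated on a toy rectangular-pulse BPSK signal whose true cycle frequencies are multiples of 1/P.

# Direct cyclic autocorrelation estimator on a cyclostationary toy signal.
import numpy as np

rng = np.random.default_rng(5)
N, P = 4096, 8                            # samples, symbol period
sym = rng.choice([-1.0, 1.0], size=N // P)
x = np.repeat(sym, P) + 0.1 * rng.normal(size=N)   # rectangular-pulse BPSK

def cyclic_acf(x, alpha, tau):
    n = np.arange(len(x) - tau)
    return np.mean(x[n + tau] * np.conj(x[n]) * np.exp(-2j * np.pi * alpha * n))

for alpha in [0.0, 1.0 / P, 0.5 / P]:     # 1/P is a true cycle frequency here, 0.5/P is not
    print(f"alpha={alpha:.4f}: |R| = {abs(cyclic_acf(x, alpha, tau=2)):.4f}")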
@InProceedings{7362631,
  author = {S. Meignen and T. Gardner and T. Oberlin},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Time-frequency ridge analysis based on the reassignment vector},
  year = {2015},
  pages = {1486-1490},
  abstract = {This paper considers the problem of detecting and estimating AM/FM components in the time-frequency plane. It introduces a new algorithm to estimate the ridges corresponding to the instantaneous frequencies of the components, and to segment the time-frequency plane into different ``basins of attraction'', each basin corresponding to one mode. The technique is based on the structure of the reassignment vector, which is commonly used for sharpening time-frequency representations. Compared with previous approaches, this new method does not need extra parameters, exhibits less sensitivity to the choice of the window and shows better reconstruction performance. Its effectiveness is demonstrated on simulated and real datasets.},
  keywords = {signal detection;time-frequency analysis;AM components;FM components;time-frequency representations;reassignment vector;time-frequency ridge analysis;Time-frequency analysis;Spectrogram;Chirp;Europe;Fourier transforms;Signal to noise ratio;multicomponent signals;short-time Fourier transform;reassignment;time-frequency;AM/FM;ridges},
  doi = {10.1109/EUSIPCO.2015.7362631},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097575.pdf},
}
@InProceedings{7362632,
  author = {Z. Deprem and A. E. Çetin},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Kernel estimation for time-frequency distributions using epigraph set of L1-NORM},
  year = {2015},
  pages = {1491-1495},
  abstract = {In this article, a new kernel estimation method is introduced using the epigraph set of the l1-norm. The new method produces high-resolution and cross-term-free estimates for Cohen's class of time-frequency (TF) distributions. The kernel estimation process starts with an initial rough TF distribution. This initial estimate is orthogonally projected onto the epigraph set of the l1-norm in the TF domain. Projection onto the epigraph set of the l1-norm produces a sparse time-frequency distribution, and sparsity in the TF domain leads to cross-term-free TF distributions. Experimental results are presented, and the TF distributions obtained with the estimated kernel are compared to those obtained with an optimized kernel.},
  keywords = {estimation theory;signal processing;time-frequency analysis;time-frequency distribution;L1-norm;kernel estimation method;epigraph set;Cohen's class;Kernel;Signal resolution;Estimation;Time-frequency analysis;Optimization;Europe;Time-frequency distributions;Cohen's Class;l1-norm;sparsity},
  doi = {10.1109/EUSIPCO.2015.7362632},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104711.pdf},
}
@InProceedings{7362633,
  author = {B. Pasdeloup and R. Alami and V. Gripon and M. Rabbat},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Toward an uncertainty principle for weighted graphs},
  year = {2015},
  pages = {1496-1500},
  abstract = {The uncertainty principle states that a signal cannot be localized both in time and frequency. With the aim of extending this result to signals on graphs, Agaskar & Lu introduce notions of graph and spectral spreads. They show that a graph uncertainty principle holds for some families of unweighted graphs. This principle states that a signal cannot be simultaneously localized both in graph and spectral domains. In this paper, we aim to extend their work to weighted graphs. We show that a naive extension of their definitions leads to inconsistent results such as discontinuity of the graph spread when regarded as a function of the graph structure. To circumvent this problem, we propose another definition of graph spread that relies on an inverse similarity matrix. We also discuss the choice of the distance function that appears in this definition. Finally, we compute and plot uncertainty curves for families of weighted graphs.},
  keywords = {graph theory;signal processing;uncertainty principle;weighted graphs;spectral domains;graph domains;graph structure;inverse similarity matrix;distance function;signal processing;Uncertainty;Europe;Symmetric matrices;Eigenvalues and eigenfunctions;Spectral analysis;Context;Signal processing on graphs;uncertainty principle;weighted graphs},
  doi = {10.1109/EUSIPCO.2015.7362633},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095793.pdf},
}
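A numpy sketch of the graph and spectral spreads in the spirit of Agaskar & Lu, for a signal on an unweighted path graph where geodesic distances are trivial to compute; the normalization choices here are ours.

# Graph spread (distance-weighted energy) and spectral spread (Laplacian
# quadratic form) of a localized signal on a path graph.
import numpy as np

n = 32
A = np.zeros((n, n))
for i in range(n - 1):
    A[i, i + 1] = A[i + 1, i] = 1.0       # unweighted path graph
L = np.diag(A.sum(1)) - A                 # combinatorial Laplacian
lam, U = np.linalg.eigh(L)                # graph spectrum / Fourier basis

x = np.exp(-0.5 * ((np.arange(n) - 10) / 3.0) ** 2)   # bump centered at node 10
x /= np.linalg.norm(x)

u0 = int(np.argmax(x ** 2))               # center vertex
dist = np.abs(np.arange(n) - u0)          # geodesic distances on a path graph
graph_spread = float(np.sum(dist ** 2 * x ** 2))
xh = U.T @ x                              # graph Fourier transform
spectral_spread = float(np.sum(lam * xh ** 2))
print(f"graph spread {graph_spread:.3f}, spectral spread {spectral_spread:.3f}")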
@InProceedings{7362634,
  author = {X. Bresson and T. Laurent and J. {von Brecht}},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Enhanced lasso recovery on graph},
  year = {2015},
  pages = {1501-1505},
  abstract = {This work aims at recovering signals that are sparse on graphs. Compressed sensing offers techniques for signal recovery from a few linear measurements, and graph Fourier analysis provides a signal representation on graphs. In this paper, we leverage these two frameworks to introduce a new Lasso recovery algorithm on graphs. More precisely, we present a non-convex, non-smooth algorithm that outperforms the standard convex Lasso technique. We carry out numerical experiments on three benchmark graph datasets.},
  keywords = {Fourier analysis;graph theory;signal representation;signal recovery;compressed sensing;signal representation;graph Fourier analysis;Lasso recovery algorithm;nonconvex nonsmooth algorithm;Signal processing algorithms;Standards;Signal processing;Europe;Algorithm design and analysis;Laplace equations;Convex functions;Graph spectral analysis;Fourier basis;Lasso;ℓ1 relaxation;sparse recovery;non-convex optimization},
  doi = {10.1109/EUSIPCO.2015.7362634},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096793.pdf},
}
@InProceedings{7362635,
  author = {M. Tsitsvero and S. Barbarossa},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {On the degrees of freedom of signals on graphs},
  year = {2015},
  pages = {1506-1510},
  abstract = {Continuous-time signals are well known for not being perfectly localized in both the time and frequency domains. Conversely, a signal defined over the vertices of a graph can be perfectly localized in both the vertex and frequency domains. We derive the conditions ensuring the validity of this property and then, building on this theory, we provide the conditions for perfect reconstruction of a graph signal from its samples. Next, we provide a finite-step algorithm for the reconstruction of a band-limited signal from its samples, show the effect of sampling a not perfectly band-limited signal, and show how to select the bandwidth that minimizes the mean square reconstruction error.},
  keywords = {continuous time systems;graph theory;mean square error methods;signal reconstruction;signal sampling;degree-of-freedom;continuous-time signals;graph vertices;frequency domain;graph signal reconstruction;finite step algorithm;band-limited signal reconstruction;nonperfectly band-limited signal;mean square reconstruction error minimization;Frequency-domain analysis;Laplace equations;Signal processing;Eigenvalues and eigenfunctions;Europe;Fourier transforms;Graph Fourier Transform;sampling on graph;graph signal recovery},
  doi = {10.1109/EUSIPCO.2015.7362635},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097383.pdf},
}
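The sampling result stated in the abstract above can be illustrated directly: a K-bandlimited graph signal lies in the span of the first K Laplacian eigenvectors, so generic samples on at least K vertices determine it via least squares. All graph and signal parameters below are arbitrary.

# Least-squares reconstruction of a K-bandlimited graph signal from
# vertex samples (illustrative numpy sketch, not the paper's algorithm).
import numpy as np

rng = np.random.default_rng(6)
n, K, m = 40, 5, 12
A = (rng.random((n, n)) < 0.15).astype(float)
A = np.triu(A, 1); A = A + A.T            # random undirected graph
L = np.diag(A.sum(1)) - A
lam, U = np.linalg.eigh(L)                # graph Fourier basis

a = rng.normal(size=K)
x = U[:, :K] @ a                          # K-bandlimited graph signal
S = rng.choice(n, size=m, replace=False)  # sampled vertices (m >= K)

a_hat, *_ = np.linalg.lstsq(U[S, :K], x[S], rcond=None)
x_hat = U[:, :K] @ a_hat
print("reconstruction error:", np.linalg.norm(x - x_hat))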
@InProceedings{7362636,
  author = {A. Sakiyama and Y. Tanaka},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Oversampled bipartite graphs with controlled redundancy},
  year = {2015},
  pages = {1511-1515},
  abstract = {This paper extends our previous work on graph oversampling for graph signal processing. In the graph oversampling method, nodes are duplicated and edges are appended to construct an oversampled graph Laplacian matrix. It can convert an arbitrary K-colorable graph into one bipartite graph which includes all edges of the original graph. Since it uses a coloring-based algorithm, the performance of graph signal processing depends on the coloring results. In this paper, we present graph oversampling based on a few different graph bipartition methods which use a maximum spanning tree and eigendecomposition. Furthermore, we consider an effective selection method for the duplicated nodes. The performance of the oversampled graphs is compared through an experiment on graph signal denoising.},
  keywords = {graph theory;signal denoising;trees (mathematics);oversampled bipartite graphs;controlled redundancy;graph signal processing;Laplacian matrix;K-colorable graph;bipartite graph;coloring-based algorithm;graph bipartition methods;spanning tree;eigendecomposition;effective selection method;graph signal denoising;Bipartite graph;Signal processing;Transforms;Redundancy;Europe;Laplace equations;Signal processing algorithms;Graph signal processing;graph bipartition;graph oversampling;graph wavelets;graph filter banks},
  doi = {10.1109/EUSIPCO.2015.7362636},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102395.pdf},
}
@InProceedings{7362637,
  author = {B. Girault},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Stationary graph signals using an isometric graph translation},
  year = {2015},
  pages = {1516-1520},
  abstract = {We extend the concept of stationary temporal signals to stationary graph signals. In doing so, we introduce the concepts of strict-sense stationary and wide-sense stationary graph signals, defined as statistical invariance under an isometric graph translation operator. Using these definitions, we propose a spectral characterisation of WSS graph signals that allows stationarity to be studied using only the spectral components of a graph signal. Finally, we apply this characterisation to a synthetic graph in order to study a few important stochastic graph signals. Also, using geographic data, we study weather readings on a graph of weather stations and show evidence of stationarity in the temperature readings.},
  keywords = {graph theory;signal processing;stationary temporal signals;strict sense stationary;wide sense stationary graph signals;statistical invariance;isometric graph translation operator;spectral characterisation;WSS graph signals;stationarity;spectral components;synthetic graph;stochastic graph signals;geographic data;weather readings;weather stations;temperature readings;Signal processing;Fourier transforms;Correlation;Symmetric matrices;Eigenvalues and eigenfunctions;Europe;Meteorology;Signal processing on graphs;stationary signals},
  doi = {10.1109/EUSIPCO.2015.7362637},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105097.pdf},
}
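One standard construction consistent with the spectral characterisation described above (though not taken from the paper verbatim): synthesize a wide-sense stationary graph signal by shaping white noise in the graph spectral domain, so that the covariance is diagonalized by the graph Fourier basis.

# Synthesize a WSS graph signal by spectral shaping of white noise, then
# check empirically that the covariance is (near) diagonal in the GFT basis.
import numpy as np

rng = np.random.default_rng(7)
n = 30
A = (rng.random((n, n)) < 0.2).astype(float)
A = np.triu(A, 1); A = A + A.T
L = np.diag(A.sum(1)) - A
lam, U = np.linalg.eigh(L)

psd = 1.0 / (1.0 + lam)                   # chosen "graph power spectrum"
x = U @ (np.sqrt(psd) * (U.T @ rng.normal(size=n)))   # one WSS realization
# ensemble check over many realizations: covariance ~ U diag(psd) U^T
X = U @ (np.sqrt(psd)[:, None] * (U.T @ rng.normal(size=(n, 5000))))
C = X @ X.T / 5000
D = U.T @ C @ U
print("off-diagonal energy of U^T C U:", np.linalg.norm(D - np.diag(np.diag(D))))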
@InProceedings{7362638,
  author = {I. Cassagne and N. Riche and M. Decombas and M. Mancas and B. Gosselin and T. Dutoit and R. Laganiere},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Video saliency based on rarity prediction: Hyperaptor},
  year = {2015},
  pages = {1521-1525},
  abstract = {Saliency models are able to provide heatmaps highlighting areas in images which attract human gaze. Most of them are designed for still images, but an increasing trend goes towards extending them to videos by adding dynamic features to the models. Nevertheless, only a few are specifically designed to manage the temporal aspect. We propose a new model which quantifies rarity natively in a spatiotemporal way. Based on a sliding temporal window, static and dynamic features are summarized by a time-evolving ``surface'' of feature statistics, which we call the ``hyperhistogram''. The rarity maps obtained for each feature are combined with the result of a superpixel algorithm to obtain a more object-based orientation. The name of the proposed model, Hyperaptor, stands for hyperhistogram-based rarity prediction. The model is evaluated on a dataset of 12 videos with 2 different references along 3 different metrics, and is shown to achieve better performance than state-of-the-art models.},
  keywords = {feature extraction;video signal processing;superpixel algorithm;hyperhistogram;sliding temporal window;hyperaptor;rarity prediction;video saliency;Decision support systems;Handheld computers;Europe;Signal processing;Feature extraction;Heating;Indexes;Visual attention;Saliency;Rarity Mechanism;Optical Flow;Hyperhistogram},
  doi = {10.1109/EUSIPCO.2015.7362638},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096885.pdf},
}
@InProceedings{7362639,
  author = {D. Awad and M. Mancas and N. Riche and V. Courboulay and A. Revel},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A CBIR-based evaluation framework for visual attention models},
  year = {2015},
  pages = {1526-1530},
  abstract = {Computational models of visual attention, originally proposed as cognitive models of human attention, are nowadays being used as front-ends to numerous vision systems, such as automatic object recognition. These systems are generally evaluated against eye tracking data or manually segmented salient objects in images. We previously showed that this comparison can lead to different rankings depending on which of the two ground truths is used. These findings suggest that the saliency model ranking might be different for each application, and that using eye-tracking rankings to choose a model for a given application is not optimal. Therefore, in this paper, we propose a new saliency evaluation framework optimized for object recognition. This paper aims to answer two questions: 1) Are the application-driven saliency model rankings consistent with classical ground truths like eye-tracking? 2) If not, which saliency models should one use for precise CBIR applications?},
  keywords = {content-based retrieval;image retrieval;image segmentation;object recognition;CBIR-based evaluation framework;visual attention computational model;human attention cognitive model;automatic object recognition;eye tracking data;salient object segmentation;eye-tracking ranking;saliency evaluation framework;application-driven saliency model ranking;classical ground truth;content-based image retrieval;Visualization;Computational modeling;Feature extraction;Object recognition;Signal processing algorithms;Image color analysis;Image retrieval;saliency models;object recognition;CBIR;eye tracking;attention;saliency evaluation},
  doi = {10.1109/EUSIPCO.2015.7362639},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100569.pdf},
}
\n
\n\n\n
\n Computational models of visual attention, originally proposed as cognitive models of human attention, are nowadays used as front-ends to numerous vision systems such as automatic object recognition. These systems are generally evaluated against eye-tracking data or manually segmented salient objects in images. We previously showed that this comparison can lead to different rankings depending on which of the two ground truths is used. These findings suggest that the saliency model ranking might differ for each application, and that using eye-tracking rankings to choose a model for a given application is not optimal. Therefore, in this paper, we propose a new saliency evaluation framework optimized for object recognition. This paper aims to answer two questions: 1) Are application-driven saliency model rankings consistent with classical ground truths such as eye-tracking? 2) If not, which saliency models should one use for specific CBIR applications?\n
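One concrete way to test this kind of ranking consistency is a rank-correlation coefficient between the two orderings. The sketch below (requires SciPy) compares an eye-tracking-based ranking with a hypothetical CBIR-based one via Kendall's tau; all model names and scores are placeholders, not results from the paper.

```python
# Hedged sketch: do two evaluation protocols rank saliency models the same way?
import numpy as np
from scipy.stats import kendalltau

models = ["A", "B", "C", "D", "E"]
eye_tracking_score = np.array([0.71, 0.65, 0.80, 0.55, 0.60])  # e.g. AUC vs fixations
cbir_score         = np.array([0.40, 0.52, 0.45, 0.38, 0.50])  # e.g. retrieval mAP

tau, p = kendalltau(eye_tracking_score, cbir_score)
print(f"Kendall tau = {tau:.2f} (p = {p:.2f})")  # low tau -> the rankings disagree
```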
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An efficient audiovisual saliency model to predict eye positions when looking at conversations.\n \n \n \n \n\n\n \n Coutrot, A.; and Guyader, N.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1531-1535, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362640,\n  author = {A. Coutrot and N. Guyader},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An efficient audiovisual saliency model to predict eye positions when looking at conversations},\n  year = {2015},\n  pages = {1531-1535},\n  abstract = {Classic models of visual attention dramatically fail at predicting eye positions on visual scenes involving faces. While some recent models combine faces with low-level features, none of them consider sound as an input. Yet it is crucial in conversation or meeting scenes. In this paper, we describe and refine an audiovisual saliency model for conversation scenes. This model includes a speaker diarization algorithm which automatically modulates the saliency of conversation partners' faces and bodies according to their speaking-or-not status. To merge our different features into a master saliency map, we use an efficient statistical method (Lasso) allowing a straightforward interpretation of feature relevance. To train and evaluate our model, we run an eye tracking experiment on a publicly available meeting videobase. We show that increasing the saliency of speakers' faces (but not bodies) greatly improves the predictions of our model, compared to previous ones giving an equal and constant weight to each conversation partner.},\n  keywords = {audio-visual systems;face recognition;gaze tracking;speaker recognition;statistical analysis;audiovisual saliency model;eye position prediction;low-level feature;speaker diarization algorithm;statistical method;feature relevance straightforward interpretation;eye tracking experiment;Visualization;Signal processing algorithms;Heuristic algorithms;Statistical analysis;Europe;Signal processing;Speech;saliency model;audiovisual;face;eye movements;conversations},\n  doi = {10.1109/EUSIPCO.2015.7362640},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102201.pdf},\n}\n\n
\n
\n\n\n
\n Classic models of visual attention dramatically fail at predicting eye positions on visual scenes involving faces. While some recent models combine faces with low-level features, none of them consider sound as an input, yet it is crucial in conversation or meeting scenes. In this paper, we describe and refine an audiovisual saliency model for conversation scenes. This model includes a speaker diarization algorithm which automatically modulates the saliency of conversation partners' faces and bodies according to their speaking-or-not status. To merge our different features into a master saliency map, we use an efficient statistical method (Lasso) allowing a straightforward interpretation of feature relevance. To train and evaluate our model, we ran an eye-tracking experiment on a publicly available database of meeting videos. We show that increasing the saliency of speakers' faces (but not bodies) greatly improves the predictions of our model, compared to previous ones giving an equal and constant weight to each conversation partner.\n
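The Lasso fusion step can be pictured as a sparse linear regression from stacked feature maps onto a fixation density map. The following minimal sketch uses scikit-learn's Lasso on synthetic data; the feature set and dimensions are illustrative assumptions, not the paper's setup.

```python
# Hedged sketch of Lasso fusion: learn sparse, non-negative weights that
# combine per-frame feature maps (faces, bodies, low-level saliency, ...)
# into a master map predicting a fixation density map.
import numpy as np
from sklearn.linear_model import Lasso

H, W, F = 36, 64, 4                    # map size, number of features
feats = np.random.rand(H * W, F)       # stand-in for stacked feature maps
fix_density = feats @ np.array([0.6, 0.3, 0.0, 0.1]) + 0.01 * np.random.randn(H * W)

model = Lasso(alpha=1e-3, positive=True).fit(feats, fix_density)
print("feature weights:", model.coef_)            # sparse -> interpretable relevance
master_map = (feats @ model.coef_).reshape(H, W)  # fused master saliency map
```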
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n How task difficulty influences eye movements when exploring natural scene images.\n \n \n \n \n\n\n \n Devillez, H.; Guérin-Dugué, A.; and Guyader, N.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1536-1540, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"HowPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362641,\n  author = {H. Devillez and A. Guérin-Dugué and N. Guyader},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {How task difficulty influences eye movements when exploring natural scene images},\n  year = {2015},\n  pages = {1536-1540},\n  abstract = {Since Yarbus [1], it is well known that task greatly influences the visual exploration of a scene. In this paper, we quantify the influence of three different cognitive tasks on the visual exploration of various natural scene images. Eye movements made to solve the three tasks were compared to the ones recorded during a free-exploration condition. Eye movement parameters and eye fixation positions were compared during the time course of the exploration according to fixation/saccade rank. Tasks were chosen because they implied different visual processing and might be classified according to their difficulty. We found that task difficulty impacts eye-movement characteristics at the very beginning of exploration. The variability between eye fixations of observers is also impacted. Finally, eye-movement characteristics might reveal the tasks solving.},\n  keywords = {gaze tracking;image classification;natural scenes;eye movement;natural scene image exploration;scene visual exploration;cognitive tasks;free-exploration condition;eye fixation position;visual processing;Visualization;Iron;Observers;Brain models;Europe;Signal processing;Eye movement;Visual Attention;Tasks;Saliency},\n  doi = {10.1109/EUSIPCO.2015.7362641},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105031.pdf},\n}\n\n
\n
\n\n\n
\n Since Yarbus [1], it is well known that the task greatly influences the visual exploration of a scene. In this paper, we quantify the influence of three different cognitive tasks on the visual exploration of various natural scene images. Eye movements made to solve the three tasks were compared to those recorded during a free-exploration condition. Eye movement parameters and eye fixation positions were compared over the time course of the exploration according to fixation/saccade rank. The tasks were chosen because they involve different visual processing and can be ranked by difficulty. We found that task difficulty impacts eye-movement characteristics at the very beginning of exploration. The variability between the eye fixations of observers is also impacted. Finally, eye-movement characteristics might reveal which task is being solved.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A learning-based visual saliency fusion model for High Dynamic Range video (LBVS-HDR).\n \n \n \n \n\n\n \n Banitalebi-Dehkordi, A.; Dong, Y.; Pourazad, M. T.; and Nasiopoulos, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1541-1545, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362642,\n  author = {A. Banitalebi-Dehkordi and Y. Dong and M. T. Pourazad and P. Nasiopoulos},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A learning-based visual saliency fusion model for High Dynamic Range video (LBVS-HDR)},\n  year = {2015},\n  pages = {1541-1545},\n  abstract = {Saliency prediction for Standard Dynamic Range (SDR) videos has been well explored in the last decade. However, limited studies are available on High Dynamic Range (HDR) Visual Attention Models (VAMs). Considering that the characteristic of HDR content in terms of dynamic range and color gamut is quite different than those of SDR content, it is essential to identify the importance of different saliency attributes of HDR videos for designing a VAM and understand how to combine these features. To this end we propose a learning-based visual saliency fusion method for HDR content (LVBS-HDR) to combine various visual saliency features. In our approach various conspicuity maps are extracted from HDR data, and then for fusing conspicuity maps, a Random Forests algorithm is used to train a model based on the collected data from an eye-tracking experiment. Performance evaluations demonstrate the superiority of the proposed fusion method against other existing fusion methods.},\n  keywords = {gaze tracking;image colour analysis;eye-tracking;random forests algorithm;conspicuity maps;color gamut;VAM;visual attention models;SDR videos;standard dynamic range videos;saliency prediction;HDR;high dynamic range video;LBVS;learning-based visual saliency fusion;Visualization;Feature extraction;Image color analysis;Training;Databases;Dynamic range;Radio frequency;High Dynamic Range video;HDR;visual attention model;saliency prediction},\n  doi = {10.1109/EUSIPCO.2015.7362642},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105157.pdf},\n}\n\n
\n
\n\n\n
\n Saliency prediction for Standard Dynamic Range (SDR) videos has been well explored in the last decade. However, limited studies are available on High Dynamic Range (HDR) Visual Attention Models (VAMs). Considering that the characteristics of HDR content in terms of dynamic range and color gamut are quite different from those of SDR content, it is essential to identify the importance of the different saliency attributes of HDR videos for designing a VAM and to understand how to combine these features. To this end we propose a learning-based visual saliency fusion method for HDR content (LBVS-HDR) to combine various visual saliency features. In our approach various conspicuity maps are extracted from HDR data, and a Random Forests algorithm is then used to fuse them, trained on data collected from an eye-tracking experiment. Performance evaluations demonstrate the superiority of the proposed fusion method over other existing fusion methods.\n
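The fusion stage can be sketched as a regression problem: per-pixel conspicuity features in, eye-tracking saliency out. Below is a hedged illustration with scikit-learn's RandomForestRegressor on synthetic data; the three features and the target are placeholders for the paper's conspicuity maps and fixation maps.

```python
# Hedged sketch of Random-Forest fusion of conspicuity maps.
import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
X = rng.random((5000, 3))                    # e.g. intensity, color, motion conspicuity
y = 0.5 * X[:, 0] + 0.4 * X[:, 2] + 0.1 * rng.random(5000)  # proxy saliency target

rf = RandomForestRegressor(n_estimators=100, random_state=0).fit(X, y)
print("feature importances:", rf.feature_importances_)  # which attributes matter
fused = rf.predict(X[:10])                   # fused saliency for new pixels
```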
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A methodology for estimating the radiation pattern of a violin during the performance.\n \n \n \n \n\n\n \n Canclini, A.; Mucci, L.; Antonacci, F.; Sarti, A.; and Tubaro, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1546-1550, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362643,\n  author = {A. Canclini and L. Mucci and F. Antonacci and A. Sarti and S. Tubaro},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A methodology for estimating the radiation pattern of a violin during the performance},\n  year = {2015},\n  pages = {1546-1550},\n  abstract = {We propose a method for the estimation of the three-dimensional radiation pattern of violins, during the performance of a musician. A microphone array captures the energy radiated by the violin in different directions using beamforming based on sub-arrays. The 3D radiation pattern is estimated allowing the musician to freely move. In particular, a tracking system estimates the position and orientation of the violin. The adopted system can be also used in a mildly reverberant environment, thus allowing the musician to play in a natural fashion. The experimental results prove the accuracy and the effectiveness of the method.},\n  keywords = {acoustic signal processing;array signal processing;microphone arrays;three-dimensional radiation pattern estimation;microphone array;beamforming;3D radiation pattern estimation;tracking system;position estimation;reverberant environment;Antenna radiation patterns;Cameras;Microphones;Acoustics;Acoustic measurements;Instruments;Europe;Musical acoustics;plenacoustic analysis;radiation pattern},\n  doi = {10.1109/EUSIPCO.2015.7362643},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103001.pdf},\n}\n\n
\n
\n\n\n
\n We propose a method for estimating the three-dimensional radiation pattern of violins during a musician's performance. A microphone array captures the energy radiated by the violin in different directions using beamforming based on sub-arrays. The 3D radiation pattern is estimated while allowing the musician to move freely; in particular, a tracking system estimates the position and orientation of the violin. The adopted system can also be used in a mildly reverberant environment, thus allowing the musician to play in a natural fashion. The experimental results prove the accuracy and the effectiveness of the method.\n
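A minimal sub-array beamforming step might look as follows: delay each microphone signal back to a common origin for a candidate look direction and measure the output power, which, sampled over many directions around the tracked instrument, yields a radiation-pattern estimate. This is a generic far-field delay-and-sum sketch, not the authors' processing chain.

```python
# Hedged delay-and-sum sketch, assuming a far-field source and known geometry.
import numpy as np

def steered_energy(X, mic_pos, look_dir, fs, c=343.0):
    """X: (M, N) mic signals; mic_pos: (M, 3) metres; look_dir: unit vector."""
    delays = mic_pos @ look_dir / c                  # arrival-time advance per mic
    N = X.shape[1]
    f = np.fft.rfftfreq(N, 1.0 / fs)
    out = np.zeros(N)
    for m in range(X.shape[0]):                      # fractional delay via phase shift
        out += np.fft.irfft(np.fft.rfft(X[m]) * np.exp(-2j * np.pi * f * delays[m]), N)
    return float(np.mean((out / X.shape[0]) ** 2))   # steered output power

# Example: two mics on the x-axis, steered broadside.
mics = np.array([[0.0, 0.0, 0.0], [0.1, 0.0, 0.0]])
print(steered_energy(np.random.randn(2, 1024), mics, np.array([0.0, 1.0, 0.0]), 16000))
```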
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Addressing the data-association problem for multiple sound source localization using DOA estimates.\n \n \n \n \n\n\n \n Alexandridis, A.; Borboudakis, G.; and Mouchtaris, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1551-1555, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AddressingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362644,\n  author = {A. Alexandridis and G. Borboudakis and A. Mouchtaris},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Addressing the data-association problem for multiple sound source localization using DOA estimates},\n  year = {2015},\n  pages = {1551-1555},\n  abstract = {In this paper, we consider the data association problem that arises when localizing multiple sound sources using direction of arrival (DOA) estimates from multiple microphone arrays. In such a scenario, the association of the DOAs across the arrays that correspond to the same source is unknown and must be found for accurate localization. We present an association algorithm that finds the correct DOA association to the sources based on features extracted for each source that we propose. Our method results in high association and localization accuracy in scenarios with missed detections, reverberation, and noise and outperforms other recently proposed methods.},\n  keywords = {acoustic signal processing;direction-of-arrival estimation;feature extraction;microphone arrays;sensor fusion;wireless sensor networks;data-association problem;multiple sound source localization;DOA estimation;direction of arrival;microphone arrays;Direction-of-arrival estimation;Feature extraction;Signal processing algorithms;Array signal processing;Frequency estimation;Microphone arrays;data-association;multiple microphone arrays;direction of arrival;wireless acoustic sensor networks;localization},\n  doi = {10.1109/EUSIPCO.2015.7362644},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103123.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we consider the data association problem that arises when localizing multiple sound sources using direction of arrival (DOA) estimates from multiple microphone arrays. In such a scenario, the association of the DOAs across the arrays that correspond to the same source is unknown and must be found for accurate localization. We present an association algorithm that, based on features we propose to extract for each source, finds the correct association of DOAs to sources. Our method achieves high association and localization accuracy in scenarios with missed detections, reverberation, and noise, and outperforms other recently proposed methods.\n
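With azimuth-only DOAs, the association problem can be made concrete as follows: enumerate the candidate pairings across arrays, triangulate each hypothesised source in least squares, and keep the assignment with the smallest ray-to-point residual. This brute-force sketch (three arrays, two sources) is only a baseline for intuition; the paper's algorithm relies on per-source features rather than exhaustive search.

```python
# Hedged sketch of exhaustive DOA association across three arrays (2D, azimuth only).
import numpy as np
from itertools import permutations, product

def fit_point(origins, azimuths):
    """Least-squares intersection of 2D rays; returns (point, residual)."""
    A, b, Ps = np.zeros((2, 2)), np.zeros(2), []
    for p, a in zip(origins, azimuths):
        d = np.array([np.cos(a), np.sin(a)])
        P = np.eye(2) - np.outer(d, d)       # projects onto the ray's normal
        A += P; b += P @ p; Ps.append((P, p))
    x = np.linalg.solve(A, b)
    return x, sum(float((x - p) @ P @ (x - p)) for P, p in Ps)

def associate(origins, doas):
    """doas: per-array lists of K azimuths (3 arrays); returns best pairing."""
    K = len(doas[0]); best = (None, np.inf)
    for p2, p3 in product(permutations(range(K)), repeat=2):
        err = sum(fit_point(origins, [doas[0][k], doas[1][p2[k]], doas[2][p3[k]]])[1]
                  for k in range(K))
        if err < best[1]:
            best = ((p2, p3), err)
    return best

origins = [np.zeros(2), np.array([4.0, 0.0]), np.array([0.0, 4.0])]
sources = [np.array([2.0, 1.0]), np.array([3.0, 3.0])]
doas = [[float(np.arctan2(*(s - o)[::-1])) for s in sources] for o in origins]
doas[1].reverse(); doas[2].reverse()         # scramble the unknown association
print(associate(origins, doas))              # recovers the reversed pairing
```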
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 3D localization of multiple sound sources with intensity vector estimates in single source zones.\n \n \n \n \n\n\n \n Pavlidi, D.; Delikaris-Manias, S.; Pulkki, V.; and Mouchtaris, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1556-1560, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"3DPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362645,\n  author = {D. Pavlidi and S. Delikaris-Manias and V. Pulkki and A. Mouchtaris},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {3D localization of multiple sound sources with intensity vector estimates in single source zones},\n  year = {2015},\n  pages = {1556-1560},\n  abstract = {This work proposes a novel method for 3D direction of arrival (DOA) estimation based on the sound intensity vector estimation, via the encoding of the signals of a spherical microphone array from the space domain to the spherical harmonic domain. The sound intensity vector is estimated on detected single source zones (SSZs), where one source is dominant. A smoothed 2D histogram of these estimates reveals the DOA of the present sources and through an iterative process, accurate 3D DOA information can be obtained. The performance of the proposed method is demonstrated through simulations in various signal-to-noise ratio and reverberation conditions.},\n  keywords = {acoustic signal processing;array signal processing;direction-of-arrival estimation;iterative methods;microphone arrays;vectors;3D localization;multiple sound sources;single source zones;3D direction of arrival estimation;sound intensity vector estimation;encoding;spherical microphone array;space domain;spherical harmonic domain;smoothed 2D histogram;iterative process;signal-to-noise ratio;reverberation conditions;Direction-of-arrival estimation;Histograms;Microphones;Estimation;Harmonic analysis;Three-dimensional displays;Arrays;direction of arrival;3D;multiple sources;microphone array processing;sound intensity},\n  doi = {10.1109/EUSIPCO.2015.7362645},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104651.pdf},\n}\n\n
\n
\n\n\n
\n This work proposes a novel method for 3D direction of arrival (DOA) estimation based on sound intensity vector estimation, via the encoding of the signals of a spherical microphone array from the space domain to the spherical harmonic domain. The sound intensity vector is estimated on detected single source zones (SSZs), where one source is dominant. A smoothed 2D histogram of these estimates reveals the DOAs of the sources present, and accurate 3D DOA information can be obtained through an iterative process. The performance of the proposed method is demonstrated through simulations in various signal-to-noise ratio and reverberation conditions.\n
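A hedged sketch of the core computation: in each time-frequency bin the active intensity vector is I = Re{p · conj(v)}, and a 2D (azimuth, elevation) histogram of the per-bin directions peaks at the source DOAs. B-format-style inputs (pressure plus three particle-velocity channels) and the bin counts are simplifying assumptions; the paper estimates intensity from a spherical array in the spherical harmonic domain and restricts the histogram to single source zones.

```python
# Minimal intensity-vector DOA histogram (simplified, B-format-style inputs).
import numpy as np

def doa_histogram(P, Vx, Vy, Vz, n_az=72, n_el=36):
    """P, V*: (F, T) complex STFTs; returns a 2D direction histogram."""
    Ix = np.real(P * np.conj(Vx)).ravel()    # active intensity components
    Iy = np.real(P * np.conj(Vy)).ravel()
    Iz = np.real(P * np.conj(Vz)).ravel()
    Ix, Iy, Iz = -Ix, -Iy, -Iz               # flip: intensity points away from the source
    az = np.arctan2(Iy, Ix)                  # [-pi, pi]
    el = np.arctan2(Iz, np.hypot(Ix, Iy))    # [-pi/2, pi/2]
    H, _, _ = np.histogram2d(az, el, bins=[n_az, n_el],
                             range=[[-np.pi, np.pi], [-np.pi / 2, np.pi / 2]])
    return H                                  # smooth and peak-pick for the DOAs
```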
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Energy-vs-performance trade-offs in speech enhancement in wireless acoustic sensor networks.\n \n \n \n \n\n\n \n de la Hucha Arce, F.; Rosas, F.; Moonen, M.; Verhelst, M.; and Bertrand, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1561-1565, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Energy-vs-performancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362646,\n  author = {F. {de la Hucha Arce} and F. Rosas and M. Moonen and M. Verhelst and A. Bertrand},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Energy-vs-performance trade-offs in speech enhancement in wireless acoustic sensor networks},\n  year = {2015},\n  pages = {1561-1565},\n  abstract = {Distributed algorithms allow wireless acoustic sensor networks (WASNs) to divide the computational load of signal processing tasks, such as speech enhancement, among the sensor nodes. However, current algorithms focus on performance optimality, oblivious to the energy constraints that battery-powered sensor nodes usually face. To extend the lifetime of the network, nodes should be able to dynamically scale down their energy consumption when decreases in performance are tolerated. In this paper we study the relationship between energy and performance in the DANSE algorithm applied to speech enhancement. We propose two strategies that introduce flexibility to adjust the energy consumption and the desired performance. To analyze the impact of these strategies we combine an energy model with simulations. Results show that the energy consumption can be substantially reduced depending on the tolerated decrease in performance. This shows significant potential for extending the network lifetime using dynamic system reconfiguration.},\n  keywords = {energy consumption;speech enhancement;telecommunication power management;wireless sensor networks;energy-vs-performance trade-offs;speech enhancement;wireless acoustic sensor networks;distributed algorithms;computational load;signal processing;battery-powered sensor nodes;network lifetime;energy consumption;DANSE algorithm;Speech enhancement;Speech;Microphones;Signal processing algorithms;Bandwidth;Wireless communication;Dynamic system reconfiguration;distributed signal processing;wireless acoustic sensor networks},\n  doi = {10.1109/EUSIPCO.2015.7362646},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104623.pdf},\n}\n\n
\n
\n\n\n
\n Distributed algorithms allow wireless acoustic sensor networks (WASNs) to divide the computational load of signal processing tasks, such as speech enhancement, among the sensor nodes. However, current algorithms focus on performance optimality, oblivious to the energy constraints that battery-powered sensor nodes usually face. To extend the lifetime of the network, nodes should be able to dynamically scale down their energy consumption when decreases in performance are tolerated. In this paper we study the relationship between energy and performance in the DANSE algorithm applied to speech enhancement. We propose two strategies that introduce flexibility to adjust the energy consumption and the desired performance. To analyze the impact of these strategies we combine an energy model with simulations. Results show that the energy consumption can be substantially reduced depending on the tolerated decrease in performance. This shows significant potential for extending the network lifetime using dynamic system reconfiguration.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed acoustic SLAM.\n \n \n \n \n\n\n \n Grzymkowski, Ł.; Główczewski, K.; and Raczyński, S. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1566-1570, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362647,\n  author = {Ł. Grzymkowski and K. Główczewski and S. A. Raczyński},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed acoustic slam},\n  year = {2015},\n  pages = {1566-1570},\n  abstract = {Vision-based methods are very popular for simultaneous localization and environment mapping (SLAM). One can imagine that exploiting the natural acoustic landscape of the robot's environment can prove to be a useful alternative to vision SLAM. Visual SLAM depends on matching local features between images, whereas distributed acoustic SLAM is based on matching acoustic events. Proposed DASLAM is based on distributed microphone arrays, where each microphone is connected to a separate, moving, controllable recording device, which requires compensation for their different clock shifts. We show that this controlled mobility is necessary to deal with underdetermined cases. Estimation is done using particle filtering. Results show that both tasks can be accomplished with good precision, even for the theoretically underdetermined cases. For example, we were able to achieve mapping error as low as 17.53 cm for sound sources with localization error of 18.61 cm and clock synchronization error of 42 μs for 2 robots and 2 sources.},\n  keywords = {acoustic applications;acoustic signal processing;microphone arrays;particle filtering (numerical methods);SLAM (robots);distributed acoustic SLAM;simultaneous localization and environment mapping;acoustic event matching;distributed microphone array;underdetermined case;particle filter;Robot kinematics;Simultaneous localization and mapping;Acoustics;Microphone arrays;Clocks;microphone arrays;distributed microphone arrays;mobile robots;particle filter;robot navigation;source localization},\n  doi = {10.1109/EUSIPCO.2015.7362647},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105017.pdf},\n}\n\n
\n
\n\n\n
\n Vision-based methods are very popular for simultaneous localization and environment mapping (SLAM). One can imagine that exploiting the natural acoustic landscape of the robot's environment can prove to be a useful alternative to visual SLAM. Visual SLAM depends on matching local features between images, whereas distributed acoustic SLAM is based on matching acoustic events. The proposed DASLAM is based on distributed microphone arrays, where each microphone is connected to a separate, moving, controllable recording device; this requires compensating for their different clock shifts. We show that this controlled mobility is necessary to deal with underdetermined cases. Estimation is done using particle filtering. Results show that both tasks can be accomplished with good precision, even for the theoretically underdetermined cases. For example, we were able to achieve a mapping error as low as 17.53 cm for sound sources, with a localization error of 18.61 cm and a clock synchronization error of 42 μs for 2 robots and 2 sources.\n
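As a much-simplified illustration of the particle-filtering machinery, the sketch below localizes one static source from noisy bearing observations taken along a known robot trajectory. DASLAM additionally estimates the map and compensates clock shifts across devices; every constant here is an illustrative assumption.

```python
# Hedged, much-simplified particle filter: one static source, known robot path.
import numpy as np

rng = np.random.default_rng(0)
src = np.array([3.0, 2.0])                     # unknown source position
P = rng.uniform(-5, 5, size=(2000, 2))         # particles over the map
w = np.ones(len(P)) / len(P)

for step in range(30):
    robot = np.array([0.2 * step, 0.0])        # known robot trajectory
    z = np.arctan2(*(src - robot)[::-1]) + 0.05 * rng.standard_normal()
    pred = np.arctan2(P[:, 1] - robot[1], P[:, 0] - robot[0])
    err = np.angle(np.exp(1j * (z - pred)))    # wrapped bearing error
    w *= np.exp(-0.5 * (err / 0.05) ** 2)      # Gaussian bearing likelihood
    w /= w.sum()
    if 1.0 / np.sum(w ** 2) < len(P) / 2:      # resample when ESS drops
        idx = rng.choice(len(P), len(P), p=w)
        P = P[idx] + 0.02 * rng.standard_normal(P.shape)
        w = np.ones(len(P)) / len(P)

print(np.round(w @ P, 2))                      # posterior mean ~ source position
```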
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Channel-aware energy optimization of OFDM receivers using dynamic precision scaling in FPGAs.\n \n \n \n \n\n\n \n Cladera, F.; Gautier, M.; and Sentieys, O.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1571-1575, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Channel-awarePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362648,\n  author = {F. Cladera and M. Gautier and O. Sentieys},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Channel-aware energy optimization of OFDM receivers using dynamic precision scaling in FPGAS},\n  year = {2015},\n  pages = {1571-1575},\n  abstract = {To reduce the energy consumption of Orthogonal Frequency-Division Multiplexing (OFDM) systems, a new variable word-length method is presented in this paper. A simulation based approach is used: the optimized fixed-point implementation of an OFDM receiver is found for different simulated channel conditions, depending on the Signal-to-Noise Ratio (SNR) and the channel type. During the execution, the receiver estimates the channel conditions and chooses the optimum word-length to decode the received information. A realistic energy consumption of the receiver is estimated with a library that contains the energy consumption of Field-Programmable Gate Array (FPGA) basic operators depending on the bit-width, obtained from experimental data. Up to 57% of the dynamic energy can be saved using this method.},\n  keywords = {channel allocation;field programmable gate arrays;OFDM modulation;optimisation;telecommunication power management;channel aware energy optimization;orthogonal frequency-division multiplexing systems;OFDM receivers;dynamic precision scaling;field programmable gate array;FPGA;word-length method;optimized fixed-point implementation;signal-to-noise ratio;SNR;Receivers;OFDM;Signal to noise ratio;Energy consumption;Bit error rate;Field programmable gate arrays;Equalizers;Fixed-point arithmetic;OFDM receiver;energy reduction;variable wordlength},\n  doi = {10.1109/EUSIPCO.2015.7362648},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103427.pdf},\n}\n\n
\n
\n\n\n
\n To reduce the energy consumption of Orthogonal Frequency-Division Multiplexing (OFDM) systems, a new variable word-length method is presented in this paper. A simulation-based approach is used: the optimized fixed-point implementation of an OFDM receiver is found for different simulated channel conditions, depending on the Signal-to-Noise Ratio (SNR) and the channel type. During execution, the receiver estimates the channel conditions and chooses the optimal word-length to decode the received information. A realistic energy consumption of the receiver is estimated with a library that contains the energy consumption of basic Field-Programmable Gate Array (FPGA) operators as a function of bit-width, obtained from experimental data. Up to 57% of the dynamic energy can be saved using this method.\n
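The run-time policy can be pictured as a lookup: offline fixed-point simulation yields, per channel condition, the smallest word-length with acceptable BER, and at run time the receiver estimates the SNR and selects accordingly. The table values below are placeholders, not the paper's measurements.

```python
# Hedged sketch of dynamic precision scaling as an SNR-indexed lookup.
# (snr_dB_threshold, word_length_bits) pairs from offline fixed-point simulation.
TABLE = [(5.0, 16), (10.0, 12), (15.0, 10), (20.0, 8)]

def choose_wordlength(snr_db):
    """Higher estimated SNR tolerates coarser quantization: fewer bits, less energy."""
    wl = 24                            # fallback: widest word-length
    for threshold, bits in TABLE:
        if snr_db >= threshold:
            wl = bits
    return wl

print(choose_wordlength(12.3))         # -> 12
```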
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A high performance FPGA-GPU-CPU platform for a real-time locating system.\n \n \n \n \n\n\n \n Alawieh, M.; Kasparek, M.; Franke, N.; and Hupfer, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1576-1580, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362649,\n  author = {M. Alawieh and M. Kasparek and N. Franke and J. Hupfer},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A high performance FPGA-GPU-CPU platform for a real-time locating system},\n  year = {2015},\n  pages = {1576-1580},\n  abstract = {A heterogeneous Software Defined Radio (SDR) cluster platform that handles highly demanding processing algorithms in real-time is proposed. The solution based on a combination of FPGA, GPU and CPU offers the best balance between performance, cost, and flexibility. The key feature of our heterogeneous platform is achieving the required performance by assigning the tasks according to the technology characteristics. The FPGA in the proposed system does not only acquire external data but perform initial acquisition. This process aids in facilitating parallelism on the GPU side and optimizing the data transfer. The performance of the platform is demonstrated for an intensive real-time localization application. The overall costs are kept extremely low when compared to other solutions that can provide similar performance.},\n  keywords = {field programmable gate arrays;graphics processing units;mobile computing;software radio;high performance FPGA-GPU-CPU platform;real-time locating system;heterogeneous software defined radio cluster platform;heterogeneous SDR cluster platform;data transfer optimization;Correlation;Field programmable gate arrays;Graphics processing units;Receivers;Transmitters;Signal processing algorithms;Finite impulse response filters;Wirelss locating systems;FPGA;GPU;CPU;heterogeneous platform;SDR},\n  doi = {10.1109/EUSIPCO.2015.7362649},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570093555.pdf},\n}\n\n
\n
\n\n\n
\n A heterogeneous Software Defined Radio (SDR) cluster platform that handles highly demanding processing algorithms in real time is proposed. The solution, based on a combination of FPGA, GPU and CPU, offers the best balance between performance, cost, and flexibility. The key feature of our heterogeneous platform is that it achieves the required performance by assigning tasks according to the characteristics of each technology. The FPGA in the proposed system not only acquires external data but also performs the initial acquisition. This facilitates parallelism on the GPU side and optimizes the data transfer. The performance of the platform is demonstrated on an intensive real-time localization application. The overall costs are kept extremely low compared to other solutions that can provide similar performance.\n
\n\n\n
\n\n\n \n\n\n
\n \n\n \n \n \n \n \n \n Speeding up execution time of a smart wheelchair command technique using parallel computing.\n \n \n \n \n\n\n \n Ghorbel, A.; Jallouli, M.; and Ben Amor, N.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1586-1590, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SpeedingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362651,\n  author = {A. Ghorbel and M. Jallouli and N. {Ben Amor}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Speeding up execution time of a smart wheelchair command technique using parallel computing},\n  year = {2015},\n  pages = {1586-1590},\n  abstract = {An Human Machine Interface for wheelchair control based on facial expressions is presented. The interface is implemented on embedded system architecture based on ARM processor rather than personal computer, like usual wheelchair command implementation, to reduce energy consumption while maintaining similar computing performance. The command technique code is complex but offers inherent parallelism. To reduce processing time, two parallelism levels are exploited. The first one is instruction parallelism which is applied to a dual core architecture using OpenMP directives. The second level is data parallelism in which an SIMD specific unit is exploited. In both parallelization techniques, a minimum of initial code re-manipulation is required. As processing unit, we choose the Pandaboard-ES platform that includes a dual-core ARM9 and a set of control interfaces. The obtained preliminary experiments demonstrate the effectiveness of this parallelization and conduct to a 40% reduction of processing time against a conventional x86 CPU.},\n  keywords = {handicapped aids;human computer interaction;microprocessor chips;parallel processing;wheelchairs;dual-core ARM9;Pandaboard-ES platform;SIMD specific unit;data parallelism;OpenMP directive;dual core architecture;instruction parallelism;energy consumption;ARM processor;embedded system architecture;facial expressions;wheelchair control;human machine interface;parallel computing;smart wheelchair command technique;Parallel processing;Wheelchairs;Signal processing algorithms;Computer architecture;Instruction sets;Face;Neon;HMI;embedded system;dual-core;parallelism},\n  doi = {10.1109/EUSIPCO.2015.7362651},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105085.pdf},\n}\n\n
\n
\n\n\n
\n A Human-Machine Interface for wheelchair control based on facial expressions is presented. The interface is implemented on an embedded system architecture based on an ARM processor rather than on a personal computer, as in usual wheelchair command implementations, to reduce energy consumption while maintaining similar computing performance. The command technique code is complex but offers inherent parallelism. To reduce processing time, two levels of parallelism are exploited. The first is instruction parallelism, applied to a dual-core architecture using OpenMP directives. The second is data parallelism, which exploits a SIMD-specific unit. Both parallelization techniques require a minimum of re-manipulation of the initial code. As the processing unit, we choose the Pandaboard-ES platform, which includes a dual-core ARM9 and a set of control interfaces. Preliminary experiments demonstrate the effectiveness of this parallelization and lead to a 40% reduction of processing time against a conventional x86 CPU.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Estimating the HEVC decoding energy using high-level video features.\n \n \n \n \n\n\n \n Herglotz, C.; and Kaup, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1591-1595, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"EstimatingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362652,\n  author = {C. Herglotz and A. Kaup},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Estimating the HEVC decoding energy using high-level video features},\n  year = {2015},\n  pages = {1591-1595},\n  abstract = {This paper shows how the decoding energy of HEVC software decoders can be estimated when using high-level features of a coded bit stream. The investigated features comprise number of frames, resolution, bitrate, QP, and encoder configuration, where the proposed model reaches an average estimation error of 10%. While establishing this model, we closely investigated the influence of these high-level features on the decoding energy. Mathematical relations are derived that can, e.g., be exploited to control the decoding energy from the encoder side. To show the validity of our research, evaluations are performed on two different hardware devices and three different software solutions.},\n  keywords = {decoding;video coding;mathematical relation;bitrate;resolution;encoder configuration;frame number;coded bit stream;HEVC software decoders;high-level video features;HEVC decoding energy estimation;Decoding;Energy resolution;Streaming media;Software;Energy consumption;Energy measurement;Signal processing;Video decoding;HEVC;energy estimation},\n  doi = {10.1109/EUSIPCO.2015.7362652},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570093423.pdf},\n}\n\n
\n
\n\n\n
\n This paper shows how the decoding energy of HEVC software decoders can be estimated using high-level features of a coded bit stream. The investigated features comprise the number of frames, resolution, bitrate, QP, and encoder configuration; the proposed model reaches an average estimation error of 10%. While establishing this model, we closely investigated the influence of these high-level features on the decoding energy. Mathematical relations are derived that can, e.g., be exploited to control the decoding energy from the encoder side. To show the validity of our research, evaluations are performed on two different hardware devices and three different software solutions.\n
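A hedged sketch of such a high-level model: treat the decoding energy as a linear function of stream features and fit the coefficients in least squares to measured energies. The features, coefficients, and synthetic measurements below are illustrative, not the paper's model or data.

```python
# Hedged sketch: linear decoding-energy model fitted to (synthetic) measurements.
import numpy as np

rng = np.random.default_rng(1)
frames  = rng.integers(100, 600, 50)
pixels  = rng.choice([416 * 240, 1280 * 720, 1920 * 1080], 50)
bitrate = rng.uniform(0.5, 20.0, 50)           # Mbit/s
qp      = rng.integers(22, 38, 50)

X = np.column_stack([frames, frames * pixels, bitrate, qp, np.ones(50)])
E_true = 2e-9 * frames * pixels + 0.3 * bitrate + 0.05 * frames
E_meas = E_true * rng.uniform(0.95, 1.05, 50)  # measurement noise

coef, *_ = np.linalg.lstsq(X, E_meas, rcond=None)
rel_err = np.abs(X @ coef - E_meas) / E_meas
print(f"mean estimation error: {100 * rel_err.mean():.1f}%")
```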
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n GNU radio based digital beamforming system: BER and computational performance analysis.\n \n \n \n \n\n\n \n Balakrishnan, S.; and Ong, L. T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1596-1600, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"GNUPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362653,\n  author = {S. Balakrishnan and L. T. Ong},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {GNU radio based digital beamforming system: BER and computational performance analysis},\n  year = {2015},\n  pages = {1596-1600},\n  abstract = {The rapid growth in computational capacity of general purpose processors (GPPs) has allowed for an alternative to traditional implementation of digital signal processing systems. Signal processing algorithms that were once implemented in dedicated field programmable gate arrays (FPGAs) and embedded digital signal processors are now being increasingly implemented using softwares. This paper presents the development of a GPP based digital beamforming system using GNU Radio - an Open Source software development platform for signal processing applications to be used with software defined radio systems. The developed beamforming system is based on minimum variance distortionless response (MVDR) algorithm. We study the Bit Error Rate (BER) performance of the beamforming system. We provide the experimental BER results to highlight the signal recovery capabilities of the beamformer. This paper also addresses the challenges of real-time implementation and analyses the computational complexity of the GPP based digital beamforming system.},\n  keywords = {array signal processing;error statistics;software radio;GNU radio;digital beamforming system;BER analysis;computational performance analysis;general purpose processor;embedded digital signal processor;open source software development platform;minimum variance distortionless response algorithm;Array signal processing;Bit error rate;Antenna arrays;Software;Binary phase shift keying;Covariance matrices;GNU Radio;software defined radio;digital beamforming},\n  doi = {10.1109/EUSIPCO.2015.7362653},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104515.pdf},\n}\n\n
\n
\n\n\n
\n The rapid growth in the computational capacity of general purpose processors (GPPs) has allowed for an alternative to the traditional implementation of digital signal processing systems. Signal processing algorithms that were once implemented in dedicated field programmable gate arrays (FPGAs) and embedded digital signal processors are now increasingly implemented in software. This paper presents the development of a GPP-based digital beamforming system using GNU Radio - an open source software development platform for signal processing applications to be used with software defined radio systems. The developed beamforming system is based on the minimum variance distortionless response (MVDR) algorithm. We study the Bit Error Rate (BER) performance of the beamforming system and provide experimental BER results to highlight the signal recovery capabilities of the beamformer. This paper also addresses the challenges of real-time implementation and analyses the computational complexity of the GPP-based digital beamforming system.\n
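The MVDR weights themselves are compact enough to sketch directly: w = R⁻¹a / (aᴴR⁻¹a) minimizes the output power subject to a distortionless response toward the steering vector a. The array geometry and covariance below are illustrative, not the paper's GNU Radio implementation.

```python
# Minimal MVDR sketch: distortionless response toward the look direction.
import numpy as np

def mvdr_weights(R, a):
    """R: (M, M) array covariance; a: (M,) steering vector."""
    Ri_a = np.linalg.solve(R, a)
    return Ri_a / (a.conj() @ Ri_a)

# Uniform linear array, half-wavelength spacing, look direction 20 degrees.
M, theta = 8, np.deg2rad(20)
a = np.exp(1j * np.pi * np.arange(M) * np.sin(theta))
R = np.eye(M)                         # identity covariance, for illustration only
w = mvdr_weights(R, a)
print(abs(w.conj() @ a))              # distortionless constraint: exactly 1.0
```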
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Vectorization of binaural sound virtualization on the ARM Cortex-A15 architecture.\n \n \n \n \n\n\n \n Belloch, J. A.; González, A.; Igual, F. D.; Mayo, R.; and Quintana-Orti, E. S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1601-1605, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"VectorizationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362654,\n  author = {J. A. Belloch and A. González and F. D. Igual and R. Mayo and E. S. Quintana-Orti},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Vectorization of binaural sound virtualization on the ARM Cortex-A15 architecture},\n  year = {2015},\n  pages = {1601-1605},\n  abstract = {Today's mobile devices are equipped with low power processors featuring SIMD (single-instruction, multiple-data) floating-point units which can operate with multiple data units concurrently. This is the case, e.g., of the ARMv7 architecture, which integrates the (NEON) Advanced SIMD extension, a combined 64- and 128-bit SIMD instruction set for standardized acceleration of media and signal processing applications. In this paper we target the efficient implementation of binaural sound virtualization, a heavy-duty audio processing application that can eventually require 16 convolutions to synthesize a virtual sound source. For this application, we describe a data reorganization that allows to exploit the 128-bit NEON intrinsics of an ARM Cortex-A15 core. As a result, our new SIMD-accelerated implementation is capable of reproducing up to 60 sound sources under realtime conditions, compared with the 40 sound sources that can be handled by the original code.},\n  keywords = {audio signal processing;microprocessor chips;binaural sound virtualization vectorization;ARM Cortex-A15 architecture;SIMD floating-point units;single-instruction multiple-data;NEON Advanced SIMD extension;media application;signal processing application;heavy-duty audio processing application;sound source;Neon;Registers;Program processors;Virtualization;Computer architecture;Databases;Signal processing;Audio Processing;Spatial Sound;Low Power Processors;ARMv7 and ARM Cortex-A15;NEON Intrinsics},\n  doi = {10.1109/EUSIPCO.2015.7362654},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104889.pdf},\n}\n\n
\n
\n\n\n
\n Today's mobile devices are equipped with low-power processors featuring SIMD (single-instruction, multiple-data) floating-point units which can operate on multiple data units concurrently. This is the case, e.g., of the ARMv7 architecture, which integrates the NEON Advanced SIMD extension, a combined 64- and 128-bit SIMD instruction set for standardized acceleration of media and signal processing applications. In this paper we target the efficient implementation of binaural sound virtualization, a heavy-duty audio processing application that can require 16 convolutions to synthesize a virtual sound source. For this application, we describe a data reorganization that makes it possible to exploit the 128-bit NEON intrinsics of an ARM Cortex-A15 core. As a result, our new SIMD-accelerated implementation is capable of reproducing up to 60 sound sources under real-time conditions, compared with the 40 sound sources that can be handled by the original code.\n
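A rough NumPy analogue of the data-reorganization idea: instead of convolving each source with its HRIR pair one at a time, stack the sources into a batch and let a single vectorized FFT-based pass process them all, much as NEON processes four float32 lanes per instruction. Shapes and signals are illustrative; the paper operates on real HRIR filters with NEON intrinsics.

```python
# Hedged sketch: batched binaural convolution instead of per-source loops.
import numpy as np

S, N, L = 8, 4096, 512                  # sources, block length, HRIR length
x = np.random.randn(S, N)               # source blocks
h = np.random.randn(S, 2, L)            # per-source left/right HRIRs (stand-ins)

X = np.fft.rfft(x, n=N + L - 1)[:, None, :]      # (S, 1, F)
H = np.fft.rfft(h, n=N + L - 1)                  # (S, 2, F)
y = np.fft.irfft(X * H, n=N + L - 1)             # (S, 2, N+L-1): all convolutions at once
binaural = y.sum(axis=0)                         # mix all sources per ear
```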
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A predictive function optimization algorithm for multi-spectral skin lesion assessment.\n \n \n \n \n\n\n \n Li, C.; Balla-Arabe, S.; Brost, V.; and Yang, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1606-1610, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362655,\n  author = {C. Li and S. Balla-Arabe and V. Brost and F. Yang},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A predictive function optimization algorithm for multi-spectral skin lesion assessment},\n  year = {2015},\n  pages = {1606-1610},\n  abstract = {The newly introduced Kubelka-Munk Genetic Algorithm (KMGA) is a promising technique used in the assessment of skin lesions. Unfortunately, this method is computationally expensive due to its function inverting process. In the work of this paper, we design a Predictive Function Optimization Algorithm in order to improve the efficiency of KMGA by speeding up its convergence rate. Using this approach, a High-Convergence-Rate KMGA (HCR-KMGA) is implemented onto multi-core processors and FPGA devices respectively. Furthermore, the implementations are optimized using parallel computing techniques. Intensive experiments demonstrate that HCR-KMGA can effectively accelerate KMGA method, while improving its assessment accuracy as well.},\n  keywords = {biomedical optical imaging;field programmable gate arrays;genetic algorithms;inverse problems;medical image processing;optimisation;parallel algorithms;skin;predictive function optimization algorithm;multispectral skin lesion assessment;Kubelka-Munk Genetic Algorithm;function inverting process;convergence rate;high-convergence-rate KMGA;HCR-KMGA;multicore processors;FPGA devices;parallel computing techniques;Optimization;Skin;Field programmable gate arrays;Signal processing algorithms;Lesions;Genetic algorithms;Hardware;Multi-spectral Image Processing;Light-Tissue Interaction;Genetic Algorithm;Kubelka-Munk model;Embedded System;SW/HW Co-design;FPGA;High-Level Synthesis;High-Performance Computing;POSIX Thread},\n  doi = {10.1109/EUSIPCO.2015.7362655},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097293.pdf},\n}\n\n
\n
\n\n\n
\n The recently introduced Kubelka-Munk Genetic Algorithm (KMGA) is a promising technique for the assessment of skin lesions. Unfortunately, this method is computationally expensive due to its function-inverting process. In this paper, we design a Predictive Function Optimization Algorithm to improve the efficiency of KMGA by speeding up its convergence rate. Using this approach, a High-Convergence-Rate KMGA (HCR-KMGA) is implemented on multi-core processors and FPGA devices, and the implementations are further optimized using parallel computing techniques. Intensive experiments demonstrate that HCR-KMGA effectively accelerates the KMGA method while also improving its assessment accuracy.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Temporal error concealment for fisheye video sequences based on equisolid re-projection.\n \n \n \n \n\n\n \n Eichenseer, A.; Seiler, J.; Bätz, M.; and Kaup, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1611-1615, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"TemporalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362656,\n  author = {A. Eichenseer and J. Seiler and M. Bätz and A. Kaup},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Temporal error concealment for fisheye video sequences based on equisolid re-projection},\n  year = {2015},\n  pages = {1611-1615},\n  abstract = {Wide-angle video sequences obtained by fisheye cameras exhibit characteristics that may not very well comply with standard image and video processing techniques such as error concealment. This paper introduces a temporal error concealment technique designed for the inherent characteristics of equisolid fisheye video sequences by applying a re-projection into the equisolid domain after conducting part of the error concealment in the perspective domain. Combining this technique with conventional decoder motion vector estimation achieves average gains of 0.71 dB compared against pure decoder motion vector estimation for the test sequences used. Maximum gains amount to up to 2.04 dB for selected frames.},\n  keywords = {image motion analysis;image sequences;wide-angle video sequences;fisheye cameras;temporal error concealment technique;equisolid fisheye video sequences;equisolid domain;perspective domain;decoder motion vector estimation;test sequences;equisolid re-projection;gain 0.71 dB;gain 2.04 dB;Video sequences;Gain;Cameras;Signal processing;Estimation;Adaptation models;Europe;Error Concealment;Fisheye Lens;Temporal Prediction;Motion Vector Estimation},\n  doi = {10.1109/EUSIPCO.2015.7362656},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096287.pdf},\n}\n\n
\n
\n\n\n
\n Wide-angle video sequences obtained by fisheye cameras exhibit characteristics that may not very well comply with standard image and video processing techniques such as error concealment. This paper introduces a temporal error concealment technique designed for the inherent characteristics of equisolid fisheye video sequences by applying a re-projection into the equisolid domain after conducting part of the error concealment in the perspective domain. Combining this technique with conventional decoder motion vector estimation achieves average gains of 0.71 dB compared against pure decoder motion vector estimation for the test sequences used. Maximum gains amount to up to 2.04 dB for selected frames.\n
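The two projections involved are compact: a perspective camera maps incident angle θ to radius r = f·tan(θ), while an equisolid fisheye maps it to r = 2f·sin(θ/2), so re-projection amounts to inverting one mapping and applying the other. A minimal radial sketch (valid for θ below 90°, ignoring the image resampling step) follows; function names are illustrative.

```python
# Hedged sketch of the equisolid <-> perspective radial re-projection.
import numpy as np

def equisolid_to_perspective(r_fish, f):
    theta = 2.0 * np.arcsin(np.clip(r_fish / (2.0 * f), -1.0, 1.0))
    return f * np.tan(theta)            # only valid for theta < pi/2

def perspective_to_equisolid(r_persp, f):
    theta = np.arctan(r_persp / f)
    return 2.0 * f * np.sin(theta / 2.0)

r = np.linspace(0.0, 1.2, 5)
print(perspective_to_equisolid(equisolid_to_perspective(r, f=1.0), f=1.0))  # ~r
```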
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Human vision model including age dependencies.\n \n \n \n \n\n\n \n Mantiuk, R. K.; and Ramponi, G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1616-1620, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"HumanPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362657,\n  author = {R. K. Mantiuk and G. Ramponi},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Human vision model including age dependencies},\n  year = {2015},\n  pages = {1616-1620},\n  abstract = {We extend a model of the human visual system to predict the effects of age. The extensions are based on the existing models of disability glare, aging of the crystalline lens and reduced pupil size with age. The complete model, including an empirical neural component, can well explain the differences in contrast sensitivity between old and young observers.},\n  keywords = {neural nets;physiological models;empirical neural component;pupil size;crystalline lens aging;disability glare;human visual system;age dependencies;human vision model;Sensitivity;Retina;Visualization;Aging;Lenses;Observers;Predictive models;Visual model;Visual difference predictor (VDP);aging effects;high dynamic range images},\n  doi = {10.1109/EUSIPCO.2015.7362657},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102033.pdf},\n}\n\n
\n
\n\n\n
\n We extend a model of the human visual system to predict the effects of age. The extensions are based on the existing models of disability glare, aging of the crystalline lens and reduced pupil size with age. The complete model, including an empirical neural component, can well explain the differences in contrast sensitivity between old and young observers.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n p-th Power total variation regularization in photon-limited imaging via iterative reweighting.\n \n \n \n \n\n\n \n Adhikari, L.; and Marcia, R. F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1621-1625, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"p-thPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362658,\n  author = {L. Adhikari and R. F. Marcia},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {p-th Power total variation regularization in photon-limited imaging via iterative reweighting},\n  year = {2015},\n  pages = {1621-1625},\n  abstract = {Recent work in ℓp-norm regularized sparsity recovery problems (where 0 ≤ p ≤ 1) has shown that signals can be recovered with very high accuracy despite the fact that the solution to these nonconvex optimization problems are not necessarily the global minima but are instead potentially local minima. In particular, ℓp-norm regularization has been used effectively for signal reconstruction from measurements corrupted by zero-mean additive Gaussian noise. This paper describes a p-th power total variation (TVp) regularized optimization approach for image recovery problems in photon-limited settings using iterative reweighting. The proposed method iteratively convexifies a sequence of nonconvex TVp subproblems using a weighted TV approach and is solved using a modification to the FISTA method for TV-based de-noising. We explore the effectiveness of the proposed method through numerical experiments in image deblurring.},\n  keywords = {concave programming;image enhancement;iterative methods;p-th power total variation regularization;photon-limited imaging;iterative reweighting;ℓp-norm regularized sparsity recovery problems;nonconvex optimization problems;global minima;signal reconstruction;zero-mean additive Gaussian noise;power total variation;image recovery problems;photon-limited settings;iterative reweighting;weighted TV approach;FISTA method;TV-based denoising;image deblurring;TV;Image reconstruction;Photonics;Minimization;Europe;Signal processing;Noise measurement;Total variation (TV);weighted TV;SPIRAL-TVp;photon-limited imaging;Poisson noise},\n  doi = {10.1109/EUSIPCO.2015.7362658},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097723.pdf},\n}\n\n
Piecewise parameterised Markov random fields for semi-local Hurst estimation. Regli, J. -.; and Nelson, J. D. B. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1626-1630, Aug 2015.
@InProceedings{7362659,\n  author = {J. -. Regli and J. D. B. Nelson},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Piecewise parameterised Markov random fields for semi-local Hurst estimation},\n  year = {2015},\n  pages = {1626-1630},\n  abstract = {Semi-local Hurst estimation is considered by incorporating a Markov random field model to constrain a wavelet-based pointwise Hurst estimator. This results in an estimator which is able to exploit the spatial regularities of a piecewise parametric varying Hurst parameter. The pointwise estimates are jointly inferred along with the parametric form of the underlying Hurst function which characterises how the Hurst parameter varies deterministically over the spatial support of the data. Unlike recent Hurst regularisation methods, the proposed approach is flexible in that arbitrary parametric forms can be considered and is extensible in as much as the associated gradient descent algorithm can accommodate a broad class of distributional assumptions without any significant modifications. The potential benefits of the approach are illustrated with simulations of various first-order polynomial forms.},\n  keywords = {gradient methods;Markov processes;signal processing;wavelet transforms;associated gradient descent algorithm;wavelet-based pointwise Hurst estimator;semilocal Hurst estimation;piecewise parameterised Markov random fields;Estimation;Markov processes;Mathematical model;Europe;Signal processing;Least squares approximations;Signal processing algorithms},\n  doi = {10.1109/EUSIPCO.2015.7362659},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097507.pdf},\n}\n\n
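As context for the parametric side of this approach, the sketch below fits one plausible first-order polynomial form H(u, v) = a + b*u + c*v to a grid of noisy pointwise Hurst estimates by ordinary least squares (the keywords mention least-squares approximations). The paper's joint wavelet-based estimation, MRF coupling and gradient-descent machinery are not reproduced; fit_linear_hurst_surface is an illustrative name.

import numpy as np

def fit_linear_hurst_surface(H_pointwise):
    # Least-squares fit of H(u, v) = a + b*u + c*v to a grid of pointwise
    # Hurst estimates (one simple first-order parametric form).
    m, n = H_pointwise.shape
    u, v = np.meshgrid(np.arange(m), np.arange(n), indexing="ij")
    A = np.column_stack([np.ones(m * n), u.ravel(), v.ravel()])
    coef, *_ = np.linalg.lstsq(A, H_pointwise.ravel(), rcond=None)
    return coef, (A @ coef).reshape(m, n)

# Example: recover a linearly varying Hurst field from noisy pointwise estimates.
rng = np.random.default_rng(0)
u, v = np.meshgrid(np.arange(32), np.arange(32), indexing="ij")
H_true = 0.4 + 0.005 * u + 0.002 * v
coef, H_fit = fit_linear_hurst_surface(H_true + 0.05 * rng.standard_normal((32, 32)))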
Road network extraction by a higher-order CRF model built on centerline cliques. Besbes, O.; and Benazza-Benyahia, A. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1631-1635, Aug 2015.
@InProceedings{7362660,\n  author = {O. Besbes and A. Benazza-Benyahia},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Road network extraction by a higher-order CRF model built on centerline cliques},\n  year = {2015},\n  pages = {1631-1635},\n  abstract = {The goal of this work is to recover road networks from aerial images. This problem is extremely challenging because roads not only exhibit a highly varying appearance but also are usually occluded by nearby objects. Most importantly, roads are complex structures as they form connected networks of segments with slowly changing width and curvature. As an effective tool for their extraction, we propose to resort to a Conditional Random Field (CRF) model. Our contribution consists in representing the prior on the complex structure of the roads by higher-order potentials defined over centerline cliques. Robust PN-Potts potentials are defined over such relevant cliques as well as over background cliques to integrate long-range constraints within the objective model energy. The optimal solution is derived thanks to graph-cuts tools. We demonstrate promising results and make qualitative and quantitative comparisons to state-of-the-art methods on the Vaihingen database.},\n  keywords = {geophysical image processing;image representation;random processes;road network extraction;higher-order CRF model;centerline clique;aerial imaging;conditional random field model;robust PN-Potts potential;graph-cuts tool;Vaihingen database;Roads;Image segmentation;Shape;Feature extraction;Color;Robustness;Detectors;Road network;higher-order CRF;centerline cliques;graph-cuts;aerial images},\n  doi = {10.1109/EUSIPCO.2015.7362660},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103659.pdf},\n}\n\n
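To make the higher-order term concrete: a robust P^n-Potts-style potential charges a clique nothing when all its pixels agree, a cost growing linearly with the number of pixels deviating from the dominant label, and a truncated maximum so that a minority of outliers does not dominate the energy. The sketch below is a simplified cost of that flavor, not the exact potentials of the paper; theta and gamma_max are illustrative parameters.

import numpy as np

def robust_pn_potts(labels_in_clique, theta=1.0, gamma_max=5.0):
    # Simplified robust P^n-Potts-style cost: zero for a label-consistent
    # clique, linear in the number of deviating pixels, truncated at gamma_max.
    labels = np.asarray(labels_in_clique)
    _, counts = np.unique(labels, return_counts=True)
    n_deviating = labels.size - counts.max()
    return min(theta * n_deviating, gamma_max)

# A centerline clique that is mostly "road" (label 1) pays only a small penalty:
print(robust_pn_potts([1, 1, 1, 1, 0]))   # 1.0
print(robust_pn_potts([1, 0, 1, 0, 0]))   # 2.0 (still below the gamma_max cap)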
A blind deblurring and image decomposition approach for astronomical image restoration. Mourya, R.; Denis, L.; Becker, J.; and Thiébaut, E. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1636-1640, Aug 2015.
@InProceedings{7362661,\n  author = {R. Mourya and L. Denis and J. Becker and E. Thiébaut},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A blind deblurring and image decomposition approach for astronomical image restoration},\n  year = {2015},\n  pages = {1636-1640},\n  abstract = {With the progress of adaptive optics systems, ground-based telescopes acquire images with improved resolutions. However, compensation for atmospheric turbulence is still partial, which leaves good scope for digital restoration techniques to recover fine details in the images. A blind image deblurring algorithm for a single long-exposure image is proposed, which is an instance of maximum-a-posteriori estimation posed as a constrained non-convex optimization problem. A view of the sky contains mainly two types of sources: point-like and smooth extended sources. The algorithm takes into account this fact explicitly by imposing different priors on these components, and recovers two separate maps for them. Moreover, an appropriate prior on the blur kernel is also considered. The resulting optimization problem is solved by alternating minimization. The initial experimental results on synthetically corrupted images are promising: the algorithm is able to restore the fine details in the image and recover the point spread function.},\n  keywords = {adaptive optics;astronomical image processing;astronomical telescopes;atmospheric turbulence;compensation;concave programming;decomposition;image restoration;maximum likelihood estimation;minimisation;optical transfer function;point spread function;minimization;blur kernel;constrained nonconvex optimization problem;maximum-a-posteriori estimation;single long-exposure imaging;blind image deblurring algorithm;digital restoration technique;atmospheric turbulence;compensation;image acquisition;ground-based telescope;adaptive optics system;astronomical image restoration;image decomposition approach;Image restoration;Imaging;Estimation;Optimization;Europe;Signal processing;Signal processing algorithms;Astronomical imaging;blind image deblurring;non-convex optimization;alternate minimization;Huber function;ADMM},\n  doi = {10.1109/EUSIPCO.2015.7362661},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104679.pdf},\n}\n\n
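A bare skeleton of the alternating minimization mentioned in the abstract is sketched below for a 1-D signal: gradient steps on the image and on the blur kernel in turn, with the kernel kept nonnegative and of unit mass. It deliberately omits the paper's decomposition into point-like and extended maps, the priors on each component, and the ADMM machinery; a Gaussian data term and circular convolution are assumed, and the step sizes and initial kernel are illustrative.

import numpy as np

def blind_deblur_alternating(y, kernel_len=5, n_outer=100, step_x=0.5, step_k=1e-3):
    # Alternating gradient steps for y = k (*) x + noise, with circular
    # convolution implemented via the FFT so both partial gradients are exact.
    n = y.size
    x = y.copy()
    k = np.zeros(n)
    k[:kernel_len] = 1.0 / kernel_len          # crude uniform initial kernel
    for _ in range(n_outer):
        K = np.fft.fft(k)
        r = np.fft.ifft(K * np.fft.fft(x)).real - y
        # x-step: gradient of 0.5*||k (*) x - y||^2 w.r.t. x is corr(k, r)
        x -= step_x * np.fft.ifft(np.conj(K) * np.fft.fft(r)).real
        X = np.fft.fft(x)
        r = np.fft.ifft(np.fft.fft(k) * X).real - y
        # k-step: same objective, gradient w.r.t. k (step_k assumes roughly
        # unit-scale data; tune it for other scales), then project the kernel
        k -= step_k * np.fft.ifft(np.conj(X) * np.fft.fft(r)).real
        k = np.maximum(k, 0.0)
        k /= k.sum() + 1e-12                   # nonnegative, unit-mass kernel
    return x, k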
Semi-blind separation of galaxy spectra from a mixture obtained by slitless spectroscopy. Selloum, A.; Hosseini, S.; Contini, T.; and Deville, Y. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1641-1645, Aug 2015.
@InProceedings{7362662,\n  author = {A. Selloum and S. Hosseini and T. Contini and Y. Deville},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Semi-blind separation of galaxy spectra from a mixture obtained by slitless spectroscopy},\n  year = {2015},\n  pages = {1641-1645},\n  abstract = {We investigate the problem of separating galaxy spectra from their mixtures resulting from the slitless spectroscopy used in the future Euclid space mission. This can be formulated as a source separation problem where the structure of the mixture is specific and depends on a low number of parameters. We first develop a mathematical model to describe the observations generated by the near-infrared spectrograph of Euclid, then propose non-blind, semi-blind and regularized semi-blind methods to separate the spectra. The first simulation results are encouraging: even for a signal to noise ratio of 5 dB, our regularized semi-blind method succeeds in separating the considered two spectra and provides a satisfactory estimate of the emission line positions and amplitudes.},\n  keywords = {astronomical spectra;astronomical techniques;blind source separation;infrared spectroscopy;semiblind separation;galaxy spectra;slitless spectroscopy;Euclid space mission;source separation problem;near-infrared spectroscopy;nonblind method;regularized semiblind method;Mathematical model;Indexes;Source separation;Wavelength measurement;Shape;Instruments;Europe;Semi-blind source separation;Euclid mission;Spectrum decontamination;Slitless spectroscopy;Optimization},\n  doi = {10.1109/EUSIPCO.2015.7362662},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103551.pdf},\n}\n\n
Compressed sensing and radio interferometry. Jiang, M.; Girard, J. N.; Starck, J. -.; Corbel, S.; and Tasse, C. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1646-1650, Aug 2015.
@InProceedings{7362663,\n  author = {M. Jiang and J. N. Girard and J. -. Starck and S. Corbel and C. Tasse},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Compressed sensing and radio interferometry},\n  year = {2015},\n  pages = {1646-1650},\n  abstract = {Radio interferometric imaging constitutes a strong ill-posed inverse problem. In addition, the next-generation radio telescopes, such as the Low Frequency Array (LOFAR) and the Square Kilometre Array (SKA), come with additional direction-dependent effects which impact the image restoration. In the compressed sensing framework, we used the analysis and synthesis formulation of the problem and we solved it using proximal algorithms. A simple version of our method has been implemented within the LOFAR imager and has been validated on simulated and real LOFAR data. It demonstrated its capability to super-resolve radio sources, to provide correct photometry of point sources in a large field of view, and to image extended emissions with enhanced quality compared to classical deconvolution methods. One extension of our method is to use the temporal information of the data to build a 2D-1D sparse imager enabling the detection of transient sources.},\n  keywords = {compressed sensing;image restoration;interferometry;radiotelescopes;compressed sensing;radio interferometry;radio interferometric imaging;ill-posed inverse problem;low frequency array;square kilometre array;direction-dependent effects;image restoration;synthesis formulation;proximal algorithms;LOFAR imager;LOFAR data;super-resolve radio sources;photometry;image extended emissions;deconvolution methods;temporal information;2D-1D sparse imager;transient sources detection;Transient analysis;Imaging;Image reconstruction;Compressed sensing;Signal processing algorithms;Dictionaries;Minimization;sparsity;compressed sensing;interferometry;imaging;transients},\n  doi = {10.1109/EUSIPCO.2015.7362663},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103663.pdf},\n}\n\n
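As a minimal stand-in for the proximal analysis/synthesis solvers mentioned here, the sketch below runs ISTA on a toy compressed-sensing problem: a sparse signal observed on a random subset of its unitary FFT coefficients. A real interferometric pipeline would replace the masked FFT with the instrument's measurement operator and a suitable sparsifying dictionary; lam and the iteration count are illustrative.

import numpy as np

def ista_fourier_cs(y, mask, lam=0.05, n_iter=200):
    # ISTA for min_x 0.5*||M F x - y||^2 + lam*||x||_1 with F the unitary FFT
    # and M a frequency-selection mask; the Lipschitz constant is 1, so the
    # gradient step size is 1 and the prox is plain soft-thresholding.
    x = np.zeros(mask.size)
    for _ in range(n_iter):
        r = mask * (np.fft.fft(x, norm="ortho") - y)        # residual on measured frequencies
        x = x - np.real(np.fft.ifft(r, norm="ortho"))       # gradient step
        x = np.sign(x) * np.maximum(np.abs(x) - lam, 0.0)   # soft-thresholding prox
    return x

# Toy usage: a 10-sparse signal observed on about 25% of its FFT coefficients.
rng = np.random.default_rng(0)
x0 = np.zeros(256); x0[rng.choice(256, 10, replace=False)] = rng.standard_normal(10)
mask = rng.random(256) < 0.25
y = mask * np.fft.fft(x0, norm="ortho")
x_hat = ista_fourier_cs(y, mask)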
Band selection in RKHS for fast nonlinear unmixing of hyperspectral images. Imbiriba, T.; Bermudez, J. C. M.; Richard, C.; and Tourneret, J. -. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1651-1655, Aug 2015.
@InProceedings{7362664,\n  author = {T. Imbiriba and J. C. M. Bermudez and C. Richard and J. -. Tourneret},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Band selection in RKHS for fast nonlinear unmixing of hyperspectral images},\n  year = {2015},\n  pages = {1651-1655},\n  abstract = {The profusion of spectral bands generated by the acquisition process of hyperspectral images generally leads to high computational costs. Such difficulties arise in particular with nonlinear unmixing methods, which are naturally more complex than linear ones. This complexity, associated with the high redundancy of information within the complete set of bands, makes the search for band selection algorithms relevant. In this work, we propose a band selection strategy in reproducing kernel Hilbert spaces that drastically reduces the processing time required by nonlinear unmixing techniques. Simulation results show a complexity reduction of two orders of magnitude without compromising unmixing performance.},\n  keywords = {computational complexity;Hilbert spaces;hyperspectral imaging;image fusion;redundancy;hyperspectral image fast nonlinear unmixing;RKHS;spectral band profusion;hyperspectral image acquisition process;high information redundancy;band selection algorithm;kernel Hilbert space reproduction;complexity reduction;Clustering algorithms;Kernel;Signal processing algorithms;Hyperspectral imaging;Complexity theory;Europe;Signal processing;Hyperspectral data;nonlinear unmixing;band selection;kernel methods},\n  doi = {10.1109/EUSIPCO.2015.7362664},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104937.pdf},\n}\n\n
Unmixing multitemporal hyperspectral images accounting for endmember variability. Halimi, A.; Dobigeon, N.; Tourneret, J. -.; McLaughlin, S.; and Honeine, P. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1656-1660, Aug 2015.
@InProceedings{7362665,\n  author = {A. Halimi and N. Dobigeon and J. -. Tourneret and S. McLaughlin and P. Honeine},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Unmixing multitemporal hyperspectral images accounting for endmember variability},\n  year = {2015},\n  pages = {1656-1660},\n  abstract = {This paper proposes an unsupervised Bayesian algorithm for unmixing successive hyperspectral images while accounting for temporal and spatial variability of the endmembers. Each image pixel is modeled as a linear combination of the endmembers weighted by their corresponding abundances. Spatial endmember variability is introduced by considering the normal compositional model that assumes variable endmembers for each image pixel. A prior enforcing a smooth temporal variation of both endmembers and abundances is considered. The proposed algorithm estimates the mean vectors and covariance matrices of the endmembers and the abundances associated with each image. Since the estimators are difficult to express in closed form, we propose to sample according to the posterior distribution of interest and use the generated samples to build estimators. The performance of the proposed Bayesian model and the corresponding estimation algorithm is evaluated by comparison with other unmixing algorithms on synthetic images.},\n  keywords = {covariance matrices;hyperspectral imaging;image processing;statistical distributions;unmixing multitemporal hyperspectral image;unsupervised Bayesian algorithm;image pixel;endmember temporal variability;endmember spatial variability;normal compositional model;smooth temporal variation;mean vector estimation;covariance matrices;posterior distribution;Signal processing algorithms;Bayes methods;Hyperspectral imaging;Europe;Signal processing;Covariance matrices;Indexes;Hyperspectral unmixing;spectral variability;temporal and spatial variability;Bayesian algorithm;Hamiltonian Monte-Carlo;MCMC methods},\n  doi = {10.1109/EUSIPCO.2015.7362665},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099457.pdf},\n}\n\n
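The Bayesian sampler itself is paper-specific, but the underlying linear mixing model is easy to illustrate: each pixel is E @ a, with the abundances a constrained to the probability simplex. The sketch below estimates per-pixel abundances by projected gradient with an exact simplex projection, a standard fully-constrained least-squares baseline rather than the paper's Hamiltonian Monte Carlo scheme; function names are illustrative.

import numpy as np

def project_simplex(a):
    # Euclidean projection onto {a : a >= 0, sum(a) = 1} (sort-based algorithm).
    u = np.sort(a)[::-1]
    css = np.cumsum(u)
    rho = np.nonzero(u * np.arange(1, a.size + 1) > (css - 1))[0][-1]
    tau = (css[rho] - 1) / (rho + 1.0)
    return np.maximum(a - tau, 0.0)

def estimate_abundances(pixel, E, n_iter=500):
    # Projected gradient for min_a 0.5*||E a - pixel||^2 s.t. a in the simplex,
    # i.e. the linear mixing model with the usual abundance constraints.
    a = np.full(E.shape[1], 1.0 / E.shape[1])
    step = 1.0 / np.linalg.norm(E, 2) ** 2     # 1/L for the least-squares term
    for _ in range(n_iter):
        a = project_simplex(a - step * (E.T @ (E @ a - pixel)))
    return a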
Query by example search with segmented dynamic time warping for non-exact spoken queries. Proença, J.; Veiga, A.; and Perdigão, F. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1661-1665, Aug 2015.
@InProceedings{7362666,\n  author = {J. Proença and A. Veiga and F. Perdigão},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Query by example search with segmented dynamic time warping for non-exact spoken queries},\n  year = {2015},\n  pages = {1661-1665},\n  abstract = {This paper presents an approach to the Query-by-Example task of finding spoken queries on speech databases when the intended match may be non-exact or slightly complex. The system is low-resource, as it addresses the case where the language of the queries and of the searched audio is unspecified. Our method is based on a modified Dynamic Time Warping (DTW) algorithm using posterior-grams and extracting intricate paths to account for special cases of query match such as word re-ordering, lexical variations and filler content. This system was evaluated on the MediaEval 2014 task of Query by Example Search on Speech (QUESST) where the spoken data is from different languages, unknown to the participant. We combined the results of five DTW modifications computed on the output of three phoneme recognizers of different languages. The combination of all systems provided the best performance overall and improved detection of complex case queries.},\n  keywords = {dynamic programming;feature extraction;query processing;speech recognition;query-by-example search;dynamic time warping;DTW algorithm;spoken query;speech database;posterior gram;path extraction;automatic speech recognition;ASR system;Speech;Databases;Europe;Signal processing;Acoustics;Search problems;Signal processing algorithms;Query-by-example;audio search;dynamic time warping;pattern matching},\n  doi = {10.1109/EUSIPCO.2015.7362666},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104947.pdf},\n}\n\n
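The backbone of such systems is DTW over phone-posteriorgram sequences, sketched below with the -log inner-product local cost that is common in query-by-example search. The paper's segmented variant additionally extracts multiple partial paths to handle word re-ordering, lexical variation and filler content, which this plain alignment does not attempt.

import numpy as np

def posteriorgram_dtw(query, search):
    # Standard DTW between two posteriorgrams (frames x phone-posteriors),
    # returning a length-normalized alignment cost; lower means more similar.
    eps = 1e-10
    cost = -np.log(np.clip(query @ search.T, eps, None))   # local distances
    n, m = cost.shape
    D = np.full((n + 1, m + 1), np.inf)
    D[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            D[i, j] = cost[i - 1, j - 1] + min(D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
    return D[n, m] / (n + m)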
Combining NDHMM and phonetic feature detection for speech recognition. Svendsen, T.; and Hamar, J. B. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1666-1670, Aug 2015.
@InProceedings{7362667,\n  author = {T. Svendsen and J. B. Hamar},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Combining NDHMM and phonetic feature detection for speech recognition},\n  year = {2015},\n  pages = {1666-1670},\n  abstract = {Non-negative HMM (N-HMM) [1] is a model that is well suited for modeling a mixture of e.g. audio signals, but does not have the ability to generalize to model unseen data. Non-negative durational HMM (NdHMM) has recently been proposed [2] as a modification to N-HMM that can allow for generalization, and thus make the approach suitable for automatic speech recognition. A detector-based approach to speech recognition has been studied by several researchers as an alternative to the traditional HMM approach. A bank of phonetic feature detectors will produce phonetic feature posteriors, which fit well with the non-negativity constraint of NdHMM. We review the NdHMM approach proposed in [2] and propose to extend this approach by combining NdHMM with a phonetic feature detection front-end in a tandem-like system. Experimental results of the proposed approach are presented.},\n  keywords = {hidden Markov models;matrix decomposition;speech recognition;NDHMM;phonetic feature detection;speech recognition;tandem-like system;nonnegative matrix factorisation;Hidden Markov models;Feature extraction;Detectors;Dictionaries;Speech;Speech recognition;Spectrogram;ASR;Non-negative durational HMM;Phone recognition;Phonetic feature detection},\n  doi = {10.1109/EUSIPCO.2015.7362667},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104671.pdf},\n}\n\n
Word embeddings combination and neural networks for robustness in ASR error detection. Ghannay, S.; Estève, Y.; and Camelin, N. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1671-1675, Aug 2015.
@InProceedings{7362668,\n  author = {S. Ghannay and Y. Estève and N. Camelin},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Word embeddings combination and neural networks for robustness in ASR error detection},\n  year = {2015},\n  pages = {1671-1675},\n  abstract = {This study focuses on error detection in Automatic Speech Recognition (ASR) output. We propose to build a confidence classifier based on a neural network architecture, which is in charge of attributing a label (error or correct) to each word within an ASR hypothesis. This classifier uses word embeddings as inputs, in addition to ASR confidence-based, lexical and syntactic features. We propose to evaluate the impact of three different kinds of word embeddings on this error detection approach, and we present a solution to combine these three different types of word embeddings in order to take advantage of their complementarity. In our experiments, different approaches are evaluated on the automatic transcriptions generated by two different ASR systems applied on the ETAPE corpus (French broadcast news). Experimental results show that the proposed neural architectures achieve a CER reduction of between 4% and 5.8% in error detection, depending on the test dataset, in comparison with a state-of-the-art CRF approach.},\n  keywords = {error detection;neural nets;signal classification;speech recognition;word embeddings combination;neural networks;ASR error detection robustness;automatic speech recognition;confidence classifier;ASR confidence lexical feature;ASR confidence syntactic feature;automatic transcription;ETAPE corpus;French broadcast news;Feature extraction;Computer architecture;Syntactics;Europe;Signal processing;Biological neural networks;Automatic speech recognition;confidence measures;neural networks;word embeddings},\n  doi = {10.1109/EUSIPCO.2015.7362668},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105093.pdf},\n}\n\n
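The wiring of such a classifier is simple to sketch: concatenate the different word-embedding types with the confidence/lexical features and train a small neural network on error/correct labels. The snippet below uses random stand-in data and scikit-learn's MLPClassifier purely to show the combination-by-concatenation step; all shapes, names and hyperparameters are illustrative and do not reproduce the authors' architecture.

import numpy as np
from sklearn.neural_network import MLPClassifier

# Toy stand-ins: three embedding types plus a few confidence/lexical features
# per word; with real data these would come from the embedding models and the
# ASR decoder respectively.
rng = np.random.default_rng(0)
n_words, d = 2000, 50
emb_a, emb_b, emb_c = (rng.standard_normal((n_words, d)) for _ in range(3))
conf_feats = rng.random((n_words, 4))
labels = rng.integers(0, 2, n_words)          # 1 = ASR error, 0 = correct

# Combination by concatenation is the simplest way to exploit complementarity.
X = np.hstack([emb_a, emb_b, emb_c, conf_feats])
clf = MLPClassifier(hidden_layer_sizes=(64,), max_iter=300).fit(X[:1500], labels[:1500])
print("held-out accuracy:", clf.score(X[1500:], labels[1500:]))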
Hybrid input spaces for exemplar-based noise robust speech recognition using coupled dictionaries. Baby, D.; and Van hamme, H. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1676-1680, Aug 2015.
@InProceedings{7362669,\n  author = {D. Baby and H. {Van hamme}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Hybrid input spaces for exemplar-based noise robust speech recognition using coupled dictionaries},\n  year = {2015},\n  pages = {1676-1680},\n  abstract = {Exemplar-based feature enhancement successfully exploits a wide temporal signal context. We extend this technique with hybrid input spaces that are chosen for a more effective separation of speech from background noise. This work investigates the use of two different hybrid input spaces which are formed by incorporating the full-resolution and modulation envelope spectral representations with the Mel features. A coupled output dictionary containing Mel exemplars, which are jointly extracted with the hybrid space exemplars, is used to reconstruct the enhanced Mel features for the ASR back-end. When compared to the system which uses Mel features only as input exemplars, these hybrid input spaces are found to yield improved word error rates on the AURORA-2 database especially with unseen noise cases.},\n  keywords = {speech recognition;hybrid input spaces;exemplar-based noise robust speech recognition;coupled dictionaries;exemplar-based feature enhancement;temporal signal context;background noise;modulation envelope spectral representations;Mel features;Mel exemplars;ASR back-end;AURORA-2 database;word error rates;Dictionaries;Discrete Fourier transforms;Speech;Noise measurement;Feature extraction;Training data;Modulation;coupled dictionaries;automatic speech recognition;modulation envelope;non-negative matrix factorization},\n  doi = {10.1109/EUSIPCO.2015.7362669},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101195.pdf},\n}\n\n
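Exemplar-based enhancement of this kind is usually implemented as non-negative sparse coding: activations are estimated on the (hybrid) input dictionary, and the clean features are rebuilt with the coupled output dictionary. The sketch below uses plain multiplicative KL-divergence updates without the sparsity penalty such systems typically add; the dictionary layout (speech exemplars first, noise exemplars after) and all names are assumptions for illustration.

import numpy as np

def exemplar_activations(Y, D, n_iter=100, eps=1e-10):
    # Multiplicative updates for activations H >= 0 minimizing the KL
    # divergence between nonnegative features Y and D @ H, with D fixed.
    H = np.full((D.shape[1], Y.shape[1]), 1.0 / D.shape[1])
    for _ in range(n_iter):
        V = D @ H + eps
        H *= (D.T @ (Y / V)) / (D.sum(axis=0)[:, None] + eps)
    return H

def enhance_mel(Y_hybrid, D_hybrid, D_mel_speech, n_speech):
    # Estimate activations in the hybrid input space, then reconstruct clean
    # Mel features with the coupled Mel dictionary, keeping only the speech
    # exemplars (assumed to be the first n_speech columns of D_hybrid).
    H = exemplar_activations(Y_hybrid, D_hybrid)
    return D_mel_speech @ H[:n_speech]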
Adaptive noise dictionary design for noise robust exemplar matching of speech. Yilmaz, E.; Van hamme, H.; and Gemmeke, J. F. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1681-1685, Aug 2015.
@InProceedings{7362670,\n  author = {E. Yilmaz and H. {Van hamme} and J. F. Gemmeke},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive noise dictionary design for noise robust exemplar matching of speech},\n  year = {2015},\n  pages = {1681-1685},\n  abstract = {This paper investigates an adaptive noise dictionary design approach to achieve an effective and computationally feasible noise modeling for the noise robust exemplar matching (N-REM) framework. N-REM approximates noisy speech segments as a linear combination of multiple length exemplars in a sparse representation (SR) formulation. Compared to the previous SR techniques with a single overcomplete dictionary, N-REM uses smaller dictionaries containing considerably fewer noise exemplars. Hence, the noise exemplars have to be selected with care to accurately model the spectrotemporal content of the actual noise conditions. For this purpose, in a previous work, we introduced a noise exemplar selection stage before performing recognition which extracts noise exemplars from a few noise-only training sequences chosen for each target noisy utterance. In this work, we explore the impact of several design parameters on the recognition accuracy by evaluating the system performance on the CHIME-2 and AURORA-2 databases.},\n  keywords = {acoustic noise;speech recognition;adaptive noise dictionary design;feasible noise modeling;noise robust exemplar matching framework;multiple length exemplars linear combination;SR techniques;sparse representation formulation;noise-only training sequences;target noisy utterance;recognition;N-REM;noisy speech segments;Dictionaries;Speech;Training;Noise measurement;Hidden Markov models;Signal to noise ratio;Adaptation models;template matching;noise-robustness;automatic speech recognition;sparse representations;exemplar selection},\n  doi = {10.1109/EUSIPCO.2015.7362670},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102603.pdf},\n}\n\n
Online Bayesian group sparse parameter estimation using a generalized inverse Gaussian Markov chain. Themelis, K. E.; Rontogiannis, A. A.; and Koutroumbas, K. D. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1686-1690, Aug 2015.
@InProceedings{7362671,\n  author = {K. E. Themelis and A. A. Rontogiannis and K. D. Koutroumbas},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Online Bayesian group sparse parameter estimation using a generalized inverse Gaussian Markov chain},\n  year = {2015},\n  pages = {1686-1690},\n  abstract = {In this paper we develop a variational Bayes algorithm for the adaptive estimation of time-varying, group sparse signals. First, we propose a hierarchical Bayesian model that captures the sparsity structure of the signal. Sparsity is imposed by a multivariate Laplace distribution, which is known to be the Bayesian analogue of the adaptive lasso. Sparsity structure is then expressed via a novel generalized inverse Gaussian Markov chain, defined on the parameters of the Laplace distribution. The conjugacy of the model's prior distributions permits the development of an efficient online variational Bayes algorithm that performs inference on the model parameters. Experimental results verify that capturing sparsity structure leads to improvements on estimation performance.},\n  keywords = {adaptive estimation;Bayes methods;compressed sensing;Laplace equations;Markov processes;parameter estimation;multivariate Laplace distribution;hierarchical Bayesian model;adaptive estimation;variational Bayes algorithm;generalized inverse Gaussian Markov chain;online Bayesian group sparse parameter estimation;Bayes methods;Adaptation models;Markov processes;Signal processing algorithms;Approximation methods;Europe;Approximation algorithms;online inference;variational Bayes;Markov random field;generalized inverse Gaussian distribution;group sparsity},\n  doi = {10.1109/EUSIPCO.2015.7362671},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104123.pdf},\n}\n\n
Group sparse LMS for multiple system identification. Yu, L.; Wei, C.; and Zheng, G. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1691-1695, Aug 2015.
@InProceedings{7362672,\n  author = {L. Yu and C. Wei and G. Zheng},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Group sparse LMS for multiple system identification},\n  year = {2015},\n  pages = {1691-1695},\n  abstract = {When structure is present, group sparsity can be exploited to substantially improve the performance of adaptive estimation. In this paper, a group sparse regularized least-mean-square (LMS) algorithm is proposed to cope with the identification problems for multiple/multi-channel systems. In particular, the coefficients of the impulse response function for each system are assumed to be sparse. Then, the dependencies between multiple systems are considered, where the coefficients of the impulse responses of each system share the same pattern. An iterative online algorithm is proposed via a proximal splitting method. Finally, simulations are carried out to verify the superiority of the proposed algorithm over state-of-the-art algorithms.},\n  keywords = {adaptive estimation;iterative methods;least mean squares methods;signal processing;group sparse LMS;multiple system identification;adaptive estimation;least-mean-square algorithm;impulse response function;iterative online algorithm;proximal splitting method;Signal processing algorithms;Least squares approximations;Convergence;Steady-state;Standards;Correlation;Algorithm design and analysis;LMS;Multiple system identification;Group sparsity;Proximal splitting method},\n  doi = {10.1109/EUSIPCO.2015.7362672},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103383.pdf},\n}\n\n
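A minimal single-channel rendering of the idea: after each LMS stochastic-gradient update, apply a group soft-threshold (the proximal operator of the mixed l2,1 norm) to the filter estimate. The multi-system coupling of the paper, where impulse responses across channels share a support pattern, is not reproduced; mu, rho and the group layout are illustrative.

import numpy as np

def group_sparse_lms(u, d, n_taps, groups, mu=0.01, rho=1e-3):
    # LMS identification with a group soft-thresholding (proximal) step after
    # each stochastic-gradient update, promoting group sparsity of the filter w.
    w = np.zeros(n_taps)
    for n in range(n_taps, len(u)):
        x = u[n - n_taps:n][::-1]            # regressor, most recent sample first
        e = d[n] - w @ x
        w += mu * e * x                      # standard LMS update
        for g in groups:                     # prox of rho * sum_g ||w_g||_2
            norm_g = np.linalg.norm(w[g])
            w[g] *= max(0.0, 1.0 - rho / (norm_g + 1e-12))
    return w

# Example group layout: contiguous blocks of 4 taps.
# groups = [np.arange(g, g + 4) for g in range(0, n_taps, 4)]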
Adaptive cyclic and randomized coordinate descent for the sparse total least squares problem. Onose, A.; and Dumitrescu, B. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1696-1700, Aug 2015.
@InProceedings{7362673,\n  author = {A. Onose and B. Dumitrescu},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive cyclic and randomized coordinate descent for the sparse total least squares problem},\n  year = {2015},\n  pages = {1696-1700},\n  abstract = {Coordinate descent (CD) is a simple and general optimization technique. We use it to solve the sparse total least squares problem in an adaptive manner, working on the l1-regularized Rayleigh quotient function. We propose two algorithmic approaches for choosing the coordinates: cyclic and randomized. In both cases, the number of CD steps per time instant is a parameter that can serve as a trade-off between complexity and performance. We present numerical experiments showing that the proposed algorithms can approach stationary error near that of the oracle. The randomized algorithm is slightly better than the cyclic one with respect to convergence speed.},\n  keywords = {computational complexity;filtering theory;least squares approximations;optimisation;randomised algorithms;adaptive cyclic coordinate descent;randomized coordinate descent;sparse total least squares problem;optimization technique;randomized algorithm;sparse filter;l1-regularized Rayleigh quotient function;Signal processing algorithms;Complexity theory;Europe;Adaptive algorithms;Finite impulse response filters;Indexes;adaptive algorithm;channel identification;sparse filter;total least squares;coordinate descent;randomization},\n  doi = {10.1109/EUSIPCO.2015.7362673},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102963.pdf},\n}\n\n
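The paper's l1-regularized Rayleigh quotient objective is involved, so the sketch below illustrates only the coordinate-selection trade-off it studies, on a plain lasso problem: exact per-coordinate minimization with either cyclic or randomized index choice, and an incrementally maintained residual so each step costs O(m).

import numpy as np

def cd_lasso(A, y, lam, n_steps=5000, randomized=True, seed=0):
    # Coordinate descent for min_w 0.5*||A w - y||^2 + lam*||w||_1, choosing
    # coordinates either cyclically or uniformly at random.
    m, n = A.shape
    w = np.zeros(n)
    r = y.copy()                             # residual y - A w, kept up to date
    col_sq = (A ** 2).sum(axis=0)
    rng = np.random.default_rng(seed)
    for t in range(n_steps):
        j = rng.integers(n) if randomized else t % n
        rho = A[:, j] @ r + col_sq[j] * w[j]           # correlation with w_j removed
        w_new = np.sign(rho) * max(abs(rho) - lam, 0.0) / (col_sq[j] + 1e-12)
        r += A[:, j] * (w[j] - w_new)                  # incremental residual update
        w[j] = w_new
    return w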
An iterative hard thresholding algorithm with improved convergence for low-rank tensor recovery. d. M. Goulart, J. H.; and Favier, G. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1701-1705, Aug 2015.
@InProceedings{7362674,\n  author = {J. H. d. M. Goulart and G. Favier},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An iterative hard thresholding algorithm with improved convergence for low-rank tensor recovery},\n  year = {2015},\n  pages = {1701-1705},\n  abstract = {Recovering low-rank tensors from undercomplete linear measurements is a computationally challenging problem of great practical importance. Most existing approaches circumvent the intractability of the tensor rank by considering instead the multilinear rank. Among them, the recently proposed tensor iterative hard thresholding (TIHT) algorithm is simple and has low cost per iteration, but converges quite slowly. In this work, we propose a new step size selection heuristic for accelerating its convergence, relying on a condition which (ideally) ensures monotonic decrease of its target cost function. This condition is obtained by studying TIHT from the standpoint of the majorization-minimization strategy which underlies the normalized IHT algorithm used for sparse vector recovery. Simulation results are presented for synthetic data tensor recovery and brain MRI data tensor completion, showing that the performance of TIHT is notably improved by our heuristic, with a small to moderate increase of the cost per iteration.},\n  keywords = {biomedical MRI;convergence of numerical methods;data analysis;iterative methods;tensors;iterative hard thresholding algorithm;convergence improvement;low-rank tensor recovery;undercomplete linear measurements;TIHT algorithm;step size selection heuristic;target cost function;majorization-minimization strategy;sparse vector recovery;MRI data tensor completion;Tensile stress;Convergence;Signal processing algorithms;Minimization;Signal processing;Cost function;Europe;Low-rank Tensor Recovery;Tensor Completion;Iterative Hard Thresholding},\n  doi = {10.1109/EUSIPCO.2015.7362674},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103027.pdf},\n}\n\n
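For intuition about the step-size condition, here is the vector-case ancestor the abstract points to: normalized IHT, where the step mu = ||g_S||^2 / ||A g_S||^2 (gradient restricted to the current support S) ideally guarantees monotonic decrease of the cost while the support is stable. The paper transposes this kind of condition to the tensor multilinear-rank setting; the sketch below omits the safeguard normalized IHT applies when the support changes between iterations.

import numpy as np

def normalized_iht(A, y, k, n_iter=100):
    # Normalized IHT for k-sparse recovery from y = A x: gradient step with an
    # adaptive step size computed on the current support, then hard thresholding.
    x = np.zeros(A.shape[1])
    support = np.argsort(np.abs(A.T @ y))[-k:]         # initial support guess
    for _ in range(n_iter):
        g = A.T @ (y - A @ x)                          # negative gradient of 0.5*||y - A x||^2
        gs = np.zeros_like(g); gs[support] = g[support]
        mu = np.linalg.norm(gs) ** 2 / (np.linalg.norm(A @ gs) ** 2 + 1e-12)
        x_new = x + mu * g
        support = np.argsort(np.abs(x_new))[-k:]       # keep the k largest entries
        x = np.zeros_like(x); x[support] = x_new[support]
    return x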
On quantized compressed sensing with saturated measurements via greedy pursuit. Elleuch, I.; Abdelkefi, F.; Siala, M.; Hamila, R.; and Al-Dhahir, N. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1706-1710, Aug 2015.
@InProceedings{7362675,\n  author = {I. Elleuch and F. Abdelkefi and M. Siala and R. Hamila and N. Al-Dhahir},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {On quantized compressed sensing with saturated measurements via greedy pursuit},\n  year = {2015},\n  pages = {1706-1710},\n  abstract = {We consider the problem of signal recovery under a sparsity prior, from multi-bit quantized compressed measurements. Recently, it has been shown that allowing a small fraction of the quantized measurements to saturate, combined with a saturation consistency recovery approach, would enhance reconstruction performance. In this paper, by leveraging the potential sparsity of the corrupting saturation noise, we propose a model-based greedy pursuit approach, where a cancel-then-recover procedure is applied in each iteration to estimate the unbounded sign-constrained saturation noise and remove it from the measurements to enable a clean signal estimate. Simulation results show the performance improvements of our proposed method compared with state-of-the-art recovery approaches, in the noiseless and noisy settings.},\n  keywords = {compressed sensing;greedy algorithms;quantisation (signal);signal denoising;quantized compressed sensing;saturated measurements;model-based greedy pursuit approach;signal recovery problem;multibit quantized compressed measurements;saturation consistency recovery approach;reconstruction performance enhancement;potential corrupting saturation noise sparsity;unbounded sign-constrained saturation noise estimation;Pollution measurement;Quantization (signal);Noise measurement;Estimation;Europe;Compressed sensing;Multi-Bit Quantized Compressed Sensing;Saturation;Sparse Corruptions;Sign Constraint;Cancel-Then-Recover;Greedy Pursuit},\n  doi = {10.1109/EUSIPCO.2015.7362675},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104273.pdf},\n}\n\n
\n \n\n \n \n \n \n \n \n Sparse signal recovery using a Bernoulli generalized Gaussian prior.\n \n \n \n \n\n\n \n Chaari, L.; Toumeret, J.; and Chaux, C.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1711-1715, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SparsePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362676,\n  author = {L. Chaari and J. Toumeret and C. Chaux},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Sparse signal recovery using a Bernoulli generalized Gaussian prior},\n  year = {2015},\n  pages = {1711-1715},\n  abstract = {Bayesian sparse signal recovery has been widely investigated during the last decade due to its ability to automatically estimate regularization parameters. Prior based on mixtures of Bernoulli and continuous distributions have recently been used in a number of recent works to model the target signals, often leading to complicated posteriors. Inference is therefore usually performed using Markov chain Monte Carlo algorithms. In this paper, a Bernoulli-generalized Gaussian distribution is used in a sparse Bayesian regularization framework to promote a two-level flexible sparsity. Since the resulting conditional posterior has anon-differentiable energy function, the inference is conducted using the recently proposed non-smooth Hamiltonian Monte Carlo algorithm. Promising results obtained with synthetic data show the efficiency of the proposed regularization scheme.},\n  keywords = {Gaussian processes;Markov processes;Monte Carlo methods;signal processing;sparse signal recovery;Bernoulli generalized Gaussian prior;Bayesian sparse signal recovery;regularization parameters;continuous distributions;Bernoulli distributions;target signals;Markov chain Monte Carlo algorithms;Bernoulli-generalized Gaussian distribution;Bayesian regularization framework;energy function;nonsmooth Hamiltonian Monte Carlo algorithm;regularization scheme;synthetic data;Bayes methods;Signal processing algorithms;Europe;Signal processing;Monte Carlo methods;Proposals;Markov processes;Sparse Bayesian regularization;MCMC;ns-HMC;restoration},\n  doi = {10.1109/EUSIPCO.2015.7362676},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103571.pdf},\n}\n\n
\n
\n\n\n
\n Bayesian sparse signal recovery has been widely investigated during the last decade due to its ability to automatically estimate regularization parameters. Priors based on mixtures of Bernoulli and continuous distributions have been used in a number of recent works to model the target signals, often leading to complicated posteriors. Inference is therefore usually performed using Markov chain Monte Carlo algorithms. In this paper, a Bernoulli generalized Gaussian distribution is used in a sparse Bayesian regularization framework to promote a two-level, flexible sparsity. Since the resulting conditional posterior has a non-differentiable energy function, inference is conducted using the recently proposed non-smooth Hamiltonian Monte Carlo algorithm. Promising results obtained with synthetic data show the efficiency of the proposed regularization scheme.\n
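To make the two-level sparsity concrete, the sketch below draws samples from a Bernoulli generalized Gaussian prior (the parameter names w, alpha, beta are illustrative): the Bernoulli level switches coefficients off entirely, while a shape parameter beta < 1 makes the surviving amplitudes heavy-tailed. It uses the fact that (|x|/alpha)^beta is Gamma(1/beta)-distributed under the generalized Gaussian law.

import numpy as np

def sample_bernoulli_gg(n, w=0.1, alpha=1.0, beta=0.8, rng=None):
    # x = 0 with probability 1 - w, otherwise x ~ GG(alpha, beta),
    # i.e. density proportional to exp(-(|x| / alpha)**beta).
    rng = rng or np.random.default_rng()
    x = np.zeros(n)
    active = rng.random(n) < w                       # Bernoulli 'on' entries
    g = rng.gamma(1.0 / beta, size=active.sum())     # (|x|/alpha)^beta ~ Gamma(1/beta)
    signs = rng.choice([-1.0, 1.0], size=active.sum())
    x[active] = signs * alpha * g ** (1.0 / beta)
    return x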
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Detection of time-varying support via rank evolution approach for effective joint sparse recovery.\n \n \n \n \n\n\n \n Lavrenko, A.; Römer, F.; Del Galdo, G.; Thomä, R.; and Arikan, O.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1716-1720, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DetectionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362677,\n  author = {A. Lavrenko and F. Römer and G. {Del Galdo} and R. Thomä and O. Arikan},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Detection of time-varying support via rank evolution approach for effective joint sparse recovery},\n  year = {2015},\n  pages = {1716-1720},\n  abstract = {Efficient recovery of sparse signals from few linear projections is a primary goal in a number of applications, most notably in a recently-emerged area of compressed sensing. The multiple measurement vector (MMV) joint sparse recovery is an extension of the single vector sparse recovery problem to the case when a set of consequent measurements share the same support. In this contribution we consider a modification of the MMV problem where the signal support can change from one block of data to another and the moment of change is not known in advance. We propose an approach for the support change detection based on the sequential rank estimation of a windowed block of the measurement data. We show that under certain conditions it allows for an unambiguous determination of the moment of change, provided that the consequent data vectors are incoherent to each other.},\n  keywords = {compressed sensing;time-varying support;rank evolution approach;joint sparse recovery;sparse signals recovery;multiple measurement vector;vector sparse recovery problem;MMV problem;support change detection;data vectors;Indexes;Yttrium;Europe;Signal processing;Data models;Sensors;Support vector machines;sparse recovery;multiple measurement vector;time-varying support;stationarity window},\n  doi = {10.1109/EUSIPCO.2015.7362677},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096169.pdf},\n}\n\n
\n
\n\n\n
\n Efficient recovery of sparse signals from few linear projections is a primary goal in a number of applications, most notably in the recently emerged area of compressed sensing. Multiple measurement vector (MMV) joint sparse recovery is an extension of the single-vector sparse recovery problem to the case where a set of consecutive measurements shares the same support. In this contribution we consider a modification of the MMV problem where the signal support can change from one block of data to another and the moment of change is not known in advance. We propose an approach for support change detection based on sequential rank estimation of a windowed block of the measurement data. We show that under certain conditions it allows for an unambiguous determination of the moment of change, provided that the consecutive data vectors are incoherent with each other.\n
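A minimal sketch of the rank-evolution test, assuming the measurement vectors are stacked as columns of Y: while the support is static, the numerical rank of a sliding window saturates at the joint support size; when incoherent vectors carrying a new support enter the window, extra directions appear and the rank jumps, flagging the moment of change.

import numpy as np

def rank_evolution(Y, window, tol=1e-8):
    # Numerical rank of each length-'window' block of columns of Y.
    ranks = []
    for t in range(Y.shape[1] - window + 1):
        s = np.linalg.svd(Y[:, t:t + window], compute_uv=False)
        ranks.append(int(np.sum(s > tol * s[0])))   # threshold relative to s_max
    return np.array(ranks)

A change point is then declared wherever ranks[t] rises above the plateau observed before t.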
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Spectral estimation in highly transient data.\n \n \n \n \n\n\n \n Emrani, S.; and Krim, H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1721-1725, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SpectralPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362678,\n  author = {S. Emrani and H. Krim},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Spectral estimation in highly transient data},\n  year = {2015},\n  pages = {1721-1725},\n  abstract = {We propose a new framework for estimating different frequencies in piece-wise periodic signals with time varying amplitude and phase. Through a 3-dimensional delay embedding of the introduced model, we construct a union of intersecting planes where each plane corresponds to one frequency. The equations of each of these planes only depend on the associated frequency, and are used to calculate the tone in each segment. A sparse subspace clustering technique is utilized to find the segmentation of the data, and the points in each cluster are used to compute the normal vectors. In the presence of white Gaussian noise, principal component analysis is used to robustly perform this computation. Experimental results demonstrate the effectiveness of the proposed framework.},\n  keywords = {compressed sensing;estimation theory;Gaussian noise;principal component analysis;synchronisation;vectors;white noise;spectral estimation;highly transient data;piecewise periodic signals;3D delay embedding;intersecting planes;sparse subspace clustering technique;normal vectors;white Gaussian noise;principal component analysis;Delays;Frequency estimation;Mathematical model;Yttrium;Time-frequency analysis;Delay effects;Sparse matrices;Spectral estimation;transient data;delay embedding;sparse subspace clustering},\n  doi = {10.1109/EUSIPCO.2015.7362678},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103745.pdf},\n}\n\n
\n
\n\n\n
\n We propose a new framework for estimating different frequencies in piecewise-periodic signals with time-varying amplitude and phase. Through a 3-dimensional delay embedding of the introduced model, we construct a union of intersecting planes, where each plane corresponds to one frequency. The equations of each of these planes depend only on the associated frequency and are used to calculate the tone in each segment. A sparse subspace clustering technique is utilized to find the segmentation of the data, and the points in each cluster are used to compute the normal vectors. In the presence of white Gaussian noise, principal component analysis is used to perform this computation robustly. Experimental results demonstrate the effectiveness of the proposed framework.\n
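The plane equations can be stated concretely: samples of a tone cos(w n) satisfy x[n - tau] - 2 cos(w tau) x[n] + x[n + tau] = 0, so the 3-D delay vectors (x[n - tau], x[n], x[n + tau]) lie on a plane through the origin with normal proportional to (1, -2 cos(w tau), 1). A minimal sketch for one pre-segmented cluster, with PCA supplying the noise-robust normal estimate:

import numpy as np

def plane_frequency(x, tau=1):
    # 3-D delay embedding of the segment.
    V = np.stack([x[:-2 * tau], x[tau:-tau], x[2 * tau:]], axis=1)
    # Plane normal: singular vector of the smallest singular value.
    _, _, Vt = np.linalg.svd(V, full_matrices=False)
    u = Vt[-1]
    cos_wt = -u[1] / (u[0] + u[2])              # from the (1, -2cos(w tau), 1) shape
    return np.arccos(np.clip(cos_wt, -1.0, 1.0)) / tau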
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A novel line search method for nonsmooth optimization problems.\n \n \n \n \n\n\n \n Yang, Y.; and Pesavento, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1726-1730, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362679,\n  author = {Y. Yang and M. Pesavento},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A novel line search method for nonsmooth optimization problems},\n  year = {2015},\n  pages = {1726-1730},\n  abstract = {In this paper, we propose a novel exact/successive line search method for stepsize calculation in iterative algorithms for nonsmooth optimization problems. The proposed approach is to perform line search over a properly constructed differ-entiable function based on the original nonsmooth objective function, and it outperforms state-of-the-art techniques from the perspective of convergence speed, computational complexity and signaling burden. When applied to LASSO, the proposed exact line search is shown, either analytically or numerically, to exhibit several desirable advantages, namely: it is implementable in closed-form, converges fast and is robust with respect to the choice of problem parameters.},\n  keywords = {computational complexity;iterative methods;optimisation;signal processing;nonsmooth optimization problems;exact-successive line search method;stepsize calculation;iterative algorithms;differ-entiable function;original nonsmooth objective function;convergence speed;computational complexity;signaling burden;LASSO;Convergence;Search problems;Linear programming;Approximation methods;Europe;Iterative methods;Descent Direction Method;Distributed and Parallel Algorithms;LASSO;Line Search;Nondifferentiable Optimization;Successive Convex Approximation},\n  doi = {10.1109/EUSIPCO.2015.7362679},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096865.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a novel exact/successive line search method for stepsize calculation in iterative algorithms for nonsmooth optimization problems. The proposed approach is to perform the line search over a properly constructed differentiable function based on the original nonsmooth objective function, and it outperforms state-of-the-art techniques from the perspective of convergence speed, computational complexity and signaling burden. When applied to LASSO, the proposed exact line search is shown, either analytically or numerically, to exhibit several desirable advantages, namely: it is implementable in closed form, converges fast, and is robust with respect to the choice of problem parameters.\n
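One way to build such a differentiable surrogate for LASSO, sketched under our reading with an ISTA step standing in for the update direction (that choice is an assumption of this example): along x + g*d with d = Bx - x, replace the l1 term by its convex upper bound (1 - g)*||x||_1 + g*||Bx||_1, tight at g = 0 and g = 1; the surrogate is then a quadratic in the stepsize g, minimized in closed form.

import numpy as np

def soft(z, t):
    return np.sign(z) * np.maximum(np.abs(z) - t, 0.0)

def lasso_exact_linesearch(y, A, lam, n_iter=100):
    # minimize 0.5*||y - A x||^2 + lam*||x||_1
    L = np.linalg.norm(A, 2) ** 2                 # Lipschitz constant of the gradient
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        r = y - A @ x
        Bx = soft(x + (A.T @ r) / L, lam / L)     # best-response (ISTA) point
        d = Bx - x
        Ad = A @ d
        num = r @ Ad - lam * (np.abs(Bx).sum() - np.abs(x).sum())
        g = np.clip(num / (Ad @ Ad + 1e-12), 0.0, 1.0)   # closed-form stepsize
        x = x + g * d
    return x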
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Compressed sensing for unit selection based speech synthesis.\n \n \n \n \n\n\n \n Sharma, P.; Abrol, V.; and Sao, A. K.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1731-1735, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CompressedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362680,\n  author = {P. Sharma and V. Abrol and A. K. Sao},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Compressed sensing for unit selection based speech synthesis},\n  year = {2015},\n  pages = {1731-1735},\n  abstract = {This paper proposes an approach based on compressed sensing to reduce the footprint of speech corpus in unit selection based speech synthesis (USS) systems. It exploits the observation that speech signal can have a sparse representation (in suitable choice of basis functions) and can be estimated effectively using the sparse coding framework. Thus, only few significant coefficients of the sparse vector needed to be stored instead of entire speech signal. During synthesis, speech signal can be reconstructed (with less error) using these significant coefficients only. Furthermore, the number of significant coefficients can be chosen adaptively based on type of segment such as voiced or unvoiced. Simulation results suggest that the proposed compression method effectively preserves most of the spectral information and can be used as an alternative to existing compression methods used in USS systems.},\n  keywords = {compressed sensing;speech synthesis;compressed sensing;unit selection based speech synthesis;speech signal;spectral information;USS systems;sparse coding framework;sparse vector;Speech;Speech coding;Speech processing;Indexes;Dictionaries;Sparse matrices;Spectrogram;Compressed sensing;sparse representation;speech synthesis},\n  doi = {10.1109/EUSIPCO.2015.7362680},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100743.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes an approach based on compressed sensing to reduce the footprint of the speech corpus in unit selection based speech synthesis (USS) systems. It exploits the observation that the speech signal can have a sparse representation (in a suitable choice of basis functions) and can be estimated effectively using the sparse coding framework. Thus, only a few significant coefficients of the sparse vector need to be stored instead of the entire speech signal. During synthesis, the speech signal can be reconstructed (with little error) using these significant coefficients only. Furthermore, the number of significant coefficients can be chosen adaptively based on the type of segment, such as voiced or unvoiced. Simulation results suggest that the proposed compression method effectively preserves most of the spectral information and can be used as an alternative to existing compression methods used in USS systems.\n
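A minimal sketch of the storage scheme, assuming a DCT sparsifying basis purely for illustration (the choice of basis is a design parameter) and SciPy availability; k can be chosen per unit, e.g. larger for voiced segments:

import numpy as np
from scipy.fft import dct, idct

def compress_unit(frame, k):
    # Keep only the k largest-magnitude transform coefficients of one unit.
    c = dct(frame, norm='ortho')
    keep = np.argsort(np.abs(c))[-k:]           # indices of significant coefficients
    return keep, c[keep]

def reconstruct_unit(keep, vals, n):
    c = np.zeros(n)
    c[keep] = vals
    return idct(c, norm='ortho')                # approximate unit for synthesis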
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An unified approach for blind source separation using sparsity and decorrelation.\n \n \n \n \n\n\n \n Feng, F.; and Kowalski, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1736-1740, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362681,\n  author = {F. Feng and M. Kowalski},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An unified approach for blind source separation using sparsity and decorrelation},\n  year = {2015},\n  pages = {1736-1740},\n  abstract = {Independent component analysis (ICA) has been a major tool for blind source separation (BSS). Both theoretical and practical evaluations showed that the hypothesis of independence suits well for audio signals. In the last few years, optimization approach based on sparsity has emerged as another efficient implement for BSS. This paper starts from introducing some new BSS methods that take advantages of both decorrelation (which is a direct consequence of independence) and sparsity using overcomplete Gabor representation. It is shown that the proposed methods work in both under-determined and over-determined cases. Experimental results illustrate the good performances of these approaches for audio mixtures.},\n  keywords = {audio signals;blind source separation;decorrelation;Gabor filters;independent component analysis;optimisation;blind source separation;unified approach;sparsity;decorrelation;independent component analysis;ICA;audio signals;optimization approach;direct independence consequence;overcomplete Gabor representation;audio mixtures;Decorrelation;Signal processing algorithms;Convergence;Optimization;Signal to noise ratio;Europe;Blind Source Separation;Sparsity;Independant Component Analysis;Optimization},\n  doi = {10.1109/EUSIPCO.2015.7362681},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103191.pdf},\n}\n\n
\n
\n\n\n
\n Independent component analysis (ICA) has been a major tool for blind source separation (BSS). Both theoretical and practical evaluations have shown that the hypothesis of independence suits audio signals well. In the last few years, optimization approaches based on sparsity have emerged as another efficient tool for BSS. This paper starts by introducing some new BSS methods that take advantage of both decorrelation (which is a direct consequence of independence) and sparsity, using an overcomplete Gabor representation. It is shown that the proposed methods work in both the under-determined and over-determined cases. Experimental results illustrate the good performance of these approaches on audio mixtures.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Inverse problems with time-frequency dictionaries and non-white Gaussian noise.\n \n \n \n \n\n\n \n Kowalski, M.; and Gramfort, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1741-1745, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"InversePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362682,\n  author = {M. Kowalski and A. Gramfort},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Inverse problems with time-frequency dictionaries and non-white Gaussian noise},\n  year = {2015},\n  pages = {1741-1745},\n  abstract = {Sparse regressions to solve ill-posed inverse problems have been massively investigated over the last decade. Yet, when noise is present in the model, it is almost exclusively considered as Gaussian and white. While this assumption can hold in practice, it rarely holds when observations are time series as they are corrupted by auto-correlated and colored noise. In this work we study sparse regression under the assumption of non white Gaussian noise and explain how to run the inference using proximal gradient methods. We investigate an application in brain imaging: the problem of source localization using magneto- and electroencephalography which allow functional brain imaging with high temporal resolution. We use a time-frequency representation of the source waveforms and a sparse regularization which promotes focal sources with smooth and transient activations. Our approach is evaluated using simulations comparing it to strategies that assume the noise is white or to simple prewhitening.},\n  keywords = {electroencephalography;gradient methods;inverse problems;magnetoencephalography;medical image processing;regression analysis;time-frequency analysis;sparse regularization;functional brain imaging;source localization;electroencephalography;proximal gradient method;ill-posed inverse problems;sparse regressions;nonwhite Gaussian noise;time-frequency dictionaries;Time-frequency analysis;Gaussian noise;Inverse problems;Brain modeling;Imaging;Electroencephalography;Approximation methods;Deconvolution;Inverse Problem;Time-Frequency Whitening;Non white Gaussian Noise;Sparse recovery;Denoising},\n  doi = {10.1109/EUSIPCO.2015.7362682},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103195.pdf},\n}\n\n
\n
\n\n\n
\n Sparse regressions to solve ill-posed inverse problems have been massively investigated over the last decade. Yet, when noise is present in the model, it is almost exclusively considered as Gaussian and white. While this assumption can hold in practice, it rarely holds when the observations are time series, as they are corrupted by auto-correlated and colored noise. In this work we study sparse regression under the assumption of non-white Gaussian noise and explain how to run the inference using proximal gradient methods. We investigate an application in brain imaging: the problem of source localization using magneto- and electroencephalography, which allow functional brain imaging with high temporal resolution. We use a time-frequency representation of the source waveforms and a sparse regularization which promotes focal sources with smooth and transient activations. Our approach is evaluated using simulations comparing it to strategies that assume the noise is white or to simple prewhitening.\n
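A minimal sketch of the core computation, assuming the noise covariance Sigma is known (in M/EEG practice it is estimated, e.g., from pre-stimulus data) and using a plain l1 penalty in place of the paper's structured time-frequency penalty: the whitener is folded into the forward operator once, after which a standard proximal gradient (ISTA) iteration applies.

import numpy as np

def ista_colored(y, A, Sigma, lam, n_iter=200):
    # minimize 0.5*(y - Ax)' Sigma^{-1} (y - Ax) + lam*||x||_1
    evals, U = np.linalg.eigh(Sigma)
    W = U @ np.diag(evals ** -0.5) @ U.T          # symmetric inverse square root
    Aw, yw = W @ A, W @ y                         # whitened model
    L = np.linalg.norm(Aw, 2) ** 2                # gradient Lipschitz constant
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        z = x - Aw.T @ (Aw @ x - yw) / L          # gradient step
        x = np.sign(z) * np.maximum(np.abs(z) - lam / L, 0.0)  # soft threshold
    return x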
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improved AMP (IAMP) for non-ideal measurement matrices.\n \n \n \n \n\n\n \n Lu, Y.; and Dai, W.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1746-1750, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ImprovedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362683,\n  author = {Y. Lu and W. Dai},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Improved AMP (IAMP) for non-ideal measurement matrices},\n  year = {2015},\n  pages = {1746-1750},\n  abstract = {This paper studies the sparse recovery problem. Of particular interest is the well known approximate message passing (AMP) algorithm. AMP enjoys low computational complexity and good performance guarantees. However, the algorithm and performance analysis heavily rely on the assumption that the measurement matrix is a standard Gaussian random matrix. The main contribution of this paper is an improved AMP (IAMP) algorithm that works better for non-ideal measurement matrices. The algorithm is equivalent to AMP for standard Gaussian random matrices but provides better recovery when the correlations between columns of the measurement matrix deviate from those of the standard Gaussian random matrices. The derivation is based on a modification of the message passing mechanism that removes the conditional independence assumption. Examples are provided to demonstrate the performance improvement of IAMP where both a particularly designed matrix and a matrix from real applications are used.},\n  keywords = {compressed sensing;message passing;improved AMP;IAMP;nonideal measurement matrices;sparse recovery problem;approximate message passing algorithm;computational complexity;standard Gaussian random matrix;standard Gaussian random matrices;measurement matrix;standard Gaussian random matrices;Sparse matrices;Standards;Message passing;Approximation algorithms;Signal processing algorithms;Complexity theory;Approximation methods;AMP;compressed sensing;message passing;sparse signal processing;standard Gaussian random matrix},\n  doi = {10.1109/EUSIPCO.2015.7362683},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104631.pdf},\n}\n\n
\n
\n\n\n
\n This paper studies the sparse recovery problem. Of particular interest is the well-known approximate message passing (AMP) algorithm. AMP enjoys low computational complexity and good performance guarantees. However, the algorithm and its performance analysis rely heavily on the assumption that the measurement matrix is a standard Gaussian random matrix. The main contribution of this paper is an improved AMP (IAMP) algorithm that works better for non-ideal measurement matrices. The algorithm is equivalent to AMP for standard Gaussian random matrices but provides better recovery when the correlations between columns of the measurement matrix deviate from those of a standard Gaussian random matrix. The derivation is based on a modification of the message passing mechanism that removes the conditional independence assumption. Examples are provided to demonstrate the performance improvement of IAMP using both a specially designed matrix and a matrix from a real application.\n
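For reference, a sketch of the baseline AMP iteration that the paper modifies, with a soft-threshold denoiser (the threshold policy and lam are illustrative). The last term of the residual update is the Onsager correction, whose derivation rests on the conditional-independence assumption that IAMP removes for non-ideal matrices.

import numpy as np

def amp(y, A, lam, n_iter=30):
    m, n = A.shape
    x, z = np.zeros(n), y.copy()
    for _ in range(n_iter):
        tau = lam * np.linalg.norm(z) / np.sqrt(m)    # threshold from residual energy
        r = x + A.T @ z                               # pseudo-data
        x = np.sign(r) * np.maximum(np.abs(r) - tau, 0.0)
        z = y - A @ x + (z / m) * np.count_nonzero(x) # Onsager correction term
    return x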
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Compressive imaging with complex wavelet transform and turbo AMP reconstruction.\n \n \n \n \n\n\n \n Guo, C.; and Nelson, J. D. B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1751-1755, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CompressivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362684,\n  author = {C. Guo and J. D. B. Nelson},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Compressive imaging with complex wavelet transform and turbo AMP reconstruction},\n  year = {2015},\n  pages = {1751-1755},\n  abstract = {We extend the {"}turbo{"} belief propagation framework for compressive imaging to the dual-tree complex wavelet transform (DT-CWT) to exploit both sparsity and dependency across scales. Due to the near shift-invariance property and the improved angular resolution of DT-CWT, better reconstruction can be expected when incorporating with the compressed sensing (CS) algorithms. Two types priors to form the hidden Markov tree structure for the DT-CWT coefficients are con sidered. One models the real and imaginary components of DT-CWT separately while the other assumes the shared hid den states between the two. Simulations with natural images confirm an improved performance when iterating between the CS reconstruction and the DT-CWT HMT.},\n  keywords = {compressed sensing;hidden Markov models;image reconstruction;wavelet transforms;compressive imaging;turbo AMP reconstruction;belief propagation framework;dual-tree complex wavelet transform;shift-invariance property;compressed sensing algorithms;Markov tree structure;DT-CWT coefficients;natural images;Image reconstruction;Discrete wavelet transforms;Hidden Markov models;Belief propagation;Signal processing algorithms;compressed sensing;dual-tree complex wavelet transform;HMT;turbo decoding;approximate mes sage passing},\n  doi = {10.1109/EUSIPCO.2015.7362684},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100385.pdf},\n}\n\n
\n
\n\n\n
\n We extend the \"turbo\" belief propagation framework for compressive imaging to the dual-tree complex wavelet transform (DT-CWT) to exploit both sparsity and dependency across scales. Due to the near shift-invariance property and the improved angular resolution of the DT-CWT, better reconstruction can be expected when it is incorporated into compressed sensing (CS) algorithms. Two types of priors forming the hidden Markov tree structure for the DT-CWT coefficients are considered. One models the real and imaginary components of the DT-CWT separately, while the other assumes shared hidden states between the two. Simulations with natural images confirm improved performance when iterating between the CS reconstruction and the DT-CWT HMT.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Joint inverse covariances estimation with mutual linear structure.\n \n \n \n \n\n\n \n Soloveychik, I.; and Wiesel, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1756-1760, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"JointPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362685,\n  author = {I. Soloveychik and A. Wiesel},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Joint inverse covariances estimation with mutual linear structure},\n  year = {2015},\n  pages = {1756-1760},\n  abstract = {We consider the problem of joint estimation of structured inverse covariance matrices. We assume the structure is unknown and perform the estimation using groups of measurements coming from populations with different covariances. Given that the inverse covariances span a low dimensional affine subspace in the space of symmetric matrices, our aim is to determine this structure. It is then utilized to improve the estimation of the inverse covariances. We propose a novel optimization algorithm discovering and exploring the underlying structure and provide its efficient implementation. Numerical simulations are presented to illustrate the performance benefits of the proposed algorithm.},\n  keywords = {covariance matrices;numerical analysis;optimisation;joint inverse covariances estimation;mutual linear structure;joint estimation problem;structured inverse covariance matrices;symmetric matrices;numerical simulations;Estimation;Covariance matrices;Symmetric matrices;Yttrium;Sparse matrices;Signal processing;Signal processing algorithms;Structured inverse covariance estimation;joint inverse covariance estimation;graphical models},\n  doi = {10.1109/EUSIPCO.2015.7362685},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104263.pdf},\n}\n\n
\n
\n\n\n
\n We consider the problem of joint estimation of structured inverse covariance matrices. We assume the structure is unknown and perform the estimation using groups of measurements coming from populations with different covariances. Given that the inverse covariances span a low dimensional affine subspace in the space of symmetric matrices, our aim is to determine this structure. It is then utilized to improve the estimation of the inverse covariances. We propose a novel optimization algorithm discovering and exploring the underlying structure and provide its efficient implementation. Numerical simulations are presented to illustrate the performance benefits of the proposed algorithm.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Moving target localization using distributed dual-frequency radars and sparse reconstruction.\n \n \n \n \n\n\n \n Kadry, K. A.; Ahmad, F.; and Amin, M. G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1761-1765, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MovingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362686,\n  author = {K. A. Kadry and F. Ahmad and M. G. Amin},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Moving target localization using distributed dual-frequency radars and sparse reconstruction},\n  year = {2015},\n  pages = {1761-1765},\n  abstract = {In this paper, we present a sparsity-based approach for target location and velocity estimation using a network of distributed dual-frequency radar units. A single dual-frequency radar can only estimate the range and radial velocity component of the moving target. The distributed configuration permits not only target localization in cross-range and downrange, but also provides Doppler velocity diversity, which enables the estimation of the horizontal and vertical target velocity components. We develop a linear signal model for the distributed radar network configuration under dual-frequency operation, and perform joint optimization for simultaneously recovering the target location and motion parameters. Supporting simulation results are provided, which validate the effectiveness of the proposed method.},\n  keywords = {Doppler radar;radar signal processing;signal reconstruction;distributed radar network configuration;linear signal model;Doppler velocity diversity;sparse reconstruction;distributed dual-frequency radars;moving target localization;Doppler radar;Radar imaging;Estimation;Doppler effect;Radar signal processing;Image reconstruction;Dual-frequency;moving target;localization;sparse reconstruction},\n  doi = {10.1109/EUSIPCO.2015.7362686},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104973.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we present a sparsity-based approach for target location and velocity estimation using a network of distributed dual-frequency radar units. A single dual-frequency radar can only estimate the range and radial velocity component of the moving target. The distributed configuration permits not only target localization in cross-range and downrange, but also provides Doppler velocity diversity, which enables the estimation of the horizontal and vertical target velocity components. We develop a linear signal model for the distributed radar network configuration under dual-frequency operation, and perform joint optimization for simultaneously recovering the target location and motion parameters. Supporting simulation results are provided, which validate the effectiveness of the proposed method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Maximum likelihood and robust G-music performance in K-distributed noise.\n \n \n \n \n\n\n \n Abramovich, Y. I.; Johnson, B. A.; and Besson, O.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1766-1770, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MaximumPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362687,\n  author = {Y. I. Abramovich and B. A. Johnson and O. Besson},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Maximum likelihood and robust G-music performance in K-distributed noise},\n  year = {2015},\n  pages = {1766-1770},\n  abstract = {For an antenna array input mixture of m point source signals in K-distributed noise, we compare DOA estimation delivered by Maximum Likelihood and the recently introduced Robust G-MUSIC (RG-MUSIC) technique. We demonstrate that similar to the Gaussian case, MLE is still superior to RG-MUSIC, especially within the so-called threshold region. This makes it possible to use the Expected Likelihood (EL) methodology to detect the presence of RG-MUSIC performance breakdown and {"}cure{"} those cases via an approach previously developed for the complex Gaussian circumstance.},\n  keywords = {antenna arrays;direction-of-arrival estimation;maximum likelihood estimation;signal classification;maximum likelihood;robust G-MUSIC performance;K-distributed noise;antenna array input mixture;DOA estimation;RG-MUSIC technique;MLE;threshold region;expected likelihood methodology;EL methodology;complex Gaussian circumstance;Maximum likelihood estimation;Direction-of-arrival estimation;Covariance matrices;Multiple signal classification;Arrays;Signal to noise ratio;Maximum Likelihood Estimation (MLE);G-MUSIC;Robust G-MUSIC;Expected Likelihood (EL)},\n  doi = {10.1109/EUSIPCO.2015.7362687},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103775.pdf},\n}\n\n
\n
\n\n\n
\n For an antenna array input mixture of m point source signals in K-distributed noise, we compare the DOA estimation delivered by Maximum Likelihood with the recently introduced Robust G-MUSIC (RG-MUSIC) technique. We demonstrate that, as in the Gaussian case, MLE is still superior to RG-MUSIC, especially within the so-called threshold region. This makes it possible to use the Expected Likelihood (EL) methodology to detect the presence of RG-MUSIC performance breakdown and \"cure\" those cases via an approach previously developed for the complex Gaussian circumstance.\n
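For experiments of this kind, K-distributed noise is conveniently generated through its compound-Gaussian representation: complex Gaussian speckle modulated by a Gamma texture. A minimal sketch (nu is the shape parameter; small nu gives spiky, heavy-tailed clutter, while nu -> infinity recovers Gaussian noise):

import numpy as np

def k_distributed_noise(shape, nu=1.0, rng=None):
    rng = rng or np.random.default_rng()
    texture = rng.gamma(nu, 1.0 / nu, size=shape)   # unit-mean Gamma texture
    speckle = (rng.standard_normal(shape)
               + 1j * rng.standard_normal(shape)) / np.sqrt(2)
    return np.sqrt(texture) * speckle               # K-distributed samples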
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An efficient two-step procedure for compressed sensing 3D MIMO radar.\n \n \n \n \n\n\n \n Pinto, R.; and Merched, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1771-1775, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362688,\n  author = {R. Pinto and R. Merched},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An efficient two-step procedure for compressed sensing 3D MIMO radar},\n  year = {2015},\n  pages = {1771-1775},\n  abstract = {In MIMO Radar schemes, sparse scenarios have been successfully exploited by compressed sensing (CS) techniques. We address the ill-conditioning inherent to the linear model of a 3D Radar imaging system, by proposing a two-step decoupling procedure which induces structure, and allows for fast matrix products to efficiently recover the target image. This is accomplished by further combining it with an Approximate Message Passing algorithm, that yields two iterative versions for range and cross-range image recovery. Simulations suggest that besides computational efficiency, decoupling the full model matrix gives us more freedom in selecting the CS regularization levels. An FDTD based experiment also shows that the algorithms are robust in real life situations where nonideal antennas and multiple scattering naturally occur.},\n  keywords = {compressed sensing;MIMO radar;radar signal processing;computational efficiency;approximate message passing algorithm;two-step decoupling procedure;compressed sensing 3D MIMO radar;Sparse matrices;Compressed sensing;Manifolds;MIMO radar;Approximation algorithms;Coherence;Robustness;MIMO Radar;Compressed Sensing},\n  doi = {10.1109/EUSIPCO.2015.7362688},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570091735.pdf},\n}\n\n
\n
\n\n\n
\n In MIMO radar schemes, sparse scenarios have been successfully exploited by compressed sensing (CS) techniques. We address the ill-conditioning inherent to the linear model of a 3D radar imaging system by proposing a two-step decoupling procedure which induces structure and allows fast matrix products to efficiently recover the target image. This is accomplished by further combining it with an Approximate Message Passing algorithm, which yields two iterative versions for range and cross-range image recovery. Simulations suggest that, besides computational efficiency, decoupling the full model matrix gives us more freedom in selecting the CS regularization levels. An FDTD-based experiment also shows that the algorithms are robust in real-life situations where non-ideal antennas and multiple scattering naturally occur.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Matching pursuit via continuous resolution cell rejection in presence of unresolved radar targets.\n \n \n \n \n\n\n \n Bosse, J.; Rabaste, O.; and Poullin, D.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1776-1780, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MatchingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362689,\n  author = {J. Bosse and O. Rabaste and D. Poullin},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Matching pursuit via continuous resolution cell rejection in presence of unresolved radar targets},\n  year = {2015},\n  pages = {1776-1780},\n  abstract = {In this article a new matching pursuit algorithm with continuous radar resolution cell rejection is proposed. It allows matching pursuit to work well even if more than one target is present in some resolution cell (unresolved targets) of the radar matched filter: it prevents its tendency to generate spurious sidelobes or miss a weaker target hidden in stronger target sidelobes. The FMCW radar case is particularly investigated which offers a very natural and computationally inexpensive solution to the problem that can also be applied in spectral analysis. The extension of the proposed approach to any radar waveform is also investigated.},\n  keywords = {CW radar;FM radar;iterative methods;matched filters;radar resolution;radar theory;spectral analysis;time-frequency analysis;continuous radar resolution cell rejection;unresolved radar target;matching pursuit algorithm;radar matched filter;FMCW radar;spectral analysis;radar waveform;frequency modulated continuous waveform;Radar;Matching pursuit algorithms;Signal resolution;Signal processing algorithms;Radar signal processing;Manganese;Eigenvalues and eigenfunctions;Matching Pursuit;Radar Signal Processing;Discrete Prolate Spheroidal Sequences;Spectral Estimation;Unresolved targets},\n  doi = {10.1109/EUSIPCO.2015.7362689},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096639.pdf},\n}\n\n
\n
\n\n\n
\n In this article, a new matching pursuit algorithm with continuous radar resolution cell rejection is proposed. It allows matching pursuit to work well even if more than one target is present in a given resolution cell of the radar matched filter (unresolved targets): it counters the tendency to generate spurious sidelobes or to miss a weaker target hidden in the sidelobes of a stronger one. The FMCW radar case is investigated in particular, as it offers a very natural and computationally inexpensive solution to the problem that can also be applied in spectral analysis. The extension of the proposed approach to any radar waveform is also investigated.\n
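A simplified, discrete stand-in for the rejection idea: after each detection, project the residual out of the subspace spanned by all atoms in the detected cell, instead of subtracting the single best atom. The paper performs this rejection continuously (via discrete prolate spheroidal sequences); here the column-indexed dictionary D, the half-width cell_size and n_targets are illustrative.

import numpy as np

def mp_cell_rejection(y, D, cell_size, n_targets):
    r = y.astype(complex).copy()
    detections = []
    for _ in range(n_targets):
        j = int(np.argmax(np.abs(D.conj().T @ r)))   # strongest remaining atom
        detections.append(j)
        lo, hi = max(0, j - cell_size), min(D.shape[1], j + cell_size + 1)
        Q, _ = np.linalg.qr(D[:, lo:hi])             # basis of the whole cell
        r = r - Q @ (Q.conj().T @ r)                 # reject the cell's subspace
    return detections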
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The SAS project: Speech signal processing in high school education.\n \n \n \n \n\n\n \n Sharma, D.; Poddar, A.; Manna, S.; and Naylor, P. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1781-1785, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362690,\n  author = {D. Sharma and A. Poddar and S. Manna and P. A. Naylor},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {The SAS project: Speech signal processing in high school education},\n  year = {2015},\n  pages = {1781-1785},\n  abstract = {We describe the Speech And Sound (SAS) outreach project with the aim of introducing high school students to speech signal processing through the real-life example of automatic speech recognition. The syllabus was designed to help students understand how the concepts they learn as part of the physics, mathematics and computing courses relate to reallife applications. The six week project was organized into a mixture of informal lecture and practical sessions and the students were encouraged to engage in informal discussions with the instructors with any questions and ideas. The project was piloted at an international high school in India with 10th, 11th and 12th grade students. By the end of the course, the students had gained a high level understanding of the many technologies that make up such a complex system, as evident by the high overall scores in the final assessment.},\n  keywords = {educational courses;further education;speech recognition;India;international high school;informal discussions;practical sessions;informal lecture;real-life applications;physics courses;mathematics courses;computing courses;automatic speech recognition;speech signal processing;high school students;SAS outreach project;speech and sound outreach project;high school education;Synthetic aperture sonar;Speech;Speech processing;Education;Physics;Mathematics;Engineering;Outreach;ASR;DSP;STEM},\n  doi = {10.1109/EUSIPCO.2015.7362690},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096493.pdf},\n}\n\n
\n
\n\n\n
\n We describe the Speech And Sound (SAS) outreach project, whose aim is to introduce high school students to speech signal processing through the real-life example of automatic speech recognition. The syllabus was designed to help students understand how the concepts they learn as part of their physics, mathematics and computing courses relate to real-life applications. The six-week project was organized as a mixture of informal lectures and practical sessions, and the students were encouraged to engage in informal discussions with the instructors about any questions and ideas. The project was piloted at an international high school in India with 10th, 11th and 12th grade students. By the end of the course, the students had gained a high-level understanding of the many technologies that make up such a complex system, as evidenced by the high overall scores in the final assessment.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n What can industrial partnerships bring in to small-group projects to teach signal and image processing?.\n \n \n \n\n\n \n Falleri, J.; Grivel, E.; and Reveillere, L.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1786-1790, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362691,\n  author = {J. Falleri and E. Grivel and L. Reveillere},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {What can industrial partnerships bring in to small-group projects to teach signal and image processing?},\n  year = {2015},\n  pages = {1786-1790},\n  abstract = {This paper deals with our positive experience about project-based pedagogy with the help of industrial partners to teach signal and image processing. During one semester, students are working in small groups of 6 to 8 students, supervised by two teachers or engineers working in a small or a big com pany. Various topics are proposed each year such as radar pro cessing, mobile communication system or image processing. The role played by the industrial partners is crucial: they give seminars about program management, they evaluate the tech nical quality of the projects and the clarity of the oral presen tation. An award ceremony is also organized at school during which the activities of the companies are presented. There are also some discussions about the activities of a young engineer and several awards in various categories are given. A cocktail party ends up the day. Anonymous online surveys completed by our students as well as discussions with our partners con firm the relevance of these projects.},\n  keywords = {educational institutions;image processing;telecommunication engineering education;school;technical quality evaluation;program management;mobile communication system;radar processing;industrial partner;image processing;signal processing;Signal processing;Companies;Image processing;Europe;Education;Project management;Computer science;Signal and image processing;education;active pedagogy;industrial partnership;projects},\n  doi = {10.1109/EUSIPCO.2015.7362691},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n This paper reports on our positive experience with project-based pedagogy, carried out with the help of industrial partners, to teach signal and image processing. During one semester, students work in small groups of 6 to 8, supervised by two teachers or by engineers working in a small or large company. Various topics are proposed each year, such as radar processing, mobile communication systems or image processing. The role played by the industrial partners is crucial: they give seminars about program management, and they evaluate the technical quality of the projects and the clarity of the oral presentations. An award ceremony is also organized at the school, during which the activities of the companies are presented. There are also discussions about the work of a young engineer, and several awards in various categories are given. A cocktail party ends the day. Anonymous online surveys completed by our students, as well as discussions with our partners, confirm the relevance of these projects.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fire-control radar model laboratory work.\n \n \n \n \n\n\n \n Vincent, F.; Berranger, M.; Beaugendre, G.; and Chaumette, E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1791-1794, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Fire-controlPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362692,\n  author = {F. Vincent and M. Berranger and G. Beaugendre and E. Chaumette},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Fire-control radar model laboratory work},\n  year = {2015},\n  pages = {1791-1794},\n  abstract = {Electrical engineering teaching is not an easy task because of the broad spectrum of knowledge to call for (electromagnetic, electronic, control, signal processing), each one having its specific formalism. To connect these different courses through a real-life application, we have decided to design a fire-control model based on a low-cost sonar system. This experiment has been designed for graduated students and is exploited in laboratory projects. Besides the playful aspects brought by the model, the project allows to face-off a real system and requires strong initiative from the students to success.},\n  keywords = {electrical engineering education;laboratory techniques;sonar;student experiments;fire-control radar model;laboratory work;electrical engineering teaching;low-cost sonar system;graduated students;laboratory projects;Mathematical model;Spaceborne radar;Sonar;Distance measurement;Radar signal processing;Laboratory;experiment;radar;sonar;ultrasound;fire-control;Matlab;electrical engineering},\n  doi = {10.1109/EUSIPCO.2015.7362692},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102519.pdf},\n}\n\n
\n
\n\n\n
\n Teaching electrical engineering is not an easy task because of the broad spectrum of knowledge it calls for (electromagnetics, electronics, control, signal processing), each area having its own formalism. To connect these different courses through a real-life application, we decided to design a fire-control model based on a low-cost sonar system. The experiment is aimed at graduate students and is used in laboratory projects. Besides the playful aspect of the model, the project confronts students with a real system and requires strong initiative from them to succeed.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Gamma curve determination for each individual pixel on high-resolution flat panel display.\n \n \n \n \n\n\n \n Kim, M.; Lee, W. H.; Kim, S. Y.; Kim, H. J.; Moon, H.; and Beom Ra, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1795-1799, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"GammaPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362693,\n  author = {M. Kim and W. H. Lee and S. Y. Kim and H. J. Kim and H. Moon and J. {Beom Ra}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Gamma curve determination for each individual pixel on high-resolution flat panel display},\n  year = {2015},\n  pages = {1795-1799},\n  abstract = {This paper presents an automated system to determine gamma curves of each individual pixel on a high-resolution flat panel display. The system consists of a panel and its aligned area scan camera located at a fixed position. To localize a panel pixel position in an image captured from the aligned camera, a mapping function estimation scheme is proposed between the panel and the camera coordinates. The mapping function is modeled by combining both 2-D perspective transform and lens distortion and is estimated via feature-based registration. Intensity values of individual panel pixel are then measured for sampled gray-levels. To alleviate intensity interferences from neighboring pixels, we propose to use evenly spaced dot images as panel input. By using the measured intensity values of each panel pixel, gamma curves are determined to examine the characteristics of defects on the panel, if any. Experimental results show the relevance of the proposed method.},\n  keywords = {flat panel displays;gamma distribution;gamma curve determination;high-resolution flat panel display;panel pixel position;mapping function estimation scheme;2D perspective transform;lens distortion;feature-based registration;Cameras;Lenses;Nonlinear distortion;Image resolution;Estimation;Transforms;Defect inspection;machine vision;high-resolution flat panel display;gamma curve determination},\n  doi = {10.1109/EUSIPCO.2015.7362693},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103853.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents an automated system to determine the gamma curves of each individual pixel on a high-resolution flat panel display. The system consists of a panel and an aligned area scan camera located at a fixed position. To localize a panel pixel position in an image captured from the aligned camera, a mapping function estimation scheme is proposed between the panel and the camera coordinates. The mapping function is modeled by combining a 2-D perspective transform and lens distortion, and is estimated via feature-based registration. Intensity values of each individual panel pixel are then measured for sampled gray levels. To alleviate intensity interference from neighboring pixels, we propose to use evenly spaced dot images as panel input. By using the measured intensity values of each panel pixel, gamma curves are determined to examine the characteristics of defects on the panel, if any. Experimental results show the relevance of the proposed method.\n
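Once the per-pixel intensities at the sampled gray levels are available, a pixel's gamma curve can be fitted, for instance, with a log-log least-squares fit of the usual power-law model L = a * (v / v_max)^gamma. The paper does not prescribe this particular fit; a minimal sketch assuming nonzero sampled gray levels:

import numpy as np

def fit_gamma(levels, luminance):
    # levels: sampled (nonzero) gray levels; luminance: measurements for one pixel.
    v = np.asarray(levels, float) / np.max(levels)
    X = np.stack([np.log(v), np.ones_like(v)], axis=1)
    gamma, log_a = np.linalg.lstsq(X, np.log(luminance), rcond=None)[0]
    return gamma, np.exp(log_a)

Pixels whose fitted gamma (or scale a) deviates strongly from their neighbors' can then be flagged as defect candidates.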
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Model-based superpixel segmentation of SAR images.\n \n \n \n \n\n\n \n Kayabol, K.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1800-1804, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Model-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362694,\n  author = {K. Kayabol},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Model-based superpixel segmentation of SAR images},\n  year = {2015},\n  pages = {1800-1804},\n  abstract = {We propose a superpixel segmentation method for synthetic aperture radar (SAR) images. The method uses the SAR image amplitudes and pixels coordinates as features. The feature vectors are modeled statistically by taking into account the SAR image statistics. Nakagami and bivariate Gaussian distributions are used for amplitudes and position vectors, respectively. A finite mixture model (FMM) is proposed for pixel clustering. Learning and clustering steps are performed using posterior distributions. Based on the classification results obtained on real TerraSAR-X image, it is shown that the proposed method is capable of obtaining more accurate superpixels compared to state-of-the-art superpixel segmentation methods.},\n  keywords = {Gaussian distribution;image classification;image segmentation;mixture models;pattern clustering;radar imaging;synthetic aperture radar;SAR image model-based superpixel segmentation;synthetic aperture radar image;feature vector;Nakagami distribution;bivariate Gaussian distribution;finite mixture model;FMM;pixel clustering;Learning step;posterior distribution;image classification;real TerraSAR-X image;Image segmentation;Synthetic aperture radar;Signal processing algorithms;Nakagami distribution;Europe;Signal processing;Mixture models;Superpixel segmentation;SAR image;finite mixture models},\n  doi = {10.1109/EUSIPCO.2015.7362694},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095329.pdf},\n}\n\n
\n
\n\n\n
\n We propose a superpixel segmentation method for synthetic aperture radar (SAR) images. The method uses the SAR image amplitudes and pixel coordinates as features. The feature vectors are modeled statistically by taking into account the SAR image statistics. Nakagami and bivariate Gaussian distributions are used for amplitudes and position vectors, respectively. A finite mixture model (FMM) is proposed for pixel clustering. Learning and clustering steps are performed using posterior distributions. Based on the classification results obtained on a real TerraSAR-X image, it is shown that the proposed method is capable of obtaining more accurate superpixels compared to state-of-the-art superpixel segmentation methods.\n
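The component models named in the abstract can be combined into mixture responsibilities as in the following sketch; the parameter values are hypothetical, and scipy's parametrization of the Nakagami law (shape nu plus a scale) stands in for whatever parametrization the paper uses.

```python
# A minimal sketch (assumed parameters, not the paper's estimator):
# posterior responsibilities of a finite mixture whose components model
# SAR amplitude with a Nakagami law and pixel position with a 2-D Gaussian.
import numpy as np
from scipy.stats import nakagami, multivariate_normal

def responsibilities(amplitude, xy, comps):
    """comps: list of dicts with keys pi, nu, scale, mean, cov."""
    lik = np.array([
        c["pi"]
        * nakagami.pdf(amplitude, c["nu"], scale=c["scale"])
        * multivariate_normal.pdf(xy, mean=c["mean"], cov=c["cov"])
        for c in comps
    ])
    return lik / lik.sum()               # normalize over components

comps = [
    dict(pi=0.5, nu=1.0, scale=1.0, mean=[10, 10], cov=np.eye(2) * 25),
    dict(pi=0.5, nu=2.0, scale=2.0, mean=[40, 40], cov=np.eye(2) * 25),
]
print(responsibilities(0.8, [12, 9], comps))  # pixel assigned to cluster 0
```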
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bayesian Gaussian mixture model for spatial-spectral classification of hyperspectral images.\n \n \n \n \n\n\n \n Kayabol, K.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1805-1809, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BayesianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362695,\n  author = {K. Kayabol},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bayesian Gaussian mixture model for spatial-spectral classification of hyperspectral images},\n  year = {2015},\n  pages = {1805-1809},\n  abstract = {We propose a Bayesian Gaussian mixture model for hyperspectral image classification. The model provides a robust estimation framework for small training sample sizes. By defining prior distributions for the mean vector and the covariance matrix, we are able to regularize the parameter estimation problem. In particular, we can obtain invertible positive definite covariance matrices. The mixture model also takes into account the spatial alignments of the pixels by using non-stationary mixture proportions. Based on the classification results obtained on the Indian Pine data set, the proposed method yields better classification performance than state-of-the-art linear and quadratic classifiers, especially for small training sample sizes.},\n  keywords = {Bayes methods;covariance matrices;Gaussian processes;hyperspectral imaging;image classification;mixture models;parameter estimation;vectors;Bayesian Gaussian mixture model;spatial-spectral classification;hyperspectral image classification;mean vector;covariance matrix;parameter estimation problem;Indian Pine data set;linear classifiers;quadratic classifiers;Bayes methods;Covariance matrices;Training;Hyperspectral imaging;Mixture models;Gaussian mixture model;Hyperspectral images;classification;Bayesian;Gaussian mixture models;auto logistic regression},\n  doi = {10.1109/EUSIPCO.2015.7362695},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100855.pdf},\n}\n\n
\n
\n\n\n
\n We propose a Bayesian Gaussian mixture model for hyperspectral image classification. The model provides a robust estimation framework for small training sample sizes. By defining prior distributions for the mean vector and the covariance matrix, we are able to regularize the parameter estimation problem. In particular, we can obtain invertible positive definite covariance matrices. The mixture model also takes into account the spatial alignments of the pixels by using non-stationary mixture proportions. Based on the classification results obtained on the Indian Pine data set, the proposed method yields better classification performance than state-of-the-art linear and quadratic classifiers, especially for small training sample sizes.\n
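One standard way to realize the regularization described above, sketched here under the assumption of a conjugate inverse-Wishart prior (the paper's exact priors may differ), is a MAP estimate that shrinks the sample covariance toward a prior matrix and stays positive definite even when samples are scarce:

```python
# A minimal sketch of the idea (not the paper's exact estimator): a
# conjugate prior on the covariance shrinks the sample covariance toward
# a prior matrix, guaranteeing an invertible positive definite estimate
# even when the number of training samples is small.
import numpy as np

def map_covariance(X, prior_scale=None, prior_dof=None):
    n, d = X.shape
    S = np.cov(X, rowvar=False, bias=True)        # sample covariance
    psi = prior_scale if prior_scale is not None else np.eye(d)
    nu = prior_dof if prior_dof is not None else d + 2
    return (psi + n * S) / (n + nu + d + 1)       # inverse-Wishart MAP mode

X = np.random.default_rng(0).normal(size=(5, 20))  # 5 samples, 20 bands
sigma = map_covariance(X)
print(np.all(np.linalg.eigvalsh(sigma) > 0))       # True: invertible
```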
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fuzzy selecting local region level set algorithm.\n \n \n \n \n\n\n \n Balla-Arabe, S.; Li, C.; Brost, V.; and Yang, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1810-1814, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"FuzzyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362696,\n  author = {S. Balla-Arabe and C. Li and V. Brost and F. Yang},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Fuzzy selecting local region level set algorithm},\n  year = {2015},\n  pages = {1810-1814},\n  abstract = {In this work, we introduce a novel localized region-based level set model which is simultaneously effective for heterogeneous objects and/or backgrounds and robust against noise. To this end, we propose to minimize an energy functional based on a selective local average: instead of using the intensities of all the pixels surrounding a given pixel when computing the local average, we first assign each of these surrounding pixels a local Gaussian fuzzy membership of being a background or an object pixel, and then replace the traditional local average with the fuzzy weighted local average of these pixels. With graphics processing unit (GPU) acceleration, the local lattice Boltzmann method is used to solve the proposed level set equation. The algorithm is effective in the presence of intensity heterogeneity, robust against noise, fast and highly parallelizable. Experimental results demonstrate subjectively and objectively the performance of the proposed framework.},\n  keywords = {fuzzy set theory;graphics processing units;image processing;lattice Boltzmann methods;fuzzy selecting local region level set algorithm;heterogeneous object;energy functional minimization;local Gaussian fuzzy membership;object pixel;graphics processing unit;local lattice Boltzmann method;Level set;Graphics processing units;Robustness;Force;Mathematical model;Active contours;Image edge detection;Level set method;image segmentation;lattice Boltzmann method;graphics processing units (GPU)},\n  doi = {10.1109/EUSIPCO.2015.7362696},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104967.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we introduce a novel localized region-based level set model which is simultaneously effective for heterogeneous objects and/or backgrounds and robust against noise. To this end, we propose to minimize an energy functional based on a selective local average: instead of using the intensities of all the pixels surrounding a given pixel when computing the local average, we first assign each of these surrounding pixels a local Gaussian fuzzy membership of being a background or an object pixel, and then replace the traditional local average with the fuzzy weighted local average of these pixels. With graphics processing unit (GPU) acceleration, the local lattice Boltzmann method is used to solve the proposed level set equation. The algorithm is effective in the presence of intensity heterogeneity, robust against noise, fast and highly parallelizable. Experimental results demonstrate subjectively and objectively the performance of the proposed framework.\n
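The fuzzy weighted local average at the heart of the method can be illustrated as follows; the membership function (a Gaussian of the intensity difference to the center pixel) and the numbers are assumptions of this sketch, not the authors' exact functional:

```python
# A minimal sketch (hypothetical weights, not the authors' functional):
# replace the plain local average around a pixel with a fuzzy weighted
# average, where each neighbor is weighted by a Gaussian membership of
# belonging to the same class (object or background) as the center pixel.
import numpy as np

def fuzzy_local_average(patch, sigma=10.0):
    """patch: square neighborhood; center pixel at the middle."""
    center = patch[patch.shape[0] // 2, patch.shape[1] // 2]
    w = np.exp(-((patch - center) ** 2) / (2.0 * sigma ** 2))  # memberships
    return (w * patch).sum() / w.sum()

patch = np.array([[10., 12., 11.],
                  [ 9., 10., 90.],   # 90 likely belongs to the other region
                  [11., 10., 12.]])
print(round(fuzzy_local_average(patch), 2))  # ~10.6, outlier suppressed
```

The plain local average of this patch would be about 20.6, dragged up by the heterogeneous neighbor; the fuzzy weighting keeps the estimate near the homogeneous region's intensity.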
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n PMMW image super resolution from compressed sensing observations.\n \n \n \n \n\n\n \n Saafin, W.; Villena, S.; Vega, M.; Molina, R.; and Katsaggelos, A. K.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1815-1819, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PMMWPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362697,\n  author = {W. Saafin and S. Villena and M. Vega and R. Molina and A. K. Katsaggelos},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {PMMW image super resolution from compressed sensing observations},\n  year = {2015},\n  pages = {1815-1819},\n  abstract = {In this paper we propose a novel optimization framework to obtain High Resolution (HR) Passive Millimeter Wave (PMMW) images from multiple Low Resolution (LR) observations captured using a simulated Compressed Sensing (CS) imaging system. The proposed CS Super Resolution (CSS-R) approach combines existing CS reconstruction algorithms with the use of Super Gaussian (SG) regularization terms on the image to be reconstructed, smoothness constraints on the registration parameters to be estimated, and the use of the Alternating Direction Method of Multipliers (ADMM) to link the CS and SR problems. The image estimation subproblem is solved using Majorization-Minimization (MM), registration is tackled by minimizing a quadratic function, and CS reconstruction is approached as an l1-minimization problem subject to a quadratic constraint. Experiments on simulated and real PMMW observations validate the proposed approach.},\n  keywords = {compressed sensing;Gaussian processes;image reconstruction;image registration;image resolution;minimax techniques;minimisation;quadratic programming;PMMW image super resolution;compressed sensing observation;optimization framework;passive millimeter wave image super resolution;multiple low resolution observation;LR observation;CS super resolution;CSS-R;CS reconstruction algorithm;super Gaussian regularization;SG regularization;registration parameter estimation;alternate direction method of multiplier;ADMM;majorization-minimization;MM;quadratic function minimization;l1-minimization problem;quadratic constraint;Optimization;Image resolution;Europe;Image coding;Signal processing algorithms;Compressed sensing;Imaging;Passive millimeter-wave;compressive sensing;super resolution;image restoration},\n  doi = {10.1109/EUSIPCO.2015.7362697},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104757.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we propose a novel optimization framework to obtain High Resolution (HR) Passive Millimeter Wave (PMMW) images from multiple Low Resolution (LR) observations captured using a simulated Compressed Sensing (CS) imaging system. The proposed CS Super Resolution (CSS-R) approach combines existing CS reconstruction algorithms with the use of Super Gaussian (SG) regularization terms on the image to be reconstructed, smoothness constraints on the registration parameters to be estimated, and the use of the Alternating Direction Method of Multipliers (ADMM) to link the CS and SR problems. The image estimation subproblem is solved using Majorization-Minimization (MM), registration is tackled by minimizing a quadratic function, and CS reconstruction is approached as an l1-minimization problem subject to a quadratic constraint. Experiments on simulated and real PMMW observations validate the proposed approach.\n
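The ADMM machinery the framework relies on can be sketched in its generic l1-regularized least-squares form (this is the standard building block only, not the full CSS-R pipeline with SG priors and registration):

```python
# A minimal sketch of the ADMM building block (generic l1-regularized
# least squares, not the full CSS-R method):
import numpy as np

def soft(v, t):
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def admm_lasso(A, b, lam=0.1, rho=1.0, iters=200):
    m, n = A.shape
    x = z = u = np.zeros(n)
    L = np.linalg.inv(A.T @ A + rho * np.eye(n))   # cache the factor
    for _ in range(iters):
        x = L @ (A.T @ b + rho * (z - u))          # quadratic subproblem
        z = soft(x + u, lam / rho)                 # l1 proximal step
        u = u + x - z                              # dual ascent
    return z

rng = np.random.default_rng(1)
A = rng.normal(size=(30, 60))                      # CS-style measurements
x_true = np.zeros(60); x_true[[3, 17, 42]] = [1.0, -2.0, 1.5]
x_hat = admm_lasso(A, A @ x_true, lam=0.05)
print(np.flatnonzero(np.abs(x_hat) > 0.5))         # recovers the support
```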
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Image regularization with higher-order morphological gradients.\n \n \n \n \n\n\n \n Nakashizuka, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1820-1824, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ImagePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362698,\n  author = {M. Nakashizuka},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Image regularization with higher-order morphological gradients},\n  year = {2015},\n  pages = {1820-1824},\n  abstract = {In this paper, we propose an image prior based on morphological image features for image recovery. The proposed prior is obtained as the sum of the morphological gradient and its higher-order extensions. The morphological gradient is defined as the difference between the dilation and the erosion of an image and yields a discretized modulus of the gradient. In order to suppress artifacts that appear in the recovered image, we introduce higher-order morphological gradients. The regularization problem with the proposed prior is reduced to a constrained minimization problem. In order to apply the subgradient method to this problem, we derive the subgradient of the proposed priors. We apply the proposed prior to image denoising and demonstrate that the proposed higher-order morphological gradient prior is capable of suppressing staircase artifacts. Comparison with the total variation image prior is also demonstrated.},\n  keywords = {gradient methods;image denoising;mathematical morphology;minimisation;image regularization;morphological image features;image recovery;constrained minimization problem;subgradient method;image denoising;higher-order morphological gradient prior;staircase artifacts;TV;Standards;Minimization;Approximation methods;Europe;Signal processing;Image denoising;Image recovery;mathematical morphology;morphological gradient;image prior;regularization},\n  doi = {10.1109/EUSIPCO.2015.7362698},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103875.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose an image prior based on morphological image features for image recovery. The proposed prior is obtained as the sum of the morphological gradient and its higher-order extensions. The morphological gradient is defined as the difference between the dilation and the erosion of an image and yields a discretized modulus of the gradient. In order to suppress artifacts that appear in the recovered image, we introduce higher-order morphological gradients. The regularization problem with the proposed prior is reduced to a constrained minimization problem. In order to apply the subgradient method to this problem, we derive the subgradient of the proposed priors. We apply the proposed prior to image denoising and demonstrate that the proposed higher-order morphological gradient prior is capable of suppressing staircase artifacts. Comparison with the total variation image prior is also demonstrated.\n
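The basic quantity is easy to reproduce; below is a minimal sketch via scipy, with the higher-order term shown as a repeated gradient (the paper's exact higher-order construction may differ):

```python
# A minimal sketch: morphological gradient = dilation - erosion, a
# discrete modulus of the gradient; applying it again gives one possible
# higher-order term (illustrative only).
import numpy as np
from scipy.ndimage import grey_dilation, grey_erosion

def morph_gradient(img, size=(3, 3)):
    """Difference between grayscale dilation and erosion."""
    return grey_dilation(img, size=size) - grey_erosion(img, size=size)

img = np.zeros((8, 8)); img[2:6, 2:6] = 1.0   # a bright square
g1 = morph_gradient(img)                       # first-order gradient
g2 = morph_gradient(g1)                        # a higher-order term
print(g1.sum(), g2.sum())                      # prior value = sum of both
```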
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Shot aggregating strategy for near-duplicate video retrieval.\n \n \n \n \n\n\n \n Srinivasan, V.; Lefebvre, F.; and Ozerov, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1825-1829, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ShotPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362699,\n  author = {V. Srinivasan and F. Lefebvre and A. Ozerov},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Shot aggregating strategy for near-duplicate video retrieval},\n  year = {2015},\n  pages = {1825-1829},\n  abstract = {In this paper, we propose a new strategy for near-duplicate video retrieval that is based on shot aggregation. We investigate different methods for shot aggregation with the main objective of solving the difficult trade-off between performance, scalability and speed. The proposed shot aggregation is based on two steps: the first step consists of keyframe selection, and the second is the aggregation of the keyframes per shot. The aggregation is performed by applying Fisher vectors to the descriptors computed on the selected keyframes. We demonstrate that scalability and speed are tackled by a sparse video analysis approach (i.e. extracting only a few keyframes) combined with shot aggregation, while the performance is discussed around the choice of the aggregation strategy. Performance is evaluated on the CC_WEB_VIDEO dataset, which is designed for near-duplicate video retrieval assessment and on which several authors have reported experiments.},\n  keywords = {compressed sensing;video retrieval;shot aggregation strategy;keyframe selection;keyframe aggregation;sparse video analysis approach;near-duplicate video retrieval assessment;Feature extraction;Scalability;Europe;Signal processing;Cameras;Kernel;Hidden Markov models},\n  doi = {10.1109/EUSIPCO.2015.7362699},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097527.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a new strategy for near-duplicate video retrieval that is based on shot aggregation. We investigate different methods for shot aggregation with the main objective of solving the difficult trade-off between performance, scalability and speed. The proposed shot aggregation is based on two steps: the first step consists of keyframe selection, and the second is the aggregation of the keyframes per shot. The aggregation is performed by applying Fisher vectors to the descriptors computed on the selected keyframes. We demonstrate that scalability and speed are tackled by a sparse video analysis approach (i.e. extracting only a few keyframes) combined with shot aggregation, while the performance is discussed around the choice of the aggregation strategy. Performance is evaluated on the CC_WEB_VIDEO dataset, which is designed for near-duplicate video retrieval assessment and on which several authors have reported experiments.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An efficient statistical-based retrieval approach for JPEG2000 compressed images.\n \n \n \n \n\n\n \n Chaker, A.; Kaaniche, M.; Benazza-Benyahia, A.; and Antonini, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1830-1834, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362700,\n  author = {A. Chaker and M. Kaaniche and A. Benazza-Benyahia and M. Antonini},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An efficient statistical-based retrieval approach for JPEG2000 compressed images},\n  year = {2015},\n  pages = {1830-1834},\n  abstract = {This paper deals with the problem of image retrieval when the database is stored in a compressed form, typically using the JPEG2000 encoding scheme based on a wavelet transform followed by a uniform scalar quantization. The state-of-the-art method applies a preprocessing step before feature extraction to reduce the difference in compression quality between the images. Our contribution consists in extracting robust features directly from the quantized coefficients. More precisely, assuming that the unquantized coefficients within a subband have a Laplacian distribution, we propose to estimate the distribution parameter from the quantized coefficients. Then, the estimated parameters of the whole set of subbands are used to build a salient feature for the indexing process. Experimental results show that the proposed retrieval approach significantly outperforms the state-of-the-art one.},\n  keywords = {data compression;feature extraction;image coding;image retrieval;parameter estimation;statistical analysis;statistical-based retrieval approach;JPEG2000 compressed imaging;image retrieval;JPEG2000 encoding scheme;wavelet transform;uniform scalar quantization;feature extraction;Laplacian distribution;distribution parameter estimation;Decision support systems;Europe;Signal processing;Content based image retrieval;compressed images;wavelet domain;feature extraction;statistical model;retrieval performance},\n  doi = {10.1109/EUSIPCO.2015.7362700},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103713.pdf},\n}\n\n
\n
\n\n\n
\n This paper deals with the problem of image retrieval when the database is stored in a compressed form, typically using the JPEG2000 encoding scheme based on a wavelet transform followed by a uniform scalar quantization. The state-of-the-art method applies a preprocessing step before feature extraction to reduce the difference in compression quality between the images. Our contribution consists in extracting robust features directly from the quantized coefficients. More precisely, assuming that the unquantized coefficients within a subband have a Laplacian distribution, we propose to estimate the distribution parameter from the quantized coefficients. Then, the estimated parameters of the whole set of subbands are used to build a salient feature for the indexing process. Experimental results show that the proposed retrieval approach significantly outperforms the state-of-the-art one.\n
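The estimation idea, recovering a Laplacian scale directly from quantization indices by maximizing the probability mass of each bin, can be sketched as follows; a plain mid-tread uniform quantizer is assumed here, whereas JPEG2000's deadzone quantizer would change the bin edges but not the principle:

```python
# A minimal sketch (generic bin-likelihood estimator, not the paper's
# exact scheme): estimate the Laplacian scale b from quantization
# indices by maximizing the probability of each quantization bin.
import numpy as np
from scipy.optimize import minimize_scalar

def laplace_cdf(x, b):
    return np.where(x < 0, 0.5 * np.exp(x / b), 1 - 0.5 * np.exp(-x / b))

def estimate_scale(indices, step):
    lo, hi = (indices - 0.5) * step, (indices + 0.5) * step  # bin edges
    nll = lambda b: -np.sum(np.log(laplace_cdf(hi, b) - laplace_cdf(lo, b)))
    return minimize_scalar(nll, bounds=(1e-3, 1e3), method="bounded").x

rng = np.random.default_rng(2)
coeffs = rng.laplace(scale=5.0, size=10000)       # "subband" coefficients
idx = np.round(coeffs / 2.0)                       # quantize, step = 2
print(round(estimate_scale(idx, 2.0), 2))          # close to 5.0
```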
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n High resolution depth image recovery algorithm using grayscale image.\n \n \n \n \n\n\n \n Uruma, K.; Konishi, K.; Takahashi, T.; and Furukawa, T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1835-1839, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"HighPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362701,\n  author = {K. Uruma and K. Konishi and T. Takahashi and T. Furukawa},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {High resolution depth image recovery algorithm using grayscale image},\n  year = {2015},\n  pages = {1835-1839},\n  abstract = {This paper proposes a depth image recovery algorithm which recovers depth images using grayscale images and low resolution depth images. Based on an image colorization technique, the depth image recovery problem is formulated as a convex quadratic optimization problem, and a fast depth image recovery algorithm is proposed. Experimental results show that the proposed algorithm effectively recovers a high resolution depth image from a very low resolution depth image.},\n  keywords = {image colour analysis;image resolution;optimisation;grayscale image;high resolution depth image recovery;image colorization;depth value image recovery problem;convex quadratic optimization problem;Image resolution;Signal processing algorithms;Gray-scale;Europe;Signal resolution;Art;depth image recovery;image colorization;depth sensor},\n  doi = {10.1109/EUSIPCO.2015.7362701},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102875.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a depth image recovery algorithm which recovers depth images using grayscale images and low resolution depth images. Based on an image colorization technique, the depth image recovery problem is formulated as a convex quadratic optimization problem, and a fast depth image recovery algorithm is proposed. Experimental results show that the proposed algorithm effectively recovers a high resolution depth image from a very low resolution depth image.\n
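A minimal 1-D sketch of the colorization idea follows (illustrative, not the paper's formulation): sparse depth anchors are propagated along a grayscale signal by solving a small quadratic problem whose weights follow intensity similarity, so depth edges lock onto grayscale edges.

```python
# A minimal 1-D colorization-style sketch: each unknown depth equals a
# similarity-weighted average of its neighbors, anchors stay fixed, and
# the whole thing is one sparse linear solve.
import numpy as np
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve

gray = np.array([10., 11., 10., 80., 82., 81.])   # one image row
anchors = {0: 1.0, 5: 4.0}                         # sparse depth samples

n = len(gray)
A, b = lil_matrix((n, n)), np.zeros(n)
for i in range(n):
    if i in anchors:
        A[i, i], b[i] = 1.0, anchors[i]            # keep known depth
        continue
    nbrs = [j for j in (i - 1, i + 1) if 0 <= j < n]
    w = np.array([np.exp(-(gray[i] - gray[j]) ** 2 / 100.0) for j in nbrs])
    w /= w.sum()
    A[i, i] = 1.0
    for j, wj in zip(nbrs, w):
        A[i, j] = -wj                              # d_i = sum_j w_ij * d_j
print(spsolve(A.tocsr(), b).round(2))              # ~[1 1 1 4 4 4]
```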
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Combining single-image and multiview super-resolution for mixed-resolution image plus depth data.\n \n \n \n \n\n\n \n Richter, T.; Seiler, J.; Schnurrer, W.; Bätz, M.; and Kaup, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1840-1844, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CombiningPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362702,\n  author = {T. Richter and J. Seiler and W. Schnurrer and M. Bätz and A. Kaup},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Combining single-image and multiview super-resolution for mixed-resolution image plus depth data},\n  year = {2015},\n  pages = {1840-1844},\n  abstract = {In mixed-resolution multiview setups, a scene is captured from various viewpoints with cameras having different spatial resolutions. Compared to full-resolution systems, mixed-resolution setups allow for savings with respect to data transmission, storage, and costs. However, for applications like free viewpoint television, high-quality images are required for all available camera perspectives. Therefore, high-resolution cameras can be used to increase the image quality of a neighboring low-resolution view. Due to occlusions, some parts of the scene are invisible in the high-resolution reference views and thus cannot be directly synthesized from the neighboring perspectives. In this paper, we propose to integrate the idea of single-image super-resolution to better handle occluded areas and thus to improve the super-resolution quality for mixed-resolution multiview images. For a downsampling factor of 4, the proposed method achieves an average gain of 0.53 dB with respect to a comparable multiview super-resolution approach.},\n  keywords = {cameras;data communication;image resolution;single-image resolution;multiview super-resolution;mixed-resolution image;cameras;data transmission;data storage;free viewpoint television;image quality;occlusions;single-image super-resolution;gain 0.53 dB;Cameras;Signal resolution;Spatial resolution;Extrapolation;Interpolation;Multiview;Super-Resolution;Mixed-Resolution},\n  doi = {10.1109/EUSIPCO.2015.7362702},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102173.pdf},\n}\n\n
\n
\n\n\n
\n In mixed-resolution multiview setups, a scene is captured from various viewpoints with cameras having different spatial resolutions. Compared to full-resolution systems, mixed-resolution setups allow for savings with respect to data transmission, storage, and costs. However, for applications like free viewpoint television, high-quality images are required for all available camera perspectives. Therefore, high-resolution cameras can be used to increase the image quality of a neighboring low-resolution view. Due to occlusions, some parts of the scene are invisible in the high-resolution reference views and thus cannot be directly synthesized from the neighboring perspectives. In this paper, we propose to integrate the idea of single-image super-resolution to better handle occluded areas and thus to improve the super-resolution quality for mixed-resolution multiview images. For a downsampling factor of 4, the proposed method achieves an average gain of 0.53 dB with respect to a comparable multiview super-resolution approach.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An online background subtraction algorithm using a contiguously weighted linear regression model.\n \n \n \n \n\n\n \n Hu, Y.; Sirlantzis, K.; Howells, G.; Ragot, N.; and Rodríguez, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1845-1849, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362703,\n  author = {Y. Hu and K. Sirlantzis and G. Howells and N. Ragot and P. Rodríguez},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An online background subtraction algorithm using a contiguously weighted linear regression model},\n  year = {2015},\n  pages = {1845-1849},\n  abstract = {In this paper, we propose a fast online background subtraction algorithm detecting a contiguous foreground. The proposed algorithm consists of a background model and a foreground model. The background model is a regression-based low rank model. It seeks a low rank background subspace and represents the background as a linear combination of the basis spanning the subspace. The foreground model promotes contiguity in the foreground detection. It encourages the foreground to be detected as whole regions rather than separated pixels. We formulate the background and foreground models into a contiguously weighted linear regression problem. This problem can be solved efficiently and admits an online scheme. The experimental comparison with the most recent algorithms on the benchmark dataset demonstrates the high effectiveness of the proposed algorithm.},\n  keywords = {regression analysis;video signal processing;benchmark dataset;foreground detection;low rank background subspace;foreground model;contiguously weighted linear regression model;online background subtraction algorithm;Signal processing algorithms;Computational modeling;Europe;Linear regression;Yttrium;Video sequences;Approximation algorithms;online background subtraction;contiguity},\n  doi = {10.1109/EUSIPCO.2015.7362703},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096731.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a fast online background subtraction algorithm detecting a contiguous foreground. The proposed algorithm consists of a background model and a foreground model. The background model is a regression-based low rank model. It seeks a low rank background subspace and represents the background as a linear combination of the basis spanning the subspace. The foreground model promotes contiguity in the foreground detection. It encourages the foreground to be detected as whole regions rather than separated pixels. We formulate the background and foreground models into a contiguously weighted linear regression problem. This problem can be solved efficiently and admits an online scheme. The experimental comparison with the most recent algorithms on the benchmark dataset demonstrates the high effectiveness of the proposed algorithm.\n
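The regression view can be illustrated with a plain iteratively reweighted least-squares sketch (this ignores the paper's contiguity weighting and online updates; the basis, frame and weight rule are synthetic assumptions):

```python
# A minimal sketch (illustrative, not the authors' contiguity-weighted
# model): represent the background as a linear combination of a low-rank
# basis U and downweight pixels that look like foreground when fitting.
import numpy as np

def weighted_background(frame, U, iters=5):
    w = np.ones(frame.size)                      # start with equal weights
    for _ in range(iters):
        coef = np.linalg.lstsq(U * w[:, None], frame * w, rcond=None)[0]
        resid = np.abs(frame - U @ coef)
        w = 1.0 / (1.0 + (resid / (resid.std() + 1e-9)) ** 2)  # reweight
    return U @ coef, resid                       # background, residual

rng = np.random.default_rng(3)
U = np.linalg.qr(rng.normal(size=(400, 5)))[0]   # low-rank background basis
frame = U @ rng.normal(size=5)
frame[50:60] += 5.0                              # a small foreground blob
bg, resid = weighted_background(frame, U)
print(np.flatnonzero(resid > 2.0))               # indices 50..59 detected
```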
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Clustering-based fast intra prediction mode algorithm for HEVC.\n \n \n \n \n\n\n \n Jaballah, S.; Rouis, K.; and Belhadj Tahar, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1850-1854, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Clustering-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362704,\n  author = {S. Jaballah and K. Rouis and J. {Belhadj Tahar}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Clustering-based fast intra prediction mode algorithm for HEVC},\n  year = {2015},\n  pages = {1850-1854},\n  abstract = {High Efficiency Video Coding (HEVC) is the next-generation video coding standard. HEVC provides equivalent perceptual quality with a bit-budget saving greater than 50% compared to H.264/AVC. In this paper, we propose a new fast intra prediction mode decision algorithm for HEVC. We apply an early termination method based on the statistics of the intra prediction modes (IPMs) resulting from both the rough mode decision and most probable mode stages. The resulting IPMs are clustered into K clusters by means of the K-medoid clustering algorithm, and each cluster center represents all the IPMs within its cluster for the rate-distortion optimization (RDO) process. The suggested algorithm has been evaluated on high resolution test video sequences. Compared with the current HM16.0 and a state-of-the-art scheme in all intra high efficiency configuration cases, the proposed algorithm outperforms the state-of-the-art scheme in terms of encoding time with similar coding efficiency.},\n  keywords = {optimisation;prediction theory;statistical analysis;video coding;visual perception;clustering-based fast intraprediction mode algorithm;IPM cluster center;high efficiency video coding;HEVC;next generation video coding standard;equivalent perceptual quality;H.264-AVC;intraprediction mode decision algorithm;termination method;K-medoid clustering algorithm;rate-distortion optimization;RDO process;high resolution test video sequences;HM16.0;encoding time;coding efficiency;Encoding;Prediction algorithms;Clustering algorithms;Signal processing algorithms;Video coding;Europe;HEVC;intra prediction;mode decision;early termination},\n  doi = {10.1109/EUSIPCO.2015.7362704},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105087.pdf},\n}\n\n
\n
\n\n\n
\n High Efficiency Video Coding (HEVC) is the next-generation video coding standard. HEVC provides equivalent perceptual quality with a bit-budget saving greater than 50% compared to H.264/AVC. In this paper, we propose a new fast intra prediction mode decision algorithm for HEVC. We apply an early termination method based on the statistics of the intra prediction modes (IPMs) resulting from both the rough mode decision and most probable mode stages. The resulting IPMs are clustered into K clusters by means of the K-medoid clustering algorithm, and each cluster center represents all the IPMs within its cluster for the rate-distortion optimization (RDO) process. The suggested algorithm has been evaluated on high resolution test video sequences. Compared with the current HM16.0 and a state-of-the-art scheme in all intra high efficiency configuration cases, the proposed algorithm outperforms the state-of-the-art scheme in terms of encoding time with similar coding efficiency.\n
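The clustering step can be sketched with a plain k-medoids on 1-D mode indices under an absolute-difference distance (hypothetical data; the paper's distance between angular modes may differ):

```python
# A minimal k-medoids sketch: cluster candidate IPM indices so that only
# one representative mode per cluster needs a full RDO evaluation.
import numpy as np

def k_medoids(points, k, iters=20, seed=0):
    rng = np.random.default_rng(seed)
    medoids = rng.choice(points, size=k, replace=False)
    for _ in range(iters):
        d = np.abs(points[:, None] - medoids[None, :])  # distance matrix
        labels = d.argmin(axis=1)
        for j in range(k):                              # update medoids
            members = points[labels == j]
            if members.size:
                costs = np.abs(members[:, None] - members[None, :]).sum(0)
                medoids[j] = members[costs.argmin()]
    return medoids, labels

ipms = np.array([5, 6, 6, 7, 25, 26, 26, 10])           # candidate modes
centers, labels = k_medoids(ipms, k=3)
print(sorted(centers))       # e.g. [6, 10, 26]: one RDO test per cluster
```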
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Transform learning MRI with global wavelet regularization.\n \n \n \n \n\n\n \n Tanc, A. K.; and Eksioglu, E. M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1855-1859, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"TransformPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362705,\n  author = {A. K. Tanc and E. M. Eksioglu},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Transform learning MRI with global wavelet regularization},\n  year = {2015},\n  pages = {1855-1859},\n  abstract = {Sparse regularization of the reconstructed image in a transform domain has led to state of the art algorithms for magnetic resonance imaging (MRI) reconstruction. Recently, new methods have been proposed which perform sparse regularization on patches extracted from the image. These patch level regularization methods utilize synthesis dictionaries or analysis transforms learned from the patch sets. In this work we jointly enforce a global wavelet domain sparsity constraint together with a patch level, learned analysis sparsity prior. Simulations indicate that this joint regularization culminates in MRI reconstruction performance exceeding the performance of methods which apply either of these terms alone.},\n  keywords = {biomedical MRI;image reconstruction;medical image processing;wavelet transforms;transform learning;global wavelet regularization;MRI;sparse regularization;image reconstruction;magnetic resonance imaging reconstruction;patch level regularization methods;synthesis dictionaries;global wavelet domain sparsity constraint;Image reconstruction;Signal processing algorithms;Transforms;Magnetic resonance imaging;Algorithm design and analysis;Dictionaries;Noise reduction;Magnetic resonance;Image reconstruction;Sparsity;Transform learning;Compressed Sensing},\n  doi = {10.1109/EUSIPCO.2015.7362705},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570094339.pdf},\n}\n\n
\n
\n\n\n
\n Sparse regularization of the reconstructed image in a transform domain has led to state of the art algorithms for magnetic resonance imaging (MRI) reconstruction. Recently, new methods have been proposed which perform sparse regularization on patches extracted from the image. These patch level regularization methods utilize synthesis dictionaries or analysis transforms learned from the patch sets. In this work we jointly enforce a global wavelet domain sparsity constraint together with a patch level, learned analysis sparsity prior. Simulations indicate that this joint regularization culminates in MRI reconstruction performance exceeding the performance of methods which apply either of these terms alone.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sleep stage classification using sparse rational decomposition of single channel EEG records.\n \n \n \n \n\n\n \n Samiee, K.; Kovács, P.; Kiranyaz, S.; Gabbouj, M.; and Saramaki, T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1860-1864, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SleepPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362706,\n  author = {K. Samiee and P. Kovács and S. Kiranyaz and M. Gabbouj and T. Saramaki},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Sleep stage classification using sparse rational decomposition of single channel EEG records},\n  year = {2015},\n  pages = {1860-1864},\n  abstract = {A sparse representation of 1D signals is proposed based on time-frequency analysis using the Generalized Rational Discrete Short Time Fourier Transform (RDSTFT). First, the signal is decomposed into a set of frequency sub-bands using poles and coefficients of the RDSTFT spectra. Then, sparsity is obtained by applying the Basis Pursuit (BP) algorithm on these frequency sub-bands. Finally, the total energy of each sub-band is used to extract features for offline patient-specific sleep stage classification of single channel EEG records. In the classification of over 670 hours of sleep electroencephalography from 39 subjects, an overall accuracy of 92.50% on the test set is achieved using a random forests (RF) classifier trained on 25% of each sleep record. A comparison with the results of other state-of-the-art methods demonstrates the effectiveness of the proposed sparse decomposition method in EEG signal analysis.},\n  keywords = {compressed sensing;discrete Fourier transforms;electroencephalography;medical signal processing;sleep stage classification;sparse rational decomposition;single channel EEG records;sparse representation;time-frequency analysis;generalized rational discrete short time Fourier transform;basis pursuit algorithm;random forests classifier;EEG signal analysis;Sleep;Electroencephalography;Feature extraction;Radio frequency;Signal processing algorithms;Europe;Signal processing;Sleep stage classification;sleep-EDF;sparsity;rational functions;basis pursuit},\n  doi = {10.1109/EUSIPCO.2015.7362706},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096711.pdf},\n}\n\n
\n
\n\n\n
\n A sparse representation of 1D signals is proposed based on time-frequency analysis using the Generalized Rational Discrete Short Time Fourier Transform (RDSTFT). First, the signal is decomposed into a set of frequency sub-bands using poles and coefficients of the RDSTFT spectra. Then, sparsity is obtained by applying the Basis Pursuit (BP) algorithm on these frequency sub-bands. Finally, the total energy of each sub-band is used to extract features for offline patient-specific sleep stage classification of single channel EEG records. In the classification of over 670 hours of sleep electroencephalography from 39 subjects, an overall accuracy of 92.50% on the test set is achieved using a random forests (RF) classifier trained on 25% of each sleep record. A comparison with the results of other state-of-the-art methods demonstrates the effectiveness of the proposed sparse decomposition method in EEG signal analysis.\n
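A toy version of the classification stage might look as follows, with ordinary STFT filter-bank energies standing in for the rational RDSTFT/basis-pursuit front end and synthetic labels replacing real sleep stages:

```python
# A minimal sketch of the feature + classifier stage only (plain STFT
# band energies, fake labels; the paper's RDSTFT front end is not shown).
import numpy as np
from scipy.signal import stft
from sklearn.ensemble import RandomForestClassifier

def band_energies(epoch, fs=100, n_bands=8):
    _, _, Z = stft(epoch, fs=fs, nperseg=256)
    power = np.abs(Z) ** 2
    bands = np.array_split(power, n_bands, axis=0)  # split over frequency
    return np.array([b.sum() for b in bands])       # energy per sub-band

rng = np.random.default_rng(4)
X = np.array([band_energies(rng.normal(size=3000)) for _ in range(40)])
y = rng.integers(0, 2, size=40)                     # fake sleep stages
clf = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)
print(clf.score(X, y))                              # training accuracy
```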
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance bounds analysis in multichannel diffusion-MRI.\n \n \n \n \n\n\n \n Sid, F. A.; Abed-Meraim, K.; Harba, R.; and Oulebsir-Boumghar, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1865-1869, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362707,\n  author = {F. A. Sid and K. Abed-Meraim and R. Harba and F. Oulebsir-Boumghar},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Performance bounds analysis in multichannel diffusion-MRI},\n  year = {2015},\n  pages = {1865-1869},\n  abstract = {In this paper, we develop Cramér-Rao Bound (CRB) expressions in multichannel diffusion MRI. We consider a multi-tensor model with Non-central Chi (Nc-Chi) distributed noise. The CRB formulas involve integral expressions that are numerically evaluated. In a second step, we propose to simplify the CRB calculation by introducing two analytical approximate expressions for high SNR and low SNR, respectively. Moreover, we develop CRB formulas for the parameters of clinical interest such as the Fractional Anisotropy (FA) and the principal tensor directions. Finally, we exploit these CRBs to analyze the impact of controllable system parameters (e.g., b-value, number of diffusion gradients, number of acquisition coils) on the clinical parameter estimation in view of a future optimal design of the acquisition protocol.},\n  keywords = {biomedical MRI;medical image processing;statistical distributions;tensors;performance bounds analysis;multichannel diffusion MRI;Cramer-Rao bound;multitensor model;noncentral Chi distributed noise;integral expressions;fractional anisotropy;principal tensor direction;gradient diffusion;acquisition protocol;Signal to noise ratio;Coils;Approximation methods;Tensile stress;Magnetic resonance imaging;Anisotropic magnetoresistance;Computational modeling;Cramér-Rao Bound;dMRI;Nc-Chi distribution;multiple coils;multi-tensor model},\n  doi = {10.1109/EUSIPCO.2015.7362707},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096975.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we develop Cramér-Rao Bound (CRB) expressions in multichannel diffusion MRI. We consider a multi-tensor model with Non-central Chi (Nc-Chi) distributed noise. The CRB formulas involve integral expressions that are numerically evaluated. In a second step, we propose to simplify the CRB calculation by introducing two analytical approximate expressions for high SNR and low SNR, respectively. Moreover, we develop CRB formulas for the parameters of clinical interest such as the Fractional Anisotropy (FA) and the principal tensor directions. Finally, we exploit these CRBs to analyze the impact of controllable system parameters (e.g., b-value, number of diffusion gradients, number of acquisition coils) on the clinical parameter estimation in view of a future optimal design of the acquisition protocol.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Symmetrical EEG-FMRI imaging by sparse regularization.\n \n \n \n \n\n\n \n Oberlin, T.; Barillot, C.; Gribonval, R.; and Maurel, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1870-1874, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SymmetricalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362708,\n  author = {T. Oberlin and C. Barillot and R. Gribonval and P. Maurel},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Symmetrical EEG-FMRI imaging by sparse regularization},\n  year = {2015},\n  pages = {1870-1874},\n  abstract = {This work considers the problem of brain imaging using simultaneously recorded electroencephalography (EEG) and functional magnetic resonance imaging (fMRI). To this end, we introduce a linear coupling model that links the electrical EEG signal to the hemodynamic response from the blood-oxygen level dependent (BOLD) signal. Both modalities are then symmetrically integrated, to achieve a high resolution in time and space while allowing some robustness against potential decoupling of the BOLD effect. The novelty of the approach consists in expressing the joint imaging problem as a linear inverse problem, which is addressed using sparse regularization. We consider several sparsity-enforcing penalties, which naturally reflect the fact that only a few areas of the brain are activated at a certain time, and allow for a fast optimization through proximal algorithms. The significance of the method and the effectiveness of the algorithms are demonstrated through numerical investigations on a spherical head model.},\n  keywords = {biomedical MRI;blood;electroencephalography;haemodynamics;inverse problems;medical image processing;spherical head model;linear inverse problem;joint imaging problem;BOLD signal;blood-oxygen level dependent;electrical EEG signal;linear coupling model;functional magnetic resonance imaging;electroencephalography;brain imaging;sparse regularization;EEG-FMRI imaging;Electroencephalography;Brain modeling;Inverse problems;Couplings;Noise measurement;Signal processing algorithms;Imaging;EEG-fMRI;multimodal imaging;structured sparsity;EEG inverse problem},\n  doi = {10.1109/EUSIPCO.2015.7362708},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103399.pdf},\n}\n\n
\n
\n\n\n
\n This work considers the problem of brain imaging using simultaneously recorded electroencephalography (EEG) and functional magnetic resonance imaging (fMRI). To this end, we introduce a linear coupling model that links the electrical EEG signal to the hemodynamic response from the blood-oxygen level dependent (BOLD) signal. Both modalities are then symmetrically integrated, to achieve a high resolution in time and space while allowing some robustness against potential decoupling of the BOLD effect. The novelty of the approach consists in expressing the joint imaging problem as a linear inverse problem, which is addressed using sparse regularization. We consider several sparsity-enforcing penalties, which naturally reflect the fact that only a few areas of the brain are activated at a certain time, and allow for a fast optimization through proximal algorithms. The significance of the method and the effectiveness of the algorithms are demonstrated through numerical investigations on a spherical head model.\n
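The proximal optimization mentioned above can be sketched with plain ISTA for an l1 penalty (the structured penalties of the paper would swap in other proximal operators; the lead field and sources here are synthetic):

```python
# A minimal ISTA sketch for a sparse linear inverse problem:
# minimize 0.5*||G s - m||^2 + lam*||s||_1 over source amplitudes s.
import numpy as np

def ista(G, m, lam=0.1, iters=300):
    step = 1.0 / np.linalg.norm(G, 2) ** 2            # 1 / Lipschitz const.
    s = np.zeros(G.shape[1])
    for _ in range(iters):
        g = s - step * G.T @ (G @ s - m)              # gradient step
        s = np.sign(g) * np.maximum(np.abs(g) - step * lam, 0)  # prox step
    return s

rng = np.random.default_rng(5)
G = rng.normal(size=(32, 200))                        # toy lead-field matrix
s_true = np.zeros(200); s_true[[20, 140]] = [2.0, -1.5]
s_hat = ista(G, G @ s_true, lam=0.2)
print(np.flatnonzero(np.abs(s_hat) > 0.5))            # expected: [20, 140]
```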
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n MEM-diffusion MRI framework to solve MEEG inverse problem.\n \n \n \n \n\n\n \n Belaoucha, B.; Lina, J.; Clerc, M.; and Papadopoulo, T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1875-1879, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MEM-diffusionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362709,\n  author = {B. Belaoucha and J. Lina and M. Clerc and T. Papadopoulo},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {MEM-diffusion MRI framework to solve MEEG inverse problem},\n  year = {2015},\n  pages = {1875-1879},\n  abstract = {In this paper, we present a framework to fuse information coming from diffusion magnetic resonance imaging (dMRI) with Magnetoencephalography (MEG)/Electroencephalography (EEG) measurements to reconstruct the activation on the cortical surface. The MEG/EEG inverse problem is solved by the Maximum Entropy on the Mean (MEM) principle and by assuming that the sources inside each cortical region follow a normal distribution. These regions are obtained using dMRI and assumed to be functionally independent. The source reconstruction framework presented in this work is tested using synthetic and real data. The activated regions for the real data are consistent with the literature on the face recognition and processing network.},\n  keywords = {biodiffusion;biomedical MRI;electroencephalography;image reconstruction;magnetoencephalography;medical image processing;MEM-dMRI framework;diffusion MRI;magnetic resonance imaging;MEEG measurements;magnetoencephalography;electroencephalography;cortical surface;Maximum Entropy on the Mean;MEM principle;source reconstruction framework;face recognition;processing network;Electroencephalography;Entropy;Magnetic resonance imaging;Face recognition;Time measurement;Europe;Signal processing;MEG;EEG;dMRI;source reconstruction;parcellation;MEM},\n  doi = {10.1109/EUSIPCO.2015.7362709},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103657.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we present a framework to fuse information coming from diffusion magnetic resonance imaging (dMRI) with Magnetoencephalography (MEG)/Electroencephalography (EEG) measurements to reconstruct the activation on the cortical surface. The MEG/EEG inverse problem is solved by the Maximum Entropy on the Mean (MEM) principle and by assuming that the sources inside each cortical region follow a normal distribution. These regions are obtained using dMRI and assumed to be functionally independent. The source reconstruction framework presented in this work is tested using synthetic and real data. The activated regions for the real data are consistent with the literature on the face recognition and processing network.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Non-unitary joint zero-block diagonalization of matrices using a conjugate gradient algorithm.\n \n \n \n \n\n\n \n Cherrak, O.; Ghennioui, H.; Abarkan, E. -.; Thirion-Moreau, N.; and Moreau, E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1880-1884, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Non-unitaryPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362710,\n  author = {O. Cherrak and H. Ghennioui and E. -. Abarkan and N. Thirion-Moreau and E. Moreau},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Non-unitary joint zero-block diagonalization of matrices using a conjugate gradient algorithm},\n  year = {2015},\n  pages = {1880-1884},\n  abstract = {This communication addresses a new problem: the Non-Unitary Joint Zero-Block Diagonalization of a given set of complex matrices. This problem can occur in fields of application such as the blind separation of convolutive mixtures of sources, and it generalizes the non-unitary Joint Zero-Diagonalization problem. We present a new method based on the Conjugate Gradient algorithm. Our algorithm uses a numerical optimization scheme which requires the calculation of the complex gradient matrix. The main advantages of the proposed method stem from the conjugate gradient properties: it is fast, stable and robust. Computer simulations are provided in order to illustrate the good behavior of the proposed method in different contexts. Two cases are studied: in the first scenario, a set of exactly zero-block-diagonal matrices is considered; these matrices are then progressively perturbed by additive Gaussian noise.},\n  keywords = {AWGN;conjugate gradient methods;matrix decomposition;optimisation;signal processing;matrix nonunitary joint zero-block diagonalization;conjugate gradient algorithm;numerical optimization scheme;complex gradient matrix;signal processing;additive Gaussian noise;Matrix decomposition;Signal processing algorithms;Indexes;Signal to noise ratio;Europe;Context;Joint zero-block diagonalization;matrix decompositions;conjugate gradient algorithm;linear convolutive mixtures},\n  doi = {10.1109/EUSIPCO.2015.7362710},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101187.pdf},\n}\n\n
\n
\n\n\n
\n This communication addresses a new problem: the Non-Unitary Joint Zero-Block Diagonalization of a given set of complex matrices. This problem can occur in fields of application such as the blind separation of convolutive mixtures of sources, and it generalizes the non-unitary Joint Zero-Diagonalization problem. We present a new method based on the Conjugate Gradient algorithm. Our algorithm uses a numerical optimization scheme which requires the calculation of the complex gradient matrix. The main advantages of the proposed method stem from the conjugate gradient properties: it is fast, stable and robust. Computer simulations are provided in order to illustrate the good behavior of the proposed method in different contexts. Two cases are studied: in the first scenario, a set of exactly zero-block-diagonal matrices is considered; these matrices are then progressively perturbed by additive Gaussian noise.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Coalitional games for a distributed signal enhancement application.\n \n \n \n \n\n\n \n Ampeliotis, D.; Bogdanovic, N.; and Berberidis, K.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1885-1889, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CoalitionalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362711,\n  author = {D. Ampeliotis and N. Bogdanovic and K. Berberidis},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Coalitional games for a distributed signal enhancement application},\n  year = {2015},\n  pages = {1885-1889},\n  abstract = {We consider a scenario in which a number of sensor nodes monitor an area where several sources are active. Each node has an interest in estimating the signal of a particular source using measurements that, unavoidably, are mixtures of the source signals. Nodes could improve the quality of the signal of interest if they were able to use the signals measured by other nodes; however, in such a case, communication costs must be properly taken into account. To this end, coalitional game theory is used in our study. In the case where the communication cost is zero, we prove that the cooperation of all nodes is beneficial for all. In contrast, when communication costs are taken into account, we employ a distributed merge-split coalition formation algorithm to organize the nodes into stable cooperative groups. Simulation results are in accordance with the theoretical findings.},\n  keywords = {array signal processing;game theory;sensor arrays;signal reconstruction;distributed signal enhancement application;sensor nodes;source signals;communication costs;coalitional game theory;distributed merge-split coalition formation algorithm;Games;Game theory;Manganese;Europe;Blind source separation;Mathematical model;Blind source separation;coalitional game theory;coalition formation algorithms;distributed processing;NTU games},\n  doi = {10.1109/EUSIPCO.2015.7362711},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103681.pdf},\n}\n\n
\n
\n\n\n
\n We consider a scenario in which a number of sensor nodes monitor an area where several sources are active. Each node has an interest in estimating the signal of a particular source using measurements that, unavoidably, are mixtures of the source signals. Nodes could improve the quality of the signal of interest if they were able to use the signals measured by other nodes; however, in such a case, communication costs must be properly taken into account. To this end, coalitional game theory is used in our study. In the case where the communication cost is zero, we prove that the cooperation of all nodes is beneficial for all. In contrast, when communication costs are taken into account, we employ a distributed merge-split coalition formation algorithm to organize the nodes into stable cooperative groups. Simulation results are in accordance with the theoretical findings.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A novel deterministic method for large-scale blind source separation.\n \n \n \n \n\n\n \n Boussé, M.; Debals, O.; and De Lathauwer, L.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1890-1894, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362712,\n  author = {M. Boussé and O. Debals and L. {De Lathauwer}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A novel deterministic method for large-scale blind source separation},\n  year = {2015},\n  pages = {1890-1894},\n  abstract = {A novel deterministic method for blind source separation is presented. In contrast to common methods such as independent component analysis, only mild assumptions are imposed on the sources. Instead, the method exploits a hypothesized (approximate) intrinsic low-rank structure of the mixing vectors. This is a very natural assumption for problems with many sensors. As such, the blind source separation problem can be reformulated as the computation of a tensor decomposition by applying a low-rank approximation to the tensorized mixing vectors. This allows the introduction of blind source separation in certain big data applications, where other methods fall short.},\n  keywords = {approximation theory;blind source separation;decomposition;deterministic algorithms;independent component analysis;tensors;vectors;deterministic method;large-scale blind source separation;independent component analysis;hypothesized intrinsic low-rank structure;mixing vector;sensor;low-rank approximation;tensor decomposition;Tensile stress;Approximation methods;Blind source separation;Sensors;Europe;Big data;Blind source separation;big data;higher-order tensor;tensor decomposition;low-rank approximation},\n  doi = {10.1109/EUSIPCO.2015.7362712},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570085947.pdf},\n}\n\n
\n
\n\n\n
\n A novel deterministic method for blind source separation is presented. In contrast to common methods such as independent component analysis, only mild assumptions are imposed on the sources. On the contrary, the method exploits a hypothesized (approximate) intrinsic low-rank structure of the mixing vectors. This is a very natural assumption for problems with many sensors. As such, the blind source separation problem can be reformulated as the computation of a tensor decomposition by applying a low-rank approximation to the tensorized mixing vectors. This allows the introduction of blind source separation in certain big data applications, where other methods fall short.\n
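A minimal numpy sketch of the tensorization idea under the stated low-rank assumption: a mixing vector that varies smoothly over many sensors (here an exponential) reshapes into a rank-1 matrix, which a truncated SVD recovers. The full method stacks such reshaped observations into a higher-order tensor and computes a tensor decomposition; only the reshape-plus-low-rank step is illustrated.

import numpy as np

I, J = 20, 25
m = np.exp(0.07 * np.arange(I * J))      # smooth mixing vector over I*J sensors

M_mat = m.reshape(I, J)                  # tensorization: vector -> I-by-J matrix

# Exponentials reshape to exactly rank-1 matrices; verify via truncated SVD.
U, s, Vt = np.linalg.svd(M_mat, full_matrices=False)
rank1 = s[0] * np.outer(U[:, 0], Vt[0])  # best rank-1 approximation
print(np.linalg.norm(M_mat - rank1) / np.linalg.norm(M_mat))  # ~ machine precision

Because exp(a(iJ + j)) = exp(aJi) * exp(aj), the reshaped matrix is an outer product; polynomials and sinusoids reshape to low (not unit) rank, which is why the low-rank prior is natural for arrays with many sensors.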
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Room impulse response estimation by iterative weighted L1-norm.\n \n \n \n \n\n\n \n Crocco, M.; and Del Bue, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1895-1899, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RoomPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362713,\n  author = {M. Crocco and A. {Del Bue}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Room impulse response estimation by iterative weighted L1-norm},\n  year = {2015},\n  pages = {1895-1899},\n  abstract = {This paper presents a novel method to solve the challenging problem of acoustic Room Impulse Response (RIR) estimation. The approach formulates the RIR estimation as a Blind Channel Identification (BCI) problem and exploits sparsity and non-negativity priors to reduce ill-posedness and to increase robustness of the solution to noise. This provides an iterative procedure based on a reweighted l1-norm penalty and a standard l1-norm constraint. The proposed method guarantees the convexity of the problem at each iteration, avoids drawbacks related to anchor constraints and enforces sparsity more effectively than standard l1-norm penalty approaches. Experiments show that our approach outperforms current state-of-the-art methods on speech and non-speech real signals.},\n  keywords = {iterative methods;telecommunication channels;transient response;room impulse response estimation;iterative weighted L1-norm;RIR estimation;blind channel identification;iterative procedure;standard l1-norm constraint;iteration;l1-norm penalty approaches;Cost function;Microphones;Robustness;Estimation;Minimization;Europe;Signal processing;Room Impulse Response;Blind System Identification;Sparsity;Non-negative Priors;TDOA Estimation},\n  doi = {10.1109/EUSIPCO.2015.7362713},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099235.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a novel method to solve the challenging problem of acoustic Room Impulse Response (RIR) estimation. The approach formulates the RIR estimation as a Blind Channel Identification (BCI) problem and exploits sparsity and non-negativity priors to reduce ill-posedness and to increase robustness of the solution to noise. This provides an iterative procedure based on a reweighted l1-norm penalty and a standard l1-norm constraint. The proposed method guarantees the convexity of the problem at each iteration, avoids drawbacks related to anchor constraints and enforces sparsity more effectively than standard l1-norm penalty approaches. Experiments show that our approach outperforms current state-of-the-art methods on speech and non-speech real signals.\n
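A short sketch of an iteratively reweighted l1 scheme of the kind described, assuming a generic linear model y = A h + noise for the unknown sparse response h; the non-negativity prior and the l1-norm constraint of the paper are simplified here to a weighted l1 penalty solved by ISTA (weighted soft-thresholding).

import numpy as np

def irl1(A, y, lam=0.1, outer=5, inner=200, eps=1e-3):
    """Iteratively reweighted l1: min_h ||A h - y||^2 / 2 + lam * sum_i w_i |h_i|."""
    n = A.shape[1]
    h, w = np.zeros(n), np.ones(n)
    step = 1.0 / np.linalg.norm(A, 2) ** 2          # ISTA step size 1/L
    for _ in range(outer):
        for _ in range(inner):                      # weighted ISTA inner loop
            z = h - step * (A.T @ (A @ h - y))      # gradient step on data term
            h = np.sign(z) * np.maximum(np.abs(z) - step * lam * w, 0.0)
        w = 1.0 / (np.abs(h) + eps)                 # reweight: small taps penalised more
    return h

rng = np.random.default_rng(0)
A = rng.standard_normal((200, 400))
h_true = np.zeros(400); h_true[[10, 50, 300]] = [1.0, -0.7, 0.4]
y = A @ h_true + 0.01 * rng.standard_normal(200)
print(np.flatnonzero(np.abs(irl1(A, y)) > 0.1))     # expect indices 10, 50, 300

The reweighting step is what makes the penalty behave closer to an l0 count than a plain l1 norm, which is the more effective sparsity enforcement the abstract refers to.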
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Matrix factorization for bilinear blind source separation: Methods, separability and conditioning.\n \n \n \n \n\n\n \n Deville, Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1900-1904, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MatrixPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362714,\n  author = {Y. Deville},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Matrix factorization for bilinear blind source separation: Methods, separability and conditioning},\n  year = {2015},\n  pages = {1900-1904},\n  abstract = {This paper deals with a general class of blind source separation methods for bilinear mixtures, using a structure based on matrix factorization, which models the direct, i.e. mixing, function, thus not requiring the analytical form of the inverse model. This approach also initially does not set restrictions on e.g. statistical independence, nonnegativity or sparsity, but on linear independence of sources and some source products. The separation principle used for adapting the parameters of the above structure consists in fitting the observations with the above direct model. We prove (for two sources at this stage) that this principle ensures separability, i.e. unique decomposition. Associated criteria and algorithms are also described. Performance is illustrated with preprocessed hyperspectral remote sensing data. This also allows us to highlight potential conditioning issues of some practical bilinear matrix factorization (BMF) methods and to suggest how to extend them.},\n  keywords = {blind source separation;matrix decomposition;bilinear blind source separation;signal separability;signal conditioning;bilinear mixture signal;parameter adaptation;bilinear matrix factorization methods;Adaptation models;Signal processing algorithms;Matrix decomposition;Cost function;Europe;Blind source separation;bilinear matrix factorization;direct modeling;exact fit;separability (uniqueness of decomposition)},\n  doi = {10.1109/EUSIPCO.2015.7362714},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096771.pdf},\n}\n\n
\n
\n\n\n
\n This paper deals with a general class of blind source separation methods for bilinear mixtures, using a structure based on matrix factorization, which models the direct, i.e. mixing, function, thus not requiring the analytical form of the inverse model. This approach also initially does not set restrictions on e.g. statistical independence, nonnegativity or sparsity, but on linear independence of sources and some source products. The separation principle used for adapting the parameters of the above structure consists in fitting the observations with the above direct model. We prove (for two sources at this stage) that this principle ensures separability, i.e. unique decomposition. Associated criteria and algorithms are also described. Performance is illustrated with preprocessed hyperspectral remote sensing data. This also allows us to highlight potential conditioning issues of some practical bilinear matrix factorization (BMF) methods and to suggest how to extend them.\n
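To make the exact-fit separation principle concrete, a small numpy sketch for two sources: the direct bilinear model is taken as x_j(t) = sum_i a_ji s_i(t) + b_j s_1(t) s_2(t), so for candidate sources the mixing coefficients follow by least squares and the residual serves as the fitting criterion that a separation algorithm would minimise over the source estimates. Model form and dimensions are illustrative assumptions.

import numpy as np

def bilinear_fit_residual(X, S):
    """X: observations (n_obs, T); S: candidate sources (2, T).
    Fit the direct model on regressors [s1, s2, s1*s2] and return the residual."""
    s1, s2 = S
    D = np.vstack([s1, s2, s1 * s2]).T              # direct-model regressors
    coef, *_ = np.linalg.lstsq(D, X.T, rcond=None)  # mixing coefficients
    return np.linalg.norm(X.T - D @ coef)

rng = np.random.default_rng(1)
S_true = rng.standard_normal((2, 500))
mix = rng.standard_normal((4, 3))                   # linear + bilinear coefficients
X = mix @ np.vstack([S_true, S_true[0] * S_true[1]])
print(bilinear_fit_residual(X, S_true))                         # ~ 0 for the true sources
print(bilinear_fit_residual(X, rng.standard_normal((2, 500))))  # clearly larger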
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Topic detection and compressed classification in Twitter.\n \n \n \n \n\n\n \n Milioris, D.; and Jacquet, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1905-1909, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"TopicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362715,\n  author = {D. Milioris and P. Jacquet},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Topic detection and compressed classification in Twitter},\n  year = {2015},\n  pages = {1905-1909},\n  abstract = {In this paper we introduce a novel information propagation method in Twitter, while maintaining a low computational complexity. It exploits the power of Compressive Sensing in conjunction with a Kalman filter to update the states of a dynamical system. The proposed method first employs Joint Complexity, which is defined as the cardinality of a set of all distinct factors of a given string represented by suffix trees, to perform topic detection. Then based on the inherent spatial sparsity of the data, we apply the theory of Compressive Sensing to perform sparsity-based topic classification by recovering an indicator vector, while reducing significantly the amount of information from tweets, possessing limited power, storage, and processing capabilities, to a central server. We exploit datasets in various languages collected by using the Twitter streaming API and achieve better classification accuracy when compared with state-of-the-art methods.},\n  keywords = {compressed sensing;computational complexity;Kalman filters;social networking (online);topic detection;compressed classification;Twitter;information propagation;computational complexity;compressive sensing;Kalman filter;joint complexity;sparsity-based topic classification;API;Complexity theory;Twitter;Kalman filters;Compressed sensing;Servers;Markov processes;Transforms;Big Data;Text Classification;Joint Complexity;Combinatorics;Compressive Sensing;Kalman Filter},\n  doi = {10.1109/EUSIPCO.2015.7362715},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101209.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we introduce a novel information propagation method in Twitter, while maintaining a low computational complexity. It exploits the power of Compressive Sensing in conjunction with a Kalman filter to update the states of a dynamical system. The proposed method first employs Joint Complexity, which is defined as the cardinality of a set of all distinct factors of a given string represented by suffix trees, to perform topic detection. Then based on the inherent spatial sparsity of the data, we apply the theory of Compressive Sensing to perform sparsity-based topic classification by recovering an indicator vector, while reducing significantly the amount of information from tweets, possessing limited power, storage, and processing capabilities, to a central server. We exploit datasets in various languages collected by using the Twitter streaming API and achieve better classification accuracy when compared with state-of-the-art methods.\n
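A naive sketch of the Joint Complexity idea as defined above: the number of factors (distinct substrings) two texts share. Suffix trees make this near linear-time; the quadratic set construction below is for illustration only.

def factors(s):
    """All distinct non-empty substrings (factors) of s; O(n^2) sets stand in
    for the suffix tree used in practice."""
    return {s[i:j] for i in range(len(s)) for j in range(i + 1, len(s) + 1)}

def joint_complexity(s, t):
    """Cardinality of the set of common factors: tweets on the same topic
    share many factors, unrelated tweets share few."""
    return len(factors(s) & factors(t))

print(joint_complexity("storm warning paris", "storm alert paris"))    # relatively high
print(joint_complexity("storm warning paris", "football final goal"))  # low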
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Emergence of core-periphery structure from local node dominance in social networks.\n \n \n \n \n\n\n \n Gamble, J.; Chintakunta, H.; and Krim, H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1910-1914, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"EmergencePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362716,\n  author = {J. Gamble and H. Chintakunta and H. Krim},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Emergence of core-periphery structure from local node dominance in social networks},\n  year = {2015},\n  pages = {1910-1914},\n  abstract = {There has been growing evidence recently for the view that social networks can be divided into a well connected core, and a sparse periphery. This paper describes how such a global description can be obtained from local {"}dominance{"} relationships between vertices, to naturally yield a distributed algorithm for such a decomposition. It is shown that the resulting core describes the global structure of the network, while also preserving shortest paths, and displaying {"}expander-like{"} properties. Moreover, the periphery obtained from this decomposition consists of a large number of connected components, which can be used to identify communities in the network. These are used for a `divide-and-conquer' strategy for community detection, where the peripheral components are obtained as a pre-processing step to identify the small sets most likely to contain communities. The method is illustrated using a real world network (DBLP co-authorship network), with ground-truth communities.},\n  keywords = {network theory (graphs);social sciences;core periphery structure;local node dominance;social networks;well connected core;sparse periphery;global description;local dominance relationship;distributed algorithm;shortest path;expander-like properties;DBLP coauthorship network;ground truth communities;Europe;Signal processing;Social network services;Network topology;Topology;Signal processing algorithms;Marine vehicles;Social networks;core-periphery structure;community detection;homology;local-to-global},\n  doi = {10.1109/EUSIPCO.2015.7362716},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570098779.pdf},\n}\n\n
\n
\n\n\n
\n There has been growing evidence recently for the view that social networks can be divided into a well connected core, and a sparse periphery. This paper describes how such a global description can be obtained from local \"dominance\" relationships between vertices, to naturally yield a distributed algorithm for such a decomposition. It is shown that the resulting core describes the global structure of the network, while also preserving shortest paths, and displaying \"expander-like\" properties. Moreover, the periphery obtained from this decomposition consists of a large number of connected components, which can be used to identify communities in the network. These are used for a `divide-and-conquer' strategy for community detection, where the peripheral components are obtained as a pre-processing step to identify the small sets most likely to contain communities. The method is illustrated using a real world network (DBLP co-authorship network), with ground-truth communities.\n
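A plain-Python sketch of one common formalisation of local dominance, assuming vertex u is dominated by a neighbour v when N(u) \ {v} is contained in the closed neighbourhood of v; the paper's exact relation may differ. Iteratively stripping dominated vertices leaves a core, and the removed vertices form the periphery.

def core_periphery(adj):
    """adj: dict node -> set of neighbours (undirected graph)."""
    core = {u: set(vs) for u, vs in adj.items()}
    periphery = []
    changed = True
    while changed:
        changed = False
        for u in list(core):
            for v in core[u]:
                if core[u] - {v} <= core[v] | {v}:   # u locally dominated by v
                    periphery.append(u)
                    for w in core.pop(u):
                        core[w].discard(u)
                    changed = True
                    break
            if changed:
                break
    return set(core), periphery

adj = {1: {2, 4, 5}, 2: {1, 3}, 3: {2, 4}, 4: {1, 3}, 5: {1}}  # 4-cycle plus pendant
print(core_periphery(adj))   # core {1, 2, 3, 4}, periphery [5]

In the divide-and-conquer use above, each connected component of the periphery is then examined separately as a community candidate.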
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Constraint Kalman filter for indoor bluetooth localization.\n \n \n \n \n\n\n \n Chen, L.; Kuusniemi, H.; Chen, Y.; Liu, J.; Pei, L.; Ruotsalainen, L.; and Chen, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1915-1919, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ConstraintPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362717,\n  author = {L. Chen and H. Kuusniemi and Y. Chen and J. Liu and L. Pei and L. Ruotsalainen and R. Chen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Constraint Kalman filter for indoor bluetooth localization},\n  year = {2015},\n  pages = {1915-1919},\n  abstract = {This paper studies sequential estimation of indoor localization based on fingerprints of received signal strength indicators (RSSI). Due to the lack of an analytic formula for the fingerprinting measurements, the Kalman filter can not be directly applied. By introducing a hidden variable to represent the unknown positioning coordinate, a state model is formulated and a constrained Kalman filter (CKF) is then derived within the Bayesian framework. The update of the state incorporates the prior information of the motion model and the statistical property of the hidden variable estimated from the RSSI measurements. The positioning accuracy of the proposed CKF method is evaluated in indoor field tests by a self-developed Bluetooth fingerprint positioning system. The conducted field tests demonstrate the effectiveness of the method in providing an accurate indoor positioning solution.},\n  keywords = {Bayes methods;Bluetooth;indoor communication;Kalman filters;RSSI;sequential estimation;indoor localization;received signal strength indicators;RSSI;state model;constrained Kalman filter;CKF;Bayesian framework;motion model;statistical property;positioning accuracy;indoor field tests;Bluetooth fingerprint positioning system;indoor positioning solution;Kalman filters;Position measurement;Bluetooth;Estimation;Bayes methods;Phase measurement;Kalman filter;fingerprinting;receiver signal strength indicator (RSSI);Bayesian estimation},\n  doi = {10.1109/EUSIPCO.2015.7362717},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104665.pdf},\n}\n\n
\n
\n\n\n
\n This paper studies sequential estimation of indoor localization based on fingerprints of received signal strength indicators (RSSI). Due to the lack of an analytic formula for the fingerprinting measurements, the Kalman filter can not be directly applied. By introducing a hidden variable to represent the unknown positioning coordinate, a state model is formulated and a constrained Kalman filter (CKF) is then derived within the Bayesian framework. The update of the state incorporates the prior information of the motion model and the statistical property of the hidden variable estimated from the RSSI measurements. The positioning accuracy of the proposed CKF method is evaluated in indoor field tests by a self-developed Bluetooth fingerprint positioning system. The conducted field tests demonstrate the effectiveness of the method in providing an accurate indoor positioning solution.\n
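A compact numpy sketch of the fusion step, assuming the fingerprinting stage has already been reduced to a noisy position pseudo-measurement z (a hypothetical fingerprint-matching routine, e.g. weighted k-nearest fingerprints); the Kalman filter then combines z with a constant-velocity motion model. The paper's hidden-variable formulation is condensed to this two-stage form.

import numpy as np

dt = 1.0
F = np.array([[1, 0, dt, 0],                # constant-velocity motion model
              [0, 1, 0, dt],
              [0, 0, 1,  0],
              [0, 0, 0,  1]], float)
H = np.array([[1, 0, 0, 0],                 # only position is (pseudo-)observed
              [0, 1, 0, 0]], float)
Q = 0.05 * np.eye(4)                        # process noise
R = 4.0 * np.eye(2)                         # fingerprint position uncertainty

def kf_step(x, P, z):
    """One predict/update cycle; z is the position inferred from the RSSI fingerprints."""
    x, P = F @ x, F @ P @ F.T + Q           # predict
    S = H @ P @ H.T + R                     # innovation covariance
    K = P @ H.T @ np.linalg.inv(S)          # Kalman gain
    x = x + K @ (z - H @ x)                 # update with the pseudo-measurement
    P = (np.eye(4) - K @ H) @ P
    return x, P

Here R plays the role of the statistical property of the hidden variable mentioned above: the less ambiguous the fingerprint match, the smaller R and the more the filter trusts it.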
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Inference of wired network topology using multipoint reflectometry.\n \n \n \n \n\n\n \n Ulrich, M.; and Yang, B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1920-1924, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"InferencePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362718,\n  author = {M. Ulrich and B. Yang},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Inference of wired network topology using multipoint reflectometry},\n  year = {2015},\n  pages = {1920-1924},\n  abstract = {We present in this paper a novel algorithm CoMaTeCh for the inference of wired network topology using reflection measurements at multiple cable ends. This is useful for applications where the topology of an existing wired network (e.g. communication networks, powerline networks) is unknown and needs to be reconstructed in a non-intrusive way. Starting with the range and amplitude measurements of reflections caused by impedance discontinuities of the network, our algorithm estimates both the topology and the cable lengths. Using multiple reflection measurements, many ambiguities can be resolved, leading to a unique solution and a low computational effort. It is superior to existing approaches and is tested with both simulated and real data.},\n  keywords = {carrier transmission on power lines;reflectometry;telecommunication network topology;wired network topology inference;multipoint reflectometry;CoMaTeCh;reflection measurements;Network topology;Topology;Transmission line measurements;Joining processes;Current measurement;Impedance;Signal processing algorithms;Network topology inference;reflectometry;CoMaTeCh;communication networks;smart grid},\n  doi = {10.1109/EUSIPCO.2015.7362718},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099185.pdf},\n}\n\n
\n
\n\n\n
\n We present in this paper a novel algorithm CoMaTeCh for the inference of wired network topology using reflection measurements at multiple cable ends. This is useful for applications where the topology of an existing wired network (e.g. communication networks, powerline networks) is unknown and needs to be reconstructed in a non-intrusive way. Starting with the range and amplitude measurements of reflections caused by impedance discontinuities of the network, our algorithm estimates both the topology and the cable lengths. Using multiple reflection measurements, many ambiguities can be resolved, leading to a unique solution and a low computational effort. It is superior to existing approaches and is tested with both simulated and real data.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Alternate multibit embedding method for reversible watermarking.\n \n \n \n \n\n\n \n Nedelcu, T.; Iordache, R.; and Coltuc, D.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1925-1929, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AlternatePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362719,\n  author = {T. Nedelcu and R. Iordache and D. Coltuc},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Alternate multibit embedding method for reversible watermarking},\n  year = {2015},\n  pages = {1925-1929},\n  abstract = {This paper proposes a new embedding scheme for multibit difference expansion reversible watermarking. The prediction error expansion (PEE) schemes expand the difference n times in order to embed up to log2 n bpp. For natural images, this capacity cannot be achieved because overflow or underflow is generated by the embedding process. The proposed method aims to increase the capacity of the embedded information by using a different embedding procedure when the classical one fails. Although the proposed embedding method introduces larger distortion than the classical procedure, the experimental results show that the proposed scheme provides an increase in embedding capacity and outperforms the classical method in image quality with respect to capacity. Experimental results using the classical and proposed multibit difference expansion based on the MED predictor are provided.},\n  keywords = {image watermarking;prediction theory;alternate multibit embedding method;multibit difference expansion reversible watermarking;prediction error expansion scheme;PEE scheme;MED predictor;Watermarking;Mathematical model;Distortion;Decoding;Europe;Image edge detection;Signal processing algorithms;reversible watermarking;prediction-error expansion;multibit embedding},\n  doi = {10.1109/EUSIPCO.2015.7362719},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105159.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a new embedding scheme for multibit difference expansion reversible watermarking. The prediction error expansion (PEE) schemes expand the difference n times in order to embed up to log2 n bpp. For natural images, this capacity cannot be achieved because overflow or underflow is generated by the embedding process. The proposed method aims to increase the capacity of the embedded information by using a different embedding procedure when the classical one fails. Although the proposed embedding method introduces larger distortion than the classical procedure, the experimental results show that the proposed scheme provides an increase in embedding capacity and outperforms the classical method in image quality with respect to capacity. Experimental results using the classical and proposed multibit difference expansion based on the MED predictor are provided.\n
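For context, a sketch of classical single-bit difference expansion on a pixel pair (the n = 2 case); the paper generalises the expansion factor and switches to an alternate embedding where expansion would overflow, which is reduced here to a simple range check.

def de_embed(x, y, bit):
    """Embed one bit into pixel pair (x, y) by difference expansion.
    Returns None when the expanded pair leaves [0, 255] (overflow)."""
    l, h = (x + y) // 2, x - y        # integer average and difference
    h2 = 2 * h + bit                  # expansion: h -> 2h + b
    x2, y2 = l + (h2 + 1) // 2, l - h2 // 2
    if not (0 <= x2 <= 255 and 0 <= y2 <= 255):
        return None                   # the paper's alternate procedure handles this case
    return x2, y2

def de_extract(x2, y2):
    """Recover the bit and the original pair exactly (reversibility)."""
    l, h2 = (x2 + y2) // 2, x2 - y2
    bit, h = h2 & 1, h2 >> 1
    return bit, (l + (h + 1) // 2, l - h // 2)

print(de_embed(100, 98, 1))    # (102, 97)
print(de_extract(102, 97))     # (1, (100, 98))

The integer average l survives embedding, which is what makes exact inversion possible; the pairs that would overflow are exactly those the alternate procedure above targets.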
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generalization of GLRT-based magnetic anomaly detection.\n \n \n \n \n\n\n \n Pepe, P.; Zozor, S.; Rouve, L. -.; Coulomb, J. -.; Serviere, C.; and Muley, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1930-1934, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"GeneralizationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362720,\n  author = {P. Pepe and S. Zozor and L. -. Rouve and J. -. Coulomb and C. Serviere and J. Muley},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Generalization of GLRT-based magnetic anomaly detection},\n  year = {2015},\n  pages = {1930-1934},\n  abstract = {Magnetic anomaly detection (MAD) refers to a passive method used to reveal hidden magnetic masses and is most commonly based on a dipolar target model. This paper proposes a generalization of the MAD through a multipolar model that provides a more precise description of the anomaly and serves a twofold objective: to improve the detection performance, and to widen the variety of detectable targets. The dipole detection strategy - namely an orthonormal decomposition of the anomaly followed by a generalized likelihood ratio test - is hence revisited in the multipolar case. The performance is assessed analytically and the relevance of this generalization is demonstrated on multipolar scenarios.},\n  keywords = {signal detection;statistical analysis;GLRT-based magnetic anomaly detection;MAD;passive method;dipolar target model;orthonormal decomposition;generalized likelihood ratio test;Magnetometers;Detectors;Receivers;Mathematical model;Earth;Europe;Signal processing;Magnetic anomaly detection;spherical harmonic expansion;orthonormal basis functions},\n  doi = {10.1109/EUSIPCO.2015.7362720},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104643.pdf},\n}\n\n
\n
\n\n\n
\n Magnetic anomaly detection (MAD) refers to a passive method used to reveal hidden magnetic masses and is most commonly based on a dipolar target model. This paper proposes a generalization of the MAD through a multipolar model that provides a more precise description of the anomaly and serves a twofold objective: to improve the detection performance, and to widen the variety of detectable targets. The dipole detection strategy - namely an orthonormal decomposition of the anomaly followed by a generalized likelihood ratio test - is hence revisited in the multipolar case. The performance is assessed analytically and the relevance of this generalization is demonstrated on multipolar scenarios.\n
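A sketch of the detection step under the usual assumptions: the anomaly lies in the span of an orthonormal basis B (dipolar or multipolar) and the noise is white Gaussian with known variance, in which case the GLRT statistic is the energy of the projection onto that subspace and is chi-square distributed under H0. The multipolar (spherical-harmonic) basis construction itself is not reproduced.

import numpy as np
from scipy import stats

def glrt_detect(x, B, sigma2, pfa=1e-3):
    """x: measurement window (n,); B: orthonormal basis (n, p); sigma2: noise power."""
    t = np.sum((B.T @ x) ** 2) / sigma2             # GLRT statistic: projected energy
    thr = stats.chi2.ppf(1 - pfa, df=B.shape[1])    # under H0: t ~ chi2(p)
    return t > thr, t, thr

Enlarging the basis from dipolar to multipolar raises the degrees of freedom (and hence the threshold) but captures more of the anomaly energy, which is the trade-off behind the improved detection of non-dipolar targets.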
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An improved variational mode decomposition method for internal waves separation.\n \n \n \n \n\n\n \n Schmitt, J.; Horne, E.; Pustelnik, N.; Joubaud, S.; and Odier, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1935-1939, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362721,\n  author = {J. Schmitt and E. Horne and N. Pustelnik and S. Joubaud and P. Odier},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An improved variational mode decomposition method for internal waves separation},\n  year = {2015},\n  pages = {1935-1939},\n  abstract = {This paper proposes to revisit the 2-D Variational Mode Decomposition (2-D-VMD) in order to separate the incident and reflected waves in experimental images of internal waves velocity field. 2-D-VMD aims at splitting an image into a sequence of oscillating components which are centered around specific spatial frequencies. In this work we develop a proximal algorithm with local convergence guarantees, allowing more flexibility in order to deal with modes having different spectral properties and to add some optional constraints modeling prior information. Our method is compared with the standard 2-D-VMD and with a Hilbert based strategy usually employed for processing internal waves images.},\n  keywords = {convergence;geophysical image processing;Hilbert transforms;ocean waves;oceanographic techniques;variational techniques;oceanic internal wave reflection;internal waves images;Hilbert based strategy;2-D-VMD;2-D variational mode decomposition;internal waves separation;Signal processing algorithms;Convergence;Europe;Signal processing;Minimization;Standards;Fourier transforms;Optimisation;mode decomposition;internal waves},\n  doi = {10.1109/EUSIPCO.2015.7362721},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104703.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes to revisit the 2-D Variational Mode Decomposition (2-D-VMD) in order to separate the incident and reflected waves in experimental images of internal waves velocity field. 2-D-VMD aims at splitting an image into a sequence of oscillating components which are centered around specific spatial frequencies. In this work we develop a proximal algorithm with local convergence guarantees, allowing more flexibility in order to deal with modes having different spectral properties and to add some optional constraints modeling prior information. Our method is compared with the standard 2-D-VMD and with a Hilbert based strategy usually employed for processing internal waves images.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Classification of rainfall radar images using the scattering transform.\n \n \n \n \n\n\n \n Garcia, G. B.; Lagrange, M.; Emmanuel, I.; and Andrieu, H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1940-1944, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ClassificationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362722,\n  author = {G. B. Garcia and M. Lagrange and I. Emmanuel and H. Andrieu},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Classification of rainfall radar images using the scattering transform},\n  year = {2015},\n  pages = {1940-1944},\n  abstract = {The main objective of this paper is to classify rainfall radar images by using the scattering transform, which gives us a translation invariant representation of the images and preserves high-frequency information useful to encode important morphological aspects of the meteorological phenomena under study. To demonstrate the usefulness of the approach, a classification framework is considered, where the images are to be classified into 4 morphological classes: light rain, shower, unorganised storm and organised storm. Experiments show that the benefits of the scattering transform are threefold: 1) it provides complementary information with respect to more traditional features computed over the distribution of the rainfall intensities, 2) it provides strong invariance to deformations, 3) second-order coefficients of the scattering transform nicely encode the spatial distribution of rain intensity.},\n  keywords = {geophysical image processing;image classification;radar imaging;scattering;rainfall radar image classification;scattering transform;light rain;shower;unorganised storm;organised storm;Scattering;Rain;Storms;Radar imaging;Wavelet transforms;Classification;scattering;image radar processing},\n  doi = {10.1109/EUSIPCO.2015.7362722},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095847.pdf},\n}\n\n
\n
\n\n\n
\n The main objective of this paper is to classify rainfall radar images by using the scattering transform, which gives us a translation invariant representation of the images and preserves high-frequency information useful to encode important morphological aspects of the meteorological phenomena under study. To demonstrate the usefulness of the approach, a classification framework is considered, where the images are to be classified into 4 morphological classes: light rain, shower, unorganised storm and organised storm. Experiments show that the benefits of the scattering transform are threefold: 1) it provides complementary information with respect to more traditional features computed over the distribution of the rainfall intensities, 2) it provides strong invariance to deformations, 3) second-order coefficients of the scattering transform nicely encode the spatial distribution of rain intensity.\n
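A simplified numpy/scipy sketch of first-order scattering-type features, assuming a hand-built Gabor filter bank: the modulus of each oriented band-pass response discards phase, and the final smoothing provides the translation invariance mentioned above. The actual scattering transform cascades this operation to obtain the second-order coefficients; that stage is omitted here.

import numpy as np
from scipy.signal import fftconvolve
from scipy.ndimage import gaussian_filter

def gabor(size, freq, theta, sigma):
    # Complex Gabor atom: oriented plane wave under a Gaussian envelope.
    r = size // 2
    y, x = np.mgrid[-r:r + 1, -r:r + 1]
    u = x * np.cos(theta) + y * np.sin(theta)
    return np.exp(-(x**2 + y**2) / (2 * sigma**2)) * np.exp(2j * np.pi * freq * u)

def scattering_order1(img, n_thetas=4, freq=0.25, sigma=3.0, smooth=6.0):
    """(n_thetas, H, W) maps: local averages of oriented modulus responses."""
    feats = []
    for k in range(n_thetas):
        g = gabor(25, freq, k * np.pi / n_thetas, sigma)
        resp = np.abs(fftconvolve(img, g, mode="same"))   # modulus: drop phase
        feats.append(gaussian_filter(resp, smooth))       # smoothing: invariance
    return np.stack(feats)

Averaged over the image, such maps give per-orientation descriptors of rain cells that can feed any standard classifier for the four morphological classes.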
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Association of array processing and statistical modelling for seismic event monitoring.\n \n \n \n \n\n\n \n Quang, P. B.; Gaillard, P.; and Cano, Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1945-1949, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AssociationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362723,\n  author = {P. B. Quang and P. Gaillard and Y. Cano},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Association of array processing and statistical modelling for seismic event monitoring},\n  year = {2015},\n  pages = {1945-1949},\n  abstract = {We associate an array processing method, called progressive multi-channel correlation (PMCC), and statistical modelling, to detect and classify seismic events. PMCC detects any coherent wavefront crossing an array of seismometers, including the wavefronts not generated by actual seismic events. We use machine learning techniques to classify the PMCC detections between {"}events{"} and {"}noise{"}. These techniques are based on the statistical modelling of features extracted from the seismic signal. The features we select combine features computed directly from the raw signal and features retrieved by the PMCC detector. We apply our method on a real data set from the Songino seismic station, in Mongolia. We compare the performance of four classifiers: Gaussian naive Bayes classifier, logistic regression, Gaussian mixture models (GMM), and hidden Markov models (HMM). In our case study, the GMM and the HMM yield the highest performance.},\n  keywords = {array signal processing;feature extraction;Gaussian processes;hidden Markov models;learning automata;mixture models;GMM;HMM;hidden Markov models;Gaussian mixture models;logistic regression;Gaussian naive Bayes classifier;seismic signal;feature extraction;machine learning techniques;progressive multichannel correlation;seismic event monitoring;statistical modelling;array processing;Feature extraction;Hidden Markov models;Sensors;Arrays;Delay effects;Signal processing algorithms;Training data;progressive multi-channel correlation;classification;Gaussian mixture models;hidden Markov},\n  doi = {10.1109/EUSIPCO.2015.7362723},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097847.pdf},\n}\n\n
\n
\n\n\n
\n We associate an array processing method, called progressive multi-channel correlation (PMCC), and statistical modelling, to detect and classify seismic events. PMCC detects any coherent wavefront crossing an array of seismometers, including the wavefronts not generated by actual seismic events. We use machine learning techniques to classify the PMCC detections between \"events\" and \"noise\". These techniques are based on the statistical modelling of features extracted from the seismic signal. The features we select combine features computed directly from the raw signal and features retrieved by the PMCC detector. We apply our method on a real data set from the Songino seismic station, in Mongolia. We compare the performance of four classifiers: Gaussian naive Bayes classifier, logistic regression, Gaussian mixture models (GMM), and hidden Markov models (HMM). In our case study, the GMM and the HMM yield the highest performance.\n
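A sklearn sketch of the classifier comparison, assuming the PMCC-derived features have already been extracted into a matrix X with labels y in {event, noise}; the HMM variant (available in the separate hmmlearn package) is omitted. The GMM classifier is the generative one-mixture-per-class construction.

import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.mixture import GaussianMixture

class GMMClassifier:
    """One Gaussian mixture per class; predict by maximum log-likelihood
    plus log class prior."""
    def __init__(self, n_components=3):
        self.n = n_components
    def fit(self, X, y):
        self.classes_ = np.unique(y)
        self.models_ = {c: GaussianMixture(self.n, random_state=0).fit(X[y == c])
                        for c in self.classes_}
        self.logprior_ = {c: np.log(np.mean(y == c)) for c in self.classes_}
        return self
    def predict(self, X):
        scores = np.column_stack([self.models_[c].score_samples(X) + self.logprior_[c]
                                  for c in self.classes_])
        return self.classes_[scores.argmax(axis=1)]

# hypothetical usage with feature matrices X_train, X_test and labels y_train:
# for clf in (GaussianNB(), LogisticRegression(max_iter=1000), GMMClassifier()):
#     clf.fit(X_train, y_train); print(type(clf).__name__, clf.predict(X_test))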
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Estimation of the battery state of charge: A Switching Markov state-space model.\n \n \n \n \n\n\n \n Kalawoun, J.; Pamphile, P.; Celeux, G.; Biletska, K.; and Montaru, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1950-1954, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"EstimationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362724,\n  author = {J. Kalawoun and P. Pamphile and G. Celeux and K. Biletska and M. Montaru},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Estimation of the battery state of charge: A Switching Markov state-space model},\n  year = {2015},\n  pages = {1950-1954},\n  abstract = {An efficient estimation of the State of Charge (SoC) of a battery is a challenging issue in the electric vehicle domain. The battery behavior depends on its chemistry and uncontrolled usage conditions, making it very difficult to estimate the SoC. This paper introduces a new model for SoC estimation given instantaneous measurements of current and voltage using a Switching Markov State-Space Model. The unknown parameters of the model are batch learned using a Monte Carlo approximation of the EM algorithm. Validation of the proposed approach on real electric vehicle data is encouraging and shows the ability of this new model to accurately estimate the SoC for different usage conditions.},\n  keywords = {battery charge measurement;battery powered vehicles;estimation theory;Markov processes;Monte Carlo methods;state-space methods;battery state of charge estimation;battery SoC estimation;switching Markov state-space model;electric vehicle;Monte Carlo approximation;EM algorithm;Decision support systems;Europe;Signal processing;Conferences;State of Charge;Kalman Filter;Switching Markov State-Space Model;EM algorithm;Particle Filter},\n  doi = {10.1109/EUSIPCO.2015.7362724},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103453.pdf},\n}\n\n
\n
\n\n\n
\n An efficient estimation of the State of Charge (SoC) of a battery is a challenging issue in the electric vehicle domain. The battery behavior depends on its chemistry and uncontrolled usage conditions, making it very difficult to estimate the SoC. This paper introduces a new model for SoC estimation given instantaneous measurements of current and voltage using a Switching Markov State-Space Model. The unknown parameters of the model are batch learned using a Monte Carlo approximation of the EM algorithm. Validation of the proposed approach on real electric vehicle data is encouraging and shows the ability of this new model to accurately estimate the SoC for different usage conditions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Cooperative game-theoretic approach to load balancing in smart grids with community energy storage.\n \n \n \n \n\n\n \n Rajasekharan, J.; and Koivunen, V.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1955-1959, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CooperativePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362725,\n  author = {J. Rajasekharan and V. Koivunen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Cooperative game-theoretic approach to load balancing in smart grids with community energy storage},\n  year = {2015},\n  pages = {1955-1959},\n  abstract = {In this paper, we propose a model for households to share energy from community energy storage (CES) such that both the households and the utility company benefit from CES. In addition to providing a range of ancillary grid services, CES can be used for demand side management, to shave peaks and fill valleys in system load. We introduce a method stemming from consumer theory and cooperative game theory that uses CES to balance the load of an entire locality and manage household energy allocations respectively. Load balancing is derived as a geometric programming problem. Each household's contribution to the overall non-uniformity of the load profile is modeled using a characteristic function and Shapley values are used to allocate the amount and price of surplus energy stored in CES. The proposed method is able to perfectly balance the load while also making sure that each household is guaranteed a reduction in energy costs.},\n  keywords = {cost reduction;demand side management;domestic appliances;energy storage;game theory;geometric programming;smart power grids;energy cost reduction;Shapley value;characteristic function;geometric programming problem;household energy allocations management;demand side management;ancillary grid service;CES;community energy storage;smart grid;load balancing;cooperative game-theoretic approach;Erbium;Europe;Signal processing;Companies;Yttrium;Renewable energy sources;Production;Smart grids;demand side management;community energy storage;load balancing;cooperative game},\n  doi = {10.1109/EUSIPCO.2015.7362725},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102111.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a model for households to share energy from community energy storage (CES) such that both the households and the utility company benefit from CES. In addition to providing a range of ancillary grid services, CES can be used for demand side management, to shave peaks and fill valleys in system load. We introduce a method stemming from consumer theory and cooperative game theory that uses CES to balance the load of an entire locality and manage household energy allocations respectively. Load balancing is derived as a geometric programming problem. Each household's contribution to the overall non-uniformity of the load profile is modeled using a characteristic function and Shapley values are used to allocate the amount and price of surplus energy stored in CES. The proposed method is able to perfectly balance the load while also making sure that each household is guaranteed a reduction in energy costs.\n
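A small sketch of the Shapley allocation step, assuming a hypothetical characteristic function value() giving the saving a set of households obtains from the shared storage; the exact enumeration below is exponential and suits small communities only (sampling-based approximations scale further).

from itertools import permutations

def shapley(players, value):
    """Exact Shapley values: average marginal contribution over all orderings."""
    players = list(players)
    phi = {p: 0.0 for p in players}
    orders = list(permutations(players))
    for order in orders:
        coalition = frozenset()
        for p in order:
            phi[p] += value(coalition | {p}) - value(coalition)   # marginal gain
            coalition |= {p}
    return {p: phi[p] / len(orders) for p in players}

# toy value: each household saves 10, household A contributes 5 extra
v = lambda s: 0.0 if not s else 10.0 * len(s) + (5.0 if "A" in s else 0.0)
print(shapley(["A", "B", "C"], v))   # {'A': 15.0, 'B': 10.0, 'C': 10.0}

Shapley values are efficient (they sum to the value of the grand coalition), which is what lets every household be allotted its full share of the joint saving.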
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Modeling and estimation of transient current signals.\n \n \n \n \n\n\n \n Meziane, M. N.; Ravier, P.; Lamarque, G.; Abed-Meraim, K.; Le Bunetel, J.; and Raingeaud, Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1960-1964, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ModelingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362726,\n  author = {M. N. Meziane and P. Ravier and G. Lamarque and K. Abed-Meraim and J. {Le Bunetel} and Y. Raingeaud},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Modeling and estimation of transient current signals},\n  year = {2015},\n  pages = {1960-1964},\n  abstract = {In this paper, we propose a nonstationary model for transient electrical current signals based on the physical behavior of electrical appliances during their turn-on. This model takes into account the nonstationarity of those transient signals and the special form of their envelope. We also propose an algorithm for the estimation of this model's parameters and we evaluate its performance on synthetic and real signals. The measured transient current signals actually reflect the physical phenomena appearing in the electrical appliances when turning on, and therefore, the model estimates of these transient current signals are useful for characterizing electrical appliances and can be helpful for distinguishing appliances in addition to the use of their steady-state power consumption.},\n  keywords = {domestic appliances;electric current measurement;electrical products;estimation theory;power consumption;signal processing;transients;transient electrical current signals;electrical appliances;power consumption;Transient analysis;Home appliances;Estimation;Surges;Databases;Signal to noise ratio;Current measurement;Electrical current modeling;Turn-on transient;Nonstationary signals;Parameter estimation;NILM},\n  doi = {10.1109/EUSIPCO.2015.7362726},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104563.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a nonstationary model for transient electrical current signals based on the physical behavior of electrical appliances during their turn-on. This model takes into account the nonstationarity of those transient signals and the special form of their envelope. We also propose an algorithm for the estimation of this model's parameters and we evaluate its performance on synthetic and real signals. The measured transient current signals actually reflect the physical phenomena appearing in the electrical appliances when turning on, and therefore, the model estimates of these transient current signals are useful for characterizing electrical appliances and can be helpful for distinguishing appliances in addition to the use of their steady-state power consumption.\n
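A sketch of fitting one plausible turn-on form with scipy, assuming a decaying-exponential surge superimposed on the steady-state mains sinusoid; this envelope is an illustrative stand-in for the paper's nonstationary model, and the fitted parameters would serve as appliance signatures.

import numpy as np
from scipy.optimize import curve_fit

def transient(t, a_ss, a_tr, tau, f, phi):
    # steady-state amplitude plus an exponentially decaying turn-on surge
    return (a_ss + a_tr * np.exp(-t / tau)) * np.sin(2 * np.pi * f * t + phi)

fs = 10000.0
t = np.arange(0, 0.5, 1 / fs)
rng = np.random.default_rng(0)
i_meas = transient(t, 1.0, 4.0, 0.05, 50.0, 0.3) + 0.05 * rng.standard_normal(t.size)

p0 = [1.0, 1.0, 0.1, 50.0, 0.0]              # rough initial guess
popt, _ = curve_fit(transient, t, i_meas, p0=p0)
print(popt)   # close to (1.0, 4.0, 0.05, 50.0, 0.3)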
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A generalization of the fixed point estimate.\n \n \n \n \n\n\n \n Taylor, A.; Forster, P.; Daout, F.; Oriot, H.; and Savy, L.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1965-1968, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362727,\n  author = {A. Taylor and P. Forster and F. Daout and H. Oriot and L. Savy},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A generalization of the fixed point estimate},\n  year = {2015},\n  pages = {1965-1968},\n  abstract = {In this paper, the problem of estimating proportional covariance matrices for random Gaussian complex vectors is investigated. The maximum likelihood estimates of the matrix and the scale factors are derived, and their statistical performance is studied through bias, consistency and asymptotic distribution. It is also shown that the problem treated here generalizes the covariance estimation problem for Spherically Invariant Random Vectors (SIRV). An iterative estimation algorithm is proposed. A simulation based on a detection problem is presented. The results suggest that the asymptotic distribution obtained is a very good approximation, even for small sample sizes.},\n  keywords = {covariance matrices;Gaussian distribution;iterative methods;maximum likelihood estimation;signal detection;iterative estimation algorithm;SIRV;spherically invariant random vector;asymptotic distribution;statistical performance;maximum likelihood estimation;random Gaussian complex vector;proportional covariance matrix estimation;fixed point estimate generalization;Europe;Signal processing;Conferences;Maximum likelihood estimate;covariance estimate;proportional covariance matrices;spherically invariant random vector (SIRV)},\n  doi = {10.1109/EUSIPCO.2015.7362727},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104709.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, the problem of estimating proportional covariance matrices for random Gaussian complex vectors is investigated. The maximum likelihood estimates of the matrix and the scale factors are derived, and their statistical performance is studied through bias, consistency and asymptotic distribution. It is also shown that the problem treated here generalizes the covariance estimation problem for Spherically Invariant Random Vectors (SIRV). An iterative estimation algorithm is proposed. A simulation based on a detection problem is presented. The results suggest that the asymptotic distribution obtained is a very good approximation, even for small sample sizes.\n
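A numpy sketch of the fixed-point iteration in the SIRV special case the paper generalises (a Tyler-type estimator), assuming zero-mean complex samples; each sample is weighted by the inverse of its current Mahalanobis norm, and a trace normalisation fixes the inherent scale ambiguity.

import numpy as np

def fixed_point_cov(X, n_iter=50):
    """X: (N, p) complex samples. Iterate
    Sigma <- (p/N) * sum_i x_i x_i^H / (x_i^H Sigma^{-1} x_i)."""
    N, p = X.shape
    sigma = np.eye(p, dtype=complex)
    for _ in range(n_iter):
        inv = np.linalg.inv(sigma)
        q = np.real(np.einsum("ij,jk,ik->i", X.conj(), inv, X))  # x_i^H S^-1 x_i
        sigma = (p / N) * (X.T * (1.0 / q)) @ X.conj()           # weighted scatter
        sigma *= p / np.real(np.trace(sigma))                    # remove scale ambiguity
    return sigma

The SIRV fixed point corresponds to one scale factor per sample; the paper's proportional-covariance setting instead shares scale factors across groups of samples, with the same alternating matrix/scale structure.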
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Dirichlet-process-mixture-based Bayesian nonparametric method for Markov switching process estimation.\n \n \n \n\n\n \n Magnant, C.; Giremus, A.; Grivel, E.; Ratton, L.; and Joseph, B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1969-1973, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362728,\n  author = {C. Magnant and A. Giremus and E. Grivel and L. Ratton and B. Joseph},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Dirichlet-process-mixture-based Bayesian nonparametric method for Markov switching process estimation},\n  year = {2015},\n  pages = {1969-1973},\n  abstract = {Dirichlet process (DP) mixtures were recently introduced to deal with switching linear dynamical models (SLDM). They assume the system can switch between an a priori infinite number of state-space representations (SSR) whose parameters are inferred online. The estimation problem can thus be of high dimension when the SSR matrices are unknown. Nevertheless, in many applications, the SSRs can be categorized into different classes. In each class, the SSRs are characterized by a known functional form but differ in a reduced set of unknown hyperparameters. To use this information, we thus propose a new hierarchical model for the SLDM wherein a discrete variable indicates the SSR class. Conditionally on this class, the distributions of the hyperparameters are modeled by DPs. The estimation problem is solved by using a Rao-Blackwellized particle filter. Simulation results show that our model outperforms existing methods in the field of target tracking.},\n  keywords = {Bayes methods;estimation theory;inference mechanisms;Markov processes;particle filtering (numerical methods);signal representation;target tracking;Dirichlet-process-mixture;DP mixtures;Bayesian nonparametric method;Markov switching process estimation;switching linear dynamical models;SLDM;state-space representations;SSR matrices;hierarchical model;hyperparameters;Rao-Blackwellized particle filter;target tracking;Estimation;Switches;Bayes methods;Covariance matrices;Target tracking;Europe;Signal processing;Bayesian non-parametric methods;Dirichlet process mixtures;particle filter;Rao-Blackwellization;interactive multiple models;target tracking},\n  doi = {10.1109/EUSIPCO.2015.7362728},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n Dirichlet process (DP) mixtures were recently introduced to deal with switching linear dynamical models (SLDM). They assume the system can switch between an a priori infinite number of state-space representations (SSR) whose parameters are inferred online. The estimation problem can thus be of high dimension when the SSR matrices are unknown. Nevertheless, in many applications, the SSRs can be categorized into different classes. In each class, the SSRs are characterized by a known functional form but differ in a reduced set of unknown hyperparameters. To use this information, we thus propose a new hierarchical model for the SLDM wherein a discrete variable indicates the SSR class. Conditionally on this class, the distributions of the hyperparameters are modeled by DPs. The estimation problem is solved by using a Rao-Blackwellized particle filter. Simulation results show that our model outperforms existing methods in the field of target tracking.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Error control for the detection of rare and weak signatures in massive data.\n \n \n \n \n\n\n \n Meillier, C.; Châtelain, F.; Michel, O.; and Ayasso, H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1974-1978, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ErrorPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362729,\n  author = {C. Meillier and F. Châtelain and O. Michel and H. Ayasso},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Error control for the detection of rare and weak signatures in massive data},\n  year = {2015},\n  pages = {1974-1978},\n  abstract = {In this paper, we address the general issue of detecting rare and weak signatures in very noisy data. Multiple hypothesis testing approaches can be used to extract a list of components of the data that are likely to be contaminated by a source while controlling a global error criterion. However, most of the efficient methods available in the literature are derived for independent tests. Based on the work of Benjamini and Yekutieli [1], we show that under some classical positivity assumptions, the Benjamini-Hochberg procedure for False Discovery Rate (FDR) control can be directly applied to the result produced by a very common tool in signal and image processing: the matched filter. This shows that despite the dependency structure between the components of the matched filter output, the Benjamini-Hochberg procedure still guarantees FDR control. This is illustrated on both synthetic and real data.},\n  keywords = {matched filters;signal detection;error control;massive data;rare weak signatures detection;hypotheses testing approaches;global error criterion;Benjamini-Hochberg procedure;false discovery rate control;image processing;signal processing;matched filter;FDR control;Impedance matching;Yttrium;Noise measurement;Error correction;Sparse matrices;Europe;source detection;matched filter;error control;FDR;massive data},\n  doi = {10.1109/EUSIPCO.2015.7362729},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099159.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we address the general issue of detecting rare and weak signatures in very noisy data. Multiple hypothesis testing approaches can be used to extract a list of components of the data that are likely to be contaminated by a source while controlling a global error criterion. However, most of the efficient methods available in the literature are derived for independent tests. Based on the work of Benjamini and Yekutieli [1], we show that under some classical positivity assumptions, the Benjamini-Hochberg procedure for False Discovery Rate (FDR) control can be directly applied to the result produced by a very common tool in signal and image processing: the matched filter. This shows that despite the dependency structure between the components of the matched filter output, the Benjamini-Hochberg procedure still guarantees FDR control. This is illustrated on both synthetic and real data.\n
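A sketch of the procedure on a matched-filter output map, assuming unit-variance Gaussian noise after matching so that one-sided p-values follow from the normal tail; by the positive-dependency argument of Benjamini and Yekutieli invoked above, the plain Benjamini-Hochberg step-up rule remains valid.

import numpy as np
from scipy import stats

def bh_detect(mf_output, sigma=1.0, alpha=0.05):
    """Benjamini-Hochberg at FDR level alpha on matched-filter scores."""
    p = stats.norm.sf(mf_output.ravel() / sigma)      # one-sided p-values
    m = p.size
    order = np.argsort(p)
    ok = np.nonzero(p[order] <= alpha * np.arange(1, m + 1) / m)[0]
    detected = np.zeros(m, bool)
    if ok.size:
        detected[order[:ok[-1] + 1]] = True           # reject the k smallest p-values
    return detected.reshape(mf_output.shape)

The k-th comparison line alpha*k/m is what adapts the threshold to the number of candidate components, so a few strong sources do not force a Bonferroni-like penalty on the rest.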
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bayesian Track-Before-Detect for closely spaced targets.\n \n \n \n \n\n\n \n Papi, F.; and Gostar, A. K.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1979-1983, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BayesianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362730,\n  author = {F. Papi and A. K. Gostar},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bayesian Track-Before-Detect for closely spaced targets},\n  year = {2015},\n  pages = {1979-1983},\n  abstract = {Track-Before-Detect (TBD) is an effective approach to multi-target tracking problems with a low signal-to-noise ratio (SNR). In this paper we propose a novel Labeled Random Finite Set (RFS) solution to the multi-target TBD problem for a generic pixel based measurement model. In particular, we discuss the applicability of the Generalized Labeled Multi-Bernoulli (GLMB) distribution to the TBD problem for low SNR and closely spaced targets. In such a case, the commonly used separable targets assumption does not hold and a more sophisticated algorithm is required. The proposed GLMB recursion is effective in the sense that it matches the cardinality distribution and Probability Hypothesis Density (PHD) function of the true joint posterior density. The approach is validated through simulation results in challenging scenarios.},\n  keywords = {Bayes methods;target tracking;Bayesian track-before-detect;closely spaced targets;multitarget tracking problems;low signal-to-noise;labeled random finite set;RFS;multitarget TBD problem;generic pixel based measurement;generalized labeled multiBernoulli distribution;GLMB distribution;GLMB recursion;cardinality distribution;probability hypothesis density function;PHD function;joint posterior density;Radar tracking;Target tracking;Simulation;Approximation methods;Signal to noise ratio;Europe},\n  doi = {10.1109/EUSIPCO.2015.7362730},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096515.pdf},\n}\n\n
\n
\n\n\n
\n Track-Before-Detect (TBD) is an effective approach to multi-target tracking problems with a low signal-to-noise ratio (SNR). In this paper we propose a novel Labeled Random Finite Set (RFS) solution to the multi-target TBD problem for a generic pixel based measurement model. In particular, we discuss the applicability of the Generalized Labeled Multi-Bernoulli (GLMB) distribution to the TBD problem for low SNR and closely spaced targets. In such a case, the commonly used separable targets assumption does not hold and a more sophisticated algorithm is required. The proposed GLMB recursion is effective in the sense that it matches the cardinality distribution and Probability Hypothesis Density (PHD) function of the true joint posterior density. The approach is validated through simulation results in challenging scenarios.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A nonparametric cumulative sum scheme based on sequential ranks and adaptive control limits.\n \n \n \n\n\n \n Lang, M.; and Zoubir, A. M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1984-1988, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362731,\n  author = {M. Lang and A. M. Zoubir},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A nonparametric cumulative sum scheme based on sequential ranks and adaptive control limits},\n  year = {2015},\n  pages = {1984-1988},\n  abstract = {We consider the problem of quickest detection, i.e. we sequentially monitor a data sequence to detect a shift in the sampling distribution which may occur at an unknown time instance. Conventional quickest detection procedures typically require a-priori knowledge of the underlying pre- and post-change distributions of the process. Such knowledge may not be available in practice or be flawed, e.g. because the distributional assumptions itself or the respective parameter estimates are inadequate. In this paper we propose a distribution-free cumulative sum (CUSUM) procedure based on sequential ranks and adaptive control limits. The presented procedure does not require a historical set of training data and is therefore especially suited for initial monitoring phases.},\n  keywords = {adaptive control;control charts;parameter estimation;statistical analysis;statistical process control;nonparametric cumulative sum scheme;sequential ranks;adaptive control limits;quickest detection problem;sampling distribution;time instance;parameter estimation;CUSUM procedure;Training data;Monitoring;Signal processing;Europe;Adaptive control;Control charts;Indexes;Quickest Detection;Nonparametric;Sequential Ranks;Cumulative Sums},\n  doi = {10.1109/EUSIPCO.2015.7362731},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n We consider the problem of quickest detection, i.e. we sequentially monitor a data sequence to detect a shift in the sampling distribution which may occur at an unknown time instance. Conventional quickest detection procedures typically require a priori knowledge of the underlying pre- and post-change distributions of the process. Such knowledge may not be available in practice or may be flawed, e.g. because the distributional assumptions themselves or the respective parameter estimates are inadequate. In this paper we propose a distribution-free cumulative sum (CUSUM) procedure based on sequential ranks and adaptive control limits. The presented procedure does not require a historical set of training data and is therefore especially suited for initial monitoring phases.\n
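For intuition, the sketch below runs a one-sided CUSUM on standardized sequential ranks. The paper's adaptive control limits are not reproduced, so a fixed `threshold` and reference `drift` stand in as assumptions.

```python
# Minimal sketch of a distribution-free CUSUM on sequential ranks
# (illustrative only; the paper's statistic and adaptive limits differ).
import numpy as np

def sequential_rank_cusum(x, drift=0.2, threshold=5.0):
    s, history = 0.0, []
    for n, xn in enumerate(x, start=1):
        rank = np.sum(x[:n] <= xn)                   # sequential rank in 1..n
        u = (rank - (n + 1) / 2) / np.sqrt((n * n - 1) / 12 + 1e-12)
        s = max(0.0, s + u - drift)                  # one-sided CUSUM recursion
        history.append(s)
        if s > threshold:
            return n, history                        # alarm time
    return None, history

x = np.concatenate([np.random.randn(200), np.random.randn(100) + 1.0])
alarm, _ = sequential_rank_cusum(x)
print("alarm at sample", alarm)
```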
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimal multiuser scheduling schemes for simultaneous wireless information and power transfer.\n \n \n \n \n\n\n \n Chynonova, M.; Morsi, R.; Ng, D. W. K.; and Schober, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1989-1993, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OptimalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362732,\n  author = {M. Chynonova and R. Morsi and D. W. K. Ng and R. Schober},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Optimal multiuser scheduling schemes for simultaneous wireless information and power transfer},\n  year = {2015},\n  pages = {1989-1993},\n  abstract = {In this paper, we study the downlink multiuser scheduling problem for systems with simultaneous wireless information and power transfer (SWIPT). We design optimal scheduling algorithms that maximize the long-term average system throughput under different fairness requirements, such as proportional fairness and equal throughput fairness. In particular, the algorithm designs are formulated as non-convex optimization problems which take into account the minimum required average sum harvested energy in the system. The problems are solved by using convex optimization techniques and the proposed optimization framework reveals the tradeoff between the long-term average system throughput and the sum harvested energy in multiuser systems with fairness constraints. Simulation results demonstrate that substantial performance gains can be achieved by the proposed optimization framework compared to existing suboptimal scheduling algorithms from the literature.},\n  keywords = {concave programming;energy harvesting;scheduling;optimal multiuser scheduling schemes;simultaneous wireless information;power transfer;downlink multiuser scheduling;algorithm designs;nonconvex optimization problems;convex optimization techniques;multiuser systems;optimization framework;suboptimal scheduling algorithms;Optimal scheduling;Throughput;Radio frequency;Receivers;Wireless communication;Measurement;RF energy harvesting;wireless information and power transfer;optimal multiuser scheduling},\n  doi = {10.1109/EUSIPCO.2015.7362732},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570094393.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we study the downlink multiuser scheduling problem for systems with simultaneous wireless information and power transfer (SWIPT). We design optimal scheduling algorithms that maximize the long-term average system throughput under different fairness requirements, such as proportional fairness and equal throughput fairness. In particular, the algorithm designs are formulated as non-convex optimization problems which take into account the minimum required average sum harvested energy in the system. The problems are solved by using convex optimization techniques and the proposed optimization framework reveals the tradeoff between the long-term average system throughput and the sum harvested energy in multiuser systems with fairness constraints. Simulation results demonstrate that substantial performance gains can be achieved by the proposed optimization framework compared to existing suboptimal scheduling algorithms from the literature.\n
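The sketch below conveys only the flavor of the throughput/harvesting trade-off with a greedy proportional-fair heuristic plus crude harvested-energy bookkeeping; it is not the paper's optimal convex-optimization framework, and every constant (SNR, harvesting rate, averaging factor) is an assumption.

```python
# Greedy proportional-fair TDMA scheduling with idle-user harvesting
# (a heuristic illustration, not the paper's optimal solution).
import numpy as np

rng = np.random.default_rng(5)
K, T, beta = 4, 2000, 0.05                  # users, slots, PF averaging factor
avg_rate = np.full(K, 1e-3)
harvested = np.zeros(K)
for t in range(T):
    g = rng.exponential(1.0, K)             # per-slot channel power gains
    rate = np.log2(1 + 10 * g)              # achievable rates at SNR = 10
    k = int(np.argmax(rate / avg_rate))     # proportional-fair metric
    served = np.zeros(K); served[k] = rate[k]
    harvested += 0.1 * g                    # idle users harvest (assumed model)
    harvested[k] -= 0.1 * g[k]              # the served user decodes instead
    avg_rate = (1 - beta) * avg_rate + beta * served
print("long-term rates:", np.round(avg_rate, 3))
print("avg harvested  :", np.round(harvested / T, 3))
```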
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SWIPT through eigen-decomposition of MIMO channels.\n \n \n \n \n\n\n \n Timotheou, S.; and Krikidis, I.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1994-1998, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SwiptPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362733,\n  author = {S. Timotheou and I. Krikidis},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Swipt through eigen-decomposition of mimo channels},\n  year = {2015},\n  pages = {1994-1998},\n  abstract = {In this paper, we theoretically investigate a new technique for simultaneous wireless information and power transfer in multiple-input multiple-output (MIMO) point-to-point with radio frequency energy harvesting capabilities. The proposed technique exploits the spatial decomposition of the MIMO channel and uses the eigenchannels either to convey information or to transfer energy. An optimization problem that minimizes the total transmitted power subject to maximum power per eigenchannel, information and energy constraints is formulated as a mixed-integer nonlinear program and solved to optimality using mixed-integer second-order cone programming.},\n  keywords = {eigenvalues and eigenfunctions;energy harvesting;integer programming;MIMO communication;nonlinear programming;radiofrequency power transmission;telecommunication power management;eigendecomposition;wireless information;power transfer;multiple-input multiple-output channels;MIMO channels;radio frequency energy harvesting capabilities;spatial decomposition;eigenchannels;energy constraints;mixed-integer nonlinear program;mixed-integer second-order cone programming;MIMO;Optimization;Radio frequency;Resource management;Europe;Signal processing;Energy harvesting;RF energy harvesting;SWIPT;MIMO channel;SVD;optimization;MISOCP},\n  doi = {10.1109/EUSIPCO.2015.7362733},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096783.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we theoretically investigate a new technique for simultaneous wireless information and power transfer in multiple-input multiple-output (MIMO) point-to-point systems with radio-frequency energy harvesting capabilities. The proposed technique exploits the spatial decomposition of the MIMO channel and uses the eigenchannels either to convey information or to transfer energy. An optimization problem that minimizes the total transmitted power subject to maximum power per eigenchannel, information and energy constraints is formulated as a mixed-integer nonlinear program and solved to optimality using mixed-integer second-order cone programming.\n
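A minimal numpy sketch of the core idea, under assumed parameters: take the SVD of the channel, then split the eigenchannels between information and energy transfer (here a fixed 2/2 split with uniform power, not the paper's MISOCP solution).

```python
# Eigenchannel split for SWIPT (illustrative assignment, not optimized).
import numpy as np

rng = np.random.default_rng(0)
H = (rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))) / np.sqrt(2)
U, s, Vh = np.linalg.svd(H)            # eigenchannel gains = singular values

n_info = 2                             # assumed split: 2 info, 2 energy channels
info_idx = np.argsort(s)[::-1][:n_info]
energy_idx = np.setdiff1d(np.arange(len(s)), info_idx)

p = 1.0                                # per-eigenchannel transmit power (assumed)
rate = np.sum(np.log2(1 + p * s[info_idx] ** 2))      # info eigenchannels
harvested = np.sum(p * s[energy_idx] ** 2)            # energy eigenchannels
print(f"rate = {rate:.2f} bit/s/Hz, harvested power ~ {harvested:.2f}")
```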
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Cooperative non-orthogonal multiple access in 5G systems with SWIPT.\n \n \n \n\n\n \n Liu, Y.; Ding, Z.; Elkashlan, M.; and Poor, H. V.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1999-2003, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362734,\n  author = {Y. Liu and Z. Ding and M. Eïkashlan and H. V. Poor},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Cooperative non-orthogonal multiple access in 5G systems with SWIPT},\n  year = {2015},\n  pages = {1999-2003},\n  abstract = {In this paper, the application of simultaneous wireless information and power transfer (SWIPT) to non-orthogonal multiple access (NOMA) is investigated. A new cooperative SWIPT NOMA protocol is proposed in which near NOMA users which are close to the source act as energy harvesting relays to help far NOMA users. By assuming that all users are spatially randomly located in the network, new analytical expressions for outage probability are derived both for the near and far users, in order to characterize the performance of the proposed protocol. The diversity of both the near and far users is analyzed to demonstrate that the use of SWIPT will not jeopardize the diversity order compared to the conventional NOMA. Numerical results are also provided to verify the accuracy of the developed analytical results.},\n  keywords = {5G mobile communication;cooperative communication;microwave power transmission;multi-access systems;protocols;telecommunication power management;cooperative nonorthogonal multiple access systems;5G systems;simultaneous wireless information and power transfer;cooperative SWIPT NOMA protocol;outage probability;Bismuth;Protocols;Relays;Signal to noise ratio;Diversity methods;Europe;Non-orthogonal multiple access;simultaneously wireless information and power transfer;stochastic geometry},\n  doi = {10.1109/EUSIPCO.2015.7362734},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n In this paper, the application of simultaneous wireless information and power transfer (SWIPT) to non-orthogonal multiple access (NOMA) is investigated. A new cooperative SWIPT NOMA protocol is proposed in which near NOMA users, which are close to the source, act as energy harvesting relays to help far NOMA users. By assuming that all users are spatially randomly located in the network, new analytical expressions for outage probability are derived for both the near and far users, in order to characterize the performance of the proposed protocol. The diversity of both the near and far users is analyzed to demonstrate that the use of SWIPT will not jeopardize the diversity order compared to conventional NOMA. Numerical results are also provided to verify the accuracy of the developed analytical results.\n
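As a rough illustration only, the Monte Carlo snippet below estimates the far user's outage under simplified assumptions (Rayleigh fading, fixed power splitting, selection-style combining). This is not the paper's stochastic-geometry analysis, and all gains and thresholds are invented for the example.

```python
# Monte Carlo outage sketch for a cooperative SWIPT NOMA hop (toy model).
import numpy as np

rng = np.random.default_rng(1)
N = 100_000
P, a_far, a_near = 1.0, 0.8, 0.2       # source power, NOMA power allocation
rho, eta = 0.3, 0.7                    # power-splitting ratio, harvester efficiency
R = 0.5                                # target rate (bit/s/Hz)
thr = 2 ** R - 1                       # SINR threshold

h_sn = rng.exponential(1.0, N)         # |h|^2 source -> near user
h_sf = rng.exponential(0.3, N)         # source -> far user (weaker mean gain)
h_nf = rng.exponential(0.8, N)         # near user -> far user

# Near user decodes the far user's symbol first (SIC), using (1-rho) of power.
sinr_near = (1 - rho) * P * a_far * h_sn / ((1 - rho) * P * a_near * h_sn + 1)
P_relay = eta * rho * P * h_sn         # harvested energy powers the relay hop
sinr_direct = P * a_far * h_sf / (P * a_near * h_sf + 1)
sinr_relay = P_relay * h_nf            # far user's relayed copy
ok = (sinr_direct > thr) | ((sinr_near > thr) & (sinr_relay > thr))
print("far-user outage ~", 1 - ok.mean())
```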
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An unsupervised approach to the semantic description of the sound quality of violins.\n \n \n \n \n\n\n \n Buccoli, M.; Zanoni, M.; Setragno, F.; Antonacci, F.; and Sarti, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2004-2008, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362735,\n  author = {M. Buccoli and M. Zanoni and F. Setragno and F. Antonacci and A. Sarti},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An unsupervised approach to the semantic description of the sound quality of violins},\n  year = {2015},\n  pages = {2004-2008},\n  abstract = {In this study we propose a set of semantic musical descriptors that can be used for describing the timbre of violins. The proposed semantic model follows a dimensional approach, which allows us to express the degree of intensity of each descriptor. A set of recordings of a number of violins (among them, Stradivari, Amati and Guarnieri instruments) were annotated with the descriptors through questionnaires. The recordings are processed with deep learning techniques, to learn salient features from the audio signal in an unsupervised fashion. In this study we propose an automatic annotation procedure based on a set of regression functions that model each semantic descriptor using the learned set of features.},\n  keywords = {audio signal processing;feature extraction;learning (artificial intelligence);musical acoustics;musical instruments;regression analysis;violin sound quality;semantic musical descriptors;violins timbre;learning techniques;salient features;audio signal;unsupervised fashion;automatic annotation;regression function;Semantics;Instruments;Training;Neurons;Feature extraction;Europe;Signal processing;High-level music descriptor;violin;timbre;sound quality},\n  doi = {10.1109/EUSIPCO.2015.7362735},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097543.pdf},\n}\n\n
\n
\n\n\n
\n In this study we propose a set of semantic musical descriptors that can be used for describing the timbre of violins. The proposed semantic model follows a dimensional approach, which allows us to express the degree of intensity of each descriptor. A set of recordings of a number of violins (among them, Stradivari, Amati and Guarnieri instruments) were annotated with the descriptors through questionnaires. The recordings are processed with deep learning techniques, to learn salient features from the audio signal in an unsupervised fashion. We further propose an automatic annotation procedure based on a set of regression functions that model each semantic descriptor using the learned set of features.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improving piano note tracking by HMM smoothing.\n \n \n \n \n\n\n \n Cheng, T.; Dixon, S.; and Mauch, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2009-2013, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ImprovingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362736,\n  author = {T. Cheng and S. Dixon and M. Mauch},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Improving piano note tracking by HMM smoothing},\n  year = {2015},\n  pages = {2009-2013},\n  abstract = {In this paper we improve piano note tracking using a Hidden Markov Model (HMM). We first transcribe piano music based on a non-negative matrix factorisation (NMF) method. For each note four templates are trained to represent the different stages of piano sounds: silence, attack, decay and release. Then a four-state HMM is employed to track notes on the gains of each pitch. We increase the likelihood of staying in silence for low pitches and set a minimum duration to reduce short false-positive notes. For quickly repeated notes, we allow the note state to transition from decay directly back to attack. The experiments tested on 30 piano pieces from the MAPS dataset shows promising results for both frame-wise and note-wise transcription.},\n  keywords = {acoustic signal processing;hidden Markov models;matrix decomposition;music;musical acoustics;musical instruments;smoothing methods;piano note tracking;HMM smoothing;hidden Markov model;piano music transcription;nonnegative matrix factorisation method;NMF method;piano sound;four-state HMM;pitch gain;false-positive notes;quickly repeated notes;MAPS dataset;frame-wise transcription;note-wise transcription;Hidden Markov models;Spectrogram;Europe;Estimation;Matrix decomposition;Training;piano note tracking;Hidden Markov Model},\n  doi = {10.1109/EUSIPCO.2015.7362736},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097651.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we improve piano note tracking using a Hidden Markov Model (HMM). We first transcribe piano music based on a non-negative matrix factorisation (NMF) method. For each note, four templates are trained to represent the different stages of piano sounds: silence, attack, decay and release. Then a four-state HMM is employed to track notes on the gains of each pitch. We increase the likelihood of staying in silence for low pitches and set a minimum duration to reduce short false-positive notes. For quickly repeated notes, we allow the note state to transition from decay directly back to attack. Experiments on 30 piano pieces from the MAPS dataset show promising results for both frame-wise and note-wise transcription.\n
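A minimal Viterbi sketch of the four-state note HMM run on the NMF gain sequence of a single pitch. The transition matrix, emission model and initial probabilities here are illustrative stand-ins, not the paper's trained values; note the decay-to-attack transition that accommodates repeated notes.

```python
# Four-state note HMM (silence, attack, decay, release) decoded by Viterbi.
import numpy as np

STATES = ["silence", "attack", "decay", "release"]
logA = np.log(np.array([
    [0.98, 0.02, 0.00, 0.00],   # silence -> silence/attack
    [0.00, 0.50, 0.50, 0.00],   # attack  -> attack/decay
    [0.00, 0.05, 0.85, 0.10],   # decay   -> attack (repeated note)/decay/release
    [0.30, 0.05, 0.00, 0.65],   # release -> silence/attack/release
]) + 1e-12)

def log_emission(g):
    """Crude Gaussian emissions on the gain g for each state (assumed)."""
    means, stds = np.array([0.0, 1.0, 0.6, 0.2]), np.array([0.1, 0.3, 0.3, 0.2])
    return -0.5 * ((g - means) / stds) ** 2 - np.log(stds)

def viterbi(gains):
    T, S = len(gains), len(STATES)
    delta = np.full((T, S), -np.inf)
    psi = np.zeros((T, S), dtype=int)
    delta[0] = log_emission(gains[0]) + np.log([0.97, 0.01, 0.01, 0.01])
    for t in range(1, T):
        scores = delta[t - 1][:, None] + logA       # scores[i, j]: i -> j
        psi[t] = scores.argmax(axis=0)
        delta[t] = scores.max(axis=0) + log_emission(gains[t])
    path = [int(delta[-1].argmax())]
    for t in range(T - 1, 0, -1):
        path.append(int(psi[t][path[-1]]))
    return [STATES[s] for s in reversed(path)]

print(viterbi(np.array([0.0, 0.9, 0.7, 0.5, 0.3, 0.1, 0.0])))
```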
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Aliasing reduction in soft-clipping algorithms.\n \n \n \n \n\n\n \n Esqueda, F.; Välimäki, V.; and Bilbao, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2014-2018, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AliasingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362737,\n  author = {F. Esqueda and V. Välimäki and S. Bilbao},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Aliasing reduction in soft-clipping algorithms},\n  year = {2015},\n  pages = {2014-2018},\n  abstract = {Soft-clipping algorithms used to implement musical distortion effects are major sources of aliasing due to their nonlinear behavior. It is a research challenge to design computationally efficient methods for alias-free distortion without over-sampling. In the proposed approach, soft clipping is decomposed into a hard clipper and a low-order polynomial part. A technique for aliasing reduction of the hard-clipped signal is presented based on a polynomial approximation of the ban-dlimited ramp function. This correction function operates by quasi-bandlimiting the discontinuities introduced in the first derivative of the signal. The proposed method effectively reduces perceivable aliasing in soft-clipped audio signals having low frequency content. This work presents the first step towards alias-free implementations of nonlinear virtual analog effects.},\n  keywords = {audio signal processing;music;aliasing reduction;soft clipping algorithms;musical distortion effects;alias free distortion;hard clipping method;low order polynomial;hard clipped signal aliasing;polynomial approximation;ban dlimited ramp function;quasiband-limiting method;soft-clipped audio signals;nonlinear virtual analog effect;Signal processing algorithms;Approximation methods;Europe;Algorithm design and analysis;Acoustic distortion;Audio signal processing;antialiasing;music;nonlinear distortion},\n  doi = {10.1109/EUSIPCO.2015.7362737},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104119.pdf},\n}\n\n
\n
\n\n\n
\n Soft-clipping algorithms used to implement musical distortion effects are major sources of aliasing due to their nonlinear behavior. It is a research challenge to design computationally efficient methods for alias-free distortion without oversampling. In the proposed approach, soft clipping is decomposed into a hard clipper and a low-order polynomial part. A technique for aliasing reduction of the hard-clipped signal is presented based on a polynomial approximation of the bandlimited ramp function. This correction function operates by quasi-bandlimiting the discontinuities introduced in the first derivative of the signal. The proposed method effectively reduces perceivable aliasing in soft-clipped audio signals having low frequency content. This work presents the first step towards alias-free implementations of nonlinear virtual analog effects.\n
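The decomposition step can be made concrete with a common cubic soft clipper: it splits exactly into a hard clipper plus a low-order polynomial residual that vanishes outside [-1, 1]. The bandlimited-ramp correction of the hard-clipping corners is the paper's contribution and is omitted here.

```python
# Soft clipper = hard clipper + polynomial residual (exact decomposition).
import numpy as np

def hard_clip(x):
    return np.clip(x, -1.0, 1.0)

def soft_clip(x):
    """Cubic soft clipper, a common choice: f(x) = 1.5x - 0.5x^3 on [-1, 1]."""
    y = 1.5 * x - 0.5 * x ** 3
    return np.where(np.abs(x) <= 1.0, y, np.sign(x))

def polynomial_residual(x):
    """soft_clip = hard_clip + this residual (zero outside [-1, 1])."""
    r = 1.5 * x - 0.5 * x ** 3 - x
    return np.where(np.abs(x) <= 1.0, r, 0.0)

x = np.linspace(-2, 2, 9)
assert np.allclose(soft_clip(x), hard_clip(x) + polynomial_residual(x))
```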
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Parallel digital signal processing for efficient piano synthesis.\n \n \n \n \n\n\n \n Gabrielli, L.; Zambon, S.; and Fontana, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2019-2022, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ParallelPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362738,\n  author = {L. Gabrielli and S. Zambon and F. Fontana},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Parallel digital signal processing for efficient piano synthesis},\n  year = {2015},\n  pages = {2019-2022},\n  abstract = {While computational acoustics techniques for musical instruments emulation reached a remarkable maturity due to continuous development in the last three decades, implementation into embedded digital instruments lags behind, with only a few notable commercial products to solely employ physics-based algorithms for acoustic instruments tone synthesis. In this paper a parallel DSP architecture for the efficient implementation of the acoustic piano on embedded processors is reported. The resulting model is able to provide faithful reproduction of the acoustic piano physical behaviour and can also be used as an engine for novel instruments that need to provide advanced multimodal output (haptics, spatial audio) with a low-cost embedded platform.},\n  keywords = {acoustic signal processing;signal synthesis;parallel digital signal processing architecture;efficient acoustic piano synthesis;computational acoustics technique;musical instrument emulation;embedded digital instrument;physics-based algorithm;acoustic instruments tone synthesis;parallel DSP architecture;embedded processor;Digital signal processing;Computational modeling;Acoustics;Instruments;Signal processing algorithms;Program processors;Sound synthesis;physical modeling;parallel computing;digital signal processing},\n  doi = {10.1109/EUSIPCO.2015.7362738},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104271.pdf},\n}\n\n
\n
\n\n\n
\n While computational acoustics techniques for musical instrument emulation have reached a remarkable maturity thanks to continuous development over the last three decades, their implementation in embedded digital instruments lags behind, with only a few notable commercial products solely employing physics-based algorithms for acoustic instrument tone synthesis. In this paper a parallel DSP architecture for the efficient implementation of the acoustic piano on embedded processors is reported. The resulting model is able to provide faithful reproduction of the acoustic piano's physical behaviour and can also be used as an engine for novel instruments that need to provide advanced multimodal output (haptics, spatial audio) on a low-cost embedded platform.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Alternate level clustering for drum transcription.\n \n \n \n \n\n\n \n Rossignol, M.; Lagrange, M.; Lafay, G.; and Benetos, E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2023-2027, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AlternatePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362739,\n  author = {M. Rossignol and M. Lagrange and G. Lafay and E. Benetos},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Alternate level clustering for drum transcription},\n  year = {2015},\n  pages = {2023-2027},\n  abstract = {This paper introduces a clustering-based unsupervised approach to the problem of drum transcription. The proposed method is based on a stack of multiple clustering and segmentation stages that progressively build up meaningful audio events, in a bottom-up fashion. At each level, the inherent redundancy of the repeating events guides the clustering of objects into more complex structures. Comparison with state-of-the-art approaches demonstrate the potential of the proposed approach, both in terms of efficiency and of ability to generalize.},\n  keywords = {audio signal processing;musical instruments;alternate level clustering;drum transcription;clustering-based unsupervised approach;segmentation stages;audio events;Signal processing algorithms;Clustering algorithms;Signal processing;Acoustics;Algorithm design and analysis;Europe;Standards;Audio segmentation;automatic music transcription;drum transcription;unsupervised learning},\n  doi = {10.1109/EUSIPCO.2015.7362739},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104733.pdf},\n}\n\n
\n
\n\n\n
\n This paper introduces a clustering-based unsupervised approach to the problem of drum transcription. The proposed method is based on a stack of multiple clustering and segmentation stages that progressively build up meaningful audio events, in a bottom-up fashion. At each level, the inherent redundancy of the repeating events guides the clustering of objects into more complex structures. Comparisons with state-of-the-art approaches demonstrate the potential of the proposed method, both in terms of efficiency and of its ability to generalize.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Enforcing consistency in spectral masks using Markov random fields.\n \n \n \n \n\n\n \n Mandel, M. I.; and Roman, N.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2028-2032, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"EnforcingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362740,\n  author = {M. I. Mandel and N. Roman},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Enforcing consistency in spectral masks using Markov random fields},\n  year = {2015},\n  pages = {2028-2032},\n  abstract = {Localization-based multichannel source separation algorithms typically operate by clustering or classifying individual time-frequency points based on their spatial characteristics, treating adjacent points as independent observations. The Model-based EM Source Separation and Localization (MESSL) algorithm is one such approach for binaural signals that achieves additional robustness by enforcing consistency in inaural parameters across frequency. This paper incorporates MESSL into a Markov Random Field (MRF) framework in order to addition ally enforce consistency in the assignment of neighboring time-frequency units to sources. Approximate inference in the MRF is performed using loopy belief propagation (LBP), and the same approach can be used to smooth any probabilistic source separation mask. The proposed MESSL-MRF algorithm is tested on binaural mixtures of three sources in reverberant conditions and shows significant improvements over the original MESSL algorithm as measured by both signal-to-distortion ratios as well as a speech intelligibility predictor.},\n  keywords = {Markov processes;source separation;speech intelligibility;speech processing;spectral mask consistency;Markov random fields;localization based multichannel source separation algorithm;model based EM source separation and localization algorithm;binaural signals;Markov random field interference;loopy belief propagation;reverberant conditions;speech intelligibility predictor;Time-frequency analysis;Source separation;Signal processing algorithms;Approximation methods;Approximation algorithms;Belief propagation;Europe;Binaural separation;Spectral masking;Markov Random Fields},\n  doi = {10.1109/EUSIPCO.2015.7362740},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570098257.pdf},\n}\n\n
\n
\n\n\n
\n Localization-based multichannel source separation algorithms typically operate by clustering or classifying individual time-frequency points based on their spatial characteristics, treating adjacent points as independent observations. The Model-based EM Source Separation and Localization (MESSL) algorithm is one such approach for binaural signals that achieves additional robustness by enforcing consistency in interaural parameters across frequency. This paper incorporates MESSL into a Markov Random Field (MRF) framework in order to additionally enforce consistency in the assignment of neighboring time-frequency units to sources. Approximate inference in the MRF is performed using loopy belief propagation (LBP), and the same approach can be used to smooth any probabilistic source separation mask. The proposed MESSL-MRF algorithm is tested on binaural mixtures of three sources in reverberant conditions and shows significant improvements over the original MESSL algorithm, as measured by both signal-to-distortion ratios and a speech intelligibility predictor.\n
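As a generic illustration of the smoothing idea (not MESSL-MRF itself), the toy below runs sum-product loopy BP on a binary grid MRF with a Potts pairwise term to smooth a probabilistic mask. Toroidal boundaries via np.roll and the coupling strength are simplifying assumptions.

```python
# Toy loopy BP on a 4-connected grid MRF to smooth a probabilistic mask.
import numpy as np

def smooth_mask(mask, coupling=1.2, iters=10):
    F, T = mask.shape
    unary = np.stack([1 - mask, mask], -1).clip(1e-6, 1)     # (F, T, 2)
    psi = np.exp(coupling * np.eye(2))                       # Potts pairwise
    msgs = np.ones((4, F, T, 2)) / 2                         # up/down/left/right
    shifts = [(-1, 0), (1, 0), (0, -1), (0, 1)]              # sender offsets
    for _ in range(iters):
        belief = unary * msgs.prod(axis=0)
        for d, (df, dt) in enumerate(shifts):
            # Message into (f,t) from its neighbor at (f+df, t+dt): the
            # neighbor's belief minus the back-message, times psi.
            opposite = {0: 1, 1: 0, 2: 3, 3: 2}[d]
            nb = belief / msgs[opposite].clip(1e-12)
            nb = np.roll(nb, shift=(-df, -dt), axis=(0, 1))  # toroidal edges
            m = nb @ psi
            msgs[d] = m / m.sum(-1, keepdims=True)
    belief = unary * msgs.prod(axis=0)
    return belief[..., 1] / belief.sum(-1)

noisy = (np.random.rand(64, 100) < 0.5) * 0.8 + 0.1
print(smooth_mask(noisy).shape)
```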
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A structured nonnegative matrix factorization for source separation.\n \n \n \n \n\n\n \n Laroche, C.; Kowalski, M.; Papadopoulos, H.; and Richard, G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2033-2037, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362741,\n  author = {C. Laroche and M. Kowalski and H. Papadopoulos and G. Richard},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A structured nonnegative matrix factorization for source separation},\n  year = {2015},\n  pages = {2033-2037},\n  abstract = {In this paper, we propose a new unconstrained nonnegative matrix factorization method designed to utilize the multilayer structure of audio signals to improve the quality of the source separation. The tonal layer is sparse in frequency and temporally stable, while the transient layer is composed of short term broadband sounds. Our method has a part well suited for tonal extraction which decomposes the signals in sparse orthogonal components, while the transient part is represented by a regular nonnegative matrix factorization decomposition. Experiments on synthetic and real music data in a source separation context show that such decomposition is suitable for audio signal. Compared with three state-of-the-art harmonic/percussive decomposition algorithms, the proposed method shows competitive performances.},\n  keywords = {audio signal processing;matrix decomposition;source separation;unconstrained nonnegative matrix factorization method;structured nonnegative matrix factorization method;source separation quality improvement;regular nonnegative matrix factorization decomposition;sparse orthogonal component;signal decomposition;tonal extraction;short term broadband sounds;transient layer;sparse tonal layer;audio signal multilayer structure;Matrix decomposition;Source separation;Harmonic analysis;Signal to noise ratio;Europe;Databases;nonnegative matrix factorization;projective nonnegative matrix factorization;audio source separation;harmonic/percussive decomposition},\n  doi = {10.1109/EUSIPCO.2015.7362741},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102869.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a new unconstrained nonnegative matrix factorization method designed to utilize the multilayer structure of audio signals to improve the quality of the source separation. The tonal layer is sparse in frequency and temporally stable, while the transient layer is composed of short-term broadband sounds. Our method has a part well suited for tonal extraction which decomposes the signals into sparse orthogonal components, while the transient part is represented by a regular nonnegative matrix factorization decomposition. Experiments on synthetic and real music data in a source separation context show that such a decomposition is suitable for audio signals. Compared with three state-of-the-art harmonic/percussive decomposition algorithms, the proposed method shows competitive performance.\n
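For reference, the transient part's building block is a regular NMF; a standard Lee-Seung multiplicative-update sketch (Euclidean cost) is shown below. The paper's sparse orthogonal tonal part is not reproduced.

```python
# Baseline NMF with multiplicative updates (Euclidean cost).
import numpy as np

def nmf(V, rank, iters=200, eps=1e-9):
    rng = np.random.default_rng(0)
    F, T = V.shape
    W = rng.random((F, rank)) + eps
    H = rng.random((rank, T)) + eps
    for _ in range(iters):
        H *= (W.T @ V) / (W.T @ W @ H + eps)    # Lee-Seung updates
        W *= (V @ H.T) / (W @ H @ H.T + eps)
    return W, H

V = np.abs(np.random.randn(257, 100)) ** 2      # stand-in power spectrogram
W, H = nmf(V, rank=8)
print("relative error:", np.linalg.norm(V - W @ H) / np.linalg.norm(V))
```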
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A multi-sensor approach for real-time detection and classification of impact sounds.\n \n \n \n \n\n\n \n Stefanakis, N.; and Mouchtaris, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2038-2042, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362742,\n  author = {N. Stefanakis and A. Mouchtaris},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A multi-sensor approach for real-time detection and classification of impact sounds},\n  year = {2015},\n  pages = {2038-2042},\n  abstract = {We present a method for real-time detection and classification of impact sounds - relying solely on spatial features - that exploits the difference in the location of each impacted structure. Using a compact sensor array, we formulate the classification problem in terms of an undetermined source separation process where we assume that the linear mixing model can be learned through a training phase. The recovered source amplitudes are exploited for estimating the source activity in time, and the detection and classification decisions are derived based on simple energy criteria. Experimental results with two sensors demonstrate the efficiency of the method in an application scenario which considers the use of a simple object as a real-time control interface for triggering a percussion synthesizer.},\n  keywords = {acoustic signal detection;acoustic transducers;sensor arrays;impact sound real-time detection;impact sound real-time classification;sensor array;linear mixing model;percussion synthesizer;Training;Europe;Microphones;Signal processing;Real-time systems;Indexes;Acoustics;impact sound;sensor array;gesture recognition},\n  doi = {10.1109/EUSIPCO.2015.7362742},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104809.pdf},\n}\n\n
\n
\n\n\n
\n We present a method for real-time detection and classification of impact sounds - relying solely on spatial features - that exploits the difference in the location of each impacted structure. Using a compact sensor array, we formulate the classification problem in terms of an underdetermined source separation process, where we assume that the linear mixing model can be learned through a training phase. The recovered source amplitudes are exploited for estimating the source activity in time, and the detection and classification decisions are derived based on simple energy criteria. Experimental results with two sensors demonstrate the efficiency of the method in an application scenario which considers the use of a simple object as a real-time control interface for triggering a percussion synthesizer.\n
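A hedged sketch of the recover-then-threshold logic: given a mixing matrix `A` learned in training (one column per impact location), recover source amplitudes by least squares and decide by an energy criterion. The matrix values and the threshold are illustrative, not the paper's.

```python
# Least-squares source recovery + energy-based detection/classification.
import numpy as np

def classify_frame(x, A, energy_thresh=0.5):
    """x: multichannel frame, A: (n_sensors, n_classes) learned mixing."""
    s, *_ = np.linalg.lstsq(A, x, rcond=None)    # recovered source amplitudes
    energies = s ** 2
    k = int(np.argmax(energies))
    return k if energies[k] > energy_thresh else None   # None = no impact

A = np.array([[1.0, 0.2],       # two sensors, two impact locations
              [0.3, 0.9]])
x = A @ np.array([0.0, 1.2]) + 0.05 * np.random.randn(2)
print("detected class:", classify_frame(x, A))
```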
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unified approach for audio source separation with multichannel factorial HMM and DOA mixture model.\n \n \n \n \n\n\n \n Higuchi, T.; and Kameoka, H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2043-2047, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"UnifiedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362743,\n  author = {T. Higuchi and H. Kameoka},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Unified approach for audio source separation with multichannel factorial HMM and DOA mixture model},\n  year = {2015},\n  pages = {2043-2047},\n  abstract = {We deal with the problems of blind source separation, dereverberation, audio event detection and direction-of-arrival (DOA) estimation. We previously proposed a generative model of multichannel signals called the multichannel facto rial hidden Markov model, which allows us to simultaneously solve these problems through a joint optimization problem formulation. In this approach, we modeled the spatial cor relation matrix of each source as a weighted sum of the spatial correlation matrices corresponding to all possible DOAs. However, it became clear through real environment experiments that the estimate of the spatial correlation matrix tended to deviate from the actual correlation matrix since the plane wave assumption does not hold due to reverber ation and noise components. To handle such deviations, we propose introducing a prior distribution over the spatial correlation matrices called the DOA mixture model instead of using the weighted sum model. The experiment showed that the proposed method provided 1.94 [dB] improvement compared with our previous method in terms of the the signal-to-distortion ratios of separated signals.},\n  keywords = {audio signal processing;blind source separation;correlation methods;direction-of-arrival estimation;hidden Markov models;matrix algebra;reverberation;audio source separation;HMM;DOA mixture model;blind source separation;dereverberation;audio event detection;direction-of-arrival estimation;multichannel signals;multichannel factorial hidden Markov model;spatial correlation matrix;plane wave assumption;signal-to-distortion ratios;Hidden Markov models;Correlation;Direction-of-arrival estimation;Time-frequency analysis;Arrays;Microphones;Source separation;Blind source separation;voice activity detection;dereverberation;DOA estimation},\n  doi = {10.1109/EUSIPCO.2015.7362743},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099211.pdf},\n}\n\n
\n
\n\n\n
\n We deal with the problems of blind source separation, dereverberation, audio event detection and direction-of-arrival (DOA) estimation. We previously proposed a generative model of multichannel signals called the multichannel factorial hidden Markov model, which allows us to simultaneously solve these problems through a joint optimization problem formulation. In this approach, we modeled the spatial correlation matrix of each source as a weighted sum of the spatial correlation matrices corresponding to all possible DOAs. However, it became clear through real-environment experiments that the estimate of the spatial correlation matrix tended to deviate from the actual correlation matrix, since the plane-wave assumption does not hold due to reverberation and noise components. To handle such deviations, we propose introducing a prior distribution over the spatial correlation matrices, called the DOA mixture model, instead of using the weighted sum model. The experiment showed that the proposed method provided a 1.94 dB improvement over our previous method in terms of the signal-to-distortion ratios of separated signals.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An evaluation of stereo speech enhancement methods for different audio-visual scenarios.\n \n \n \n \n\n\n \n Craciun, A.; Uhle, C.; and Bäckström, T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2048-2052, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362744,\n  author = {A. Craciun and C. Uhle and T. Bäckström},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An evaluation of stereo speech enhancement methods for different audio-visual scenarios},\n  year = {2015},\n  pages = {2048-2052},\n  abstract = {Following speech on TV or radio in the presence of interferers is sometimes challenging, in particular for the elderly and the hearing-impaired. To evaluate the performance of speech enhancement methods for such scenarios, we consider a stereo mixture composed of a speech signal and interfering sources. We apply different approaches to separate the mixture into two components, where the first component contains mainly speech (the desired signal) and the second component contains the rest of the mixture. An improved stereo signal is constructed by recombining these components such that speech gets emphasized with respect to the rest of the mixture and at the same time the amount of artifacts is kept to a minimum. Listening tests and objective measures show that the center extraction approach is in general the most effective, although it is sensitive to speaker positioning.},\n  keywords = {interference suppression;speech enhancement;stereo speech enhancement methods;audio-visual scenarios;stereo mixture;stereo signal;center extraction approach;speaker positioning;Speech;Speech enhancement;Time-frequency analysis;Estimation;Signal processing algorithms;Noise measurement;Signal to noise ratio;speech enhancement;center extraction;noise suppression;direct-ambient decomposition},\n  doi = {10.1109/EUSIPCO.2015.7362744},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095143.pdf},\n}\n\n
\n
\n\n\n
\n Following speech on TV or radio in the presence of interferers is sometimes challenging, in particular for the elderly and the hearing-impaired. To evaluate the performance of speech enhancement methods for such scenarios, we consider a stereo mixture composed of a speech signal and interfering sources. We apply different approaches to separate the mixture into two components, where the first component contains mainly speech (the desired signal) and the second component contains the rest of the mixture. An improved stereo signal is constructed by recombining these components such that speech gets emphasized with respect to the rest of the mixture and at the same time the amount of artifacts is kept to a minimum. Listening tests and objective measures show that the center extraction approach is in general the most effective, although it is sensitive to speaker positioning.\n
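One of the simplest baselines in this family can be sketched as mid/side recombination, emphasizing the center (speech) against the side (ambience and interferers). The paper's center-extraction methods are more sophisticated, so treat this purely as an illustration of the recombination step.

```python
# Mid/side recombination that emphasizes the center of a stereo mixture.
import numpy as np

def enhance_center(left, right, center_gain=1.0, side_gain=0.5):
    mid = 0.5 * (left + right)          # crude center (speech) estimate
    side = 0.5 * (left - right)         # crude ambience/interferer estimate
    out_l = center_gain * mid + side_gain * side
    out_r = center_gain * mid - side_gain * side
    return out_l, out_r

fs = 16_000
t = np.arange(fs) / fs
speech = np.sin(2 * np.pi * 220 * t)                 # stand-in center source
noise_l, noise_r = np.random.randn(2, fs) * 0.1      # decorrelated ambience
out_l, out_r = enhance_center(speech + noise_l, speech + noise_r)
```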
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Attack detectors for data aggregation in clustered sensor networks.\n \n \n \n \n\n\n \n López-Valcarce, R.; and Romero, D.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2053-2057, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AttackPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362745,\n  author = {R. López-Valcarce and D. Romero},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Attack detectors for data aggregation in clustered sensor networks},\n  year = {2015},\n  pages = {2053-2057},\n  abstract = {Among many security threats to sensor networks, compromised sensing is particularly challenging due to the fact that it cannot be addressed by standard authentication approaches. We consider a clustered scenario for data aggregation in which an attacker injects a disturbance in sensor readings. Casting the problem in an estimation framework, we systematically apply the Generalized Likelihood Ratio approach to derive attack detectors. The analysis under different attacks reveals that detectors based on similarity of means across clusters are suboptimal, with Bartlett's test for homoscedasticity constituting a good candidate when lacking a priori knowledge of the variance of the underlying distribution.},\n  keywords = {cryptographic protocols;data aggregation;sensor arrays;Bartletts test;generalized likelihood ratio approach;sensor readings;authentication approach;security threats;clustered sensor networks;data aggregation;attack detectors;Detectors;Europe;Signal processing;Maximum likelihood estimation;Security;Standards;resilient data aggregation;attack detection;sensor networks},\n  doi = {10.1109/EUSIPCO.2015.7362745},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095343.pdf},\n}\n\n
\n
\n\n\n
\n Among many security threats to sensor networks, compromised sensing is particularly challenging due to the fact that it cannot be addressed by standard authentication approaches. We consider a clustered scenario for data aggregation in which an attacker injects a disturbance in sensor readings. Casting the problem in an estimation framework, we systematically apply the Generalized Likelihood Ratio approach to derive attack detectors. The analysis under different attacks reveals that detectors based on similarity of means across clusters are suboptimal, with Bartlett's test for homoscedasticity constituting a good candidate when lacking a priori knowledge of the variance of the underlying distribution.\n
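The detector singled out here is directly available in SciPy; below is a minimal sketch with synthetic cluster readings, where the attacked cluster has inflated variance.

```python
# Bartlett's test for equal variances across cluster readings.
import numpy as np
from scipy.stats import bartlett

rng = np.random.default_rng(2)
clusters = [rng.normal(0.0, 1.0, 50) for _ in range(4)]              # clean
clusters.append(rng.normal(0.0, 1.0, 50) + rng.normal(0, 2.0, 50))   # attacked

stat, p = bartlett(*clusters)
print(f"Bartlett statistic = {stat:.2f}, p = {p:.3g}")
print("attack declared" if p < 0.01 else "no attack")
```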
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n On the security of a double-scroll based \"true\" random bit generator.\n \n \n \n\n\n \n Ergün, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2058-2061, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362746,\n  author = {S. Ergün},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {On the security of a double-scroll based {"}true{"} random bit generator},\n  year = {2015},\n  pages = {2058-2061},\n  abstract = {This paper is on the security of a true random bit generator (RBG) based on a double-scroll attractor. A clone system is proposed to analyze the security weaknesses of the RBG and its convergence is proved using master slave synchronization scheme. All secret parameters of the RBG are revealed where the only information available are the structure of the RBG and a scalar time series observed from the double-scroll at-tractor. Simulation and numerical results verifying the feasibility of the clone system are given such that the RBG doesn't fulfill NIST-800-22 statistical test suite, not only the next bit but also the same output bit stream of the RBG can be reproduced.},\n  keywords = {cryptography;random number generation;statistical testing;synchronisation;telecommunication security;time series;true random bit generator;RBG;double-scroll attractor;clone system;master slave synchronization scheme;scalar time series;NIST-800-22 statistical test suite;Chaos;Synchronization;Cloning;Random number generation;Generators;Random number generator;continuous-time chaos;truly random;synchronization of chaotic systems;cryptanalysis},\n  doi = {10.1109/EUSIPCO.2015.7362746},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n This paper is on the security of a true random bit generator (RBG) based on a double-scroll attractor. A clone system is proposed to analyze the security weaknesses of the RBG, and its convergence is proved using a master-slave synchronization scheme. All secret parameters of the RBG are revealed, where the only information available is the structure of the RBG and a scalar time series observed from the double-scroll attractor. Simulation and numerical results verifying the feasibility of the clone system are given, showing that the RBG does not fulfill the NIST-800-22 statistical test suite and that not only the next bit but the entire output bit stream of the RBG can be reproduced.\n
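The clone idea can be illustrated with a Pecora-Carroll master-slave scheme on a textbook Chua double-scroll system: a slave copy of the (y, z) subsystem driven only by the observed scalar x(t) converges to the master's state. The parameters are standard Chua values, not the attacked RBG's secret ones.

```python
# Master-slave synchronization of a Chua double-scroll via x-drive.
import numpy as np

alpha, beta, m0, m1 = 15.6, 28.0, -8/7, -5/7
f = lambda x: m1 * x + 0.5 * (m0 - m1) * (abs(x + 1) - abs(x - 1))

dt, steps = 1e-3, 50_000
x, y, z = 0.7, 0.0, 0.0            # master (the generator under attack)
ys, zs = 5.0, -5.0                 # slave starts far away
err = []
for _ in range(steps):
    dx = alpha * (y - x - f(x)); dy = x - y + z; dz = -beta * y
    # Slave (y, z) subsystem driven only by the observed scalar x(t):
    dys = x - ys + zs; dzs = -beta * ys
    x += dt * dx; y += dt * dy; z += dt * dz
    ys += dt * dys; zs += dt * dzs
    err.append(abs(y - ys) + abs(z - zs))
print("initial error", err[0], "-> final error", err[-1])
```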
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SIFT match removal and keypoint preservation through dominant orientation shift.\n \n \n \n \n\n\n \n Caldelli, R.; Amerini, I.; and Costanzo, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2062-2066, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SIFTPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362747,\n  author = {R. Caldelli and I. Amerini and A. Costanzo},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {SIFT match removal and keypoint preservation through dominant orientation shift},\n  year = {2015},\n  pages = {2062-2066},\n  abstract = {In Image Forensics, very often, copy-move attack is countered by resorting at instruments based on matching local features descriptors, usually SIFT. On the other side, to overcome such techniques, smart hackers can try firstly to remove keypoints before performing image patch cloning in order to inhibit the successive matching operation. However, keypoint removal determines per se some suspicious empty areas that could indicate that a manipulation has occurred. In this paper, the goal to nullify SIFT matches while preserving keypoints is pursued. The basic idea is to succeed in altering the features descriptor by means of shifting the dominant orientation associated to a specific keypoint. In fact, to provide rotation invariance, all the values of the descriptor are computed according to such orientation. So doing, it should impair the whole matching phase.},\n  keywords = {feature extraction;image forensics;image matching;transforms;SIFT match removal;scale invariant feature transform;keypoint preservation;dominant orientation shift;image forensics;copy-move attack;local features descriptor matching;keypoint removal;image patch cloning;successive matching operation;rotation invariance;Image edge detection;Databases;Image forensics;Histograms;Europe;Signal processing;Signal processing algorithms;SIFT;dominant orientation;copy-move attack;image forensics;edges},\n  doi = {10.1109/EUSIPCO.2015.7362747},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097323.pdf},\n}\n\n
\n
\n\n\n
\n In Image Forensics, the copy-move attack is very often countered by resorting to instruments based on matching local feature descriptors, usually SIFT. On the other hand, to overcome such techniques, smart attackers can first try to remove keypoints before performing image patch cloning, in order to inhibit the subsequent matching operation. However, keypoint removal produces per se some suspicious empty areas that could indicate that a manipulation has occurred. In this paper, the goal of nullifying SIFT matches while preserving keypoints is pursued. The basic idea is to alter the feature descriptor by shifting the dominant orientation associated with a specific keypoint. In fact, to provide rotation invariance, all the values of the descriptor are computed according to such orientation, so shifting it should impair the whole matching phase.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An SVD approach to forensic image resampling detection.\n \n \n \n \n\n\n \n Vázquez-Padín, D.; Comesaña, P.; and Pérez-González, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2067-2071, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362748,\n  author = {D. Vázquez-Padín and P. Comesaña and F. Pérez-González},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An SVD approach to forensic image resampling detection},\n  year = {2015},\n  pages = {2067-2071},\n  abstract = {This paper describes a new strategy for image resampling detection whenever the applied resampling factor is larger than one. Delving into the linear dependencies induced in an image after the application of an upsampling operation, we show that interpolated images belong to a subspace defined by the interpolation kernel. Within this framework, by computing the SVD of a given image block and a measure of its degree of saturated pixels per row/column, we derive a simple detector capable of discriminating between upsampled images and genuine images. Furthermore, the proposed detector shows remarkable results with blocks of small size and outperforms state-of-the-art methods.},\n  keywords = {image forensics;image resolution;image sampling;interpolation;object detection;singular value decomposition;SVD approach;singular value decomposition;forensic image resampling detection;resampling factor;linear dependencies;upsampling operation;interpolation kernel;image block;saturated pixels-per-row degree;saturated pixels-per-column degree;Kernel;Detectors;Matrix decomposition;Interpolation;Quantization (signal);Singular value decomposition;Europe;digital image forensics;resampling de tection;singular value decomposition;tampering detection},\n  doi = {10.1109/EUSIPCO.2015.7362748},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097585.pdf},\n}\n\n
\n
\n\n\n
\n This paper describes a new strategy for image resampling detection whenever the applied resampling factor is larger than one. Delving into the linear dependencies induced in an image after the application of an upsampling operation, we show that interpolated images belong to a subspace defined by the interpolation kernel. Within this framework, by computing the SVD of a given image block and a measure of its degree of saturated pixels per row/column, we derive a simple detector capable of discriminating between upsampled images and genuine images. Furthermore, the proposed detector shows remarkable results with blocks of small size and outperforms state-of-the-art methods.\n
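The core observation is easy to reproduce: the tail singular values of an upsampled block collapse relative to those of a genuine block. A toy comparison follows (the paper's thresholding and saturation measure are omitted).

```python
# Tail singular-value energy of genuine vs upsampled blocks.
import numpy as np
from scipy.ndimage import zoom

def tail_energy(block, keep=0.5):
    s = np.linalg.svd(block, compute_uv=False)
    k = int(len(s) * keep)
    return s[k:].sum() / s.sum()        # relative energy in the SVD tail

rng = np.random.default_rng(3)
genuine = rng.random((32, 32))
upsampled = zoom(rng.random((16, 16)), 2, order=1)[:32, :32]  # bilinear, x2
print("genuine tail:  ", round(tail_energy(genuine), 4))
print("upsampled tail:", round(tail_energy(upsampled), 4))
```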
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Forensic and anti-forensic analysis of indoor/outdoor classifiers based on acoustic clues.\n \n \n \n \n\n\n \n Mascia, M.; Canclini, A.; Antonacci, F.; Tagliasacchi, M.; Sarti, A.; and Tubaro, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2072-2076, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ForensicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362749,\n  author = {M. Mascia and A. Canclini and F. Antonacci and M. Tagliasacchi and A. Sarti and S. Tubaro},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Forensic and anti-forensic analysis of indoor/outdoor classifiers based on acoustic clues},\n  year = {2015},\n  pages = {2072-2076},\n  abstract = {This paper addresses the problem of identifying the class of the environment where an audio recording was taken. We focus on distinguishing between indoor and outdoor speech recordings, and we propose a set of classifiers that provide a support for the forensic analyst in verifying the authenticity of audio content. The classifiers rely on acoustic clues extracted from the reverberant signal, namely the reverberation time (RT60) and MFCC/LMSC feature vectors. We conducted several experiments, aimed at analyzing the algorithms from both the forensic and anti-forensic perspective. To do so, we devised a methodology for manipulating the signals in order to pretend that outdoor contents were recorded indoor, and vice-versa. Experimental results confirm the effectiveness of the proposed methods, which achieve high classification ac curacy. The anti-forensics analysis reveals that attacks have moderate success rates, and severely depend from the classi fication algorithm adopted by the analyst.},\n  keywords = {recording;reverberation;speech synthesis;antiforensic analysis;forensic analysis;indoor-outdoor classifiers;acoustic clues;outdoor speech recordings;indoor speech recordings;reverberation time;MFCC-LMSC feature vectors;Feature extraction;Signal processing algorithms;Speech;Reverberation;Forensics;Algorithm design and analysis;audio forensics;anti-forensics;audio au thentication;acoustic environment identification},\n  doi = {10.1109/EUSIPCO.2015.7362749},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101293.pdf},\n}\n\n
\n
\n\n\n
\n This paper addresses the problem of identifying the class of the environment where an audio recording was taken. We focus on distinguishing between indoor and outdoor speech recordings, and we propose a set of classifiers that provide support for the forensic analyst in verifying the authenticity of audio content. The classifiers rely on acoustic clues extracted from the reverberant signal, namely the reverberation time (RT60) and MFCC/LMSC feature vectors. We conducted several experiments, aimed at analyzing the algorithms from both the forensic and anti-forensic perspective. To do so, we devised a methodology for manipulating the signals in order to pretend that outdoor contents were recorded indoors, and vice versa. Experimental results confirm the effectiveness of the proposed methods, which achieve high classification accuracy. The anti-forensics analysis reveals that attacks have moderate success rates, and depend strongly on the classification algorithm adopted by the analyst.\n
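As an illustration of the RT60 clue itself, here is a minimal sketch assuming access to a room impulse response: Schroeder backward integration of the squared response, followed by a linear fit on the -5 to -25 dB decay region, extrapolated to 60 dB. The paper estimates such clues blindly from reverberant speech; this only shows the quantity the classifiers build on, with hypothetical parameter values.

```python
import numpy as np

def rt60_schroeder(h, fs):
    """RT60 from an impulse response h via Schroeder backward integration."""
    edc = np.cumsum(h[::-1] ** 2)[::-1]          # energy decay curve
    edc_db = 10 * np.log10(edc / edc[0] + 1e-12)
    t = np.arange(len(h)) / fs
    fit = (edc_db <= -5) & (edc_db >= -25)       # -5..-25 dB fit region
    slope, _ = np.polyfit(t[fit], edc_db[fit], 1)  # dB per second (negative)
    return -60.0 / slope

fs = 16000
t = np.arange(int(0.8 * fs)) / fs
# synthetic response: white noise with exponential envelope, tau = 0.1 s
h = np.random.default_rng(1).standard_normal(t.size) * np.exp(-t / 0.1)
print(f"RT60 ~ {rt60_schroeder(h, fs):.2f} s")   # ~0.69 s for tau = 0.1 s
```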
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dynamic speech emotion recognition with state-space models.\n \n \n \n \n\n\n \n Markov, K.; Matsui, T.; Septier, F.; and Peters, G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2077-2081, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DynamicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362750,\n  author = {K. Markov and T. Matsui and F. Septier and G. Peters},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Dynamic speech emotion recognition with state-space models},\n  year = {2015},\n  pages = {2077-2081},\n  abstract = {Automatic emotion recognition from speech has been focused mainly on identifying categorical or static affect states, but the spectrum of human emotion is continuous and time-varying. In this paper, we present a recognition system for dynamic speech emotion based on state-space models (SSMs). The prediction of the unknown emotion trajectory in the affect space spanned by Arousal, Valence, and Dominance (A-V-D) descriptors is cast as a time series filtering task. The state-space models we investigated include a standard linear model (Kalman filter) as well as a novel non-linear, non-parametric Gaussian Process (GP) based SSM. We use the AVEC 2014 database for evaluation, which provides ground-truth A-V-D labels, allowing state and measurement functions to be learned separately and simplifying model training. For the filtering with the GP SSM, we used two approximation methods: a recently proposed analytic method and a particle filter. All models were evaluated in terms of average Pearson correlation R and root mean square error (RMSE). The results show that, using the same feature vectors, the GP SSMs achieve twice the correlation and half the RMSE of a Kalman filter.},\n  keywords = {approximation theory;emotion recognition;Gaussian processes;Kalman filters;mean square error methods;particle filtering (numerical methods);speech recognition;time series;dynamic speech emotion recognition;state-space models;automatic emotion recognition;human emotion;time series filtering;linear model;Kalman filter;Gaussian processes;AVEC 2014 database;approximation methods;particle filter;Pearson correlation;root mean square error;RMSE;feature vectors;Speech;Speech recognition;Emotion recognition;Kalman filters;Approximation methods;State-space methods;Gaussian processes;Emotion recognition;Affect recognition;Kalman filter;Gaussian Process state-space model},\n  doi = {10.1109/EUSIPCO.2015.7362750},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101161.pdf},\n}\n\n
\n
\n\n\n
\n Automatic emotion recognition from speech has been focused mainly on identifying categorical or static affect states, but the spectrum of human emotion is continuous and time-varying. In this paper, we present a recognition system for dynamic speech emotion based on state-space models (SSMs). The prediction of the unknown emotion trajectory in the affect space spanned by Arousal, Valence, and Dominance (A-V-D) descriptors is cast as a time series filtering task. The state-space models we investigated include a standard linear model (Kalman filter) as well as a novel non-linear, non-parametric Gaussian Process (GP) based SSM. We use the AVEC 2014 database for evaluation, which provides ground-truth A-V-D labels, allowing state and measurement functions to be learned separately and simplifying model training. For the filtering with the GP SSM, we used two approximation methods: a recently proposed analytic method and a particle filter. All models were evaluated in terms of average Pearson correlation R and root mean square error (RMSE). The results show that, using the same feature vectors, the GP SSMs achieve twice the correlation and half the RMSE of a Kalman filter.\n
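The linear baseline can be summarized in a few lines. A minimal sketch, assuming a random-walk state model (F = H = I) for the 3-D A-V-D trajectory and noisy observations of it; the GP SSMs in the paper replace these fixed linear state and measurement functions with learned non-linear ones. The noise variances q and r are illustrative.

```python
import numpy as np

def kalman_filter(y, q=1e-3, r=1e-1):
    """y: (T, 3) noisy A-V-D observations -> filtered trajectory."""
    T, d = y.shape
    x, P = np.zeros(d), np.eye(d)
    Q, R = q * np.eye(d), r * np.eye(d)
    out = np.zeros_like(y)
    for t in range(T):
        P = P + Q                                  # predict (F = I)
        K = P @ np.linalg.inv(P + R)               # Kalman gain (H = I)
        x = x + K @ (y[t] - x)                     # update
        P = (np.eye(d) - K) @ P
        out[t] = x
    return out

rng = np.random.default_rng(2)
truth = np.cumsum(0.01 * rng.standard_normal((200, 3)), axis=0)
est = kalman_filter(truth + 0.3 * rng.standard_normal((200, 3)))
print(np.sqrt(np.mean((est - truth) ** 2)))        # RMSE well below 0.3
```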
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Speaker diarization through speaker embeddings.\n \n \n \n \n\n\n \n Rouvier, M.; Bousquet, P.; and Favre, B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2082-2086, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SpeakerPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362751,\n  author = {M. Rouvier and P. Bousquet and B. Favre},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Speaker diarization through speaker embeddings},\n  year = {2015},\n  pages = {2082-2086},\n  abstract = {This paper proposes to learn a set of high-level feature representations through deep learning, referred to as Speaker Embeddings, for speaker diarization. Speaker Embedding features are taken from the hidden layer neuron activations of Deep Neural Networks (DNN), when learned as classifiers to recognize a thousand speaker identities in a training set. Although learned through identification, speaker embeddings are shown to be effective for speaker verification, in particular for recognizing speakers unseen in the training set. This approach is then applied to speaker diarization. Experiments, conducted on the corpus of French broadcast news ETAPE, show that this new speaker modeling technique decreases DER by 1.67 points (a relative improvement of about 8% DER).},\n  keywords = {neural nets;signal representation;speaker recognition;speaker verification;speaker identities;DNN;deep neural networks;hidden layer neuron activations;speaker diarization;speaker embeddings;deep learning;high-level feature representations;Training;Density estimation robust algorithm;Speech;Neurons;Feature extraction;Europe;Signal processing;Speaker Diarization;Deep Neural Network;Speaker Embeddings;Speaker Clustering;i-vector},\n  doi = {10.1109/EUSIPCO.2015.7362751},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097371.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes to learn a set of high-level feature representations through deep learning, referred to as Speaker Embeddings, for speaker diarization. Speaker Embedding features are taken from the hidden layer neuron activations of Deep Neural Networks (DNN), when learned as classifiers to recognize a thousand speaker identities in a training set. Although learned through identification, speaker embeddings are shown to be effective for speaker verification, in particular for recognizing speakers unseen in the training set. This approach is then applied to speaker diarization. Experiments, conducted on the corpus of French broadcast news ETAPE, show that this new speaker modeling technique decreases DER by 1.67 points (a relative improvement of about 8% DER).\n
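A minimal sketch of the extraction point, with untrained random weights purely to show the shapes: the embedding is the hidden-layer activation (averaged over frames), not the softmax output over training-speaker identities, which is why it can represent speakers unseen in training. All sizes and names are hypothetical.

```python
import numpy as np

rng = np.random.default_rng(3)
D, H, n_speakers = 40, 128, 1000                   # hypothetical sizes
# in practice W1, W2 come from training the network as a speaker classifier
W1, b1 = rng.standard_normal((H, D)) * 0.1, np.zeros(H)
W2, b2 = rng.standard_normal((n_speakers, H)) * 0.1, np.zeros(n_speakers)

def speaker_embedding(features):
    """features: (T, D) frame-level features -> utterance embedding (H,)."""
    hidden = np.maximum(0.0, features @ W1.T + b1)  # ReLU hidden layer
    return hidden.mean(axis=0)                      # average over frames
    # the classifier head (W2, b2) is only used during training

utt = rng.standard_normal((300, D))
print(speaker_embedding(utt).shape)                 # (128,) - the embedding
```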
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improved binary key speaker diarization system.\n \n \n \n \n\n\n \n Delgado, H.; Anguera, X.; Fredouille, C.; and Serrano, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2087-2091, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ImprovedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362752,\n  author = {H. Delgado and X. Anguera and C. Fredouille and J. Serrano},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Improved binary key speaker diarization system},\n  year = {2015},\n  pages = {2087-2091},\n  abstract = {The recently proposed speaker diarization technique based on binary keys provides a very fast alternative to state-of-the-art systems. However, this speed-up comes at the cost of a small increase in Diarization Error Rate (DER). This paper proposes a series of improvements to the original algorithm with the aim of getting closer to state-of-the-art performance. First, several alternative similarity measures between binary key speaker/segment models are introduced. Second, we make a first attempt at applying Intra-Session and Intra-Speaker Variability (ISISV) compensation within the binary diarization approach through Nuisance Attribute Projection. Experimental results show the benefits of the newly introduced similarity metrics, as well as the potential of Nuisance Attribute Projection for ISISV compensation in the binary key speaker diarization framework.},\n  keywords = {audio signal processing;compensation;speaker recognition;audio file segmentation;nuisance attribute projection;ISISV compensation;intrasession-and-intraspeaker variability compensation;binary key segment model;binary key speaker model;similarity measures;improved binary key speaker diarization system;Speech;Measurement;Speaker recognition;Acoustics;Eigenvalues and eigenfunctions;Europe;Signal processing;Speaker diarization;binary key;cosine distance;chi-square distance;session variability compensation;nuisance attribute projection},\n  doi = {10.1109/EUSIPCO.2015.7362752},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102019.pdf},\n}\n\n
\n
\n\n\n
\n The recently proposed speaker diarization technique based on binary keys provides a very fast alternative to state-of-the-art systems. However, this speed-up comes at the cost of a small increase in Diarization Error Rate (DER). This paper proposes a series of improvements to the original algorithm with the aim of getting closer to state-of-the-art performance. First, several alternative similarity measures between binary key speaker/segment models are introduced. Second, we make a first attempt at applying Intra-Session and Intra-Speaker Variability (ISISV) compensation within the binary diarization approach through Nuisance Attribute Projection. Experimental results show the benefits of the newly introduced similarity metrics, as well as the potential of Nuisance Attribute Projection for ISISV compensation in the binary key speaker diarization framework.\n
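A minimal sketch of the Nuisance Attribute Projection step in its usual formulation: estimate the top directions of within-speaker (session/nuisance) scatter and project vectors onto their orthogonal complement. The synthetic data and the value of k are illustrative, not the paper's binary-key setup.

```python
import numpy as np

def nap_projection(X, labels, k=2):
    """X: (N, D) vectors, labels: speaker ids -> (D, D) projection matrix."""
    # within-speaker (nuisance) scatter: remove each speaker's mean
    centered = np.vstack([X[labels == s] - X[labels == s].mean(0)
                          for s in np.unique(labels)])
    _, _, Vt = np.linalg.svd(centered, full_matrices=False)
    W = Vt[:k].T                                   # top-k nuisance directions
    return np.eye(X.shape[1]) - W @ W.T            # project them out

rng = np.random.default_rng(4)
labels = np.repeat(np.arange(10), 20)
speaker_means = rng.standard_normal((10, 50))[labels]
nuisance = rng.standard_normal((200, 2)) @ rng.standard_normal((2, 50)) * 3
X = speaker_means + nuisance + 0.1 * rng.standard_normal((200, 50))
P = nap_projection(X, labels, k=2)
# nuisance energy is largely removed after projection
print(np.linalg.norm(X - speaker_means), np.linalg.norm(X @ P - speaker_means))
```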
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dealing with additive noise in speaker recognition systems based on i-vector approach.\n \n \n \n \n\n\n \n Matrouf, D.; Ben Kheder, W.; Bousquet, P.; Ajili, M.; and Bonastre, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2092-2096, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DealingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362753,\n  author = {D. Matrouf and W. {Ben Kheder} and P. Bousquet and M. Ajili and J. Bonastre},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Dealing with additive noise in speaker recognition systems based on i-vector approach},\n  year = {2015},\n  pages = {2092-2096},\n  abstract = {In recent years, the i-vector approach has become the state of the art in speaker recognition systems. As with previous approaches, i-vector-based systems suffer greatly in the presence of additive noise, especially in low SNR cases. In this paper, we describe a statistical framework for estimating a clean i-vector given the noisy one, or for directly integrating statistical knowledge about the noise and clean i-vectors into the scoring phase. The proposed procedure is essentially based on a method that produces statistical knowledge about the noise effect in the i-vector domain. The work presented here is based on the hypothesis that the noise effect is Gaussian and additive in the i-vector space. To validate our approach, experiments were carried out on NIST 2008 data (det7). Significant improvement was observed compared to the baseline system and to the {"}multi-style{"} backend training technique.},\n  keywords = {AWGN;maximum likelihood estimation;speaker recognition;i-vector approach;speaker recognition system;additive noise;statistical framework;Gaussian noise effect;additive noise effect;i-vector space;i-vector domain;maximum a posteriori;Noise measurement;Additive noise;Computational modeling;Speaker recognition;Adaptation models;Robustness;Speech;i-vector;additive noise;speaker recognition},\n  doi = {10.1109/EUSIPCO.2015.7362753},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102597.pdf},\n}\n\n
\n
\n\n\n
\n In recent years, the i-vector approach has become the state of the art in speaker recognition systems. As with previous approaches, i-vector-based systems suffer greatly in the presence of additive noise, especially in low SNR cases. In this paper, we describe a statistical framework for estimating a clean i-vector given the noisy one, or for directly integrating statistical knowledge about the noise and clean i-vectors into the scoring phase. The proposed procedure is essentially based on a method that produces statistical knowledge about the noise effect in the i-vector domain. The work presented here is based on the hypothesis that the noise effect is Gaussian and additive in the i-vector space. To validate our approach, experiments were carried out on NIST 2008 data (det7). Significant improvement was observed compared to the baseline system and to the \"multi-style\" backend training technique.\n
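Under the paper's hypothesis that the noise effect is Gaussian and additive in the i-vector space, the clean-i-vector estimate reduces to a Gaussian conditional mean. A minimal sketch with toy parameters; in practice the distributions would be learned from paired clean/noisy i-vectors, and this is not the authors' full procedure.

```python
import numpy as np

def denoise_ivector(w_noisy, mu_clean, S_clean, mu_noise, S_noise):
    """E[w_clean | w_noisy] under w_noisy = w_clean + n, both Gaussian."""
    gain = S_clean @ np.linalg.inv(S_clean + S_noise)
    return mu_clean + gain @ (w_noisy - mu_clean - mu_noise)

rng = np.random.default_rng(5)
d = 8                                              # toy i-vector dimension
mu_c, S_c = np.zeros(d), np.eye(d)                 # clean i-vector prior
mu_n, S_n = np.zeros(d), 0.5 * np.eye(d)           # noise-effect distribution
w_clean = rng.multivariate_normal(mu_c, S_c)
w_noisy = w_clean + rng.multivariate_normal(mu_n, S_n)
w_hat = denoise_ivector(w_noisy, mu_c, S_c, mu_n, S_n)
# the estimate is closer to the clean i-vector than the noisy observation
print(np.linalg.norm(w_noisy - w_clean), np.linalg.norm(w_hat - w_clean))
```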
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n S-vector: A discriminative representation derived from i-vector for speaker verification.\n \n \n \n \n\n\n \n Işik, Y. Z.; Erdogan, H.; and Sarikaya, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2097-2101, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"S-vector:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362754,\n  author = {Y. Z. Işik and H. Erdogan and R. Sarikaya},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {S-vector: A discriminative representation derived from i-vector for speaker verification},\n  year = {2015},\n  pages = {2097-2101},\n  abstract = {Representing data in ways to disentangle and factor out hidden dependencies is a critical step in speaker recognition systems. In this work, we employ deep neural networks (DNN) as a feature extractor to disentangle and emphasize the speaker factors from other sources of variability in the commonly used i-vector features. Denoising autoencoder based unsupervised pre-training, random dropout fine-tuning, and Nesterov accelerated gradient based momentum are used in DNN training. Replacing the i-vectors with the resulting speaker vectors (s-vectors), we obtain superior results on NIST SRE corpora on a wide range of operating points using a probabilistic linear discriminant analysis (PLDA) back-end.},\n  keywords = {feature extraction;neural nets;speaker recognition;S-vector;I-vector;speaker verification;speaker recognition systems;deep neural networks;feature extractor;denoising autoencoder based unsupervised pre-training;random dropout fine-tuning;Nesterov accelerated gradient based momentum;NIST SRE corpora;probabilistic linear discriminant analysis;Training;Neural networks;Noise reduction;NIST;Feature extraction;Robustness;Noise measurement;speaker verification;denoising autoencoder;random dropout},\n  doi = {10.1109/EUSIPCO.2015.7362754},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102691.pdf},\n}\n\n
\n
\n\n\n
\n Representing data in ways to disentangle and factor out hidden dependencies is a critical step in speaker recognition systems. In this work, we employ deep neural networks (DNN) as a feature extractor to disentangle and emphasize the speaker factors from other sources of variability in the commonly used i-vector features. Denoising autoencoder based unsupervised pre-training, random dropout fine-tuning, and Nesterov accelerated gradient based momentum are used in DNN training. Replacing the i-vectors with the resulting speaker vectors (s-vectors), we obtain superior results on NIST SRE corpora on a wide range of operating points using a probabilistic linear discriminant analysis (PLDA) back-end.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A discriminative approach for speaker selection in speaker de-identification systems.\n \n \n \n \n\n\n \n Abou-Zleikha, M.; Tan, Z.; Christensen, M. G.; and Jensen, S. H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2102-2106, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362755,\n  author = {M. Abou-Zleikha and Z. Tan and M. G. Christensen and S. H. Jensen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A discriminative approach for speaker selection in speaker de-identification systems},\n  year = {2015},\n  pages = {2102-2106},\n  abstract = {Speaker de-identification is an interesting and newly investigated task in speech processing. In current implementations, this task is based on transforming one speaker's speech towards another speaker in order to hide the speaker's identity. In this paper we present a discriminative approach to human speaker selection for speaker de-identification. We use two modules, a speaker identification system and a speaker transformation one, to select, from a set of speakers, the most appropriate speaker towards which to transform the source speaker's speech. In order to select the target speaker, we minimize the identification confidence of the transformed speech as the source speaker, and maximize the confusion of the transformed speech's membership among the rest of the speaker models as well as the identification confidence of the re-transformed speech under the source speaker model. These three factors are combined to achieve overall optimization performance in order to select the best target speaker to transform the source.},\n  keywords = {data privacy;speaker recognition;discriminative speaker selection;speaker deidentification systems;speaker identity hiding;human speaker selection;speaker transformation;source speaker;Speech;Syntactics;Transforms;Indexes;Entropy;Europe;Signal processing;speaker de-identification;speaker identification;speaker transformation},\n  doi = {10.1109/EUSIPCO.2015.7362755},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103595.pdf},\n}\n\n
\n
\n\n\n
\n Speaker de-identification is an interesting and newly investigated task in speech processing. In current implementations, this task is based on transforming one speaker's speech towards another speaker in order to hide the speaker's identity. In this paper we present a discriminative approach to human speaker selection for speaker de-identification. We use two modules, a speaker identification system and a speaker transformation one, to select, from a set of speakers, the most appropriate speaker towards which to transform the source speaker's speech. In order to select the target speaker, we minimize the identification confidence of the transformed speech as the source speaker, and maximize the confusion of the transformed speech's membership among the rest of the speaker models as well as the identification confidence of the re-transformed speech under the source speaker model. These three factors are combined to achieve overall optimization performance in order to select the best target speaker to transform the source.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Phone adaptive training for short-duration speaker verification.\n \n \n \n \n\n\n \n Soldi, G.; Bozonnet, S.; Beaugeant, C.; and Evans, N.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2107-2111, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PhonePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362756,\n  author = {G. Soldi and S. Bozonnet and C. Beaugeant and N. Evans},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Phone adaptive training for short-duration speaker verification},\n  year = {2015},\n  pages = {2107-2111},\n  abstract = {Phone adaptive training (PAT) aims to derive a new acoustic feature space in which the influence of phone variation is minimised while that of speaker variation is maximised. Originally proposed in the context of speaker diarization, our most recent work showed the utility of PAT in short-duration, automatic speaker verification where phone variation typically degrades performance. New to this contribution is the assessment of PAT utilising automatically generated acoustic class transcriptions whose number is controlled by regression tree analysis. Experimental results using a standard database show that PAT delivers significant improvements in the performance of a state-of-the-art iVector speaker verification system.},\n  keywords = {regression analysis;speaker recognition;phone adaptive training;short-duration speaker verification;acoustic feature space;speaker variation;speaker diarization;automatic speaker verification;regression tree analysis;speaker verification system;Acoustics;Training;Hidden Markov models;Data models;Training data;Transforms;Bars;Speaker modelling;short-duration;phone adaptive training;automatic speaker verification},\n  doi = {10.1109/EUSIPCO.2015.7362756},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104685.pdf},\n}\n\n
\n
\n\n\n
\n Phone adaptive training (PAT) aims to derive a new acoustic feature space in which the influence of phone variation is minimised while that of speaker variation is maximised. Originally proposed in the context of speaker diarization, our most recent work showed the utility of PAT in short-duration, automatic speaker verification where phone variation typically degrades performance. New to this contribution is the assessment of PAT utilising automatically generated acoustic class transcriptions whose number is controlled by regression tree analysis. Experimental results using a standard database show that PAT delivers significant improvements in the performance of a state-of-the-art iVector speaker verification system.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive and online speaker diarization for meeting data.\n \n \n \n \n\n\n \n Soldi, G.; Beaugeant, C.; and Evans, N.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2112-2116, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362757,\n  author = {G. Soldi and C. Beaugeant and N. Evans},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive and online speaker diarization for meeting data},\n  year = {2015},\n  pages = {2112-2116},\n  abstract = {Speaker diarization aims to determine 'who spoke when' in a given audio stream. Different applications, such as document structuring or information retrieval, have led to the exploration of speaker diarization in many different domains, from broadcast news to lectures, phone conversations and meetings. Almost all current diarization systems are offline and ill-suited to the growing need for online or real-time diarization, stemming from the increasing popularity of powerful, mobile smart devices. While a small number of such systems have been reported, truly online diarization systems for challenging and highly spontaneous meeting data are lacking. This paper reports our work to develop an adaptive and online diarization system using the NIST Rich Transcription meetings corpora. While not dissimilar to those previously reported for less challenging domains, high diarization error rates illustrate the challenge ahead and lead to some ideas to improve performance through future research.},\n  keywords = {adaptive signal processing;audio databases;audio signal processing;audio streaming;information retrieval;smart phones;speaker recognition;telecommunication computing;telephony;adaptive speaker diarization;online speaker diarization;meeting data;audio stream;document structuring;information retrieval;phone conversations;mobile smart devices;online diarization systems;NIST rich transcription meetings corpora;diarization error rates;Speech;Adaptation models;Density estimation robust algorithm;NIST;Acoustics;Data models;Computational modeling;Speaker diarization;clustering and segmentation;online diarization},\n  doi = {10.1109/EUSIPCO.2015.7362757},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104657.pdf},\n}\n\n
\n
\n\n\n
\n Speaker diarization aims to determine 'who spoke when' in a given audio stream. Different applications, such as document structuring or information retrieval, have led to the exploration of speaker diarization in many different domains, from broadcast news to lectures, phone conversations and meetings. Almost all current diarization systems are offline and ill-suited to the growing need for online or real-time diarization, stemming from the increasing popularity of powerful, mobile smart devices. While a small number of such systems have been reported, truly online diarization systems for challenging and highly spontaneous meeting data are lacking. This paper reports our work to develop an adaptive and online diarization system using the NIST Rich Transcription meetings corpora. While not dissimilar to those previously reported for less challenging domains, high diarization error rates illustrate the challenge ahead and lead to some ideas to improve performance through future research.\n
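To make the online constraint concrete, here is a minimal sketch of a streaming back-end, assuming per-segment speaker vectors arrive in order: each segment is assigned to the closest existing speaker centroid by cosine similarity, or opens a new speaker when no centroid is close enough, with centroids updated incrementally. The threshold and features are hypothetical, not the system described in the paper.

```python
import numpy as np

def online_diarize(segments, threshold=0.7):
    centroids, counts, labels = [], [], []
    for v in segments:
        v = v / np.linalg.norm(v)
        sims = [c @ v for c in centroids]
        if sims and max(sims) >= threshold:
            k = int(np.argmax(sims))
            counts[k] += 1                          # running-mean update
            centroids[k] += (v - centroids[k]) / counts[k]
            centroids[k] /= np.linalg.norm(centroids[k])
        else:                                       # open a new speaker
            centroids.append(v.copy()); counts.append(1)
            k = len(centroids) - 1
        labels.append(k)
    return labels

rng = np.random.default_rng(6)
spk = rng.standard_normal((2, 16))                  # two synthetic speakers
stream = [spk[i % 2] + 0.2 * rng.standard_normal(16) for i in range(10)]
print(online_diarize(stream))                       # alternating 0/1 labels
```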
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Feature classification by means of deep belief networks for speaker recognition.\n \n \n \n \n\n\n \n Safari, P.; Ghahabi, O.; and Hernando, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2117-2121, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"FeaturePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362758,\n  author = {P. Safari and O. Ghahabi and J. Hernando},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Feature classification by means of deep belief networks for speaker recognition},\n  year = {2015},\n  pages = {2117-2121},\n  abstract = {In this paper, we propose to discriminatively model target and impostor spectral features using Deep Belief Networks (DBNs) for speaker recognition. At the feature level, the number of impostor samples is considerably larger than in previous works based on i-vectors. Therefore, those i-vector based impostor selection algorithms are not computationally practical. On the other hand, the number of samples for each target speaker differs from one speaker to another, which makes the training process more difficult. In this work, we take advantage of DBN unsupervised learning to train a global model, which will be referred to as the Universal DBN (UDBN). Then we adapt this UDBN to the data of each target speaker. The evaluation is performed on the core test condition of the NIST SRE 2006 database and it is shown that the proposed architecture achieves more than 8% relative improvement in comparison to the conventional Multilayer Perceptron (MLP).},\n  keywords = {belief networks;feature extraction;speaker recognition;unsupervised learning;feature classification;UDBN;universal DBN;DBN unsupervised learning;i-vector based impostor selection algorithms;impostor samples;speaker recognition;deep belief networks;spectral features;Adaptation models;Training;Feature extraction;Data models;Speaker recognition;Europe;Signal processing;Speaker Recognition;Deep Belief Network;Restricted Boltzmann Machine;Feature Classification},\n  doi = {10.1109/EUSIPCO.2015.7362758},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104993.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose to discriminatively model target and impostor spectral features using Deep Belief Networks (DBNs) for speaker recognition. At the feature level, the number of impostor samples is considerably larger than in previous works based on i-vectors. Therefore, those i-vector based impostor selection algorithms are not computationally practical. On the other hand, the number of samples for each target speaker differs from one speaker to another, which makes the training process more difficult. In this work, we take advantage of DBN unsupervised learning to train a global model, which will be referred to as the Universal DBN (UDBN). Then we adapt this UDBN to the data of each target speaker. The evaluation is performed on the core test condition of the NIST SRE 2006 database and it is shown that the proposed architecture achieves more than 8% relative improvement in comparison to the conventional Multilayer Perceptron (MLP).\n
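The DBN building block is the restricted Boltzmann machine, trained layer-wise with contrastive divergence. A minimal sketch of one CD-1 update for a binary RBM on toy data; a UDBN would stack such layers trained on pooled data and then be adapted to each target speaker. Sizes and learning rate are illustrative.

```python
import numpy as np

rng = np.random.default_rng(7)
sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))

def cd1_step(v0, W, b, c, lr=0.05):
    """One CD-1 update in place; v0: (batch, visible) binary data."""
    ph0 = sigmoid(v0 @ W + c)                       # P(h | v0)
    h0 = (rng.random(ph0.shape) < ph0).astype(float)
    pv1 = sigmoid(h0 @ W.T + b)                     # reconstruction
    ph1 = sigmoid(pv1 @ W + c)
    W += lr * (v0.T @ ph0 - pv1.T @ ph1) / len(v0)  # positive - negative phase
    b += lr * (v0 - pv1).mean(0)
    c += lr * (ph0 - ph1).mean(0)
    return np.mean((v0 - pv1) ** 2)                 # reconstruction error

V, H = 36, 16
W, b, c = 0.01 * rng.standard_normal((V, H)), np.zeros(V), np.zeros(H)
data = (rng.random((64, V)) < 0.3).astype(float)
for epoch in range(50):
    err = cd1_step(data, W, b, c)
print(f"reconstruction MSE after training: {err:.3f}")
```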
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Design of PR oversampled DFT transmultiplexers with minimal dimension.\n \n \n \n \n\n\n \n Pinchon, D.; and Siohan, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2122-2126, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DesignPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362759,\n  author = {D. Pinchon and P. Siohan},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Design of PR oversampled DFT transmultiplexers with minimal dimension},\n  year = {2015},\n  pages = {2122-2126},\n  abstract = {In this paper we propose a new design method for oversampled perfect reconstruction (PR) Discrete Fourier Transform (DFT) transmultiplexers (TMUXs). The resulting multicarrier modulation (MCM) systems are characterized by their minimal dimension, i.e., they involve a minimal number of Givens rotations. Our design method is applicable to system parameters that have never been reached before and also provides improved results in terms of out-of-band energy.},\n  keywords = {discrete Fourier transforms;OFDM modulation;transmultiplexing;PR oversampled DFT transmultiplexer design;minimal dimension;oversampled perfect reconstruction discrete Fourier transform transmultiplexers;TMUX;multicarrier modulation systems;MCM systems;system parameters;OFDM;Decision support systems;Zirconium;Europe;Signal processing;Conferences;Manganese;DFT;FMT;OFDM;Oversampled;Trans-multiplexer},\n  doi = {10.1109/EUSIPCO.2015.7362759},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570087663.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we propose a new design method for oversampled perfect reconstruction (PR) Discrete Fourier Transform (DFT) transmultiplexers (TMUXs). The resulting multicarrier modulation (MCM) systems are characterized by their minimal dimension, i.e., they involve a minimal number of Givens rotations. Our design method is applicable to system parameters that have never been reached before and also provides improved results in terms of out-of-band energy.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n DFE/THP duality for FBMC with highly frequency selective channels.\n \n \n \n \n\n\n \n Jedda, H.; Baltar, L. G.; De Candido, O.; Mezghani, A.; and Nossek, J. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2127-2131, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DFE/THPPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362760,\n  author = {H. Jedda and L. G. Baltar and O. {De Candido} and A. Mezghani and J. A. Nossek},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {DFE/THP duality for FBMC with highly frequency selective channels},\n  year = {2015},\n  pages = {2127-2131},\n  abstract = {Filter bank based multicarrier with Offset-QAM systems (FBMC/OQAM) are strong candidates for the waveform of future fifth-generation (5G) wireless standards. These systems can achieve maximum spectral efficiency compared to other multicarrier schemes, particularly in highly frequency selective propagation conditions. In this case a multi-tap, fractionally spaced equalizer or precoder needs to be inserted in each subcarrier at the receiver or transmitter side to compensate for inter-symbol interference (ISI) and inter-carrier interference (ICI). In this paper we propose a new Tomlinson-Harashima precoder (THP) design for FBMC/OQAM based on the mean squared error (MSE) duality from a minimum MSE (MMSE) designed decision feedback equalizer (DFE).},\n  keywords = {channel bank filters;decision feedback equalisers;duality (mathematics);frequency selective surfaces;intercarrier interference;intersymbol interference;mean square error methods;precoding;quadrature amplitude modulation;DFE-THP duality;FBMC;frequency selective channel;filter bank based multicarrier scheme;offset-QAM system;OQAM;5th generation wireless standard;5G wireless standard;frequency selective propagation condition;multitap fractionally spaced equalizer;receiver;transmitter;intersymbol interference;ISI;intercarrier interference;ICI;compensation;Tomlinson-Harashima precoder;minimum mean squared error;MMSE;decision feedback equalizer;Receivers;Transmitters;Decision feedback equalizers;OFDM;Europe;Filter Bank Multicarrier;Offset-QAM;Decision Feedback;Tomlinson-Harashima;MSE duality},\n  doi = {10.1109/EUSIPCO.2015.7362760},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104913.pdf},\n}\n\n
\n
\n\n\n
\n Filter bank based multicarrier with Offset-QAM systems (FBMC/OQAM) are strong candidates for the waveform of future fifth-generation (5G) wireless standards. These systems can achieve maximum spectral efficiency compared to other multicarrier schemes, particularly in highly frequency selective propagation conditions. In this case a multi-tap, fractionally spaced equalizer or precoder needs to be inserted in each subcarrier at the receiver or transmitter side to compensate for inter-symbol interference (ISI) and inter-carrier interference (ICI). In this paper we propose a new Tomlinson-Harashima precoder (THP) design for FBMC/OQAM based on the mean squared error (MSE) duality from a minimum MSE (MMSE) designed decision feedback equalizer (DFE).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust multiuser binary CPM detection with unknown modulation index.\n \n \n \n \n\n\n \n Messai, M.; Guilloud, F.; Amis, K.; and Colavolpe, G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2132-2136, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362761,\n  author = {M. Messai and F. Guilloud and K. Amis and G. Colavolpe},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Robust multiuser binary CPM detection with unknown modulation index},\n  year = {2015},\n  pages = {2132-2136},\n  abstract = {We develop a robust multiuser detector for a Frequency Division Multiplexing (FDM) system where each user employs a binary continuous phase modulation (CPM) generated through a low-cost transmitter, thus characterized by a significant modulation index uncertainty, and sent over a channel affected by phase noise. In this FDM system the spectral efficiency can be increased by reducing the spacing between two adjacent channels. The proposed receiver is designed by adopting a simplified representation of a binary CPM signal with the principal component of its Laurent decomposition and is obtained by using the framework based on factor graphs and the sum-product algorithm. This detector can be used for iterative detection/decoding of a coded scheme in which each user employs a binary CPM modulator serially concatenated with an outer encoder through a pseudo-random interleaver. It does not require an explicit estimation of the modulation index or of the channel phase and is very robust to large uncertainties of the nominal value of the modulation index.},\n  keywords = {continuous phase modulation;decoding;frequency division multiplexing;radio transmitters;telecommunication channels;robust multiuser binary CPM detection;unknown modulation index;frequency division multiplexing system;binary continuous phase modulation;low-cost transmitter;modulation index uncertainty;FDM system;binary CPM signal;Laurent decomposition;factor graphs;sum-product algorithm;iterative detection-decoding;binary CPM modulator;pseudo-random interleaver;modulation index;channel phase;modulation index;Modulation;Approximation methods;Phase noise;Robustness;Frequency division multiplexing;Detectors;Transmitters;Continuous phase modulation;CPM-FDM;frequency spacing;spectral efficiency;modulation index mismatch;phase noise;factor graph;sum product;iterative detection and decoding},\n  doi = {10.1109/EUSIPCO.2015.7362761},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105101.pdf},\n}\n\n
\n
\n\n\n
\n We develop a robust multiuser detector for a Frequency Division Multiplexing (FDM) system where each user employs a binary continuous phase modulation (CPM) generated through a low-cost transmitter, thus characterized by a significant modulation index uncertainty, and sent over a channel affected by phase noise. In this FDM system the spectral efficiency can be increased by reducing the spacing between two adjacent channels. The proposed receiver is designed by adopting a simplified representation of a binary CPM signal with the principal component of its Laurent decomposition and is obtained by using the framework based on factor graphs and the sum-product algorithm. This detector can be used for iterative detection/decoding of a coded scheme in which each user employs a binary CPM modulator serially concatenated with an outer encoder through a pseudo-random interleaver. It does not require an explicit estimation of the modulation index or of the channel phase and is very robust to large uncertainties of the nominal value of the modulation index.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Double relay communication protocol for bandwidth management in cellular systems.\n \n \n \n \n\n\n \n Torrea-Duran, R.; Rosas, F.; Khan, Z. K. Z.; Pollin, S.; Tsiaflakis, P.; and Moonen, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2137-2141, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DoublePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362762,\n  author = {R. Torrea-Duran and F. Rosas and Z. K. Z. Khan and S. Pollin and P. Tsiaflakis and M. Moonen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Double relay communication protocol for bandwidth management in cellular systems},\n  year = {2015},\n  pages = {2137-2141},\n  abstract = {The continuously growing demand for wireless connectivity has turned bandwidth into a scarce resource that should be carefully managed. A common solution is to assign disjoint portions of the bandwidth to different users, but the portion size decreases as the number of users grows. An alternative solution is to introduce spatial diversity through coordinated base stations, but such systems are very sensitive to timing and frequency synchronization offsets. To tackle these problems, we use principles of network coding for bandwidth management in a double relay cellular system of two base stations and two users. We propose a three-time-slot transmission strategy and an MMSE reception strategy. It avoids the need for tight frequency or timing synchronization through a simple communication protocol without using additional bandwidth or infrastructure. By finding a balance between spatial diversity and transmission time, our approach achieves the system capacity and fairness in all SNR conditions.},\n  keywords = {cellular radio;diversity reception;least mean squares methods;protocols;synchronisation;double relay communication protocol;bandwidth management;cellular systems;wireless connectivity;scarce resource;spatial diversity;coordinated base stations;frequency synchronization;double relay cellular system;three-time-slot transmission strategy;timing synchronization;communication protocol;SNR conditions;system capacity;Base stations;Bandwidth;Spatial diversity;Signal to noise ratio;Relays;Time division multiple access;Mathematical model;double relay;MMSE;time-multiplexing},\n  doi = {10.1109/EUSIPCO.2015.7362762},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102485.pdf},\n}\n\n
\n
\n\n\n
\n The continuously growing demand for wireless connectivity has turned bandwidth into a scarce resource that should be carefully managed. A common solution is to assign disjoint portions of the bandwidth to different users, but the portion size decreases as the number of users grows. An alternative solution is to introduce spatial diversity through coordinated base stations, but such systems are very sensitive to timing and frequency synchronization offsets. To tackle these problems, we use principles of network coding for bandwidth management in a double relay cellular system of two base stations and two users. We propose a three-time-slot transmission strategy and an MMSE reception strategy. It avoids the need for tight frequency or timing synchronization through a simple communication protocol without using additional bandwidth or infrastructure. By finding a balance between spatial diversity and transmission time, our approach achieves the system capacity and fairness in all SNR conditions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Faster than Nyquist-an enabler for achieving maximum spectral efficiency in coexistence scenarios?.\n \n \n \n \n\n\n \n Le, C.; Fuhrwerk, M.; Schellmann, M.; and Peissig, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2142-2146, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"FasterPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362763,\n  author = {C. Le and M. Fuhrwerk and M. Schellmann and J. Peissig},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Faster than Nyquist-an enabler for achieving maximum spectral efficiency in coexistence scenarios?},\n  year = {2015},\n  pages = {2142-2146},\n  abstract = {In the context of spectrum sharing, a loss in spectral efficiency is incurred due to the guard bands required to keep co-channel interference between systems at a desired level. Our main interest is to show whether FTN signaling can recover the loss of spectral efficiency (SE) due to guard bands by adjusting the symbol duration and tolerating ISI in practical coexistence scenarios. In this paper, the SE of FTN signaling is investigated taking into account a practical multi-access channel, where a single-carrier system based on Nyquist or FTN signaling shares spectrum with a multi-carrier system. A single-user OQAM/OFDM system is considered as the reference system. Simulation results show that by choosing an appropriate symbol duration, FTN signaling can recover the loss of SE at high SNRs. Furthermore, in coexistence scenarios with small guard bands, the SE curve obtained with FTN is very close to that of the single-user reference system.},\n  keywords = {cochannel interference;OFDM modulation;quadrature amplitude modulation;radio spectrum management;maximum spectral efficiency;coexistence scenarios;spectrum sharing;cochannel interference;FTN signaling;multiaccess channel;single-carrier system;Nyquist signaling;multicarrier system;OQAM-OFDM system;Shape;Bandwidth;Receivers;Interference;Europe;Pulse shaping methods;Faster than Nyquist (FTN) signaling;Spectral efficiency;Multi-access channel},\n  doi = {10.1109/EUSIPCO.2015.7362763},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104689.pdf},\n}\n\n
\n
\n\n\n
\n In the context of spectrum sharing, a loss in spectral efficiency is incurred due to the guard bands required to keep co-channel interference between systems at a desired level. Our main interest is to show whether FTN signaling can recover the loss of spectral efficiency (SE) due to guard bands by adjusting the symbol duration and tolerating ISI in practical coexistence scenarios. In this paper, the SE of FTN signaling is investigated taking into account a practical multi-access channel, where a single-carrier system based on Nyquist or FTN signaling shares spectrum with a multi-carrier system. A single-user OQAM/OFDM system is considered as the reference system. Simulation results show that by choosing an appropriate symbol duration, FTN signaling can recover the loss of SE at high SNRs. Furthermore, in coexistence scenarios with small guard bands, the SE curve obtained with FTN is very close to that of the single-user reference system.\n
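The FTN mechanism itself fits in a few lines. A minimal sketch with a truncated sinc (Nyquist) pulse: packing symbols at spacing tau*T with tau < 1 raises the symbol rate in the same bandwidth, but the pulse is no longer zero at multiples of tau*T, so ISI appears by construction. Parameters are illustrative; the paper uses practical pulse shaping and a multi-access setting.

```python
import numpy as np

T, tau, os = 1.0, 0.8, 20                # symbol time, FTN factor, oversampling
n_sym = 64
t = np.arange(-8 * T, (n_sym + 8) * T, T / os)
pulse = lambda t0: np.sinc((t - t0) / T)  # Nyquist pulse for spacing T

rng = np.random.default_rng(8)
syms = rng.choice([-1.0, 1.0], n_sym)     # BPSK symbols
x = sum(a * pulse(k * tau * T) for k, a in enumerate(syms))

# sample at the FTN symbol instants: with tau < 1 the sinc is no longer
# zero at multiples of tau*T, so neighbouring symbols leak in (ISI)
idx = [np.argmin(np.abs(t - k * tau * T)) for k in range(n_sym)]
isi = x[idx] - syms
print(f"rms ISI at tau={tau}: {np.sqrt(np.mean(isi**2)):.2f}")  # ~0 at tau=1
```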
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Weak signal detection based on two dimensional stochastic resonance.\n \n \n \n \n\n\n \n Barbini, L.; Cole, M. O. T.; Hillis, A. J.; and du Bois , J. L.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2147-2151, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"WeakPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362764,\n  author = {L. Barbini and M. O. T. Cole and A. J. Hillis and J. L. {du Bois}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Weak signal detection based on two dimensional stochastic resonance},\n  year = {2015},\n  pages = {2147-2151},\n  abstract = {The analysis of vibrations from rotating machines gives information about their faults. From the signal processing perspective a significant problem is the detection of weak signals embedded in strong noise. Stochastic resonance (SR) is a mechanism where noise is not suppressed but exploited to trigger the synchronization of a non-linear system; in its one-dimensional form it has recently been applied to vibration analysis. This paper focuses on the use of SR in a two-dimensional system of gradient type for detection of weak signals submerged in Gaussian noise. Comparing the traditional one-dimensional system and the two-dimensional system used here, this paper shows that the latter can offer a more sensitive means of detection. An alternative metric is proposed to assess the output signal quality, requiring no a priori knowledge of the signal to be detected, and it is shown to offer similar results to the more conventional signal-to-noise ratio.},\n  keywords = {resonance;signal detection;stochastic processes;vibrational signal processing;weak signal detection;two dimensional stochastic resonance;rotating machines;signal processing perspective;nonlinear system;Gaussian noise;output signal quality;a priori knowledge;signal-to-noise ratio;one-dimensional system;Steady-state;Couplings;Synchronization;Stochastic resonance;Signal to noise ratio;Tuning;stochastic resonance;weak signal detection;non linear signal processing},\n  doi = {10.1109/EUSIPCO.2015.7362764},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103493.pdf},\n}\n\n
\n
\n\n\n
\n The analysis of vibrations from rotating machines gives information about their faults. From the signal processing perspective a significant problem is the detection of weak signals embedded in strong noise. Stochastic resonance (SR) is a mechanism where noise is not suppressed but exploited to trigger the synchronization of a non-linear system; in its one-dimensional form it has recently been applied to vibration analysis. This paper focuses on the use of SR in a two-dimensional system of gradient type for detection of weak signals submerged in Gaussian noise. Comparing the traditional one-dimensional system and the two-dimensional system used here, this paper shows that the latter can offer a more sensitive means of detection. An alternative metric is proposed to assess the output signal quality, requiring no a priori knowledge of the signal to be detected, and it is shown to offer similar results to the more conventional signal-to-noise ratio.\n
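A minimal sketch of the mechanism, assuming a simple two-dimensional gradient system (a quartic double well in x, harmonic in y, uncoupled here for brevity, unlike the coupled system studied in the paper) driven by a weak subthreshold tone plus noise and integrated with Euler-Maruyama. The spectral power at the drive frequency typically peaks at an intermediate noise level, which is the resonance.

```python
import numpy as np

def simulate(noise, A=0.12, f=0.01, dt=0.05, n=40000, seed=9):
    """Overdamped particle in V(x,y) = (x^2-1)^2/4 + y^2/2 with weak drive."""
    rng = np.random.default_rng(seed)
    x = y = 0.0
    xs = np.empty(n)
    for i in range(n):
        drive = A * np.sin(2 * np.pi * f * i * dt)
        fx = -(x**3 - x) + drive                    # -dV/dx + weak signal
        fy = -y                                     # -dV/dy
        x += fx * dt + np.sqrt(2 * noise * dt) * rng.standard_normal()
        y += fy * dt + np.sqrt(2 * noise * dt) * rng.standard_normal()
        xs[i] = x
    return xs

for D in (0.05, 0.15, 0.5):                         # low / intermediate / high noise
    xs = simulate(D)
    spec = np.abs(np.fft.rfft(np.sign(xs))) ** 2
    k = int(round(0.01 * 0.05 * len(xs)))           # FFT bin of the 0.01 Hz drive
    print(f"D={D}: power at drive frequency = {spec[k]:.0f}")
```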
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Cooperative simultaneous localization and tracking for mobile AD-HOC sensor networks.\n \n \n \n \n\n\n \n Teng, J.; Zhou, R.; and Zhang, Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2152-2156, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CooperativePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362765,\n  author = {J. Teng and R. Zhou and Y. Zhang},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Cooperative simultaneous localization and tracking for mobile AD-HOC sensor networks},\n  year = {2015},\n  pages = {2152-2156},\n  abstract = {Over the past decade, there has been much focus on mobile ad-hoc sensor networks. The mobility alleviates several issues relating to sensor network coverage and connectivity, but aggravates the difficulties of applications such as target tracking. Traditional solutions always localize the sensors first, and then track the target. In contrast, cooperative simultaneous localization and tracking (CoSLAT) adopts both the sensor-target and the inter-sensor observations to simultaneously refine the target and the sensor estimates. We propose a distributed variational filtering (VF) algorithm for CoSLAT, which greatly cuts down the estimation errors, while having nearly the same complexity as the traditional particle filtering (PF) algorithm. In addition, the update and the approximation of the a posteriori distribution are jointly performed by the VF, yielding a natural and adaptive compression. Since the temporal dependence is reduced from a great number of particles to one Gaussian component, the communication cost is significantly diminished.},\n  keywords = {cooperative communication;maximum likelihood estimation;mobile ad hoc networks;particle filtering (numerical methods);target tracking;communication cost;Gaussian component;a posteriori distribution;particle filtering algorithm;distributed variational filtering algorithm;inter-sensor observations;sensor-target observations;CoSLAT;cooperative simultaneous localization and tracking;target tracking;sensor network coverage;mobile ad-hoc sensor networks;Mobile computing;Ad hoc networks;Signal processing algorithms;Approximation methods;Target tracking;Mobile nodes;Variational Filtering;Cooperative Simultaneous Localization and Tracking;Mobile Ad-hoc sensor NETworks},\n  doi = {10.1109/EUSIPCO.2015.7362765},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103299.pdf},\n}\n\n
\n
\n\n\n
\n Over the past decade, there has been much focus on mobile ad-hoc sensor networks. The mobility alleviates several issues relating to sensor network coverage and connectivity, but aggravates the difficulties of applications such as target tracking. Traditional solutions always localize the sensors first, and then track the target. In contrast, cooperative simultaneous localization and tracking (CoSLAT) adopts both the sensor-target and the inter-sensor observations to simultaneously refine the target and the sensor estimates. We propose a distributed variational filtering (VF) algorithm for CoSLAT, which greatly cuts down the estimation errors, while having nearly the same complexity as the traditional particle filtering (PF) algorithm. In addition, the update and the approximation of the a posteriori distribution are jointly performed by the VF, yielding a natural and adaptive compression. Since the temporal dependence is reduced from a great number of particles to one Gaussian component, the communication cost is significantly diminished.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Decentralized reconstruction from compressive random projections driven by principal components.\n \n \n \n \n\n\n \n Fowler, J. E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2157-2161, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DecentralizedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362766,\n  author = {J. E. Fowler},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Decentralized reconstruction from compressive random projections driven by principal components},\n  year = {2015},\n  pages = {2157-2161},\n  abstract = {The decentralized reconstruction of data acquired in a sensor network via compressive random projections is considered. Assuming each node acquires a signal while simultaneously reducing its dimensionality, the proposed decentralized reconstruction recovers each signal to its original dimensionality with the reconstruction process being distributed across the network such that each node performs limited computation with limited communication with its neighboring nodes. In contrast to prior decentralized reconstructions driven by sparsity-based compressed-sensing techniques, the proposed approach employs reconstruction based on principal component analysis using an iterative consensus algorithm to calculate the required covariance across the network. Experimental results reveal that the performance of the proposed decentralized reconstruction approaches that of the original centralized algorithm as the number of consensus iterations increases.},\n  keywords = {compressed sensing;iterative methods;principal component analysis;signal reconstruction;wireless sensor networks;sensor network neighboring nodes;sparsity-based compressed-sensing technique;iterative consensus algorithm;principal component analysis;signal reconstruction process;data decentralized reconstruction;compressive random projection;Sensors;Image reconstruction;Signal processing algorithms;Approximation methods;Europe;Signal processing;Principal component analysis;random projections;principal component analysis;decentralized reconstruction;sensor networks},\n  doi = {10.1109/EUSIPCO.2015.7362766},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570085977.pdf},\n}\n\n
\n
\n\n\n
\n The decentralized reconstruction of data acquired in a sensor network via compressive random projections is considered. Assuming each node acquires a signal while simultaneously reducing its dimensionality, the proposed decentralized reconstruction recovers each signal to its original dimensionality with the reconstruction process being distributed across the network such that each node performs limited computation with limited communication with its neighboring nodes. In contrast to prior decentralized reconstructions driven by sparsity-based compressed-sensing techniques, the proposed approach employs reconstruction based on principal component analysis using an iterative consensus algorithm to calculate the required covariance across the network. Experimental results reveal that the performance of the proposed decentralized reconstruction approaches that of the original centralized algorithm as the number of consensus iterations increases.\n
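A minimal sketch of the two ingredients, under illustrative sizes and a ring topology: average consensus over local outer products estimates the global (uncentered) covariance at every node, and each node then inverts its own random projection y = Phi x with the covariance-weighted, LMMSE-style linear reconstruction x_hat = C Phi^T (Phi C Phi^T)^{-1} y. This sketches the principle, not the author's exact algorithm.

```python
import numpy as np

rng = np.random.default_rng(10)
N, D, M = 20, 32, 12                        # nodes, signal dim, projection dim
basis = rng.standard_normal((D, 4))         # signals live near a 4-D subspace
X = (basis @ rng.standard_normal((4, N))).T + 0.05 * rng.standard_normal((N, D))

# ring topology: each node repeatedly averages its covariance estimate
# with its two neighbours (a simple consensus iteration)
S = np.stack([np.outer(x, x) for x in X])
for _ in range(200):
    S = (S + np.roll(S, 1, axis=0) + np.roll(S, -1, axis=0)) / 3.0
C = S[0]                                    # every node converges to the mean

Phi = rng.standard_normal((M, D)) / np.sqrt(M)   # shared random projection
err = 0.0
for x in X:
    y = Phi @ x
    x_hat = C @ Phi.T @ np.linalg.solve(Phi @ C @ Phi.T, y)
    err += np.sum((x_hat - x) ** 2) / np.sum(x ** 2)
print(f"mean relative reconstruction error: {err / N:.3f}")
```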
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Scale invariant divergences for signal and image reconstruction.\n \n \n \n \n\n\n \n Lantéri, H.; Theys, C.; and Aime, C.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2162-2166, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ScalePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362767,\n  author = {H. Lantéri and C. Theys and C. Aime},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Scale invariant divergences for signal and image reconstruction},\n  year = {2015},\n  pages = {2162-2166},\n  abstract = {The subject of this paper is the reconstruction of a signal or an image under constraints of non negativity and of constant sum. The sum constraint is imposed by the use of scale invariant divergences, which allows the development of simple iterative reconstruction algorithms. Two families of divergences between two data fields p and q are considered, the a-divergence and the β-divergence. A procedure is applied to make them scale-invariant w.r.t. p and q. The resulting method is an interior point type algorithm useful in the context of ill-posed problems. Numerical illustrations are given for the deconvolution of a solar spectrum and an interferometric image.},\n  keywords = {image reconstruction;iterative methods;scale invariant divergences;image reconstruction;signal reconstruction;simple iterative reconstruction algorithms;interior point type algorithm;ill-posed problems;numerical illustrations;interferometric image;solar spectrum deconvolution;Signal processing algorithms;Image reconstruction;Deconvolution;Europe;Signal processing;Noise measurement;Iterative methods;Inverse problems;non-negativity and sum constrained minimization;scale invariant divergences},\n  doi = {10.1109/EUSIPCO.2015.7362767},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102047.pdf},\n}\n\n
\n
\n\n\n
\n The subject of this paper is the reconstruction of a signal or an image under constraints of non-negativity and of constant sum. The sum constraint is imposed by the use of scale-invariant divergences, which allows the development of simple iterative reconstruction algorithms. Two families of divergences between two data fields p and q are considered, the α-divergence and the β-divergence. A procedure is applied to make them scale-invariant w.r.t. p and q. The resulting method is an interior-point-type algorithm useful in the context of ill-posed problems. Numerical illustrations are given for the deconvolution of a solar spectrum and an interferometric image.\n
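For reference, one common parameterization of the β-divergence between non-negative data fields p and q is, for β not in {0, 1},

\[ D_\beta(p\,\|\,q) = \sum_i \frac{p_i^\beta + (\beta - 1)\,q_i^\beta - \beta\, p_i\, q_i^{\beta-1}}{\beta(\beta - 1)}, \]

and a scale-invariant variant in the spirit of the paper can be obtained by minimizing over a positive scale factor applied to q,

\[ D_\beta^{\mathrm{inv}}(p\,\|\,q) = \min_{\lambda > 0} D_\beta(p\,\|\,\lambda q), \]

which is insensitive to the overall sum of q and hence compatible with the constant-sum constraint.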
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Wavelet based unsupervised variational Bayesian image reconstruction approach.\n \n \n \n \n\n\n \n Zheng, Y.; Fraysse, A.; and Rodet, T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2167-2171, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"WaveletPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362768,\n  author = {Y. Zheng and A. Fraysse and T. Rodet},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Wavelet based unsupervised variational Bayesian image reconstruction approach},\n  year = {2015},\n  pages = {2167-2171},\n  abstract = {In this paper, we present a variational Bayesian approach in the wavelet domain for linear image reconstruction problems. This approach is based on a Gaussian Scale Mixture prior and an improved variational Bayesian approximation method. Its main advantages are that it is unsupervised and can be used to solve various linear inverse problems. We show the good performance of our approach through comparisons with state of the art approaches on a deconvolution problem.},\n  keywords = {deconvolution;Gaussian processes;image reconstruction;wavelet transforms;linear inverse problems;Bayesian approximation;Gaussian scale mixture;linear image reconstruction;wavelet domain;wavelet based unsupervised variational Bayesian image reconstruction;Bayes methods;GSM;Wavelet transforms;Image reconstruction;Deconvolution;Europe;Signal processing;unsupervised approach;wavelet transform;variational Bayesian;GSM;Generalized Gaussian},\n  doi = {10.1109/EUSIPCO.2015.7362768},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104899.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we present a variational Bayesian approach in the wavelet domain for linear image reconstruction problems. This approach is based on a Gaussian Scale Mixture prior and an improved variational Bayesian approximation method. Its main advantages are that it is unsupervised and can be used to solve various linear inverse problems. We show the good performance of our approach through comparisons with state-of-the-art approaches on a deconvolution problem.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sampling FRI signals with the SOS kernel: Bounds and optimal kernel.\n \n \n \n \n\n\n \n Bernhardt, S.; Boyer, R.; Marcos, S.; Eldar, Y. C.; and Larzabal, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2172-2176, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SamplingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362769,\n  author = {S. Bernhardt and R. Boyer and S. Marcos and Y. C. Eldar and P. Larzabal},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Sampling FRI signals with the SOS kernel: Bounds and optimal kernel},\n  year = {2015},\n  pages = {2172-2176},\n  abstract = {Recently it has been shown that using appropriate sampling kernel, finite rate of innovation signals can be perfectly recon structed even tough they are non-bandlimited. In the presence of noise, reconstruction is achieved by an estimation procedure of all the parameters of the incoming signal. In this paper we consider the estimation of a finite stream of pulses using the Sum of Sincs (SoS) kernel. We derive the Cramér Rao Bound (BCRB) relative to the estimated parameters. The SoS kernel is used since it is configurable by a vector of weights: we propose a family of kernels which maximizes the Bayesian Fisher Information (BIM) i.e. the total amount of information about each of the parameter in the measures. The advantage of the proposed family is that it can be user-adjusted to favor one specific parameter. The variety of the resulting kernel goes from a perfect sinusoid to the Dirichlet kernel.},\n  keywords = {Bayes methods;estimation theory;parameter estimation;signal reconstruction;signal sampling;surface reconstruction;FRI signal sampling;SOS kernel;finite rate of innovation signal sampling;signal reconstruction;parameter estimation procedure;pulses finite stream estimation;sum of sincs kernel;Cramer Rao bound;BCRB;Bayesian fisher information;BIM;Dirichlet kernel;Kernel;Bayes methods;Delays;Linear programming;Europe;Shape;Signal processing},\n  doi = {10.1109/EUSIPCO.2015.7362769},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097617.pdf},\n}\n\n
\n
\n\n\n
\n Recently it has been shown that, using an appropriate sampling kernel, finite rate of innovation signals can be perfectly reconstructed even though they are non-bandlimited. In the presence of noise, reconstruction is achieved by an estimation procedure of all the parameters of the incoming signal. In this paper we consider the estimation of a finite stream of pulses using the Sum of Sincs (SoS) kernel. We derive the Bayesian Cramér-Rao Bound (BCRB) relative to the estimated parameters. The SoS kernel is used since it is configurable by a vector of weights: we propose a family of kernels which maximizes the Bayesian Information Matrix (BIM), i.e., the total amount of information about each of the parameters in the measurements. The advantage of the proposed family is that it can be user-adjusted to favor one specific parameter. The resulting kernels range from a pure sinusoid to the Dirichlet kernel.\n
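In the Fourier domain the Sum-of-Sincs kernel is a finite weighted sum of unit pulses, so in time it is a weighted sum of complex exponentials supported on [-τ/2, τ/2]. A small sketch of evaluating such a kernel for a given weight vector (the sizes and the all-ones weights below are illustrative; choosing the weights optimally is exactly the paper's subject):

import numpy as np

def sos_kernel(t, tau, b):
    # b holds the weights b_k for k = -K..K; b[k + K] multiplies
    # the exponential exp(1j * 2*pi*k*t / tau).
    K = (len(b) - 1) // 2
    ks = np.arange(-K, K + 1)
    g = (b[None, :] * np.exp(2j * np.pi * ks[None, :] * t[:, None] / tau)).sum(axis=1)
    g[np.abs(t) > tau / 2] = 0.0   # compact support
    return g / tau

tau = 1.0
t = np.linspace(-0.75, 0.75, 1501)
g = sos_kernel(t, tau, np.ones(7))   # all-ones weights give a Dirichlet-like kernel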
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improving state estimates over finite data using optimal FIR filtering with embedded unbiasedness.\n \n \n \n \n\n\n \n Zhao, S.; Shmaliy, Y. S.; Khan, S. H.; and Liu, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2177-2180, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ImprovingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362770,\n  author = {S. Zhao and Y. S. Shmaliy and S. H. Khan and °. F. Liu},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Improving state estimates over finite data using optimal FIR filtering with embedded unbiasedness},\n  year = {2015},\n  pages = {2177-2180},\n  abstract = {In this paper, the optimal finite impulse response (OFIR) with embedded unbiasedness (EU) filter is derived by minimizing the mean square error (MSE) subject to the unbiasedness constraint for discrete time-invariant state-space models. Un like the OFIR filter, the OFIR-EU filter does not require the initial conditions. In terms of accuracy, the OFIR-EU filter occupies an intermediate place between the UFIR and OFIR filters. With a two-state harmonic model, we show that the OFIR-UE filter has higher immunity against errors in the noise statistics and better robustness against temporary model uncertainties than the OFIR and Kalman filters.},\n  keywords = {FIR filters;mean square error methods;state-space methods;optimal FIR filtering;optimal finite impulse response filtering;state estimates;finite data;embedded unbiasedness filter;mean square error;MSE;unbiasedness constraint;discrete time-invariant state-space models;two-state harmonic model;Finite impulse response filters;State-space methods;Optimization;Noise measurement;Europe;Harmonic analysis},\n  doi = {10.1109/EUSIPCO.2015.7362770},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099387.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, the optimal finite impulse response (OFIR) filter with embedded unbiasedness (EU) is derived by minimizing the mean square error (MSE) subject to the unbiasedness constraint for discrete time-invariant state-space models. Unlike the OFIR filter, the OFIR-EU filter does not require the initial conditions. In terms of accuracy, the OFIR-EU filter occupies an intermediate place between the UFIR and OFIR filters. With a two-state harmonic model, we show that the OFIR-EU filter has higher immunity against errors in the noise statistics and better robustness against temporary model uncertainties than the OFIR and Kalman filters.\n
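To make the FIR idea concrete: a finite-horizon batch estimator forms the current state estimate from the last N measurements only, so errors cannot accumulate indefinitely. Below is a generic unbiased (least-squares) FIR-style sketch for a known model x_{j+1} = F x_j, y_j = H x_j + noise; it illustrates the independence from initial conditions and noise statistics, not the OFIR-EU derivation itself:

import numpy as np

def batch_fir_estimate(ys, F, H):
    # Estimate x_k from the window y_{k-N+1..k}; assumes F is invertible.
    N = len(ys)
    back = np.linalg.matrix_power(np.linalg.inv(F), N - 1)  # maps x_k -> x_{k-N+1}
    rows = [H @ np.linalg.matrix_power(F, j) @ back for j in range(N)]
    C, Y = np.vstack(rows), np.hstack(ys)
    # Plain least squares: unbiased without any prior initial condition.
    x_hat, *_ = np.linalg.lstsq(C, Y, rcond=None)
    return x_hat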
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Periodic ARMA models: Application to particulate matter concentrations.\n \n \n \n \n\n\n \n Sarnaglia, A. J. Q.; Reisen, V. A.; and Bondon, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2181-2185, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PeriodicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362771,\n  author = {A. J. Q. Sarnaglia and V. A. Reisen and P. Bondon},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Periodic ARMA models: Application to particulate matter concentrations},\n  year = {2015},\n  pages = {2181-2185},\n  abstract = {We propose the use of multivariate version of Whittle's methodology to estimate periodic autoregressive moving average models. In the literature, this estimator has been widely used to deal with large data sets, since, in this context, its performance is similar to the Gaussian maximum likelihood estimator and the estimates are obtained much faster. Here, the usefulness of Whittle estimator is illustrated by a Monte Carlo simulation and by fitting the periodic autoregressive moving average model to daily mean concentrations of particulate matter observed in Cariacica, Brazil. The results confirm the potentiality of Whittle estimator when applied to periodic time series.},\n  keywords = {air pollution;autoregressive moving average processes;Gaussian processes;periodic ARMA models;particulate matter concentrations;Whittle methodology;periodic autoregressive moving average models;Gaussian maximum likelihood estimator;Whittle estimator;Monte Carlo simulation;Cariacica;Brazil;Biological system modeling;Autoregressive processes;Estimation;Atmospheric modeling;Signal processing;Monte Carlo methods;Computational modeling;Cyclostationarity;periodic stationarity;PARMA models;Whittle estimation;particulate matter},\n  doi = {10.1109/EUSIPCO.2015.7362771},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101097.pdf},\n}\n\n
\n
\n\n\n
\n We propose the use of a multivariate version of Whittle's methodology to estimate periodic autoregressive moving average models. In the literature, this estimator has been widely used to deal with large data sets, since, in this context, its performance is similar to that of the Gaussian maximum likelihood estimator and the estimates are obtained much faster. Here, the usefulness of the Whittle estimator is illustrated by a Monte Carlo simulation and by fitting the periodic autoregressive moving average model to daily mean concentrations of particulate matter observed in Cariacica, Brazil. The results confirm the potential of the Whittle estimator when applied to periodic time series.\n
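The Whittle objective underlying the estimator replaces the exact Gaussian likelihood by a frequency-domain approximation built from the periodogram. A toy univariate, non-periodic AR(1) version showing the shape of the objective (the paper's contribution is the multivariate extension to the periodic case):

import numpy as np
from scipy.optimize import minimize_scalar

rng = np.random.default_rng(2)
n, phi_true = 2048, 0.6
x = np.zeros(n)
for t in range(1, n):                 # simulate an AR(1) series
    x[t] = phi_true * x[t - 1] + rng.normal()

freqs = np.arange(1, n // 2) * 2 * np.pi / n
I = np.abs(np.fft.fft(x)[1:n // 2]) ** 2 / (2 * np.pi * n)   # periodogram

def whittle_loss(phi, sigma2=1.0):
    # AR(1) spectral density f(w) = sigma2 / (2*pi*|1 - phi*e^{-jw}|^2)
    f = sigma2 / (2 * np.pi * np.abs(1 - phi * np.exp(-1j * freqs)) ** 2)
    return np.sum(np.log(f) + I / f)

phi_hat = minimize_scalar(whittle_loss, bounds=(-0.99, 0.99), method="bounded").x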
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed rao-blackwellized point mass filter for blind equalization in receiver networks.\n \n \n \n \n\n\n \n Bordin, C. J.; and Bruno, M. G. S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2186-2190, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362772,\n  author = {C. J. Bordin and M. G. S. Bruno},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed rao-blackwellized point mass filter for blind equalization in receiver networks},\n  year = {2015},\n  pages = {2186-2190},\n  abstract = {We describe new Bayesian algorithms for cooperative blind equalization in a network in which the signal broadcast by a single transmitter is received by multiple remote nodes through distinct frequency-selective channels. The algorithms are based on Rao-Blackwellized point mass filters, and approximate the posterior densities of the unknown channel parameters by Gaussian mixtures of fixed order. To keep computations treatable, density mixtures are reduced by a modified version of West's algorithm. A reduced complexity approach which employs a single mode approximation of some remote quantities is also considered. Via numerical simulations, we verify that the proposed algorithms outperform certain particle-filtering-based algorithms with comparable communication loads.},\n  keywords = {Bayes methods;blind equalisers;Gaussian processes;mixture models;particle filtering (numerical methods);Distribute Rao-Blackwellized point mass filter;receiver network;Bayesian algorithm;cooperative blind equalization;signal broadcast;single transmitter;frequency selective channel;posterior density;Gaussian mixture model;West algorithm;reduced complexity approach;single mode approximation;particle-filtering-based algorithm;Signal processing algorithms;Approximation algorithms;Blind equalizers;Approximation methods;Europe;Receivers;Blind Equalization;Distributed Algorithms;Bayesian Estimation;Point Mass Filter},\n  doi = {10.1109/EUSIPCO.2015.7362772},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096397.pdf},\n}\n\n
\n
\n\n\n
\n We describe new Bayesian algorithms for cooperative blind equalization in a network in which the signal broadcast by a single transmitter is received by multiple remote nodes through distinct frequency-selective channels. The algorithms are based on Rao-Blackwellized point mass filters, and approximate the posterior densities of the unknown channel parameters by Gaussian mixtures of fixed order. To keep computations tractable, density mixtures are reduced by a modified version of West's algorithm. A reduced-complexity approach which employs a single-mode approximation of some remote quantities is also considered. Via numerical simulations, we verify that the proposed algorithms outperform certain particle-filtering-based algorithms with comparable communication loads.\n
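The mixture-reduction step that keeps the computations tractable can be sketched as a greedy pairwise merge: repeatedly fuse the two closest Gaussian components by moment matching until the target order is reached. This is a generic reduction in the spirit of West's algorithm; the distance measure and bookkeeping below are illustrative assumptions:

import numpy as np

def merge_two(w1, m1, P1, w2, m2, P2):
    # Moment-preserving merge of two weighted Gaussian components.
    w = w1 + w2
    m = (w1 * m1 + w2 * m2) / w
    d1, d2 = m1 - m, m2 - m
    P = (w1 * (P1 + np.outer(d1, d1)) + w2 * (P2 + np.outer(d2, d2))) / w
    return w, m, P

def reduce_mixture(comps, target):
    # comps: list of (weight, mean, covariance) tuples.
    while len(comps) > target:
        best, pair = np.inf, None
        for i in range(len(comps)):
            for j in range(i + 1, len(comps)):
                (wi, mi, _), (wj, mj, _) = comps[i], comps[j]
                cost = wi * wj / (wi + wj) * np.sum((mi - mj) ** 2)
                if cost < best:
                    best, pair = cost, (i, j)
        i, j = pair
        merged = merge_two(*comps[i], *comps[j])
        comps = [c for k, c in enumerate(comps) if k not in (i, j)] + [merged]
    return comps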
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive approximate filtering of state-space models.\n \n \n \n \n\n\n \n Dedecius, K.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2191-2195, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362773,\n  author = {K. Dedecius},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive approximate filtering of state-space models},\n  year = {2015},\n  pages = {2191-2195},\n  abstract = {Approximate Bayesian computation (ABC) filtration of state-space models replaces popular particle filters in cases where the observation models (i.e. likelihoods) are either computationally too demanding or completely intractable, but it is still possible to simulate from them. These sequential Monte Carlo methods evaluate importance weights based on the distance between the true observation and the simulated pseudoobservations. The paper proposes a new adaptive method consisting of probability kernel-based evaluation of importance weights with online determination of kernel scale. It is shown that the resulting algorithm achieves performance close to particle filters in the case of well-specified models, and outperforms generic particle filters and state-of-art ABC filters under heavy-tailed noise and model misspecification.},\n  keywords = {adaptive filters;approximation theory;Monte Carlo methods;particle filtering (numerical methods);probability;state-space methods;adaptive approximate filtering;state-space models;approximate Bayesian computation filtration;particle filters;sequential Monte Carlo methods;probability kernel-based evaluation;Kernel;Computational modeling;Approximation methods;Biological system modeling;Adaptation models;Bayes methods;State-space methods;Approximate Bayesian computation;ABC;filtration;adaptive kernels},\n  doi = {10.1109/EUSIPCO.2015.7362773},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104325.pdf},\n}\n\n
\n
\n\n\n
\n Approximate Bayesian computation (ABC) filtration of state-space models replaces popular particle filters in cases where the observation models (i.e. likelihoods) are either computationally too demanding or completely intractable, but it is still possible to simulate from them. These sequential Monte Carlo methods evaluate importance weights based on the distance between the true observation and the simulated pseudo-observations. The paper proposes a new adaptive method consisting of probability kernel-based evaluation of importance weights with online determination of the kernel scale. It is shown that the resulting algorithm achieves performance close to particle filters in the case of well-specified models, and outperforms generic particle filters and state-of-the-art ABC filters under heavy-tailed noise and model misspecification.\n
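The core loop of such an ABC filter: propagate particles, push each through the simulable (but intractable-likelihood) observation model to get a pseudo-observation, and weight by a kernel of its distance to the true observation. A bare-bones sketch with a Gaussian kernel and a crude median-based scale; the paper's online kernel-scale determination is more refined:

import numpy as np

def abc_filter_step(particles, y_obs, propagate, simulate, rng):
    # propagate(x, rng) -> next-state sample; simulate(x, rng) -> pseudo-observation
    particles = np.array([propagate(x, rng) for x in particles])
    pseudo = np.array([simulate(x, rng) for x in particles])
    dist = np.abs(pseudo - y_obs)
    h = np.median(dist) + 1e-12           # crude adaptive kernel scale
    w = np.exp(-0.5 * (dist / h) ** 2)    # Gaussian-kernel importance weights
    w /= w.sum()
    idx = rng.choice(len(particles), size=len(particles), p=w)  # resample
    return particles[idx]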
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Cyclostationarity-based detection and identification of binary offset carrier-modulated signals.\n \n \n \n\n\n \n Thuillier, E.; and Lundén, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2196-2200, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362774,\n  author = {E. Thuillier and J. Lundén},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Cyclostationarity-based detection and identification of binary offset carrier-modulated signals},\n  year = {2015},\n  pages = {2196-2200},\n  abstract = {In this paper, cyclostationarity-based detection and identification of binary offset carrier (BOC)-modulated signals is considered. BOC-modulated signals are widely used in current and next-generation global navigation satellite systems. The cyclostationary properties of the BOC-modulated signals are summarized and distinguishing features are discussed. A multiple hypothesis testing problem is formulated for detecting and identifying the strongest BOC-modulated signal from a multi-signal mixture. Two cyclic detection and identification tests are proposed. Their asymptotic distributions under the null hypothesis, when only noise is present, are established. Simulation results that show the very good detection and identification performance of the proposed algorithms are provided.},\n  keywords = {modulation;next generation networks;satellite navigation;null hypothesis;asymptotic distributions;cyclic identification tests;cyclic detection;multisignal mixture;multiple hypothesis testing problem;next-generation global navigation satellite systems;binary offset carrier-modulated signal identification;cyclostationarity-based detection;Signal to noise ratio;Signal processing algorithms;Correlation;Europe;Signal detection;Random variables;Binary offset carrier;cyclostationarity;detection;modulation classiication},\n  doi = {10.1109/EUSIPCO.2015.7362774},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n In this paper, cyclostationarity-based detection and identification of binary offset carrier (BOC)-modulated signals is considered. BOC-modulated signals are widely used in current and next-generation global navigation satellite systems. The cyclostationary properties of the BOC-modulated signals are summarized and distinguishing features are discussed. A multiple hypothesis testing problem is formulated for detecting and identifying the strongest BOC-modulated signal from a multi-signal mixture. Two cyclic detection and identification tests are proposed. Their asymptotic distributions under the null hypothesis, when only noise is present, are established. Simulation results are provided, demonstrating the very good detection and identification performance of the proposed algorithms.\n
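The detection statistics involved are built from estimated cyclic autocorrelations, which are nonzero at the signal's cycle frequencies but vanish for stationary noise. A minimal estimator at cycle frequency alpha and lag tau (conventions vary; this asymmetric form is one common choice, shown only for illustration):

import numpy as np

def cyclic_autocorr(x, alpha, tau, fs=1.0):
    # Estimate R_x^alpha(tau) = < x[n] conj(x[n+tau]) exp(-j*2*pi*alpha*n/fs) >
    n = np.arange(len(x) - tau)
    prod = x[:len(x) - tau] * np.conj(x[tau:])
    return np.mean(prod * np.exp(-2j * np.pi * alpha * n / fs))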
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Iterative windowed parameter estimation of multiple superimposed damped exponentials in noise.\n \n \n \n \n\n\n \n Ye, S.; and Aboutanios, E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2201-2205, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"IterativePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362775,\n  author = {S. Ye and E. Aboutanios},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Iterative windowed parameter estimation of multiple superimposed damped exponentials in noise},\n  year = {2015},\n  pages = {2201-2205},\n  abstract = {The research problem of the parameter estimation of multiple superimposed damped complex exponentials in noise is of significant importance in many engineering and science applications. In this paper, we propose a simple yet accurate estimator to address the problem. By combining an efficient windowed frequency and damping estimator for a single component with an iterative leakage subtraction scheme, the novel method consecutively and iteratively estimates one component at a time by gradually reducing the leakage introduced by other components presented. Simulation results are presented to verify that the proposed algorithm is capable of outperforming state-of-art time and frequency domain algorithms.},\n  keywords = {frequency estimation;iterative methods;signal sampling;multiple superimposed damped exponentials;iterative windowed parameter estimation;noise;windowed frequency estimator;windowed damping estimator;iterative leakage subtraction scheme;Frequency estimation;Signal processing algorithms;Estimation;Signal to noise ratio;Damping;Interpolation;Parameter estimator;damped exponential;interpolation algorithm;nuclear magnetic resonance spectroscopy},\n  doi = {10.1109/EUSIPCO.2015.7362775},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570091399.pdf},\n}\n\n
\n
\n\n\n
\n Estimating the parameters of multiple superimposed damped complex exponentials in noise is of significant importance in many engineering and science applications. In this paper, we propose a simple yet accurate estimator to address the problem. By combining an efficient windowed frequency and damping estimator for a single component with an iterative leakage subtraction scheme, the novel method consecutively and iteratively estimates one component at a time, gradually reducing the leakage introduced by the other components present. Simulation results are presented to verify that the proposed algorithm is capable of outperforming state-of-the-art time and frequency domain algorithms.\n
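The leakage-subtraction loop has this general shape: estimate the dominant component, synthesize and subtract it, and move on, so that each later estimate sees less interference. A schematic version in which the single-component windowed estimator is abstracted into a crude stand-in, estimate_one (a hypothetical helper; the paper realizes this step with an accurate windowed interpolated estimator):

import numpy as np

def estimate_one(r):
    # Stand-in: frequency from the FFT peak, then damping and amplitude
    # from a linear fit of log|.| of the demodulated residual.
    n = np.arange(len(r))
    f = np.argmax(np.abs(np.fft.fft(r))) / len(r)
    base = r * np.exp(-2j * np.pi * f * n)
    A = np.vstack([n, np.ones_like(n)]).T
    slope, logc = np.linalg.lstsq(A, np.log(np.abs(base) + 1e-12), rcond=None)[0]
    return np.exp(logc + 1j * np.angle(base[0])), slope, f   # amp, damping, freq

def estimate_and_subtract(x, n_components):
    n = np.arange(len(x))
    residual = np.asarray(x, dtype=complex).copy()
    comps = []
    for _ in range(n_components):
        amp, damp, f = estimate_one(residual)
        comps.append((amp, damp, f))
        residual -= amp * np.exp((damp + 2j * np.pi * f) * n)  # remove leakage
    return comps, residual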
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bayesian parameter estimation for asymmetric power distributions.\n \n \n \n \n\n\n \n Baussard, A.; and Tourneret, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2206-2210, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BayesianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362776,\n  author = {A. Baussard and J. Tourneret},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bayesian parameter estimation for asymmetric power distributions},\n  year = {2015},\n  pages = {2206-2210},\n  abstract = {This paper proposes a hierarchical Bayesian model for estimating the parameters of asymmetric power distributions (APDs). These distributions are defined by shape, scale and asymmetry parameters which make them very flexible for approximating empirical distributions. A hybrid Markov chain Monte Carlo method is then studied to sample the unknown parameters of APDs. The generated samples can be used to compute the Bayesian estimators of the unknown APD parameters. Numerical experiments show the good performance of the proposed estimation method. An application to an image segmentation problem is finally investigated.},\n  keywords = {image segmentation;Markov processes;Monte Carlo methods;parameter estimation;Bayesian parameter estimation;asymmetric power distributions;APD parameter;hybrid Markov chain Monte Carlo method;image segmentation;Bayes methods;Image segmentation;Shape;Markov processes;Histograms;Estimation;Europe;Asymmetric power distributions;hierarchical Bayesian model;MCMC;Gibbs sampler;Image segmentation},\n  doi = {10.1109/EUSIPCO.2015.7362776},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102961.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a hierarchical Bayesian model for estimating the parameters of asymmetric power distributions (APDs). These distributions are defined by shape, scale and asymmetry parameters which make them very flexible for approximating empirical distributions. A hybrid Markov chain Monte Carlo method is then studied to sample the unknown parameters of APDs. The generated samples can be used to compute the Bayesian estimators of the unknown APD parameters. Numerical experiments show the good performance of the proposed estimation method. An application to an image segmentation problem is finally investigated.\n
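The hybrid MCMC machinery referred to above cycles Gibbs-style over parameter blocks, falling back to Metropolis moves where no closed-form conditional exists. As one generic building block, a random-walk Metropolis kernel for a positive scalar parameter (illustrative only; the APD conditionals and proposal design are the paper's subject):

import numpy as np

def rw_metropolis(logpost, theta0, n_iter=5000, step=0.1, rng=None):
    # Random-walk Metropolis on log(theta) to keep theta > 0.
    rng = rng if rng is not None else np.random.default_rng()
    log_theta = np.log(theta0)
    lp = logpost(np.exp(log_theta))
    samples = np.empty(n_iter)
    for i in range(n_iter):
        prop = log_theta + step * rng.normal()
        lp_prop = logpost(np.exp(prop))
        # acceptance includes the log-Jacobian of the log transform
        if np.log(rng.uniform()) < lp_prop - lp + prop - log_theta:
            log_theta, lp = prop, lp_prop
        samples[i] = np.exp(log_theta)
    return samples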
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bayesian multi-target tracking with superpositional measurements using labeled random finite sets.\n \n \n \n \n\n\n \n Papi, F.; and Kim, D. Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2211-2215, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BayesianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362777,\n  author = {F. Papi and D. Y. Kim},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bayesian multi-target tracking with superpositional measurements using labeled random finite sets},\n  year = {2015},\n  pages = {2211-2215},\n  abstract = {In this paper we present a general solution for multi-target tracking problems with superpositional measurements. In a superpositional sensor model, the measurement collected by the sensor at each time step is a superposition of measurements generated by each of the targets present in the surveillance area. We use the Bayes multi-target filter with Labeled Random Finite Set (RFS) in order to jointly estimate the number of targets and their trajectories. We propose an implementation of this filter using Sequential Monte Carlo (SMC) methods with an efficient multi-target sampling strategy based on the Approximate Superpositional Cardinalized Probability Hypothesis Density (CPHD) filter.},\n  keywords = {Bayes methods;Monte Carlo methods;target tracking;Bayesian multitarget tracking;superpositional measurements;labeled random finite sets;superpositional sensor;surveillance area;Bayes multitarget filter;sequential Monte Carlo methods;SMC methods;multitarget sampling;approximate superpositional cardinalized probability hypothesis density filter;CPHD filter;Approximation methods;Proposals;Radar tracking;Target tracking;Time measurement;Europe;Signal processing},\n  doi = {10.1109/EUSIPCO.2015.7362777},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103229.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we present a general solution for multi-target tracking problems with superpositional measurements. In a superpositional sensor model, the measurement collected by the sensor at each time step is a superposition of measurements generated by each of the targets present in the surveillance area. We use the Bayes multi-target filter with Labeled Random Finite Set (RFS) in order to jointly estimate the number of targets and their trajectories. We propose an implementation of this filter using Sequential Monte Carlo (SMC) methods with an efficient multi-target sampling strategy based on the Approximate Superpositional Cardinalized Probability Hypothesis Density (CPHD) filter.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Composite real principal component analysis of complex signals.\n \n \n \n \n\n\n \n Hellings, C.; Gogler, P.; and Utschick, W.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2216-2220, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CompositePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362778,\n  author = {C. Hellings and P. Gogler and W. Utschick},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Composite real principal component analysis of complex signals},\n  year = {2015},\n  pages = {2216-2220},\n  abstract = {Principal component analysis (PCA) is a tool for dimensionality reduction, feature extraction, and data compression, which is applied to both real-valued and complex-valued data sets. For complex data, a modified version of PCA based on widely linear transformations was shown to be beneficial if the considered random variables are improper, i.e., in the case of correlations or power imbalances between real and imaginary parts. This widely linear approach is formulated in an augmented complex representation in the existing literature. In this paper, we propose a composite real PCA, which instead transforms the complex data into a set of real-valued principal components. This alternative approach is superior in dimensionality reduction due to the finer granularity that is possible when counting dimensions in the real-valued representation. Moreover, it can be used to obtain the same results as the augmented complex version at a lower computational complexity.},\n  keywords = {computational complexity;data compression;feature extraction;principal component analysis;signal representation;complex signals;composite real principal component analysis;PCA;dimensionality reduction;feature extraction;data compression;real-valued data set;complex-valued data set;widely linear transformations;random variables;widely linear approach;augmented complex representation;augmented complex version;computational complexity;Principal component analysis;Eigenvalues and eigenfunctions;Covariance matrices;Random variables;Signal processing;Chlorine;Europe;Composite real representation;dimensionality reduction;improper signals;noncircular;principal component analysis (PCA)},\n  doi = {10.1109/EUSIPCO.2015.7362778},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105171.pdf},\n}\n\n
\n
\n\n\n
\n Principal component analysis (PCA) is a tool for dimensionality reduction, feature extraction, and data compression, which is applied to both real-valued and complex-valued data sets. For complex data, a modified version of PCA based on widely linear transformations was shown to be beneficial if the considered random variables are improper, i.e., in the case of correlations or power imbalances between real and imaginary parts. This widely linear approach is formulated in an augmented complex representation in the existing literature. In this paper, we propose a composite real PCA, which instead transforms the complex data into a set of real-valued principal components. This alternative approach is superior in dimensionality reduction due to the finer granularity that is possible when counting dimensions in the real-valued representation. Moreover, it can be used to obtain the same results as the augmented complex version at a lower computational complexity.\n
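The composite real representation is simple to state in code: stack real and imaginary parts into one real vector and run ordinary PCA there, so correlations between real and imaginary parts (impropriety) are captured automatically and components can be counted with real-valued granularity. A minimal sketch under these assumptions:

import numpy as np

def composite_real_pca(Z, n_components):
    # Z: complex data, shape (n_samples, d).
    X = np.hstack([Z.real, Z.imag])          # (n, 2d) composite real data
    X = X - X.mean(axis=0)
    C = X.T @ X / len(X)                     # real covariance captures impropriety
    evals, evecs = np.linalg.eigh(C)
    order = np.argsort(evals)[::-1][:n_components]
    return X @ evecs[:, order]               # real-valued principal components

rng = np.random.default_rng(3)
Z = rng.normal(size=(500, 4)) + 1j * 0.1 * rng.normal(size=(500, 4))  # improper data
Y = composite_real_pca(Z, n_components=3)    # an odd component count is possible here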
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Cramér-Rao bounds for particle size distribution estimation from multiangle dynamic light scattering.\n \n \n \n\n\n \n Boualem, A.; Jabloun, M.; Ravier, P.; Naiim, M.; and Jalocha, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2221-2225, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362779,\n  author = {A. Boualem and M. Jabloun and P. Ravier and M. Naiim and A. Jalocha},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Cramér-Rao bounds for particle size distribution estimation from multiangle dynamic light scattering},\n  year = {2015},\n  pages = {2221-2225},\n  abstract = {We derive the Cramér-Rao lower bounds (CRB) for parametric estimation of the number-weighted particle size distribution (PSD) from multiangle Dynamic Light Scattering (DLS) measurements. The CRB is a useful statistical tool to investigate the optimality of the PSD estimators. In the present paper, a Gaussian mixture (GM) model of the multimodal PSD is assumed and the associated Fisher information matrix (FIM) is determined. The usefulness of multiangle DLS in significantly decreasing the CRB is demonstrated. The mean square error (MSE) of the PSD GM model parameters estimation by the Bayesian inference method proposed in [1] is compared to the derived CRB for a simulated monomodal PSD. Results show that the MSE achieves the derived CRBs for the unbiased estimators of the PSD GM model parameters.},\n  keywords = {Bayes methods;Gaussian processes;inference mechanisms;light scattering;mean square error methods;mixture models;parameter estimation;particle size;Cramér-Rao bounds;particle size distribution estimation;multiangle dynamic light scattering;CRB;parametric estimation;number-weighted particle size distribution;PSD;DLS;Gaussian mixture model;Fisher information matrix;FIM;mean square error;MSE;Bayesian inference method;Estimation;Light scattering;Mean square error methods;Bayes methods;Robustness;Europe;Signal processing;Particle Size Distribution;Multiangle Dynamic Light Scattering;Cramér-Rao Bound;Inverse Problem;Bayesian Inference},\n  doi = {10.1109/EUSIPCO.2015.7362779},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n We derive the Cramér-Rao lower bounds (CRB) for parametric estimation of the number-weighted particle size distribution (PSD) from multiangle Dynamic Light Scattering (DLS) measurements. The CRB is a useful statistical tool to investigate the optimality of the PSD estimators. In the present paper, a Gaussian mixture (GM) model of the multimodal PSD is assumed and the associated Fisher information matrix (FIM) is determined. The usefulness of multiangle DLS in significantly decreasing the CRB is demonstrated. The mean square error (MSE) of the PSD GM model parameters estimation by the Bayesian inference method proposed in [1] is compared to the derived CRB for a simulated monomodal PSD. Results show that the MSE achieves the derived CRBs for the unbiased estimators of the PSD GM model parameters.\n
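For orientation, the bound in question is the standard one: if \(F(\theta)\) denotes the Fisher information matrix, any unbiased estimator \(\hat{\theta}\) satisfies

\[ \operatorname{cov}(\hat{\theta}) \succeq F^{-1}(\theta), \qquad \operatorname{var}(\hat{\theta}_i) \ge \big[ F^{-1}(\theta) \big]_{ii}, \]

and a Bayesian variant would add the prior's information matrix to \(F(\theta)\) before inverting.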
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Video analysis and synthesis based on a retinal-inspired frame.\n \n \n \n \n\n\n \n Doutsi, E.; Fillatre, L.; Antonini, M.; and Gaulmin, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2226-2230, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"VideoPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362780,\n  author = {E. Doutsi and L. Fillatre and M. Antonini and J. Gaulmin},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Video analysis and synthesis based on a retinal-inspired frame},\n  year = {2015},\n  pages = {2226-2230},\n  abstract = {This paper introduces a novel retinal-inspired filter which is applied on video streams. We mathematically prove that under specific assumptions the spatiotemporal convolution turns into a spatial convolution with a short lifespan temporal kernel. As a consequence, the filter is applied on each image of the video stream separately. We analyze how each image is decomposed into a group of subbands, each one of which approximates the image providing different kind of information. Afterwords, we propose an algorithm to reconstruct each image by exploiting the group of subbands. Finally, we defend our mathematical proofs by providing numerical simulations which show the relevance of our study.},\n  keywords = {convolution;image reconstruction;image sequences;numerical analysis;spatiotemporal phenomena;video streaming;video analysis;video synthesis;retinal-inspired frame;retinal-inspired filter;video streams;spatiotemporal convolution;spatial convolution;short lifespan temporal kernel;image reconstruction;numerical simulations;mathematical proofs;Streaming media;Retina;Convolution;Spatiotemporal phenomena;Image reconstruction;Europe;Retinal-inspired processing;non-separable spatiotemporal filter;frame theory;dual frame},\n  doi = {10.1109/EUSIPCO.2015.7362780},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104971.pdf},\n}\n\n
\n
\n\n\n
\n This paper introduces a novel retinal-inspired filter which is applied to video streams. We mathematically prove that under specific assumptions the spatiotemporal convolution turns into a spatial convolution with a short-lifespan temporal kernel. As a consequence, the filter is applied to each image of the video stream separately. We analyze how each image is decomposed into a group of subbands, each of which approximates the image and provides a different kind of information. Afterwards, we propose an algorithm to reconstruct each image by exploiting the group of subbands. Finally, we support our mathematical proofs with numerical simulations which show the relevance of our study.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparse coding of natural images using a prior on edge co-occurences.\n \n \n \n \n\n\n \n Perrinet, L. U.; and Bednar, J. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2231-2235, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SparsePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362781,\n  author = {L. U. Perrinet and J. A. Bednar},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Sparse coding of natural images using a prior on edge co-occurences},\n  year = {2015},\n  pages = {2231-2235},\n  abstract = {Oriented edges in images commonly occur in co-linear and co-circular arrangements, obeying the {"}good continuation law{"} of Gestalt psychology. The human visual system appears to exploit this property of images, with contour detection, line completion, and grouping performance well predicted by such an {"}association field{"} between edge elements [1, 2]. In this paper, we show that an association field of this type can be used to enhance the sparse representation of natural images. First, we define the SparseLets framework as an efficient representation of images based on a discrete wavelet transform. Second, we extract second-order information about edge co-occurrences from a set of images of natural scenes. Finally, we incorporate this prior information into our framework and show that it allows for the extraction of features relevant to natural scenes, like a round shape. This novel approach points the way to practical computer vision algorithms with human-like performance.},\n  keywords = {computer vision;discrete wavelet transforms;edge detection;image coding;image representation;natural images sparse coding;edge co-occurences;co-circular arrangements;Gestalt psychology;line completion;discrete wavelet transform;second-order information;practical computer vision algorithms;Image edge detection;Signal processing algorithms;Visualization;Wavelet transforms;Europe;Signal processing;Image coding;sparse coding;natural scene statistics;sparselets;lateral connections;association field},\n  doi = {10.1109/EUSIPCO.2015.7362781},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096789.pdf},\n}\n\n
\n
\n\n\n
\n Oriented edges in images commonly occur in co-linear and co-circular arrangements, obeying the \"good continuation law\" of Gestalt psychology. The human visual system appears to exploit this property of images, with contour detection, line completion, and grouping performance well predicted by such an \"association field\" between edge elements [1, 2]. In this paper, we show that an association field of this type can be used to enhance the sparse representation of natural images. First, we define the SparseLets framework as an efficient representation of images based on a discrete wavelet transform. Second, we extract second-order information about edge co-occurrences from a set of images of natural scenes. Finally, we incorporate this prior information into our framework and show that it allows for the extraction of features relevant to natural scenes, like a round shape. This novel approach points the way to practical computer vision algorithms with human-like performance.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exploring discrete approaches to lossy compression schemes for natural image patches.\n \n \n \n \n\n\n \n Mehta, R.; Marzen, S.; and Hillar, C.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2236-2240, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ExploringPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362782,\n  author = {R. Mehta and S. Marzen and C. Hillar},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Exploring discrete approaches to lossy compression schemes for natural image patches},\n  year = {2015},\n  pages = {2236-2240},\n  abstract = {Optimal compressions in a rate-distortion sense are usually discrete random variables, so clever discretizations of natural images might be key to developing better compression schemes. A new image compression method achieved good perceptual coding performance by using as primitives memories of a Hopfield network trained on discretized natural images. Here we explore why Hopfield network fixed-points are good lossy perceptual features even though the implied generative model (a second-order Lenz-Ising model) does not provide a state-of-the-art match to the true probability distribution of discretized natural images. Even so, we demonstrate that this deterministic coding scheme can achieve near-optimality by comparing with the rate-distortion function for discretized natural image patches.},\n  keywords = {data compression;Hopfield neural nets;image coding;Ising model;probability;rate distortion theory;natural image patches;lossy compression schemes;optimal compressions;rate-distortion sense;discrete random variables;image compression;perceptual coding performance;primitives memories;discretized natural images;Hopfield network fixed-points;lossy perceptual features;second-order Lenz-Ising model;probability distribution;deterministic coding scheme;Image coding;Rate-distortion;Encoding;Neurons;Distortion;Data models;Europe;natural images;Hopfield network;recurrent neural network;image compression;Lenz-Ising model},\n  doi = {10.1109/EUSIPCO.2015.7362782},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097345.pdf},\n}\n\n
\n
\n\n\n
\n Optimal compressions in a rate-distortion sense are usually discrete random variables, so clever discretizations of natural images might be key to developing better compression schemes. A new image compression method achieved good perceptual coding performance by using, as primitives, memories of a Hopfield network trained on discretized natural images. Here we explore why Hopfield network fixed points are good lossy perceptual features even though the implied generative model (a second-order Lenz-Ising model) does not provide a state-of-the-art match to the true probability distribution of discretized natural images. Even so, we demonstrate that this deterministic coding scheme can achieve near-optimality by comparing with the rate-distortion function for discretized natural image patches.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Decoding MT motion response for optical flow estimation: An experimental evaluation.\n \n \n \n \n\n\n \n Chessa, M.; Kartheek Medathati, N. V.; Masson, G. S.; Solari, F.; and Kornprobst, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2241-2245, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DecodingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362783,\n  author = {M. Chessa and N. V. {Kartheek Medathati} and G. S. Masson and F. Solari and P. Kornprobst},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Decoding MT motion response for optical flow estimation: An experimental evaluation},\n  year = {2015},\n  pages = {2241-2245},\n  abstract = {Motion processing in primates is an intensely studied problem in visual neurosciences and after more than two decades of research, representation of motion in terms of motion energies computed by V1-MT feedforward interactions remains a strong hypothesis. Thus, decoding the motion energies is of natural interest for developing biologically inspired computer vision algorithms for dense optical flow estimation. Here, we address this problem by evaluating four strategies for motion decoding: intersection of constraints, linear decoding through learned weights on MT responses, maximum likelihood and regression with neural network using multi scale-features. We characterize the performances and the current limitations of the different strategies, in terms of recovering dense flow estimation using Middlebury benchmark dataset widely used in computer vision, and we highlight key aspects for future developments.},\n  keywords = {computer vision;image sequences;neural nets;decoding MT motion response;motion processing;biologically inspired computer vision algorithms;dense optical flow estimation;linear decoding;neural network;dense flow estimation;Middlebury benchmark dataset;Maximum likelihood decoding;Sociology;Optical signal processing;Optical imaging;Computer vision;Maximum likelihood estimation;Optical flow;spatio-temporal filters;motion energy;population code;V1;MT;Middlebury dataset},\n  doi = {10.1109/EUSIPCO.2015.7362783},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104649.pdf},\n}\n\n
\n
\n\n\n
\n Motion processing in primates is an intensely studied problem in visual neuroscience and, after more than two decades of research, representation of motion in terms of motion energies computed by V1-MT feedforward interactions remains a strong hypothesis. Thus, decoding the motion energies is of natural interest for developing biologically inspired computer vision algorithms for dense optical flow estimation. Here, we address this problem by evaluating four strategies for motion decoding: intersection of constraints, linear decoding through learned weights on MT responses, maximum likelihood, and regression with a neural network using multi-scale features. We characterize the performances and the current limitations of the different strategies in recovering dense flow estimation using the Middlebury benchmark dataset widely used in computer vision, and we highlight key aspects for future developments.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Periodic components estimation in chronobiological time series via a Bayesian approach.\n \n \n \n \n\n\n \n Dumitru, M.; and Mohammad-Djafari, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2246-2250, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PeriodicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362784,\n  author = {M. Dumitru and A. Mohammad-Djafari},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Periodic components estimation in chronobiological time series via a Bayesian approach},\n  year = {2015},\n  pages = {2246-2250},\n  abstract = {In chronobiology a periodic components variation analysis for the signals expressing the biological rhythms is needed. Therefore precise estimation of the periodic components is required. The classical approaches, based on FFT methods, are inefficient considering the particularities of the data (non-stationary, short length and noisy). In this paper we propose a new method using inverse problem and Bayesian approach with sparsity enforcing prior. The considered prior law is the Student-t distribution, viewed as a marginal distribution of an Infinite Gaussian Scale Mixture (IGSM) defined via the inverse variances. For modelling the non stationarity of the observed signal and the noise we use a Gaussian model with unknown variances. To infer those variances as well as the variances of the periodic components we use conjugate priors. From the joint posterior law the unknowns are estimated via Posterior Mean (PM) using the Variational Bayesian Approximation (VBA). Finally, we validate the proposed method on synthetic data and present some preliminary results for real chronobiological data.},\n  keywords = {Bayes methods;fast Fourier transforms;Gaussian processes;inverse problems;medical signal processing;mixture models;time series;variational techniques;periodic component estimation;chronobiological time series;Bayesian approach;chronobiology;periodic component variation analysis;biological rhythms;FFT methods;inverse problem;sparsity enforcing prior;Student-t distribution;Infinite Gaussian Scale Mixture;IGSM;inverse variances;signal nonstationarity;Gaussian model;joint posterior law;Posterior Mean;Variational Bayesian Approximation;VBA;synthetic data;real chronobiological data;Bayes methods;Estimation;Europe;Signal processing;Time series analysis;Biology;Cancer;Periodic components estimation;Inverse Problem;Variational Bayesian Approximation (VBA);Kullback-Leibler divergence (KL);Infinite Gaussian Scale Mixture (IGSM);Posterior Mean (PM)},\n  doi = {10.1109/EUSIPCO.2015.7362784},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104897.pdf},\n}\n\n
\n
\n\n\n
\n In chronobiology, an analysis of the variation of the periodic components of the signals expressing biological rhythms is needed. Therefore, precise estimation of the periodic components is required. The classical approaches, based on FFT methods, are inefficient considering the particularities of the data (non-stationary, short, and noisy). In this paper we propose a new method using an inverse problem formulation and a Bayesian approach with a sparsity enforcing prior. The considered prior law is the Student-t distribution, viewed as the marginal distribution of an Infinite Gaussian Scale Mixture (IGSM) defined via the inverse variances. For modelling the non-stationarity of the observed signal and the noise we use a Gaussian model with unknown variances. To infer those variances as well as the variances of the periodic components we use conjugate priors. From the joint posterior law the unknowns are estimated via the Posterior Mean (PM) using the Variational Bayesian Approximation (VBA). Finally, we validate the proposed method on synthetic data and present some preliminary results for real chronobiological data.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Phasor estimation using conditional maximum likelihood: Strengths and limitations.\n \n \n \n \n\n\n \n Choqueuse, V.; Belouchrani, A.; and Benbouzid, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2251-2255, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PhasorPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362785,\n  author = {V. Choqueuse and A. Belouchrani and M. Benbouzid},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Phasor estimation using conditional maximum likelihood: Strengths and limitations},\n  year = {2015},\n  pages = {2251-2255},\n  abstract = {This paper focuses on the estimation of the phasor parameters in three-phase power systems for smart grid monitoring. Specifically, it investigates the use of the Conditional Maximum Likelihood (ML) for phasor parameter estimation. The contribution of this paper is twofold. First, it presents the condition on the signal model for identifiability of the phasor parameters. Then, it shows that the Conditional Maximum Likelihood estimator has a simple closed form expression, which can be determined from simple geometrical properties. Simulation results illustrate the effectiveness of the proposed approach for the estimation of the phasor amplitude and angle shift under dynamic conditions.},\n  keywords = {amplitude estimation;maximum likelihood estimation;phasor measurement;smart power grids;phasor parameter estimation;three-phase power systems;smart grid monitoring;conditional maximum likelihood estimator;phasor amplitude estimation;Maximum likelihood estimation;Signal processing;Europe;Maximum likelihood detection;Covariance matrices;Eigenvalues and eigenfunctions;Phasor Measurement Units;Smart Grid;Maximum Likelihood;Condition Monitoring},\n  doi = {10.1109/EUSIPCO.2015.7362785},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096401.pdf},\n}\n\n
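The closed-form estimator the abstract refers to can be illustrated, under simplifying assumptions, by the classical least-squares phasor fit for a single channel at a known frequency (the paper's conditional-ML treatment of the three-phase case and its identifiability condition are not reproduced):

import numpy as np

def estimate_phasor(x, f0, fs):
    """Least-squares fit of x[n] ~ a*cos(w n) + b*sin(w n) at a known
    frequency; returns amplitude and phase of x ~ A*cos(w n + phi)."""
    n = np.arange(len(x))
    w = 2.0 * np.pi * f0 / fs
    H = np.column_stack([np.cos(w * n), np.sin(w * n)])
    a, b = np.linalg.lstsq(H, x, rcond=None)[0]
    return np.hypot(a, b), np.arctan2(-b, a)

fs, f0 = 1000.0, 50.0
n = np.arange(200)
x = 3.0 * np.cos(2.0 * np.pi * f0 / fs * n + 0.7) + 0.05 * np.random.randn(200)
print(estimate_phasor(x, f0, fs))   # approximately (3.0, 0.7)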
@InProceedings{7362786,
  author = {M. L. D. Wong and M. Zhang and A. K. Nandi},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Effects of compressed sensing on classification of bearing faults with entropic features},
  year = {2015},
  pages = {2256-2260},
  abstract = {The ability to automatically determine the underlying fault type in situ for a roller element bearing is highly desired in machine condition monitoring applications nowadays. In this paper, we classify roller element fault types under a compressed sensing framework. Firstly, vibration signals of roller element bearings are acquired in the time domain and resampled with a random Bernoulli matrix to emulate the compressed sensing mechanism. Sample entropy based features are then computed for both the normalized raw vibration signals and the reconstructed compressed sensed signals. Classification performance using a Support Vector Machine (SVM) shows slight performance degradation with a significant reduction of the bandwidth requirement.},
  keywords = {compressed sensing;condition monitoring;mechanical engineering computing;rolling bearings;signal reconstruction;signal sampling;support vector machines;vibrations;SVM;support vector machines;signal reconstruction;compressed sensing mechanism;random Bernoulli matrix;vibration signals;fault classification;machine condition monitoring;roller element bearings;compressed sensing effects;Vibrations;Compressed sensing;Entropy;Time series analysis;Sparse matrices;Feature extraction;Frequency-domain analysis;Bearing Fault Classification;Compressed Sensing;Machine Condition Monitoring;Sample Entropy},
  doi = {10.1109/EUSIPCO.2015.7362786},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570099709.pdf},
}
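A compact sketch of two ingredients named in the abstract, sample entropy and random Bernoulli sensing, is given below; the reconstruction stage and the SVM classifier are omitted, and all sizes and the (m, r) settings are illustrative assumptions:

import numpy as np

def sample_entropy(x, m=2, r=0.2):
    """Compact SampEn(m, r): -log of the ratio of (m+1)- to m-length
    template matches within tolerance r (Chebyshev distance)."""
    x = np.asarray(x, dtype=float)
    tol = r * x.std()
    def matches(mm):
        templ = np.lib.stride_tricks.sliding_window_view(x, mm)
        d = np.abs(templ[:, None, :] - templ[None, :, :]).max(axis=2)
        return (d <= tol).sum() - len(templ)   # drop self-matches
    return -np.log(matches(m + 1) / matches(m))

rng = np.random.default_rng(1)
vib = rng.standard_normal(500)                   # stand-in vibration record
phi = rng.integers(0, 2, (200, 500)) * 2 - 1     # random Bernoulli (+/-1) matrix
y = (phi @ vib) / np.sqrt(200)                   # compressed measurements
print(sample_entropy(vib), sample_entropy(y))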
@InProceedings{7362787,
  author = {V. {Long Do} and L. Fillatre and I. Nikiforov},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Sensitivity analysis of the sequential test for detecting cyber-physical attacks},
  year = {2015},
  pages = {2261-2265},
  abstract = {This paper deals with the problem of detecting cyber-physical attacks on Supervisory Control And Data Acquisition (SCADA) systems. The discrete-time state space model is used to describe the systems. The attacks are modeled as additive signals of short duration on both state evolution and sensor measurement equations. The steady-state Kalman filter is employed to generate the sequence of innovations. Next, these independent random variables are used as entries of the Variable Threshold Window Limited CUmulative SUM (VTWL CUSUM) test. It has been shown that the optimal choice of thresholds with respect to (w.r.t.) the transient change detection criterion leads to the Finite Moving Average (FMA) test. The main contribution of this paper is a sensitivity analysis of the FMA test. This analysis is based on a numerical calculation of the probabilities of wrong decision under the variation of operational parameters. Theoretical results are applied to the detection of an attack scenario on a SCADA water network.},
  keywords = {feature extraction;Kalman filters;moving average processes;SCADA systems;security of data;sensitivity analysis;state-space methods;sensitivity analysis;cyber-physical attack detection;supervisory control and data acquisition;SCADA system;discrete-time state space model;Kalman filter;variable threshold window limited cumulative sum;VTWL CUSUM test;transient change detection criterion;finite moving average;FMA;Transient analysis;Technological innovation;Kalman filters;SCADA systems;Steady-state;Signal processing algorithms;Europe;Transient change detection;Window Limited CUSUM test;FMA test;cyber-physical attacks;SCADA systems},
  doi = {10.1109/EUSIPCO.2015.7362787},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102655.pdf},
}
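A minimal sketch of a Finite Moving Average test of the kind described, assuming unit-variance Kalman innovations and illustrative window, weights and threshold (the paper's optimal threshold choice is not reproduced):

import numpy as np

def fma_test(innovations, window=10, threshold=12.0, weights=None):
    """Finite Moving Average test: raise an alarm whenever a weighted
    moving sum of the innovations crosses the threshold."""
    w = np.ones(window) if weights is None else np.asarray(weights)
    stat = np.convolve(innovations, w[::-1], mode='valid')  # moving weighted sums
    alarms = np.nonzero(np.abs(stat) > threshold)[0] + window - 1
    return stat, alarms

rng = np.random.default_rng(2)
innov = rng.standard_normal(300)      # innovations, N(0,1) under no attack
innov[200:210] += 2.5                 # short additive attack signature
stat, alarms = fma_test(innov)
print(alarms)                         # indices clustered around sample 200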
@InProceedings{7362788,
  author = {A. Youssef and C. Delpha and D. Diallo},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Analytical model of the KL divergence for gamma distributed data: Application to fault estimation},
  year = {2015},
  pages = {2266-2270},
  abstract = {Incipient fault diagnosis has become a key issue for reliability and safety of industrial processes. Data-driven methods are effective for feature extraction and feature analysis using multivariate statistical techniques. Besides fault detection, fault estimation is essential for making the appropriate decision (safe stop or fault accommodation). Therefore, in this paper, we have developed an analytical model of the Kullback-Leibler Divergence (KLD) for Gamma distributed data to be used for fault severity estimation. In the Principal Component Analysis (PCA) framework, the proposed model of the KLD has been analysed and compared to an estimated value of the KLD using the Monte-Carlo estimator. The results show that for incipient faults (<10%) in usual noise conditions (SNR > 40 dB), the analytical model is accurate enough, with a relative error around 10%.},
  keywords = {fault diagnosis;feature extraction;gamma distribution;Monte Carlo methods;principal component analysis;reliability;analytical model;KL divergence;gamma distributed data;fault estimation;incipient fault diagnosis;industrial processes;reliability;safety;feature extraction;feature analysis;multivariate statistical techniques;fault detection;fault accommodation;Kullback-Leibler divergence;Gamma distributed data;principal component analysis;PCA framework;KLD;Monte-Carlo estimator;incipient faults;noise conditions;Decision support systems;Europe;Signal processing;Conferences;Fault detection;KLD model and estimation;Gamma distributed data;Incipient faults},
  doi = {10.1109/EUSIPCO.2015.7362788},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102733.pdf},
}
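The KLD between Gamma distributions has a closed form; the sketch below implements the generic Gamma-Gamma divergence (shape/rate parameterisation) rather than the paper's specific fault-severity model:

import numpy as np
from scipy.special import digamma, gammaln

def kl_gamma(a1, b1, a2, b2):
    """Closed-form KL( Gamma(a1, rate b1) || Gamma(a2, rate b2) )."""
    return ((a1 - a2) * digamma(a1) - gammaln(a1) + gammaln(a2)
            + a2 * np.log(b1 / b2) + a1 * (b2 - b1) / b1)

print(kl_gamma(2.0, 1.0, 2.0, 1.0))   # 0.0 for identical distributions
print(kl_gamma(2.0, 1.0, 2.5, 1.2))   # small positive divergence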
@InProceedings{7362789,
  author = {S. Ye and D. L. Kocherry and E. Aboutanios},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A novel algorithm for the estimation of the parameters of a real sinusoid in noise},
  year = {2015},
  pages = {2271-2275},
  abstract = {In this paper, we put forward a computationally efficient algorithm to estimate the frequency and complex amplitude of a real sinusoidal signal in additive Gaussian noise. The novel method extends an iterative frequency estimator for single complex exponentials that is based on interpolation on Fourier coefficients to the real case by incorporating an iterative leakage subtraction strategy. Simulation results are presented to verify that the proposed algorithm can obtain more accurate estimation than both time and frequency domain parameter estimators in the literature, and the estimation variance of the method sits on the Cramer-Rao lower bound with only a few iterations required.},
  keywords = {frequency estimation;Gaussian noise;iterative methods;signal sampling;computationally efficient algorithm;real sinusoidal signal;additive Gaussian noise;iterative frequency estimator;single complex exponentials;Fourier coefficients;iterative leakage subtraction strategy;time domain parameter estimators;frequency domain parameter estimators;estimation variance;Cramer-Rao lower bound;Estimation;Signal processing algorithms;Frequency estimation;Signal to noise ratio;Europe;Iterative methods;Frequency estimation;interpolation algorithm;Fourier coefficients;real sinusoid},
  doi = {10.1109/EUSIPCO.2015.7362789},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095593.pdf},
}
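A sketch of the kind of iterative interpolator on Fourier coefficients that the abstract builds on, for the single complex exponential case (the paper's extension to real sinusoids via iterative leakage subtraction is not shown; the half-bin update rule follows the well-known Aboutanios-Mulgrew form):

import numpy as np

def am_frequency(x, n_iter=3):
    """Iterative frequency estimate for a single complex exponential by
    interpolating DFT coefficients at +/- half-bin offsets; returns the
    frequency in cycles/sample."""
    N = len(x)
    n = np.arange(N)
    k = np.argmax(np.abs(np.fft.fft(x)))       # coarse bin from the FFT peak
    delta = 0.0
    for _ in range(n_iter):
        Sp = np.sum(x * np.exp(-2j * np.pi * (k + delta + 0.5) * n / N))
        Sm = np.sum(x * np.exp(-2j * np.pi * (k + delta - 0.5) * n / N))
        delta += 0.5 * np.real((Sp + Sm) / (Sp - Sm))
    return (k + delta) / N

N = 256
f_true = 0.123456
x = np.exp(2j * np.pi * f_true * np.arange(N)) + 0.01 * np.random.randn(N)
print(am_frequency(x), f_true)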
@InProceedings{7362790,
  author = {N. M. Ngo and B. M. Kurkoski and M. Unoki},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Robust and reliable audio watermarking based on dynamic phase coding and error control coding},
  year = {2015},
  pages = {2276-2280},
  abstract = {This paper proposes an audio watermarking method based on dynamic phase coding and error control coding. The technique of quantization index modulation is employed for embedding watermarks into the phase spectrum of audio signals. Since most of the audio information is distributed in moderately low frequencies, to increase robustness, this frequency region is chosen for embedding watermarks. Phase modification causes sound distortion in a manner that is proportional to the magnitude. Therefore, the amount of phase modification is adjusted according to the magnitude to balance inaudibility and robustness. Error control coding is incorporated to further increase reliability of watermark detection. The experimental results show that the watermarks could be kept inaudible in watermarked signals and robust against various attacks. Error control coding is effective in increasing watermark detection accuracy remarkably.},
  keywords = {audio watermarking;error correction codes;modulation;phase coding;quantisation (signal);dynamic phase coding;error control coding;audio watermarking method;quantization index modulation technique;audio signal phase spectrum;phase modification;watermark detection reliability;Watermarking;Robustness;Encoding;Error correction codes;Bit rate;Error correction;audio watermarking;quantization index modulation;inaudibility;robustness;error control coding},
  doi = {10.1109/EUSIPCO.2015.7362790},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104339.pdf},
}
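Scalar quantization index modulation, the embedding primitive named in the abstract, fits in a few lines; the step size, host value and perturbation are illustrative, and the paper's magnitude-adaptive step and error control coding are not included:

import numpy as np

def qim_embed(value, bit, step):
    """Quantise with one of two interleaved lattices, one per bit value."""
    offset = 0.5 * step if bit else 0.0
    return np.round((value - offset) / step) * step + offset

def qim_detect(value, step):
    """Decode by choosing the lattice whose requantisation is nearer."""
    d0 = abs(value - qim_embed(value, 0, step))
    d1 = abs(value - qim_embed(value, 1, step))
    return int(d1 < d0)

step = 0.3
marked = qim_embed(1.234, 1, step)      # embed bit 1 into a phase value
noisy = marked + 0.05                   # perturbation below step/4: still decodable
print(qim_detect(noisy, step))          # -> 1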
@InProceedings{7362791,
  author = {M. H. Bahari and A. Bertrand and M. Moonen},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Blind sampling rate offset estimation based on coherence drift in wireless acoustic sensor networks},
  year = {2015},
  pages = {2281-2285},
  abstract = {In this paper, a new approach for sampling rate offset (SRO) estimation between nodes of a wireless acoustic sensor network (WASN) is proposed using the phase drift of the coherence function between the signals. This method, referred to as least squares coherence drift (LCD) estimation, assumes that the SRO induces a linearly increasing phase-shift in the short-time Fourier transform (STFT) domain. This phase-shift, observed as a drift in the phase of the signal coherence, is applied in a least-squares estimation framework to estimate the SRO. Simulation results in different scenarios show that the LCD estimation approach can estimate the SRO with a mean absolute error of around 1%. We finally demonstrate that the use of the LCD estimation within a compensation approach eliminates the performance loss due to SRO in a multichannel Wiener filter (MWF)-based speech enhancement task.},
  keywords = {acoustic communication (telecommunication);blind equalisers;Fourier transforms;least squares approximations;speech enhancement;Wiener filters;wireless channels;wireless sensor networks;speech enhancement task;multichannel Wiener filter;STFT domain;short-time Fourier transform domain;least squares coherence drift estimation;phase drift;WASN nodes;blind sampling rate offset estimation;coherence drift;wireless acoustic sensor network;Estimation;Coherence;Delays;Wireless sensor networks;Microphones;Signal processing;Wireless communication;Wireless Acoustic Sensor Networks;Signal Enhancement;Sampling Rate Offset;Coherence Drift},
  doi = {10.1109/EUSIPCO.2015.7362791},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103777.pdf},
}
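A rough sketch of the coherence-drift idea, under idealised assumptions (noise-free copies, a single frequency bin k, and illustrative STFT settings); the paper's full least-squares formulation over the time-frequency plane is not reproduced:

import numpy as np

def sro_from_phase_drift(x, y, nfft=256, hop=128, k=20, fs=16000.0):
    """An SRO makes the cross-spectrum phase at bin k drift linearly
    across STFT frames; fit the slope and convert it to a fractional SRO."""
    frames = (min(len(x), len(y)) - nfft) // hop
    ph = []
    for m in range(frames):
        X = np.fft.rfft(x[m * hop : m * hop + nfft])
        Y = np.fft.rfft(y[m * hop : m * hop + nfft])
        ph.append(np.angle(X[k] * np.conj(Y[k])))
    slope = np.polyfit(np.arange(frames), np.unwrap(ph), 1)[0]  # rad/frame
    f = k * fs / nfft
    return slope / (2.0 * np.pi * f * hop / fs)

rng = np.random.default_rng(3)
s = rng.standard_normal(64000)                  # 4 s at 16 kHz
t = np.arange(len(s), dtype=float)
y = np.interp(t * (1.0 + 50e-6), t, s)          # emulate a 50 ppm offset
print(sro_from_phase_drift(s, y))               # approximately 5e-5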
@InProceedings{7362792,
  author = {Q. Labourey and D. Pellerin and M. Rombaut and O. Aycard and C. Garbay},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Sound classification in indoor environment thanks to belief functions},
  year = {2015},
  pages = {2286-2290},
  abstract = {Sounds provide substantial information on human activities in an indoor environment, such as an apartment or a house, but it is a difficult task to classify them, mainly due to the variability and the diversity of realization of sounds in those environments. In this paper, sounds are considered as a class of information, to be mixed with other modalities (video in particular) in the design of ambient monitoring systems. As a consequence, we propose a classification scheme aimed at (i) exploiting the specificities of this modality with respect to others and (ii) leaving doubtful events for further analysis, so that the risk of errors is overall minimized. A dedicated taxonomy together with belief functions are proposed in this respect. Belief functions are well adapted to the variability of sounds, as they can quantify the impossibility of classifying a signal that differs too much from what is known, by creating a class of doubt. The algorithm is tested on a dataset composed of real-life signals.},
  keywords = {belief networks;signal classification;sound classification scheme;indoor environment;belief functions;ambient monitoring systems;signal classification;real-life signals;Feature extraction;Monitoring;Taxonomy;Indoor environments;Signal processing algorithms;Sensors;Speech;Sound classification;Indoor sounds;Belief functions;Features selection;Reject class},
  doi = {10.1109/EUSIPCO.2015.7362792},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097519.pdf},
}
@InProceedings{7362793,
  author = {S. Tamura and T. Uno and M. Takehara and S. Hayamizu and T. Kurata},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Multi-modal service operation estimation using DNN-based acoustic bag-of-features},
  year = {2015},
  pages = {2291-2295},
  abstract = {In service engineering it is important to estimate when and what a worker did, because this information provides crucial evidence for improving service quality and working environments. For Service Operation Estimation (SOE), acoustic information is one of the key modalities; in particular, environmental and background sounds include effective cues. This paper focuses on two aspects: (1) extracting powerful and robust acoustic features by using stacked-denoising-autoencoder and bag-of-feature techniques, and (2) investigating a multi-modal SOE scheme by combining the audio features with other sensor data as well as non-sensor information. We conducted evaluation experiments using multi-modal data recorded in a restaurant. We improved SOE performance in comparison to conventional acoustic features, and the effectiveness of our multi-modal SOE scheme is also demonstrated.},
  keywords = {acoustic signal processing;audio signal processing;neural nets;signal denoising;multimodal service operation estimation;DNN-based acoustic bag-of-features;service engineering;service quality;working environments;acoustic information;stacked-denoising-autoencoder;multimodal SOE scheme;audio feature;acoustic feature;deep neural network;Feature extraction;Speech;Mel frequency cepstral coefficient;Signal processing;Estimation;Support vector machines;Service operation estimation;multimodal signal processing;stacked denoising autoencoder;bag of features;environmental sounds},
  doi = {10.1109/EUSIPCO.2015.7362793},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104597.pdf},
}
@InProceedings{7362794,
  author = {A. Moore and C. Evers and P. A. Naylor and D. L. Alon and B. Rafaely},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Direction of arrival estimation using pseudo-intensity vectors with direct-path dominance test},
  year = {2015},
  pages = {2296-2300},
  abstract = {The accuracy of direction of arrival estimation tends to degrade under reverberant conditions due to the presence of reflected signal components which are correlated with the direct path. The recently proposed direct-path dominance test provides a means of identifying time-frequency regions in which a single signal path is dominant. By analysing only these regions it was shown that the accuracy of the FS-MUSIC algorithm could be significantly improved. However, for real-time implementation a less computationally demanding localisation algorithm would be preferable. In the present contribution we investigate the direct-path dominance test as a preprocessing step to pseudo-intensity vector-based localisation. A novel formulation of the pseudo-intensity vector is proposed which further exploits the direct path dominance test and leads to improved localisation performance.},
  keywords = {direction-of-arrival estimation;signal classification;time-frequency analysis;vectors;direction of arrival estimation;reflected signal components;direct-path dominance test;time-frequency regions;FS-MUSIC algorithm;preprocessing step;pseudo-intensity vector-based localisation;direct path dominance test;Direction-of-arrival estimation;Time-frequency analysis;Estimation;Arrays;Harmonic analysis;Correlation;Multiple signal classification;direction of arrival estimation;spherical harmonic domain;pseudo-intensity vectors},
  doi = {10.1109/EUSIPCO.2015.7362794},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104221.pdf},
}
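A minimal pseudo-intensity sketch for B-format-style inputs W (omnidirectional) and X, Y, Z (dipoles); the direct-path dominance preprocessing that is the point of the paper is omitted, and sign/scaling conventions vary across B-format definitions:

import numpy as np

def piv_doa(W, X, Y, Z):
    """Average the pseudo-intensity vector Re{W* [X, Y, Z]} over frames and
    read azimuth/elevation from it (sign follows the synthesis below)."""
    I = np.array([np.mean(np.real(np.conj(W) * X)),
                  np.mean(np.real(np.conj(W) * Y)),
                  np.mean(np.real(np.conj(W) * Z))])
    v = I / np.linalg.norm(I)
    return np.arctan2(v[1], v[0]), np.arcsin(v[2])   # azimuth, elevation

# plane wave from azimuth 60 deg, elevation 0: dipoles scale W by DOA cosines
u = np.array([np.cos(np.pi / 3), np.sin(np.pi / 3), 0.0])
rng = np.random.default_rng(4)
W = np.exp(2j * np.pi * rng.random(100))     # unit-modulus STFT frames
az, el = piv_doa(W, u[0] * W, u[1] * W, u[2] * W)
print(np.degrees(az), np.degrees(el))        # -> 60.0, 0.0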
@InProceedings{7362795,
  author = {M. Aktas and T. Akgun and H. Ozkan},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Acoustic direction finding in highly reverberant environment with single acoustic vector sensor},
  year = {2015},
  pages = {2301-2305},
  abstract = {We propose a novel wideband acoustic direction finding method for highly reverberant environments using measurements from a single Acoustic Vector Sensor (AVS). Since an AVS is small in size and can be effectively used within the full acoustic frequency bands, the proposed solution is suitable for wideband acoustic source localization. In particular, we introduce a novel approach to extract the signal portions that are not distorted by multipath signals and noise. We do not make any stochastic or sparseness assumptions regarding the underlying signal source. Hence, our approach can be applied to a wide range of wideband acoustic signals. We present experiments with acoustic signals that are specially exposed to long reverberations, where the Signal-to-Noise Ratio is as low as 0 dB. In these experiments, the proposed method reliably estimates the source direction with less than 5 degrees of error even under the introduced significantly high reverberation conditions.},
  keywords = {acoustic radiators;reverberation;time-frequency analysis;acoustic direction finding;single acoustic vector sensor;highly reverberant environments;wideband acoustic source localization;source direction;Time-frequency analysis;Direction-of-arrival estimation;Reverberation;Estimation;Arrays;Wideband;Acoustic Vector Sensor;Under-determined Direction Finding;Reverberation;Time-Frequency Analysis},
  doi = {10.1109/EUSIPCO.2015.7362795},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096979.pdf},
}
@InProceedings{7362796,
  author = {T. Hayashi and M. Nishida and N. Kitaoka and K. Takeda},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Daily activity recognition based on DNN using environmental sound and acceleration signals},
  year = {2015},
  pages = {2306-2310},
  abstract = {We propose a new method of recognizing daily human activities based on a Deep Neural Network (DNN), using multimodal signals such as environmental sound and subject acceleration. We conduct recognition experiments to compare the proposed method to other methods such as a Support Vector Machine (SVM), using real-world data recorded continuously over 72 hours. Our proposed method achieved a frame accuracy rate of 85.5% and a sample accuracy rate of 91.7% when identifying nine different types of daily activities. Furthermore, the proposed method outperformed the SVM-based method when an additional {"}Other{"} activity category was included. Therefore, we demonstrate that DNNs are a robust method of daily activity recognition.},
  keywords = {cameras;medical signal processing;neural nets;smart phones;support vector machines;daily activity recognition;DNN;environmental sound;acceleration signals;daily human activities;deep neural network;multimodal signals;subject acceleration;Support Vector Machine;SVM;real-world data;frame accuracy rate;sample accuracy rate;time 72 hr;Acceleration;Feature extraction;Support vector machines;Europe;Signal processing;Robustness;Sociology;Daily activity recognition;DNN;multimodal;acceleration signal;environmental sound signal},
  doi = {10.1109/EUSIPCO.2015.7362796},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096685.pdf},
}
@InProceedings{7362797,
  author = {A. Zlatintsi and E. Iosif and P. Maragos and A. Potamianos},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Audio salient event detection and summarization using audio and text modalities},
  year = {2015},
  pages = {2311-2315},
  abstract = {This paper investigates the problem of audio event detection and summarization, building on previous work [1,2] on the detection of perceptually important audio events based on saliency models. We take a synergistic approach to audio summarization where saliency computation of audio streams is assisted by using the text modality as well. Auditory saliency is assessed by auditory and perceptual cues such as Teager energy, loudness and roughness; all known to correlate with attention and human hearing. Text analysis incorporates part-of-speech tagging and affective modeling. A computational method for the automatic correction of the boundaries of the selected audio events is applied creating summaries that consist not only of salient but also meaningful and semantically coherent events. A non-parametric classification technique is employed and results are reported on the MovSum movie database using objective evaluations against ground-truth designating the auditory and semantically salient events.},
  keywords = {audio streaming;nonparametric statistics;speech synthesis;MovSum movie database;nonparametric classification technique;semantically coherent events;affective modeling;part-of-speech tagging;text analysis;Teager energy;auditory saliency;audio streams;saliency computation;saliency models;perceptually important audio events;text modalities;audio modalities;audio summarization;audio salient event detection;Motion pictures;Feature extraction;Semantics;Event detection;Text analysis;Databases;Speech;monomodal auditory saliency;affective text analysis;audio-text salient events;audio summarization},
  doi = {10.1109/EUSIPCO.2015.7362797},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097435.pdf},
}
@InProceedings{7362798,
  author = {S. Montresor and P. Y. Quehe and S. Verhaeghe and P. Picard},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Evaluation of denoising algorithms applied to the reduction of speckle in digital holography},
  year = {2015},
  pages = {2316-2320},
  abstract = {In this article we compare image denoising algorithms applied to the laser digital holography technique. The presented work focuses on reducing the speckle noise contribution. The evaluation methodology relies on images of synthesized phases for which the noise level and type are controlled. We retain five algorithms known for their efficiency in the field of image processing: algorithms used for SAR (Synthetic Aperture Radar) filtering, algorithms based on wavelets, the recently proposed NLmeans algorithm, the Wiener filter and the median filter. Three evaluation criteria are used to compare the selected algorithms: the SNR gain and a quality index; we also propose a new one, the reconstructed phase error, which is particularly relevant in the domain of digital holography.},
  keywords = {holography;image denoising;median filters;wavelet transforms;Wiener filters;speckle noise reduction;image denoising algorithm;laser digital holography technique;SAR filtering;synthetic aperture radar;NLmeans algorithm;Wiener filter;median filter;wavelet-based algorithms;quality index;SNR gain;reconstructed phase error;Speckle;Holography;Image reconstruction;Mathematical model;Signal processing algorithms;Decorrelation;Synthetic aperture radar;denoising;image processing;speckle noise;phase;wavelets},
  doi = {10.1109/EUSIPCO.2015.7362798},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096343.pdf},
}
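The reconstructed-phase-error criterion can be read, in one plausible minimal form (an assumption, since the paper's exact definition is not given here), as the spread of the wrapped difference between a reference phase map and its denoised estimate:

import numpy as np

def phase_error(phi_ref, phi_den):
    """Spread of the wrapped phase difference, in radians; wrapping keeps
    2*pi-equivalent phases from inflating the score."""
    d = np.angle(np.exp(1j * (phi_den - phi_ref)))  # wrap to (-pi, pi]
    return float(d.std())

rng = np.random.default_rng(5)
phi = rng.uniform(-np.pi, np.pi, (64, 64))          # stand-in phase map
print(phase_error(phi, phi + 0.1 * rng.standard_normal((64, 64))))  # ~0.1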
@InProceedings{7362799,
  author = {M. Grogan and M. Prasad and R. Dahyot},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {L2 registration for colour transfer},
  year = {2015},
  pages = {1-5},
  abstract = {This paper proposes to perform colour transfer by minimising a divergence (the L2 distance) between two colour distributions. We propose to model each dataset by a compact Gaussian mixture which is designed for the specific purpose of colour transfer between images which have different scene content. A non-rigid transformation is estimated by minimising the Euclidean distance (L2) between these two distributions and the estimated transformation is used for transferring colour statistics from one image to another. Experimental results show that this is a very promising approach for transferring colour and it performs very well against an alternative reference approach.},
  keywords = {Portable document format;IEEE Xplore;Colour transfer;registration;L2;Gaussian Mixtures},
  doi = {10.1109/EUSIPCO.2015.7362799},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102575.pdf},
}
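The L2 cost between two Gaussian mixtures has a closed form, because the integral of a product of two Gaussians is a Gaussian density evaluated at the difference of the means; the sketch below computes only this cost (the non-rigid transformation estimated in the paper is not included):

import numpy as np
from scipy.stats import multivariate_normal

def l2_gmm(w1, mu1, S1, w2, mu2, S2):
    """Squared L2 distance between two GMMs using
    int N(x; mi, Si) N(x; mj, Sj) dx = N(mi; mj, Si + Sj)."""
    def cross(wa, ma, Sa, wb, mb, Sb):
        return sum(wa[i] * wb[j] *
                   multivariate_normal.pdf(ma[i], mean=mb[j], cov=Sa[i] + Sb[j])
                   for i in range(len(wa)) for j in range(len(wb)))
    return (cross(w1, mu1, S1, w1, mu1, S1)
            - 2.0 * cross(w1, mu1, S1, w2, mu2, S2)
            + cross(w2, mu2, S2, w2, mu2, S2))

# two single-component 'colour distributions' in RGB space
w = [1.0]
print(l2_gmm(w, [np.zeros(3)], [np.eye(3) * 0.05],
             w, [np.full(3, 0.1)], [np.eye(3) * 0.05]))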
@InProceedings{7362800,
  author = {V. Bruni and D. Panella and D. Vitulano},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Non local means image denoising using noise-adaptive SSIM},
  year = {2015},
  pages = {2326-2330},
  abstract = {This paper embeds SSIM in place of the L2 norm in a one-step Non Local Means (NLM) scheme. This is possible thanks to a new form of SSIM that can be formally derived from the classical SSIM using the spreading error analysis. This approach has several advantages over L2 norm based NLM, such as greater robustness to parameter settings, higher performance in terms of PSNR and SSIM, and optimal subjective visual quality. In addition, it is possible to show that the cascade of the proposed pure visual approach and a second step based on the L2 norm allows us to reach results close to (slightly below) the state of the art (BM3D) in terms of PSNR and SSIM.},
  keywords = {error analysis;image denoising;optimal subjective visual quality;PSNR;spreading error analysis;NLM scheme;noise-adaptive SSIM;nonlocal mean image denoising;Noise reduction;Noise measurement;Visualization;Smoothing methods;Estimation;Europe;Signal processing;Image denoising;Non Local Means;SSIM;Wiener filter},
  doi = {10.1109/EUSIPCO.2015.7362800},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096817.pdf},
}
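For context, a single-pixel non-local means sketch; the per-patch L2 distance inside the weight kernel is the slot where the paper substitutes its noise-adaptive SSIM, which is not reproduced here (patch, search and h are illustrative):

import numpy as np

def nlm_pixel(img, i, j, patch=3, search=7, h=0.1):
    """One-step NLM estimate at pixel (i, j): search-window pixels are
    averaged with weights from a patch-similarity kernel (plain L2)."""
    p = patch // 2
    pad = np.pad(img, p + search, mode='reflect')
    ci, cj = i + p + search, j + p + search
    ref = pad[ci - p:ci + p + 1, cj - p:cj + p + 1]
    num = den = 0.0
    for di in range(-search, search + 1):
        for dj in range(-search, search + 1):
            q = pad[ci + di - p:ci + di + p + 1, cj + dj - p:cj + dj + p + 1]
            w = np.exp(-np.mean((ref - q) ** 2) / h ** 2)
            num += w * pad[ci + di, cj + dj]
            den += w
    return num / den

rng = np.random.default_rng(6)
noisy = np.full((32, 32), 0.5) + 0.05 * rng.standard_normal((32, 32))
print(nlm_pixel(noisy, 16, 16))   # close to 0.5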
@InProceedings{7362801,
  author = {V. Bruni and I. Selesnick and L. Tarchi and D. Vitulano},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {An adaptive perception-based image preprocessing method},
  year = {2015},
  pages = {2331-2335},
  abstract = {The aim of this paper is to introduce an adaptive preprocessing procedure based on human perception in order to increase the performance of some standard image processing techniques. Specifically, the image frequency content has been weighted by the corresponding value of the contrast sensitivity function, in agreement with the sensitivity of the human eye to different image frequencies and contrasts. The 2D rational dilation wavelet transform has been employed for representing image frequencies. In fact, it provides an adaptive and flexible multiresolution framework, enabling an easy and straightforward adaptation to the image frequency content. Preliminary experimental results show that the proposed preprocessing allows us to increase the performance of some standard image enhancement algorithms in terms of visual quality and often also in terms of PSNR.},
  keywords = {image processing;wavelet transforms;adaptive perception-based image preprocessing method;adaptive preprocessing procedure;human perception;standard image processing techniques;image frequencies;2D rational dilation wavelet transform;image frequency content;standard image enhancement algorithms;Image resolution;Q-factor;Wavelet transforms;Shape;Europe;Signal processing;Human Visual System;Contrast sensitivity function;Image enhancement;SSIM},
  doi = {10.1109/EUSIPCO.2015.7362801},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104135.pdf},
}
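A rough illustration of CSF weighting: the paper performs it within a 2D rational-dilation wavelet decomposition, whereas this sketch uses the FFT domain with the classical Mannos-Sakrison CSF fit, and the cycles-per-degree mapping is an assumed viewing parameter:

import numpy as np

def csf_mannos_sakrison(f):
    """Contrast sensitivity function (Mannos-Sakrison fit), f in cycles/degree."""
    return 2.6 * (0.0192 + 0.114 * f) * np.exp(-(0.114 * f) ** 1.1)

def csf_weight(img, max_cycles_per_degree=32.0):
    """Weight each spatial frequency of the image by its CSF value.
    Note that this simple weighting also attenuates the DC term."""
    F = np.fft.fft2(img)
    fy = np.fft.fftfreq(img.shape[0])        # cycles/sample
    fx = np.fft.fftfreq(img.shape[1])
    r = np.hypot(*np.meshgrid(fy, fx, indexing='ij'))
    f_deg = r * 2.0 * max_cycles_per_degree  # map Nyquist to the assumed maximum
    return np.real(np.fft.ifft2(F * csf_mannos_sakrison(f_deg)))

rng = np.random.default_rng(7)
img = rng.random((64, 64))
print(csf_weight(img).shape)    # (64, 64)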
@InProceedings{7362802,
  author = {Z. Sadeghipoor and Y. M. Lu and E. Mendez and S. Süsstrunk},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Multiscale guided deblurring: Chromatic aberration correction in color and near-infrared imaging},
  year = {2015},
  pages = {2336-2340},
  abstract = {Chromatic aberration, caused by photographic lens imperfections, results in the image of only one spectral channel being sharp, while the other channels are blurred depending on their wavelength difference with the sharp channel. We study chromatic aberration for a system that jointly records color and near-infrared (NIR) images on a single sensor. Chromatic aberration in such a system leads to a blurred NIR image when the color image is in-focus and sharp. We propose an algorithm that deblurs the NIR image using the gradients of the sharp color image, as both scene representations are generally similar. However, the details of these images often exhibit significant differences due to varying scene reflection and absorption in the corresponding bands. To account for this, we compute the correlation between color and NIR gradients, and use the gradients of the color image in reconstructing NIR only where the gradients are highly correlated. We propose a multiscale scheme that gradually deblurs NIR and accurately computes similarities between color and NIR gradients. Experimental results show that our algorithm recovers details of NIR without producing visible artifacts.},
  keywords = {aberrations;image colour analysis;image representation;image restoration;infrared imaging;NIR reconstruction;NIR gradients;color gradients;scene representations;sharp color image;single sensor;near-infrared imaging;chromatic aberration correction;multiscale guided deblurring;Image color analysis;Yttrium;Color;Kernel;Signal processing algorithms;Lenses;Image edge detection;Axial chromatic aberration;NIR imaging;similarity maps;gradient-based deblurring},
  doi = {10.1109/EUSIPCO.2015.7362802},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104337.pdf},
}
@InProceedings{7362803,
  author = {P. M. Baggenstoss},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Class-specific model mixtures for the classification of time-series},
  year = {2015},
  pages = {2341-2345},
  abstract = {We present a new classifier for acoustic time-series that involves a mixture of generative models. Each model operates on a feature stream extracted from the time-series using overlapped Hanning-weighted segments and has a probability density function (PDF) modeled with a hidden Markov model (HMM). The models use a variety of segmentation sizes and feature extraction methods, yet can be combined at a higher level using a mixture PDF thanks to the PDF projection theorem (PPT) that converts the feature PDF to raw time-series PDFs. The effectiveness of the method is shown using an open data set of short-duration acoustic signals.},
  keywords = {acoustic signal processing;feature extraction;hidden Markov models;signal classification;time series;time-series classification;short-duration acoustic signals;PPT;PDF projection theorem;mixture PDF;feature extraction methods;segmentation sizes;HMM;hidden Markov model;probability density function;overlapped Hanning-weighted segments;feature stream;generative models;acoustic time-series;Hidden Markov models;Feature extraction;Computational modeling;Mel frequency cepstral coefficient;Cepstrum;Support vector machines;Probability density function;Classification;PDF projection;generative models;kernel methods},
  doi = {10.1109/EUSIPCO.2015.7362803},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096431.pdf},
}
\n
\n\n\n
\n                    We present a new classifier for acoustic time-series that involves a mixture of generative models. Each model operates on a feature stream extracted from the time-series using overlapped Hanning-weighted segments and has a probability density function (PDF) modeled with a hidden Markov model (HMM). The models use a variety of segmentation sizes and feature extraction methods, yet can be combined at a higher level using a mixture PDF, thanks to the PDF projection theorem (PPT), which converts each feature PDF to a PDF on the raw time-series. The effectiveness of the method is shown using an open data set of short-duration acoustic signals.\n
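A minimal sketch of the segmentation front end named above, under assumed segment sizes; each model in the mixture would run its own feature extractor and HMM on such a stream:

import numpy as np

def hanning_segments(x, seg_len=256, hop=128):
    # overlapped, Hanning-weighted segments of a 1-D time series
    w = np.hanning(seg_len)
    n = 1 + (len(x) - seg_len) // hop
    return np.stack([w * x[i * hop : i * hop + seg_len] for i in range(n)])

The PDF projection theorem then maps each model's feature-space PDF back to a PDF on the raw samples, which is what makes the per-model likelihoods comparable inside one mixture.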
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multiple metric learning for large margin kNN classification of time series.\n \n \n \n \n\n\n \n Do, C.; Douzal-Chouakria, A.; Marié, S.; and Rombaut, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2346-2350, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MultiplePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362804,\n  author = {C. Do and A. Douzal-Chouakria and S. Marié and M. Rombaut},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Multiple metric learning for large margin kNN classification of time series},\n  year = {2015},\n  pages = {2346-2350},\n  abstract = {Time series are complex data objects, they may present noise, varying delays or involve several temporal granularities. To classify time series, promising solutions refer to the combination of multiple basic metrics to compare time series according to several characteristics. This work proposes a new framework to learn a combination of multiple metrics for a robust kNN classifier. By introducing the concept of pairwise space, the combination function is learned in this new space through a {"}large margin{"} optimization process. We apply it to compare time series on both their values and behaviors. The efficiency of the learned metric is compared to the major alternative metrics on large public datasets.},\n  keywords = {learning (artificial intelligence);neural nets;optimisation;time series;multiple metric learning;kNN classification;time series;varying delays;robust kNN classifier;combination function;large margin optimization process;Time series analysis;Extraterrestrial measurements;Training;Niobium;Optimization;Europe;Multiple metric learning;Time series;kNN;Classification},\n  doi = {10.1109/EUSIPCO.2015.7362804},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103649.pdf},\n}\n\n
\n
\n\n\n
\n                    Time series are complex data objects: they may present noise, varying delays, or involve several temporal granularities. To classify time series, promising solutions rely on combining multiple basic metrics that compare time series according to several characteristics. This work proposes a new framework for learning a combination of multiple metrics for a robust kNN classifier. By introducing the concept of a pairwise space, the combination function is learned in this new space through a \"large margin\" optimization process. We apply it to compare time series on both their values and their behaviors. The efficiency of the learned metric is compared to the major alternative metrics on large public datasets.\n
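The combination idea can be sketched as a weighted sum of basic metrics feeding a kNN vote. The two metrics (one on raw values, one on first differences for "behavior") and the fixed weights below are illustrative stand-ins for what the large-margin optimization would learn.

import numpy as np

d_value = lambda x, y: np.linalg.norm(x - y)                     # amplitude
d_shape = lambda x, y: np.linalg.norm(np.diff(x) - np.diff(y))   # behavior

def combined_distance(x, y, metrics, weights):
    return sum(w * m(x, y) for w, m in zip(weights, metrics))

def knn_predict(x, X_train, y_train, weights, k=3):
    d = [combined_distance(x, xt, (d_value, d_shape), weights) for xt in X_train]
    nn = np.argsort(d)[:k]
    labels, counts = np.unique(np.asarray(y_train)[nn], return_counts=True)
    return labels[np.argmax(counts)]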
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A simple sequential outlier detection with several residuals.\n \n \n \n \n\n\n \n Yoon, J. W.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2351-2355, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362805,\n  author = {J. W. Yoon},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A simple sequential outlier detection with several residuals},\n  year = {2015},\n  pages = {2351-2355},\n  abstract = {Outlier detection schemes have been used to identify the unwanted noise and this helps us to obtain underlying valuable signals and predicting the next state of the systems/signals. However, there are few researches on sequential outlier detection in time series although a lot of outlier detection algorithms are developed in off-line systems. In this paper, we focus on the sequential (on-line) outlier detection schemes, that are based on the `delete-replace' approach. We also demonstrate that three different types of residuals can be used to design the outlier detection scheme to achieve accurate sequential estimation: marginal residual, conditional residual, and contribution.},\n  keywords = {sequential estimation;signal detection;time series;simple sequential outlier detection;unwanted noise identification;time series;delete-replace approach;marginal residual estimation;conditional residual estimation;contribution residual estimation;off-line system;Yttrium;Signal processing;Estimation;Trajectory;Europe;Predictive models;Time series analysis;Outlier detection;Marginal residual;Conditional residual;Contribution},\n  doi = {10.1109/EUSIPCO.2015.7362805},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103797.pdf},\n}\n\n
\n
\n\n\n
\n                    Outlier detection schemes are used to identify unwanted noise, which helps us recover the underlying signals and predict the next state of a system. However, while many outlier detection algorithms have been developed for off-line systems, there has been little research on sequential outlier detection in time series. In this paper, we focus on sequential (on-line) outlier detection schemes based on the `delete-replace' approach. We also demonstrate that three different types of residuals can be used to design the outlier detection scheme to achieve accurate sequential estimation: the marginal residual, the conditional residual, and the contribution.\n
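A minimal sketch of the delete-replace idea with a marginal residual. The predictor (a windowed mean), the window size, and the threshold are assumptions for illustration, not the paper's exact choices.

import numpy as np

def sequential_outlier_flags(x, win=20, k=3.0):
    x = np.asarray(x, float).copy()
    flags = np.zeros(len(x), bool)
    for t in range(win, len(x)):
        hist = x[t - win:t]
        pred, scale = hist.mean(), hist.std() + 1e-9
        if abs(x[t] - pred) > k * scale:   # marginal residual test
            flags[t] = True
            x[t] = pred                    # delete-replace: the outlier no
    return flags                           # longer contaminates later steps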
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Bayesian approach for extreme learning machine-based subspace learning.\n \n \n \n \n\n\n \n Iosifidis, A.; and Gabbouj, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2356-2360, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362806,\n  author = {A. Iosifidis and M. Gabbouj},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A Bayesian approach for extreme learning machine-based subspace learning},\n  year = {2015},\n  pages = {2356-236},\n  abstract = {In this paper, we describe a supervised subspace learning method that combines Extreme Learning methods and Bayesian learning. We approach the standard Extreme Learning Machine algorithm from a probabilistic point of view. Subsequently and we devise a method for the calculation of the network target vectors for Extreme Learning Machine-based neural network training that is based on a Bayesian model exploiting both the labeling information available for the training data and geometric class information in the feature space determined by the network's hidden layer outputs. We combine the derived subspace learning method with Nearest Neighbor-based classification and compare its performance with that of the standard ELM approach and other standard methods.},\n  keywords = {Bayes methods;learning (artificial intelligence);neural nets;vectors;extreme learning machine;ELM;supervised subspace learning method;extreme learning methods;Bayesian learning;network target vectors;neural network training;Bayesian model;geometric class information;nearest neighbor-based classification;Training;Standards;Labeling;Training data;Kernel;Neurons;Signal processing;Subspace Learning;Network targets determination;Extreme Learning Machine},\n  doi = {10.1109/EUSIPCO.2015.7362806},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104065.pdf},\n}\n\n
\n
\n\n\n
\n                    In this paper, we describe a supervised subspace learning method that combines Extreme Learning methods and Bayesian learning. We approach the standard Extreme Learning Machine algorithm from a probabilistic point of view. Subsequently, we devise a method for calculating the network target vectors for Extreme Learning Machine-based neural network training that is based on a Bayesian model exploiting both the labeling information available for the training data and the geometric class information in the feature space determined by the network's hidden-layer outputs. We combine the derived subspace learning method with Nearest Neighbor-based classification and compare its performance with that of the standard ELM approach and other standard methods.\n
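For reference, a standard ELM baseline (random hidden layer, ridge-regressed output weights) is sketched below; the paper's contribution is to replace the fixed 0/1 target matrix T with targets derived from a Bayesian model of the hidden-layer feature space. Hidden-layer size and regularizer are illustrative.

import numpy as np

rng = np.random.default_rng(0)

def elm_train(X, T, n_hidden=100, reg=1e-3):
    W = rng.normal(size=(X.shape[1], n_hidden))   # random input weights
    b = rng.normal(size=n_hidden)
    H = np.tanh(X @ W + b)                        # hidden-layer outputs
    beta = np.linalg.solve(H.T @ H + reg * np.eye(n_hidden), H.T @ T)
    return W, b, beta

def elm_project(X, W, b, beta):
    return np.tanh(X @ W + b) @ beta              # learned subspace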
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bayesian learning for robust principal component analysis.\n \n \n \n \n\n\n \n Sundin, M.; Chatterjee, S.; and Jansson, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2361-2365, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BayesianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362807,\n  author = {M. Sundin and S. Chatterjee and M. Jansson},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Bayesian learning for robust principal component analysis},\n  year = {2015},\n  pages = {2361-2365},\n  abstract = {We develop a Bayesian learning method for robust principal component analysis where the main task is to estimate a low-rank matrix from noisy and outlier contaminated measurements. To promote low-rank, we use a structured Gaussian prior that induces correlations among column vectors as well as row vectors of the matrix under estimation. In our method, the noise and outliers are modeled by a combined noise model. The method is evaluated and compared to other methods using synthetic data as well as data from the MovieLens 100Kdataset. Comparisons show that the method empirically provides a significant performance improvement over existing methods.},\n  keywords = {Bayes methods;correlation methods;Gaussian processes;learning (artificial intelligence);matrix algebra;noise;principal component analysis;Bayesian learning;robust principal component analysis;low-rank matrix estimation;noisy contaminated measurements;outlier contaminated measurements;structured Gaussian prior;correlations;column vectors;row vectors;noise model;Bayes methods;Signal to noise ratio;Sparse matrices;Robustness;Estimation;Yttrium;Signal processing algorithms;Robust principal component analysis;matrix completion;Bayesian learning},\n  doi = {10.1109/EUSIPCO.2015.7362807},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104195.pdf},\n}\n\n
\n
\n\n\n
\n                    We develop a Bayesian learning method for robust principal component analysis where the main task is to estimate a low-rank matrix from noisy and outlier-contaminated measurements. To promote low rank, we use a structured Gaussian prior that induces correlations among the column vectors as well as the row vectors of the matrix under estimation. In our method, the noise and outliers are modeled by a combined noise model. The method is evaluated and compared to other methods using synthetic data as well as data from the MovieLens 100K dataset. Comparisons show that the method empirically provides a significant performance improvement over existing methods.\n
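The measurement model behind the method (a low-rank matrix observed under dense noise plus sparse outliers) can be made concrete with a synthetic-data sketch of the kind used for evaluation; all sizes and levels below are illustrative assumptions.

import numpy as np

rng = np.random.default_rng(0)

def synth_robust_pca(m=50, n=40, rank=3, p_out=0.05, sigma=0.1):
    L = rng.normal(size=(m, rank)) @ rng.normal(size=(rank, n))  # low rank
    N = sigma * rng.normal(size=(m, n))                          # dense noise
    S = (rng.random((m, n)) < p_out) * rng.normal(scale=10.0, size=(m, n))
    return L + N + S, L   # observation and ground-truth low-rank part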
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Gossip algorithms for principal component analysis in networks.\n \n \n \n \n\n\n \n Ghadban, N.; Honeine, P.; Mourad-Chehade, F.; Farah, J.; and Francis, C.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2366-2370, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"GossipPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362808,\n  author = {N. Ghadban and P. Honeine and F. Mourad-Chehade and J. Farah and C. Francis},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Gossip algorithms for principal component analysis in networks},\n  year = {2015},\n  pages = {2366-2370},\n  abstract = {This paper deals with the issues of the dimensionality reduction and the extraction of the structure of data using principal component analysis for the multivariable data in large-scale networks. In order to overcome the high computational complexity of this technique, we derive several in-network strategies to estimate the principal axes without the need for computing the sample covariance matrix. To this aim, we propose to combine Oja's iterative rule with average gossiping algorithms. Gossiping is used as a solution for communication between asynchronous nodes. The performance of the proposed approach is illustrated on time series acquisition in wireless sensor networks.},\n  keywords = {computational complexity;iterative methods;principal component analysis;time series;wireless sensor networks;gossip algorithms;principal component analysis;large-scale networks;computational complexity;Oja;iterative rule;asynchronous nodes;time series acquisition;wireless sensor networks;Principal component analysis;Signal processing algorithms;Algorithm design and analysis;Cost function;Routing;Signal processing;Data mining;Gossip averaging;principal component analysis;in-network processing;adaptive learning;distributed processing},\n  doi = {10.1109/EUSIPCO.2015.7362808},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104469.pdf},\n}\n\n
\n
\n\n\n
\n                    This paper deals with dimensionality reduction and the extraction of data structure using principal component analysis for multivariable data in large-scale networks. To overcome the high computational complexity of this technique, we derive several in-network strategies to estimate the principal axes without the need to compute the sample covariance matrix. To this aim, we propose to combine Oja's iterative rule with average gossiping algorithms. Gossiping is used as a solution for communication between asynchronous nodes. The performance of the proposed approach is illustrated on time series acquisition in wireless sensor networks.\n
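A minimal sketch of the two ingredients: Oja's rule tracks the principal axis from streaming samples without ever forming the covariance matrix, and gossip averaging lets asynchronous nodes agree on a common estimate. The step size and the averaging schedule are illustrative.

import numpy as np

def oja_step(w, x, eta=0.01):
    # one Oja update, driving w toward the principal eigenvector of E[x x^T]
    y = w @ x
    w = w + eta * y * (x - y * w)
    return w / np.linalg.norm(w)

def gossip_average(w_i, w_j):
    # randomly paired neighbors average their current estimates; repeated
    # pairings drive all nodes toward a common principal axis
    m = 0.5 * (w_i + w_j)
    return m, m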
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Complex kernels for proper complex-valued signals: A review.\n \n \n \n \n\n\n \n Boloix-Tortosa, R.; Payán-Somet, F. J.; Arias-de-Reyna, E.; and Murillo-Fuentes, J. J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2371-2375, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ComplexPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362809,\n  author = {R. Boloix-Tortosa and F. J. Payán-Somet and E. Arias-de-Reyna and J. J. Murillo-Fuentes},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Complex kernels for proper complex-valued signals: A review},\n  year = {2015},\n  pages = {2371-2375},\n  abstract = {In this paper we investigate the conditions that complex kernels must satisfy for proper complex-valued signals. We study the structure that complex kernels for proper complex-valued signals must have. Also, we demonstrate that complex kernels that have been previously proposed and used in adaptive filtering of complex-valued signals assume that those signals are proper, i.e, they are not correlated with their complex conjugate. We provide an example of how a complex-valued kernel suitable for a particular model is designed, with a procedure that could help in other designs. The experiments included show the good behavior of the proposed kernel in the task of nonlinear channel equalization.},\n  keywords = {adaptive filters;filtering theory;Gaussian processes;regression analysis;complex-valued signals;adaptive filtering;complex-valued kernel;nonlinear channel equalization;Gaussian process;Kernel;Covariance matrices;Europe;Convolution;Symmetric matrices;Adaptation models;Gaussian processes;regression;proper complex processes;kernel methods},\n  doi = {10.1109/EUSIPCO.2015.7362809},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105115.pdf},\n}\n\n
\n
\n\n\n
\n                    In this paper we investigate the conditions that complex kernels must satisfy for proper complex-valued signals. We study the structure that complex kernels for proper complex-valued signals must have. We also demonstrate that complex kernels that have previously been proposed and used in adaptive filtering of complex-valued signals assume that those signals are proper, i.e., that they are not correlated with their complex conjugate. We provide an example of how a complex-valued kernel suitable for a particular model is designed, with a procedure that could help in other designs. The included experiments show the good behavior of the proposed kernel in the task of nonlinear channel equalization.\n
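Properness can be checked directly: the pseudo-covariance E[zz] (no conjugate) vanishes for proper signals, which is the assumption the reviewed kernels implicitly make. A quick numerical check:

import numpy as np

rng = np.random.default_rng(0)

def pseudo_covariance(z):
    z = z - z.mean()
    return (z * z).mean()        # E[z z], without conjugation

# circularly symmetric Gaussian noise is proper:
z = (rng.normal(size=10_000) + 1j * rng.normal(size=10_000)) / np.sqrt(2)
print(abs(pseudo_covariance(z)))        # ~ 0
print(abs((z * np.conj(z)).mean()))     # ~ 1 (ordinary covariance)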
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Operator-valued kernel recursive least squares algorithm.\n \n \n \n \n\n\n \n Amblard, P. O.; and Kadri, H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2376-2380, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Operator-valuedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362810,\n  author = {P. O. Amblard and H. Kadri},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Operator-valued kernel recursive least squares algorithm},\n  year = {2015},\n  pages = {2376-2380},\n  abstract = {The paper develops recursive least square algorithms for nonlinear filtering of multivariate or functional data streams. The framework relies on kernel Hilbert spaces of operators. The results generalize to this framework the kernel recursive least squares developed in the scalar case. We particularly propose two possible extensions of the notion of approximate linear dependence of the regressors, which in the context of the paper, are operators. The development of the algorithms are done in infinite-dimensional spaces using matrices of operators. The algorithms are easily written in finite-dimensional settings using block matrices, and are illustrated in this context for the prediction of a bivariate time series.},\n  keywords = {approximation theory;data analysis;matrix algebra;nonlinear filters;recursive filters;regression analysis;time series;operator-valued kernel recursive least squares algorithm;multivariate data stream nonlinear filtering;functional data stream nonlinear filtering;approximate linear dependence;infinite-dimensional spaces;operator matrices;finite-dimensional settings;block matrices;bivariate time series prediction;functional data analysis;Yttrium;Kernel;Hilbert space;Signal processing algorithms;Dictionaries;Approximation algorithms;Context;kernel RLS;operator-valued kernels;vector-valued RKHS;multitask learning;functional data analysis},\n  doi = {10.1109/EUSIPCO.2015.7362810},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102849.pdf},\n}\n\n
\n
\n\n\n
\n                    The paper develops recursive least squares algorithms for nonlinear filtering of multivariate or functional data streams. The framework relies on kernel Hilbert spaces of operators. The results generalize to this framework the kernel recursive least squares algorithm developed in the scalar case. In particular, we propose two possible extensions of the notion of approximate linear dependence of the regressors, which, in the context of the paper, are operators. The development of the algorithms is done in infinite-dimensional spaces using matrices of operators. The algorithms are easily written in finite-dimensional settings using block matrices, and are illustrated in this context for the prediction of a bivariate time series.\n
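As a point of reference only, a naive batch kernel ridge predictor for a bivariate series is sketched below (scalar Gaussian kernel, refit on all data). The paper's algorithms replace this with true recursive updates built on operator-valued kernels and an operator-valued notion of approximate linear dependence; the kernel width and regularizer here are assumptions.

import numpy as np

def gauss_kernel(A, B, gamma=1.0):
    d2 = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    return np.exp(-gamma * d2)

def fit_kernel_ridge(X, Y, lam=1e-2, gamma=1.0):
    # X: past windows, shape (n, d); Y: next bivariate samples, shape (n, 2)
    K = gauss_kernel(X, X, gamma)
    alpha = np.linalg.solve(K + lam * np.eye(len(X)), Y)
    return lambda x: gauss_kernel(x[None, :], X, gamma) @ alpha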
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Online nonnegative matrix factorization based on kernel machines.\n \n \n \n \n\n\n \n Zhu, F.; and Honeine, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2381-2385, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OnlinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362811,\n  author = {F. Zhu and P. Honeine},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Online nonnegative matrix factorization based on kernel machines},\n  year = {2015},\n  pages = {2381-2385},\n  abstract = {Nonnegative matrix factorization (NMF) has been increasingly investigated for data analysis and dimension-reduction. To tackle large-scale data, several online techniques for NMF have been introduced recently. So far, the online NMF has been limited to the linear model. This paper develops an online version of the nonlinear kernel-based NMF, where the decomposition is performed in the feature space. Taking the advantage of the stochastic gradient descent and the mini-batch scheme, the proposed method has a fixed, tractable complexity independent of the increasing samples number. We derive the multiplicative update rules of the general form, and describe in detail the case of the Gaussian kernel. The effectiveness of the proposed method is validated on unmixing hyperspectral images, compared with the state-of-the-art online NMF methods.},\n  keywords = {data analysis;Gaussian processes;geophysical image processing;gradient methods;hyperspectral imaging;matrix decomposition;stochastic processes;online nonnegative matrix factorization;kernel machine;data analysis;dimension-reduction;nonlinear kernel-based NMF;stochastic gradient descent scheme;minibatch scheme;Gaussian kernel;unmixing hyperspectral imaging;Kernel;Encoding;Linear programming;Europe;Signal processing;Stochastic processes;Computational complexity;Nonnegative matrix factorization;online learning;kernel machines;hyperspectral unmixing},\n  doi = {10.1109/EUSIPCO.2015.7362811},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096379.pdf},\n}\n\n
\n
\n\n\n
\n                    Nonnegative matrix factorization (NMF) has been increasingly investigated for data analysis and dimension reduction. To tackle large-scale data, several online techniques for NMF have been introduced recently. So far, online NMF has been limited to the linear model. This paper develops an online version of the nonlinear kernel-based NMF, where the decomposition is performed in the feature space. Taking advantage of stochastic gradient descent and the mini-batch scheme, the proposed method has a fixed, tractable complexity independent of the increasing number of samples. We derive the multiplicative update rules in their general form, and describe in detail the case of the Gaussian kernel. The effectiveness of the proposed method is validated on unmixing hyperspectral images, in comparison with state-of-the-art online NMF methods.\n
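For orientation, the classical linear-NMF multiplicative updates (Lee and Seung) that the paper moves online and into a kernel-induced feature space:

import numpy as np

rng = np.random.default_rng(0)

def nmf_multiplicative(V, r, n_iter=200, eps=1e-9):
    # V ~ W @ H with all factors nonnegative
    m, n = V.shape
    W, H = rng.random((m, r)), rng.random((r, n))
    for _ in range(n_iter):
        H *= (W.T @ V) / (W.T @ W @ H + eps)   # update encodings
        W *= (V @ H.T) / (W @ H @ H.T + eps)   # update basis
    return W, H

In the online kernel variant, the data matrix is never used directly: inner products become kernel evaluations, and the factors are refreshed per mini-batch with stochastic gradient steps.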
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A modular framework for efficient sound recognition using a smartphone.\n \n \n \n \n\n\n \n Mielke, M.; Weber, L.; and Brück, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2386-2390, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362812,\n  author = {M. Mielke and L. Weber and R. Brück},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A modular framework for efficient sound recognition using a smartphone},\n  year = {2015},\n  pages = {2386-2390},\n  abstract = {The identification of sounds is an important tool in ubiquitous and context aware applications. Today's smartphones are capable of performing even computational intensive tasks, like digital signal processing and pattern recognition. In this contribution an implementation scheme and a framework for sound recognition for smartphones are presented. A basic sound recognition flow consists of preprocessing, feature extraction, feature selection, classiication, and action trigger. A flow is not hard coded but described in a JSON file and build dynamically by the framework. The framework itself is implemented in Java for the Android operating system. But specific algorithms can be realized in Java, C(++), and Renderscript for execution on the CPU, or in Filterscript for execution on a GPU. An example flow is presented and benchmark results are shown for Java-, C-, and Filterscript-implementations of Mel Frequency Cepstral Coefficients (MFCC). Recommendations for technology selection are made.},\n  keywords = {feature extraction;feature selection;signal classification;smart phones;smartphone;sounds identification;ubiquitous applications;context aware applications;computational intensive tasks;digital signal processing;pattern recognition;sound recognition flow;feature extraction;feature selection;classiication;action trigger;JSON file;Java;Android operating system;mel frequency cepstral coefficients;MFCC;Mel frequency cepstral coefficient;Kernel;Pipelines;Feature extraction;Signal processing algorithms;Filtering algorithms;Graphics processing units;sound recognition;smartphone;GPU computation},\n  doi = {10.1109/EUSIPCO.2015.7362812},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104979.pdf},\n}\n\n
\n
\n\n\n
\n                    The identification of sounds is an important tool in ubiquitous and context-aware applications. Today's smartphones are capable of performing even computationally intensive tasks, like digital signal processing and pattern recognition. In this contribution an implementation scheme and a framework for sound recognition on smartphones are presented. A basic sound recognition flow consists of preprocessing, feature extraction, feature selection, classification, and an action trigger. A flow is not hard-coded but described in a JSON file and built dynamically by the framework. The framework itself is implemented in Java for the Android operating system, but specific algorithms can be realized in Java, C(++), and Renderscript for execution on the CPU, or in Filterscript for execution on a GPU. An example flow is presented and benchmark results are shown for Java, C, and Filterscript implementations of Mel Frequency Cepstral Coefficients (MFCC). Recommendations for technology selection are made.\n
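The JSON-described flow can be pictured with a small stand-in (not the authors' API; all stage names and parameters below are invented for the example): the framework reads the stage list and wires matching plugin callables together at run time.

import json

flow_json = """
{ "flow": [ {"stage": "preprocess", "params": {"highpass_hz": 100}},
            {"stage": "mfcc",       "params": {"n_coeffs": 13}},
            {"stage": "classify",   "params": {"model": "gmm"}} ] }
"""

registry = {                                   # stage name -> callable
    "preprocess": lambda x, highpass_hz: x,    # placeholder stages
    "mfcc":       lambda x, n_coeffs: x[:n_coeffs],
    "classify":   lambda x, model: "door_bell",
}

def run_flow(description, signal):
    for step in json.loads(description)["flow"]:
        signal = registry[step["stage"]](signal, **step["params"])
    return signal

print(run_flow(flow_json, list(range(100))))   # -> door_bell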
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Principle and implementation of vector-based phasor measurement unit algorithm using delay devices.\n \n \n \n\n\n \n Nishie, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2391-2395, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362813,\n  author = {S. Nishie},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Principle and implementation of vector-based phasor measurement unit algorithm using delay devices},\n  year = {2015},\n  pages = {2391-2395},\n  abstract = {This paper proposes a novel Phasor Measurement Unit (PMU) algorithm for P and M class measurement using vector-based operation with originally designed pseudo-IQ signal translation using a delay device. A PMU is an essential equipment in the smart grid. However some technical difficulties with frequency measurement are still discussed. The presented algorithm in this paper aims to be a compact algorithm with implementations for breakthrough by 1) directly measuring the nominal frequency by using an F0 measurement method, 2) using vector product operation to calculate the phasor, and 3) not using DFT or the quadrature heterodyne method due to the difficult filter design requirements This paper also reports on the evaluation result based on IEEE Std C37.118.1, and the presented algorithm shows excellent frequency measurement, dynamic response, stability, and preciseness.},\n  keywords = {delays;discrete Fourier transforms;dynamic response;frequency measurement;IEEE standards;phasor measurement;power filters;power system stability;smart power grids;delay device;vector-based phasor measurement unit algorithm;PMU algorithm;P class measurement;M class measurement;pseudo-IQ signal translation;smart grid;frequency measurement;F0 measurement method;vector product operation;DFT;quadrature heterodyne method;filter design;IEEE Std C37.118.1;dynamic response;stability;preciseness;Delays;Frequency measurement;Signal processing algorithms;Phasor measurement units;Signal processing;Algorithm design and analysis;Yttrium;PMU;IQ-signal;Instantaneous Frequency;Delay Device},\n  doi = {10.1109/EUSIPCO.2015.7362813},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n                    This paper proposes a novel Phasor Measurement Unit (PMU) algorithm for P- and M-class measurement using vector-based operations with an originally designed pseudo-IQ signal translation based on a delay device. A PMU is an essential piece of equipment in the smart grid; however, some technical difficulties with frequency measurement are still under discussion. The algorithm presented in this paper aims to be compact by 1) directly measuring the nominal frequency with an F0 measurement method, 2) using a vector product operation to calculate the phasor, and 3) avoiding the DFT and the quadrature heterodyne method, whose filter design requirements are difficult. This paper also reports evaluation results based on IEEE Std C37.118.1, and the presented algorithm shows excellent frequency measurement, dynamic response, stability, and precision.\n
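The pseudo-IQ idea can be illustrated in a few lines: at the nominal frequency, delaying the input by a quarter period turns cos into sin, so a single real channel yields an I/Q pair and the instantaneous phase follows without a DFT or heterodyne stage. This is a toy sketch at a fixed nominal frequency; the paper's P/M-class algorithm adds F0 tracking and the required filtering.

import numpy as np

fs, f0 = 10_000, 50.0                 # sampling rate and nominal frequency
d = int(round(fs / (4 * f0)))         # quarter-period delay in samples

def pseudo_iq_phase(x):
    i, q = x[d:], x[:-d]              # q is the quarter-period delayed copy
    return np.unwrap(np.arctan2(q, i))

t = np.arange(fs) / fs
x = np.cos(2 * np.pi * f0 * t + 0.7)
freq = np.diff(pseudo_iq_phase(x)) * fs / (2 * np.pi)
print(freq.mean())                    # ~ 50 Hz instantaneous frequency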
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Robust student's-t distribution PHD filter with OCSVM updating for multiple human tracking.\n \n \n \n \n\n\n \n Feng, P.; Yu, M.; Naqvi, S. M.; Wang, W.; and Chambers, J. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2396-2400, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362814,\n  author = {P. Feng and M. Yu and S. M. Naqvi and W. Wang and J. A. Chambers},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A Robust student's-t distribution PHD filter with OCSVM updating for multiple human tracking},\n  year = {2015},\n  pages = {2396-2400},\n  abstract = {We propose a novel robust probability hypothesis density (PHD) filter for multiple target tracking in an enclosed environment, where a one-class support vector machine (OCSVM) is used in the update step for combining different human features to mitigate the effect of measurement noise on the calculation of particle weights. A Student's-t distribution is employed to improve the robustness of the filters whose tail is heavier than the Gaussian distribution and thus has the potential to cover more widely-spread particles. The OCSVM is trained based on both colour and oriented gradient (HOG) histogram features and then used to mitigate the measurement noise from the particle selection step, thereby improve the tracking performance. To evaluate the proposed PHD filter, we employed two sequences from the CAVIAR dataset and used the optimal subpattern assignment (OSPA) method as an objective measure. The results show that the proposed robust PHD filter outperforms the traditional PHD filter.},\n  keywords = {particle filtering (numerical methods);probability;support vector machines;target tracking;probability hypothesis density filter;PHD filter;multiple target tracking;one-class support vector machine;OCSVM;human features;measurement noise;particle weights calculation;student's-t distribution;oriented gradient histogram;HOG histogram features;CAVIAR dataset;optimal subpattern assignment method;OSPA method;Robustness;Target tracking;Atmospheric measurements;Particle measurements;Monte Carlo methods;Feature extraction;Signal processing;Multiple human tracking;PHD filter;Student's-t distribution;OCSVM},\n  doi = {10.1109/EUSIPCO.2015.7362814},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570087453.pdf},\n}\n\n
\n
\n\n\n
\n                    We propose a novel robust probability hypothesis density (PHD) filter for multiple target tracking in an enclosed environment, where a one-class support vector machine (OCSVM) is used in the update step for combining different human features to mitigate the effect of measurement noise on the calculation of particle weights. A Student's-t distribution is employed to improve the robustness of the filter: its tail is heavier than that of the Gaussian distribution, so it has the potential to cover more widely-spread particles. The OCSVM is trained on both colour and histogram of oriented gradients (HOG) features and then used to mitigate the measurement noise in the particle selection step, thereby improving the tracking performance. To evaluate the proposed PHD filter, we employed two sequences from the CAVIAR dataset and used the optimal subpattern assignment (OSPA) method as an objective measure. The results show that the proposed robust PHD filter outperforms the traditional PHD filter.\n
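The robustness mechanism is easy to see numerically: under a Student's-t likelihood, a particle far from the measurement keeps a non-negligible weight, whereas a Gaussian likelihood annihilates it. The degrees of freedom and scale below are illustrative.

import numpy as np
from scipy.stats import norm, t as student_t

def particle_weights(residuals, nu=3.0, scale=1.0):
    w = student_t.pdf(residuals / scale, df=nu)
    return w / w.sum()

r = np.array([0.1, 0.5, 4.0])            # last particle is widely spread
print(particle_weights(r))               # Student's-t: it keeps some weight
w_g = norm.pdf(r)
print(w_g / w_g.sum())                   # Gaussian: its weight is ~ 0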
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Motion machine: A new framework for motion capture signal feature prototyping.\n \n \n \n\n\n \n Tilmanne, J.; and d'Alessandro , N.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2401-2405, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362815,\n  author = {J. Tilmanne and N. d'Alessandro},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Motion machine: A new framework for motion capture signal feature prototyping},\n  year = {2015},\n  pages = {2401-2405},\n  abstract = {Motion capture (mocap) is rapidly evolving and embraced by a growing community in various research areas. However, a common problem is the high dimensionality of mocap data, and the difficulty to extract and understand meaningful features. In this paper, we propose a framework for the rapid prototyping of feature sets, MotionMachine, which helps to overcome the standard problem of mocap feature understanding by an interactive visualisation of both features and 3D scene. Our framework aims at being flexible to input data format, and to work both offline or in real-time. The design of the feature extraction modules in C++ is intended for modules to be used both for visualisation in the MotionMachine framework and for integration in end-user applications or communication with other existing softwares. We present two examples of use-cases in which the main features of this framework have successfully been tested.},\n  keywords = {feature extraction;image motion analysis;stereo image processing;C++;feature extraction modules;3D scene;mocap feature;MotionMachine;mocap data;motion capture signal feature prototyping;Feature extraction;Data visualization;Three-dimensional displays;Signal processing;Tracking;Data mining;Libraries;mocap;feature prototyping;library},\n  doi = {10.1109/EUSIPCO.2015.7362815},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n                    Motion capture (mocap) is rapidly evolving and embraced by a growing community in various research areas. However, a common problem is the high dimensionality of mocap data, and the difficulty of extracting and understanding meaningful features. In this paper, we propose a framework for the rapid prototyping of feature sets, MotionMachine, which helps to overcome the standard problem of mocap feature understanding through an interactive visualisation of both the features and the 3D scene. Our framework aims to be flexible with respect to input data formats, and to work both offline and in real time. The feature extraction modules are designed in C++ so that they can be used both for visualisation in the MotionMachine framework and for integration in end-user applications or communication with other existing software. We present two use-cases in which the main features of this framework have been successfully tested.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Graph inference enhancement with clustering: Application to Gene Regulatory Network reconstruction.\n \n \n \n \n\n\n \n Pirayre, A.; Couprie, C.; Duval, L.; and Pesquet, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2406-2410, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"GraphPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362816,\n  author = {A. Pirayre and C. Couprie and L. Duval and J. Pesquet},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Graph inference enhancement with clustering: Application to Gene Regulatory Network reconstruction},\n  year = {2015},\n  pages = {2406-2410},\n  abstract = {The obtention of representative graphs is a key problem in an increasing number of fields, such as computer graphics, social sciences, and biology to name a few. Due to the large number of possible solutions from the available amount of data, building meaningful graphs is often challenging. Nonetheless, enforcing a priori on the graph structure, such as a modularity, may reduce the underdetermination in the underlying problem. In this work, we introduce such a methodology in the context of Gene Regulatory Network inference. These networks are useful to visualize gene interactions occurring in living organisms: some genes regulate the expression of others, structuring the network into modules where they play a central role. Our approach consists in jointly inferring the graph and performing a clustering using the graph-Laplacian-based random walker algorithm. We validate our approach on the DREAM4 dataset, showing significant improvement over state-of-the-art GRN inference methods.},\n  keywords = {genetics;graph theory;pattern clustering;graph inference enhancement;gene regulatory network reconstruction;graph structure;gene regulatory network inference;gene interaction visualization;living organism;clustering;graph-Laplacian-based random walker algorithm;DREAM4 dataset;state-of-the-art GRN inference method;Optimization;Signal processing;Europe;Context;Covariance matrices;Graphical models;Gene expression;genomic data analysis;graph construction;combinatorial Dirichlet problem;random walker},\n  doi = {10.1109/EUSIPCO.2015.7362816},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104715.pdf},\n}\n\n
\n
\n\n\n
\n                    Obtaining representative graphs is a key problem in an increasing number of fields, such as computer graphics, social sciences, and biology, to name a few. Due to the large number of possible solutions from the available amount of data, building meaningful graphs is often challenging. Nonetheless, enforcing a priori constraints on the graph structure, such as modularity, may reduce the underdetermination of the underlying problem. In this work, we introduce such a methodology in the context of Gene Regulatory Network inference. These networks are useful for visualizing the gene interactions occurring in living organisms: some genes regulate the expression of others, structuring the network into modules where they play a central role. Our approach consists in jointly inferring the graph and performing a clustering using the graph-Laplacian-based random walker algorithm. We validate our approach on the DREAM4 dataset, showing significant improvement over state-of-the-art GRN inference methods.\n
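The clustering half of the joint scheme is the graph-Laplacian random walker (the combinatorial Dirichlet problem): given seed nodes with labels, solve a linear system on the unseeded nodes to obtain, for each node, the probability of reaching each label's seeds first. A compact sketch, assuming a symmetric weight matrix W:

import numpy as np

def random_walker_labels(W, seeds, seed_labels):
    # W: symmetric nonnegative edge weights, zero diagonal
    L = np.diag(W.sum(1)) - W                     # graph Laplacian
    n = len(W)
    u = np.setdiff1d(np.arange(n), seeds)         # unseeded nodes
    probs = np.zeros((n, seed_labels.max() + 1))
    probs[seeds, seed_labels] = 1.0
    # combinatorial Dirichlet problem: L_uu x = -L_us m_s
    probs[u] = np.linalg.solve(L[np.ix_(u, u)],
                               -L[np.ix_(u, seeds)] @ probs[seeds])
    return probs.argmax(1)                        # module assignment per gene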
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improving sensor noise analysis for CT-Scanner identification.\n \n \n \n \n\n\n \n Kharboutly, A.; Puech, W.; Subsol, G.; and Hoa, D.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2411-2415, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ImprovingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362817,\n  author = {A. Kharboutly and W. Puech and G. Subsol and D. Hoa},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Improving sensor noise analysis for CT-Scanner identification},\n  year = {2015},\n  pages = {2411-2415},\n  abstract = {CT-Scanner devices produce three-dimensional images of the internal structure of the body. In this paper, we propose a method that is based on the analysis of sensor noise to identify the CT-Scanner device. For each CT-scanner we built a reference pattern noise and a correlation map from its slices. Finally, we can correlate any test slice with the reference pattern noise of each device according to its correlation map. This correlation map gives a weighting for each pixel regarding its position in the reference pattern noise. We used a wavelet-based Wiener filter and an edge detection method to extract the noise from a slice. Experiments were applied on three CT-Scanners with 40 3D images, including 3600 slices, and we demonstrate that we are able to identify each CT-Scanner separately.},\n  keywords = {computerised tomography;edge detection;feature extraction;image denoising;medical image processing;wavelet transforms;Wiener filters;sensor noise analysis;CT-Scanner identification;three-dimensional images;body internal structure;CT-Scanner device;reference pattern noise;correlation map;wavelet-based Wiener filter;edge detection method;noise extraction;Correlation;Image edge detection;Noise reduction;Three-dimensional displays;Biomedical imaging;Wavelet transforms;Europe;Digital forensics;medical image foren-sics;authentication;device identification;noise pattern;sensor noise;denoise filtering;edge detection;wavelet transformation},\n  doi = {10.1109/EUSIPCO.2015.7362817},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103637.pdf},\n}\n\n
\n
\n\n\n
\n                    CT-Scanner devices produce three-dimensional images of the internal structure of the body. In this paper, we propose a method based on the analysis of sensor noise to identify the CT-Scanner device. For each CT-Scanner we build a reference pattern noise and a correlation map from its slices. We can then correlate any test slice with the reference pattern noise of each device according to its correlation map. The correlation map gives a weight to each pixel according to its position in the reference pattern noise. We use a wavelet-based Wiener filter and an edge detection method to extract the noise from a slice. Experiments were performed on three CT-Scanners with 40 3D images comprising 3600 slices, and we demonstrate that we are able to identify each CT-Scanner separately.\n
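Identification then reduces to correlating a test slice's noise residual with each scanner's reference pattern, weighted by that scanner's correlation map, and picking the best-scoring device. A sketch, with the denoiser passed in as a callable (the paper's choice is a wavelet-based Wiener filter plus edge masking):

import numpy as np

def noise_residual(slice_img, denoise):
    return slice_img - denoise(slice_img)     # sensor noise estimate

def weighted_correlation(res, pattern, weight):
    # pixel-wise weights come from the device's correlation map
    a = weight * (res - res.mean())
    b = weight * (pattern - pattern.mean())
    return (a * b).sum() / np.sqrt((a * a).sum() * (b * b).sum())

# identify: argmax of weighted_correlation over all scanners' patterns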
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Feasibility analysis and adaptive thresholding for mobile applications controlled by EEG signals.\n \n \n \n \n\n\n \n Lee, C.; Chin, J.; Yi, L.; Lee, B.; and McKeown, M. J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2416-2420, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"FeasibilityPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362818,\n  author = {C. Lee and J. Chin and L. Yi and B. Lee and M. J. McKeown},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Feasibility analysis and adaptive thresholding for mobile applications controlled by EEG signals},\n  year = {2015},\n  pages = {2416-2420},\n  abstract = {Given the availability of EEG technology and existing studies, this paper discusses the feasibility of development of mobile applications controlled by brainwaves using a low-cost, non-invasive, headband type of device that collects two-channel EEG signals at frontal lobe. We have performed temporal, spectral and spatial analysis on EEG signals collected during game-playing and found particular trends of EEG signals at certain brain (mental) states for all subjects, and some variations of the trends among different subjects. The analysis results motivate us to design an adaptive thresholding mechanism to find user-specific thresholds for a classifier that controls mobile applications.},\n  keywords = {electroencephalography;medical signal processing;mobile computing;mobile applications;brain waves;two-channel EEG signals;adaptive thresholding mechanism;user-specific thresholds;Games;Electroencephalography;Mobile applications;Market research;Electrodes;Lead;Training data;Brainwaves;Classification;Mobile application},\n  doi = {10.1109/EUSIPCO.2015.7362818},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104693.pdf},\n}\n\n
\n
\n\n\n
\n                    Given the availability of EEG technology and existing studies, this paper discusses the feasibility of developing mobile applications controlled by brainwaves using a low-cost, non-invasive, headband-type device that collects two-channel EEG signals at the frontal lobe. We performed temporal, spectral and spatial analyses on EEG signals collected during game-playing and found particular trends in the EEG signals at certain brain (mental) states for all subjects, with some variation in these trends among subjects. The analysis results motivate us to design an adaptive thresholding mechanism that finds user-specific thresholds for a classifier controlling mobile applications.\n
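A minimal picture of user-specific thresholding (illustrative, not the paper's exact rule): calibrate from short recordings of two mental states and place the decision boundary between their mean band powers, re-running the calibration per user or session to adapt.

import numpy as np

def calibrate_threshold(relaxed_power, focused_power):
    # band-power samples from two short per-user calibration recordings
    return 0.5 * (np.mean(relaxed_power) + np.mean(focused_power))

def classify_state(power, thresh):
    # assumes the chosen band's power is higher when the user focuses
    return "focused" if power > thresh else "relaxed"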
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unitary transform-based template protection and its properties.\n \n \n \n \n\n\n \n Nakamura, I.; Tonomura, Y.; and Kiya, H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2421-2425, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"UnitaryPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362819,\n  author = {I. Nakamura and Y. Tonomura and H. Kiya},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Unitary transform-based template protection and its properties},\n  year = {2015},\n  pages = {2421-2425},\n  abstract = {We focus on the feature transform approach as one methodology for biometric template protection, where the template consists of the features extracted from the biometric trait. This paper considers some properties of the unitary transform-based template protection in particular. It is known that the Euclidean distance between the templates protected by a unitary transform is the same as that between original (non-protected) ones as a property. In this paper, moreover, it is shown that it provides the same results in l2-norm minimization problems as those of original templates. This means that there is no degradation of recognition performance in authentication systems using l2-norm minimization. Therefore, the protected templates can be reissued multiple times without original templates. In addition, a DFT-based template protection scheme is proposed as an unitary transform-based one. The proposed scheme enables to efficiently generate protected templates by the FFT, in addition to the useful properties. It is also applied to face recognition experiments to evaluate the effectiveness.},\n  keywords = {biometrics (access control);discrete Fourier transforms;face recognition;fast Fourier transforms;feature extraction;minimisation;unitary transform-based template protection;feature transform approach;biometric template protection;features extracted;Euclidean distance;l2-norm minimization;DFT-based template protection scheme;FFT;face recognition;Transforms;Authentication;Minimization;Training;Feature extraction;Databases;Biometrics;Template protection;Unitary transform;l2-norm minimization},\n  doi = {10.1109/EUSIPCO.2015.7362819},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104419.pdf},\n}\n\n
\n
\n\n\n
\n                    We focus on the feature transform approach as one methodology for biometric template protection, where the template consists of the features extracted from the biometric trait. This paper considers some properties of unitary transform-based template protection in particular. It is known that the Euclidean distance between templates protected by a unitary transform is the same as that between the original (non-protected) ones. In this paper, moreover, it is shown that such protection provides the same results in l2-norm minimization problems as the original templates. This means that there is no degradation of recognition performance in authentication systems using l2-norm minimization. Therefore, protected templates can be reissued multiple times without the original templates. In addition, a DFT-based template protection scheme is proposed as a unitary transform-based one. Besides these useful properties, the proposed scheme enables protected templates to be generated efficiently with the FFT. It is also applied to face recognition experiments to evaluate its effectiveness.\n
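The distance-preservation property is easy to verify numerically. Below, templates are protected with the orthonormal DFT combined with a random sign key (the key standing in for the user-specific part of the transform, an assumption for this demo); Euclidean distances, and hence any l2-norm matching, are unchanged.

import numpy as np

rng = np.random.default_rng(0)
x, y = rng.normal(size=64), rng.normal(size=64)        # two raw templates

key = rng.choice([-1.0, 1.0], size=64)                 # secret sign key
protect = lambda v: np.fft.fft(key * v, norm="ortho")  # unitary transform

d_plain = np.linalg.norm(x - y)
d_prot = np.linalg.norm(protect(x) - protect(y))
print(np.isclose(d_plain, d_prot))                     # True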
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Combined classification and regression for simultaneous and proportional EMG control of wrist forces.\n \n \n \n \n\n\n \n Shahmoradi, M. H.; Akhaee, M. A.; and Mirian, M. S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2426-2430, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CombinedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362820,\n  author = {M. H. Shahmoradi and M. A. Akhaee and M. S. Mirian},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Combined classification and regression for simultaneous and proportional EMG control of wrist forces},\n  year = {2015},\n  pages = {2426-2430},\n  abstract = {In this study, a novel method for estimating wrist forces from surface electromyogram (EMG) measured from the upper limb is proposed, which can be applied for unilateral transradial amputees. Three degrees of freedom (DoFs) of wrist including flexion-extension, abduction-adduction, and pronation-supination were used. We first classify feature vectors extracted from the EMG signals into three classes namely positive output, negative output and dead zone output, using a multiple kernel learning (MKL) algorithm. Then for each DoF and each class, a neural network was trained to associate EMG features to their corresponding force outputs. We will show that this classification prior to regression plays an important role in increasing the performance of force estimation. The accuracy of estimation ranges from 90% to 94% (R2 index) in 8 able-bodied subjects, which is proved to be significantly higher (p<;0.05) than that of the previous works.},\n  keywords = {electromyography;feature extraction;learning (artificial intelligence);medical signal processing;neural nets;prosthetics;regression analysis;signal classification;proportional EMG control;wrist force estimation;surface electromyogram;unilateral transradial amputees;flexion-extension;abduction-adduction;pronation-supination;feature vector extraction;EMG signals;positive output class;negative output class;dead zone output class;multiple kernel learning algorithm;MKL algorithm;neural network;Electromyography;Force;Kernel;Training;Estimation;Artificial neural networks;Feature extraction;Electromyography (EMG);multiple kernel learning (MKL);artificial neural network (ANN);prosthetic control},\n  doi = {10.1109/EUSIPCO.2015.7362820},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104923.pdf},\n}\n\n
\n
\n\n\n
\n                    In this study, a novel method for estimating wrist forces from the surface electromyogram (EMG) measured on the upper limb is proposed, which can be applied to unilateral transradial amputees. Three degrees of freedom (DoFs) of the wrist, including flexion-extension, abduction-adduction, and pronation-supination, were used. We first classify feature vectors extracted from the EMG signals into three classes, namely positive output, negative output, and dead-zone output, using a multiple kernel learning (MKL) algorithm. Then, for each DoF and each class, a neural network was trained to associate EMG features with their corresponding force outputs. We show that this classification prior to regression plays an important role in increasing the performance of force estimation. The accuracy of estimation ranges from 90% to 94% (R2 index) in 8 able-bodied subjects, which is shown to be significantly higher (p < 0.05) than that of previous works.\n
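The two-stage structure per degree of freedom can be sketched generically: a classifier routes each EMG feature vector to {negative, dead-zone, positive}, then a class-specific regressor maps it to a force, with the dead zone pinned to zero. Any classifier/regressor pair can stand in here; the paper uses MKL and neural networks.

import numpy as np

class ClassifyThenRegress:
    def __init__(self, classifier, regressors):
        self.clf = classifier          # predicts "neg" / "dead" / "pos"
        self.regs = regressors         # dict: class label -> regressor
    def predict(self, feats):
        cls = self.clf.predict(feats)
        out = np.zeros(len(feats))
        for c, reg in self.regs.items():
            idx = cls == c
            if idx.any():
                out[idx] = reg.predict(feats[idx])
        return out                     # "dead" omitted from regs stays at 0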
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Plant electrical activity analysis for ozone pollution critical level detection.\n \n \n \n \n\n\n \n Dolfi, M.; Colzi, I.; Morosi, S.; Masi, E.; Mancuso, S.; Del Re, E.; Francini, F.; and Magliacani, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2431-2435, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PlantPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362821,\n  author = {M. Dolfi and I. Colzi and S. Morosi and E. Masi and S. Mancuso and E. {Del Re} and F. Francini and R. Magliacani},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Plant electrical activity analysis for ozone pollution critical level detection},\n  year = {2015},\n  pages = {2431-2435},\n  abstract = {The electrical activity signals in plants can provide useful information to monitor environmental conditions, such as atmospheric pollution. Nonetheless the study of the relationship between environmental stimuli and electrical responses of plants is still a critical step in developing technologies that use plants as organic sensing devices. In this paper an automatic method of analysis of plant electrical signals for ozone critical levels detection is proposed, based on the fundamentals of correlation theory. In order to classify the morphology characteristics of plant response to ozone exposure we used a segmentation of time series measurements of the electrical activity of plants before, during and after the stimulation. Then, we extracted the significant deviations from the baseline trend to detect and identify the response to a known stimulus, in terms of correlation coefficient. As a result, the proposed detection algorithm represents a novel monitoring method for detecting critical levels of ozone concentrations.},\n  keywords = {air pollution;environmental science computing;signal classification;signal detection;plant electrical activity analysis;ozone pollution critical level detection;environmental conditions;atmospheric pollution;organic sensing devices;correlation theory;ozone concentrations;Gases;Correlation;Monitoring;Biomedical monitoring;Air pollution;Pollution measurement;Signal processing algorithms;Plant electrical signal;ozone pollution;spike detection;waveform correlation;data classification},\n  doi = {10.1109/EUSIPCO.2015.7362821},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105035.pdf},\n}\n\n
\n
\n\n\n
\n                    The electrical activity signals of plants can provide useful information for monitoring environmental conditions such as atmospheric pollution. Nonetheless, the study of the relationship between environmental stimuli and the electrical responses of plants is still a critical step in developing technologies that use plants as organic sensing devices. In this paper an automatic method for the analysis of plant electrical signals for the detection of critical ozone levels is proposed, based on the fundamentals of correlation theory. To classify the morphological characteristics of the plant response to ozone exposure, we use a segmentation of time-series measurements of the electrical activity of plants before, during and after the stimulation. We then extract the significant deviations from the baseline trend to detect and identify the response to a known stimulus in terms of the correlation coefficient. The proposed detection algorithm thus represents a novel monitoring method for detecting critical levels of ozone concentration.\n
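The detection step amounts to sliding a learned ozone-response template along the recorded signal and thresholding the normalized correlation coefficient; the window length follows the template, and the threshold would come from calibration. A minimal sketch:

import numpy as np

def sliding_correlation(signal, template):
    n = len(template)
    t = (template - template.mean()) / (template.std() + 1e-12)
    out = np.empty(len(signal) - n + 1)
    for i in range(len(out)):
        w = signal[i:i + n]
        w = (w - w.mean()) / (w.std() + 1e-12)
        out[i] = (w * t).mean()        # Pearson correlation in [-1, 1]
    return out

# flag critical ozone exposure where the correlation exceeds a threshold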
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Distributed massive MIMO in cellular networks: Impact of imperfect hardware and number of oscillators.\n \n \n \n\n\n \n Björnson, E.; Matthaiou, M.; Pitarokoilis, A.; and Larsson, E. G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2436-2440, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362822,\n  author = {E. Björnson and M. Matthaiou and A. Pitarokoilis and E. G. Larsson},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed massive MIMO in cellular networks: Impact of imperfect hardware and number of oscillators},\n  year = {2015},\n  pages = {2436-2440},\n  abstract = {Distributed massive multiple-input multiple-output (MIMO) combines the array gain of coherent MIMO processing with the proximity gains of distributed antenna setups. In this paper, we analyze how transceiver hardware impairments affect the downlink with maximum ratio transmission. We derive closed-form spectral efficiencies expressions and study their asymptotic behavior as the number of the antennas increases. We prove a scaling law on the hardware quality, which reveals that massive MIMO is resilient to additive distortions, while multiplicative phase noise is a limiting factor. It is also better to have separate oscillators at each antenna than one per BS.},\n  keywords = {antenna arrays;cellular radio;MIMO communication;phase noise;distributed massive multiple-input multiple-output;distributed massive MIMO;array gain;coherent MIMO processing;proximity gains;distributed antenna setups;transceiver hardware impairments;maximum ratio transmission;closed-form spectral efficiencies expressions;scaling law;additive distortions;multiplicative phase noise;cellular networks;MIMO;Antennas;Hardware;Distortion;Downlink;Phase noise;Arrays},\n  doi = {10.1109/EUSIPCO.2015.7362822},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n Distributed massive multiple-input multiple-output (MIMO) combines the array gain of coherent MIMO processing with the proximity gains of distributed antenna setups. In this paper, we analyze how transceiver hardware impairments affect the downlink with maximum ratio transmission. We derive closed-form spectral efficiency expressions and study their asymptotic behavior as the number of antennas increases. We prove a scaling law on the hardware quality, which reveals that massive MIMO is resilient to additive distortions, while multiplicative phase noise is a limiting factor. It is also better to have a separate oscillator at each antenna than a single oscillator per base station (BS).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Base station cluster patterns for semi-static multi-cell cooperation in irregular network topologies.\n \n \n \n \n\n\n \n Park, J.; Lee, N.; and Heath, R. W.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2441-2445, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"BasePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362823,\n  author = {J. Park and N. Lee and R. W. Heath},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Base station cluster patterns for semi-static multi-cell cooperation in irregular network topologies},\n  year = {2015},\n  pages = {2441-2445},\n  abstract = {This paper proposes a clustering strategy for semi-static mul-ticell cooperation. Semi-static multicell cooperation exploits multiple predefined base station (BS) cluster patterns for improving cell-edge user throughput. The proposed clustering guarantees that every user communicates with their two closest BSs, so that users are protected from the dominant interferer. The key idea of the proposed clustering is to use the 2nd-order Voronoi region to form BS clusters. Each of the formed BS clusters is mapped into a particular cluster pattern by exploiting the edge-coloring in graph theory. Through simulations, the performance is compared to that of other conventional strategies. Our major finding is that the proposed clustering provides performance gains for cell-edge users compared to that of the conventional strategies.},\n  keywords = {cellular radio;computational geometry;graph theory;pattern clustering;telecommunication network topology;base station cluster pattern;semistatic multicell cooperation;irregular network topology;BS;cell-edge user throughput;2nd-order Voronoi region;edge-coloring;graph theory;Interference;Image color analysis;Time-frequency analysis;Europe;Signal processing;Base stations;Network topology},\n  doi = {10.1109/EUSIPCO.2015.7362823},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103173.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a clustering strategy for semi-static multicell cooperation. Semi-static multicell cooperation exploits multiple predefined base station (BS) cluster patterns for improving cell-edge user throughput. The proposed clustering guarantees that every user communicates with their two closest BSs, so that users are protected from the dominant interferer. The key idea of the proposed clustering is to use the 2nd-order Voronoi region to form BS clusters. Each of the formed BS clusters is mapped to a particular cluster pattern by exploiting edge-coloring from graph theory. Through simulations, the performance is compared to that of other conventional strategies. Our major finding is that the proposed clustering provides performance gains for cell-edge users compared to the conventional strategies.\n
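The 2nd-order Voronoi construction mentioned in the abstract amounts to grouping users by the unordered pair of their two nearest BSs. A minimal sketch, assuming planar coordinates for users and BSs; the subsequent edge-coloring step that maps clusters to reuse patterns is not shown:

```python
import numpy as np

def second_order_voronoi_clusters(user_xy, bs_xy):
    """Map each user to the unordered pair of its two closest base
    stations; users sharing a pair lie in the same 2nd-order Voronoi
    region and hence belong to the same BS cluster."""
    d = np.linalg.norm(user_xy[:, None, :] - bs_xy[None, :, :], axis=2)
    two_nearest = np.argsort(d, axis=1)[:, :2]       # indices of 2 closest BSs
    return [tuple(sorted(p)) for p in two_nearest]   # cluster label per user
```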
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-dimensional continuous phase modulation in uplink of MIMO systems.\n \n \n \n \n\n\n \n Sedaghat, M. A.; and Müller, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2446-2450, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-dimensionalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362824,\n  author = {M. A. Sedaghat and R. Müller},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-dimensional continuous phase modulation in uplink of MIMO systems},\n  year = {2015},\n  pages = {2446-2450},\n  abstract = {Phase Modulation on the Hypersphere (PMH) is considered in which the instantaneous sum power is constant. It is shown that for an i.i.d. Gaussian channel, the capacity achieving input distribution is approximately uniform on a hypersphere when the number of receive antennas is much larger than the number of transmit antennas. Moreover, in the case that channel state information is not available at the transmitter, it is proven that the capacity achieving input distribution is exactly uniform on a hypersphere. Mutual information between input and output of PMH with discrete constellation for an i.i.d. Gaussian channel is evaluated numerically. Furthermore, a spherical spectral shaping method for PMH is proposed to have Continuous Phase Modulation on the Hypersphere (CPMH). In CPMH, the continuous time signal has a constant instantaneous sum power. It is shown that using a spherical low pass filter in spherical domain followed by a Cartesian filter results in very good spectral properties.},\n  keywords = {antenna arrays;continuous phase modulation;Gaussian channels;low-pass filters;MIMO communication;multidimensional signal processing;receiving antennas;transmitting antennas;multidimensional continuous phase modulation;MIMO systems;hypersphere;CPMH;receive antennas;transmit antennas;channel state information;mutual information;i.i.d. Gaussian channel;spherical spectral shaping method;continuous time signal;spherical low pass filter;spherical domain;Cartesian filter;spectral properties;MIMO;Pulse shaping methods;Radio transmitters;Receiving antennas;Mutual information;Peak to average power ratio;Phase modulation;multiple-input multiple-output (MIMO) systems;peak-to-average power ratio (PAPR);single-RF transmitters;continuous phase modulation (CPM);spherical filtering},\n  doi = {10.1109/EUSIPCO.2015.7362824},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103555.pdf},\n}\n\n
\n
\n\n\n
\n Phase Modulation on the Hypersphere (PMH) is considered, in which the instantaneous sum power is constant. It is shown that for an i.i.d. Gaussian channel, the capacity-achieving input distribution is approximately uniform on a hypersphere when the number of receive antennas is much larger than the number of transmit antennas. Moreover, when channel state information is not available at the transmitter, it is proven that the capacity-achieving input distribution is exactly uniform on a hypersphere. The mutual information between the input and output of PMH with a discrete constellation for an i.i.d. Gaussian channel is evaluated numerically. Furthermore, a spherical spectral shaping method for PMH is proposed to obtain Continuous Phase Modulation on the Hypersphere (CPMH). In CPMH, the continuous-time signal has a constant instantaneous sum power. It is shown that using a spherical low-pass filter in the spherical domain followed by a Cartesian filter results in very good spectral properties.\n
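Generating inputs that are uniform on the hypersphere, as the capacity result above calls for, is straightforward: normalizing an i.i.d. complex Gaussian vector yields exactly the uniform distribution on the sphere, and hence constant instantaneous sum power. A small illustrative sketch (dimensions and power are placeholder parameters):

```python
import numpy as np

def sample_pmh_symbols(n_tx, n_symbols, power=1.0, seed=None):
    """Draw transmit vectors uniformly on the complex hypersphere of
    radius sqrt(power): a normalized i.i.d. complex Gaussian vector is
    uniform on the sphere, so every vector has the same instantaneous
    sum power across the n_tx antennas."""
    rng = np.random.default_rng(seed)
    x = rng.standard_normal((n_tx, n_symbols)) + 1j * rng.standard_normal((n_tx, n_symbols))
    return np.sqrt(power) * x / np.linalg.norm(x, axis=0, keepdims=True)
```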
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n MIMO and massive MIMO — Analysis for a local area scenario.\n \n \n \n\n\n \n Dierks, S.; Zirwas, W.; Jäger, M.; Panzner, B.; and Kramer, G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2451-2455, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362825,\n  author = {S. Dierks and W. Zirwas and M. Jäger and B. Panzner and G. Kramer},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {MIMO and massive MIMO — Analysis for a local area scenario},\n  year = {2015},\n  pages = {2451-2455},\n  abstract = {The performance of centralized and distributed deployments of massive MIMO in an office building is analyzed both with and without cooperation. It is shown that using twice as many base station antennas as data streams provides most of the massive MIMO benefits. A simple transmission scheme achieves user fairness and operates close to a capacity upper bound. An example scheduling algorithm improves efficiency only for less than twice the number of base station antennas as data streams. The tradeoff between performance and cost for backhauling is evaluated by comparing cooperation of distributed base stations with a single central deployment.},\n  keywords = {5G mobile communication;MIMO communication;local area scenario;centralized massive MIMO deployment;distributed massive MIMO deployment;base station antennas;data streams;transmission scheme;example scheduling algorithm;user fairness;efficiency improvement;mobile communication standards;5G;MIMO;Antennas;Upper bound;Buildings;OFDM;Resource management;Europe;MIMO;massive MIMO;network MIMO;5G;two stripe building},\n  doi = {10.1109/EUSIPCO.2015.7362825},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n The performance of centralized and distributed deployments of massive MIMO in an office building is analyzed both with and without cooperation. It is shown that using twice as many base station antennas as data streams provides most of the massive MIMO benefits. A simple transmission scheme achieves user fairness and operates close to a capacity upper bound. An example scheduling algorithm improves efficiency only when the number of base station antennas is less than twice the number of data streams. The tradeoff between performance and cost for backhauling is evaluated by comparing cooperation of distributed base stations with a single central deployment.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance comparison of data-sharing and compression strategies for cloud radio access networks.\n \n \n \n \n\n\n \n Patil, P.; Dai, B.; and Yu, W.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2456-2460, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362826,\n  author = {P. Patil and B. Dai and W. Yu},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Performance comparison of data-sharing and compression strategies for cloud radio access networks},\n  year = {2015},\n  pages = {2456-2460},\n  abstract = {This paper provides a system-level performance comparison of two fundamentally different transmission strategies for the downlink of a cloud radio access network. The two strategies, namely the data-sharing strategy and the compression-based strategy, differ in the way the limited backhaul is utilized. While the data-sharing strategy uses the backhaul to carry raw user data, the compression strategy uses the backhaul to carry compressed beamformed signals. Although these strategies have been individually studied in the literature, a fair comparison of the two schemes under practical network settings is challenging because of the complexity in jointly optimizing user scheduling, beamforming, and power control for system-level performance evaluation, along with the need to optimize cooperation clusters for the data-sharing strategy and quantization noise levels for the compression strategy. This paper presents an optimization framework for both the data-sharing and compression strategies, while taking into account losses due to practical modulation in terms of gap to capacity and practical quantization in terms of gap to rate-distortion limit. The main conclusion of this paper is that the compression-based strategy, even with a simple fixed-rate uniform quantizer, outperforms the data-sharing strategy under medium to high capacity backhauls. However, the data-sharing strategy outperforms the compression strategy under low capacity backhauls primarily because of the large quantization loss at low backhaul capacity with compression.},\n  keywords = {optimisation;quantisation (signal);radio access networks;system-level performance;cloud radio access networks;data-sharing strategy;compression-based strategy;cooperation clusters;optimization framework;simple fixed-rate uniform quantizer;Optimization;Quantization (signal);Interference;Signal to noise ratio;Chlorine;Array signal processing;Noise level},\n  doi = {10.1109/EUSIPCO.2015.7362826},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105055.pdf},\n}\n\n
\n
\n\n\n
\n This paper provides a system-level performance comparison of two fundamentally different transmission strategies for the downlink of a cloud radio access network. The two strategies, namely the data-sharing strategy and the compression-based strategy, differ in the way the limited backhaul is utilized. While the data-sharing strategy uses the backhaul to carry raw user data, the compression strategy uses the backhaul to carry compressed beamformed signals. Although these strategies have been individually studied in the literature, a fair comparison of the two schemes under practical network settings is challenging because of the complexity in jointly optimizing user scheduling, beamforming, and power control for system-level performance evaluation, along with the need to optimize cooperation clusters for the data-sharing strategy and quantization noise levels for the compression strategy. This paper presents an optimization framework for both the data-sharing and compression strategies, while taking into account losses due to practical modulation in terms of gap to capacity and practical quantization in terms of gap to rate-distortion limit. The main conclusion of this paper is that the compression-based strategy, even with a simple fixed-rate uniform quantizer, outperforms the data-sharing strategy under medium to high capacity backhauls. However, the data-sharing strategy outperforms the compression strategy under low capacity backhauls primarily because of the large quantization loss at low backhaul capacity with compression.\n
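The "simple fixed-rate uniform quantizer" invoked above for the compression strategy can be sketched as follows; the clipping range and bit budget are illustrative assumptions, and the actual framework additionally charges the gap to the rate-distortion limit against this scheme:

```python
import numpy as np

def uniform_quantize(x, n_bits, x_max):
    """Fixed-rate uniform mid-rise quantizer: clip to [-x_max, x_max] and
    map to 2**n_bits reconstruction levels."""
    levels = 2 ** n_bits
    step = 2.0 * x_max / levels
    q = np.clip(x, -x_max, x_max - 1e-9)          # keep indices in range
    return (np.floor(q / step) + 0.5) * step      # mid-rise reconstruction
```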
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Speech dereverberation by data-dependent beamforming with signal pre-whitening.\n \n \n \n \n\n\n \n Dietzen, T.; Huleihel, N.; Spriet, A.; Tirry, W.; Doclo, S.; Moonen, M.; and van Waterschoot , T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2461-2465, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SpeechPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362827,\n  author = {T. Dietzen and N. Huleihel and A. Spriet and W. Tirry and S. Doclo and M. Moonen and T. {van Waterschoot}},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Speech dereverberation by data-dependent beamforming with signal pre-whitening},\n  year = {2015},\n  pages = {2461-2465},\n  abstract = {Among different microphone array processing techniques, data-dependent beamforming has been proven to be effective in suppressing ambient noise. When applied for dereverberation, however, the adaptation process results in a biased estimate of the beamformer coefficients leading to strong distortions at the beamformer output. In this paper, we investigate the origin of this bias for the generalized sidelobe canceller. It is shown that an unbiased estimate of the beam-former coefficients and thus dereverberation can be achieved if the source signal is a white random signal. Based on these findings, a pre-whitening approach for speech signals is proposed and combined with a generalized sidelobe canceller for speech dereverberation. The concept is demonstrated for the case of stationary speech-shaped noise as a source signal.},\n  keywords = {array signal processing;reverberation;speech processing;beamformer coefficient unbiased estimation;generalized sidelobe canceller;beamformer output distortions;microphone array processing technique;signal prewhitening;data dependent beamforming;speech dereverberation;Microphones;Reverberation;Array signal processing;Speech;Europe;Correlation;Dereverberation;beamforming;generalized sidelobe canceller;estimation bias;whitening},\n  doi = {10.1109/EUSIPCO.2015.7362827},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102147.pdf},\n}\n\n
\n
\n\n\n
\n Among different microphone array processing techniques, data-dependent beamforming has been proven to be effective in suppressing ambient noise. When applied for dereverberation, however, the adaptation process results in a biased estimate of the beamformer coefficients, leading to strong distortions at the beamformer output. In this paper, we investigate the origin of this bias for the generalized sidelobe canceller. It is shown that an unbiased estimate of the beamformer coefficients, and thus dereverberation, can be achieved if the source signal is a white random signal. Based on these findings, a pre-whitening approach for speech signals is proposed and combined with a generalized sidelobe canceller for speech dereverberation. The concept is demonstrated for the case of stationary speech-shaped noise as a source signal.\n
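The pre-whitening idea can be illustrated with a basic LPC inverse filter: fit an autoregressive model to the (speech-shaped) source and keep the prediction residual, which is approximately white. This is only a sketch of the whitening step under a Yule-Walker assumption, not the paper's full GSC pipeline:

```python
import numpy as np
from scipy.linalg import solve_toeplitz

def prewhiten(x, order=16):
    """LPC pre-whitening: fit an AR(order) model to x via the Yule-Walker
    equations and return the prediction residual, which is approximately
    white for speech-shaped input."""
    r = np.correlate(x, x, mode="full")[len(x) - 1:len(x) + order]  # lags 0..order
    a = solve_toeplitz(r[:order], r[1:order + 1])     # AR coefficients
    return np.convolve(np.r_[1.0, -a], x)[:len(x)]    # inverse-filter residual
```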
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data-driven statistical modelling of room impulse responses in the power domain.\n \n \n \n \n\n\n \n Doire, C. S. J.; Brookes, M.; Naylor, P. A.; and Jensen, S. H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2466-2470, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Data-drivenPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362828,\n  author = {C. S. J. Doire and M. Brookes and P. A. Naylor and S. H. Jensen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Data-driven statistical modelling of room impulse responses in the power domain},\n  year = {2015},\n  pages = {2466-2470},\n  abstract = {Having an accurate statistical model of room impulse responses with a minimum number of parameters is of crucial importance in applications such as dereverberation. In this paper, by taking into account the behaviour of the early reflections, we extend the widely-used statistical model proposed by Polack. The squared room impulse response is modelled in each frequency band as the realisation of a stochastic process weighted by the sum of two exponential decays. Room-independent values for the new parameters involved are obtained through analysis of several room impulse response databases, and validation of the model in the likelihood sense is performed.},\n  keywords = {reverberation chambers;speech intelligibility;stochastic processes;transient response;stochastic process;squared room impulse response;dereverberation;data-driven statistical modelling;Reverberation;Computational modeling;Databases;Mathematical model;Optimization;Training;Europe;Statistical model;Impulse Response;Early Decay},\n  doi = {10.1109/EUSIPCO.2015.7362828},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103515.pdf},\n}\n\n
\n
\n\n\n
\n Having an accurate statistical model of room impulse responses with a minimum number of parameters is of crucial importance in applications such as dereverberation. In this paper, by taking into account the behaviour of the early reflections, we extend the widely-used statistical model proposed by Polack. The squared room impulse response is modelled in each frequency band as the realisation of a stochastic process weighted by the sum of two exponential decays. Room-independent values for the new parameters involved are obtained through analysis of several room impulse response databases, and validation of the model in the likelihood sense is performed.\n
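The extended model can be read generatively: in each frequency band, the squared impulse response is a white stochastic process weighted by the sum of two exponential decays. A sketch that synthesizes one band under that model (all parameter values below are illustrative, not the room-independent values fitted in the paper):

```python
import numpy as np

def synth_band_rir(n_samples, fs, tau_early, tau_late, a_early=1.0, a_late=0.3, seed=None):
    """Synthesize one frequency band of a room impulse response whose
    squared envelope is the sum of two exponential decays (early + late):
    draw white noise and weight it by the square root of that envelope."""
    rng = np.random.default_rng(seed)
    t = np.arange(n_samples) / fs
    env_sq = a_early * np.exp(-2.0 * t / tau_early) + a_late * np.exp(-2.0 * t / tau_late)
    return rng.standard_normal(n_samples) * np.sqrt(env_sq)

# e.g. a fast 10 ms early decay plus a 200 ms late tail at 16 kHz:
h_band = synth_band_rir(8000, 16000, tau_early=0.01, tau_late=0.2)
```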
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast sound field reproduction in box-shaped rooms: Rigid walls case.\n \n \n \n \n\n\n \n Martinez, J.; Leus, G.; and Kleijn, W. B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2471-2475, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"FastPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362829,\n  author = {J. Martinez and G. Leus and W. B. Kleijn},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Fast sound field reproduction in box-shaped rooms: Rigid walls case},\n  year = {2015},\n  pages = {2471-2475},\n  abstract = {In this paper, an approach to sound field reproduction in reverberant rooms is presented. We focus on box-shaped rooms with fully reflective (rigid) walls. We propose a scheme based on mode-matching in the spatio-temporal Fourier domain combined with a simple least-squares approach to derive the loudspeaker weights that render the target sound-ield. By taking advantage of the fast Fourier transform (FFT), the method leads to a fast way to compute the loudspeaker weights. We address the reconstruction of basic sound ields (room-modes) using linear loudspeaker array conigurations. This is important, as more complex sound ields can be decomposed into a set of weighted room-modes. Our simulations show that accurate and eficient reconstruction of room-modes in a reverberant environment with perfectly reflective walls is possible.},\n  keywords = {acoustic signal processing;array signal processing;fast Fourier transforms;least squares approximations;loudspeakers;reverberation;signal reconstruction;spatiotemporal phenomena;fast sound field reproduction;box-shaped rooms;reverberant rooms;rigid walls;fully reflective walls;mode-matching;spatiotemporal Fourier domain;least squares approach;loudspeaker weights;fast Fourier transform;FFT;sound field reconstruction;linear loudspeaker array configurations;room-modes reconstruction;sound field synthesis;Loudspeakers;Apertures;Lattices;Arrays;Approximation methods;Generators;Green's function methods;sound field synthesis;Fourier domain;mode-matching;box-shaped room;room-modes},\n  doi = {10.1109/EUSIPCO.2015.7362829},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103563.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, an approach to sound field reproduction in reverberant rooms is presented. We focus on box-shaped rooms with fully reflective (rigid) walls. We propose a scheme based on mode-matching in the spatio-temporal Fourier domain combined with a simple least-squares approach to derive the loudspeaker weights that render the target sound field. By taking advantage of the fast Fourier transform (FFT), the method leads to a fast way to compute the loudspeaker weights. We address the reconstruction of basic sound fields (room-modes) using linear loudspeaker array configurations. This is important, as more complex sound fields can be decomposed into a set of weighted room-modes. Our simulations show that accurate and efficient reconstruction of room-modes in a reverberant environment with perfectly reflective walls is possible.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Noise robust blind system identification algorithms based on a Rayleigh quotient cost function.\n \n \n \n \n\n\n \n Hu, M.; Doclo, S.; Sharma, D.; Brookes, M.; and Naylor, P. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2476-2480, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"NoisePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362830,\n  author = {M. Hu and S. Doclo and D. Sharma and M. Brookes and P. A. Naylor},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Noise robust blind system identification algorithms based on a Rayleigh quotient cost function},\n  year = {2015},\n  pages = {2476-2480},\n  abstract = {An important prerequisite for acoustic multi-channel equalization for speech dereverberation involves the identification of the acoustic channels between the source and the microphones. Blind System Identification (BSI) algorithms based on cross-relation error minimization are known to mis-converge in the presence of noise. Although algorithms have been proposed in the literature to improve robustness to noise, the estimated room impulse responses are usually constrained to have a flat magnitude spectrum. In this paper, noise robust algorithms based on a Rayleigh quotient cost function are proposed. Unlike the traditional algorithms, the estimated impulse responses are not always forced to have unit norm. Experimental results using simulated room impulse responses and several SNRs show that one of the proposed algorithms outperforms competing algorithms in terms of normalized projection misalignment.},\n  keywords = {blind equalisers;microphones;Rayleigh channels;transient response;Rayleigh quotient cost function;noise robust blind system identification;acoustic multichannel equalization;speech dereverberation;acoustic channels;microphones;cross-relation error minimization;room impulse response;flat magnitude spectrum;normalized projection misalignment;Signal processing algorithms;Cost function;Microphones;Signal to noise ratio;Acoustics;Additive noise;Convergence;Blind System Identification;Rayleigh quotient;Least Mean Squares (LMS);noise robustness;dere-verberation},\n  doi = {10.1109/EUSIPCO.2015.7362830},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104145.pdf},\n}\n\n
\n
\n\n\n
\n An important prerequisite for acoustic multi-channel equalization for speech dereverberation involves the identification of the acoustic channels between the source and the microphones. Blind System Identification (BSI) algorithms based on cross-relation error minimization are known to mis-converge in the presence of noise. Although algorithms have been proposed in the literature to improve robustness to noise, the estimated room impulse responses are usually constrained to have a flat magnitude spectrum. In this paper, noise robust algorithms based on a Rayleigh quotient cost function are proposed. Unlike the traditional algorithms, the estimated impulse responses are not always forced to have unit norm. Experimental results using simulated room impulse responses and several SNRs show that one of the proposed algorithms outperforms competing algorithms in terms of normalized projection misalignment.\n
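For context, the classical noise-free cross-relation formulation behind such methods finds the channel pair as the eigenvector associated with the smallest eigenvalue of the cross-relation covariance, i.e., the minimizer of a Rayleigh quotient. A two-channel sketch follows; the paper's noise-robust variants modify this cost, and the recovered channels are only defined up to a common scale factor:

```python
import numpy as np
from scipy.linalg import toeplitz, eigh

def conv_matrix(x, L):
    """Linear-convolution matrix: conv_matrix(x, L) @ h == np.convolve(x, h)."""
    return toeplitz(np.r_[x, np.zeros(L - 1)], np.r_[x[0], np.zeros(L - 1)])

def cross_relation_bsi(x1, x2, L):
    """Two-channel blind system identification from the cross-relation
    x1 * h2 = x2 * h1: stack h = [h1; h2] and minimize the Rayleigh
    quotient (h^T R h)/(h^T h), whose minimizer is the eigenvector of R
    with the smallest eigenvalue."""
    C1, C2 = conv_matrix(x1, L), conv_matrix(x2, L)
    A = np.hstack([C2, -C1])        # A @ h = x2*h1 - x1*h2 (zero if exact)
    R = A.T @ A                     # cross-relation covariance
    _, V = eigh(R)                  # eigenvalues in ascending order
    h = V[:, 0]
    return h[:L], h[L:]
```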
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Late reverberant spectral variance estimation using acoustic channel equalization.\n \n \n \n \n\n\n \n Cauchi, B.; Naylor, P. A.; Gerkmann, T.; Doclo, S.; and Goetze, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2481-2485, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"LatePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362831,\n  author = {B. Cauchi and P. A. Naylor and T. Gerkmann and S. Doclo and S. Goetze},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Late reverberant spectral variance estimation using acoustic channel equalization},\n  year = {2015},\n  pages = {2481-2485},\n  abstract = {In many single- and multi-channel speech dereverberation methods an estimate of the late reverberant spectral variance (LRSV) is required. Contrary to LRSV estimators based on room acoustical properties, such as reverberation time, or based on isotropic models of the reverberant sound field, in this paper we propose to use acoustic channel equalization with estimated room impulse responses (RIRs) for LRSV estimation. Unlike the typical application of acoustic channel equalization, where the objective is to estimate the anechoic or the early reverberant speech component, here the late reverberant part of the estimated RIR is set as the target response. The combination of the proposed LRSV estimator with a beamformer and a spectral gain aims at a tradeoff between the performance of acoustic channel equalization and the robustness of methods based on models of the reverberant sound field. The performance, evaluated for different levels of RIR estimation error, is compared to the results obtained using a maximum likelihood estimator (MLE) of the LRSV, based on an isotropic model of the reverberant sound field, and to a state-of-the-art acoustic channel equalization method. Experimental results for different acoustic scenarios show that for medium levels of RIR estimation errors the proposed method outperforms acoustic channel equalization as well as the maximum-likelihood LRSV estimator in terms of instrumental speech quality measures.},\n  keywords = {acoustic signal processing;anechoic chambers (acoustic);architectural acoustics;array signal processing;equalisers;maximum likelihood estimation;reverberation;speech processing;instrumental speech quality measures;MLE;maximum-likelihood LRSV estimator;acoustic channel equalization method;spectral gain;beamformer;reverberant speech component;anechoic;RIR estimation error;room impulse responses;reverberant sound field;isotropic models;reverberation time;room acoustical properties;multichannel speech dereverberation methods;single-speech dereverberation methods;LRSV estimation;late reverberant spectral variance estimation;Speech;Channel estimation;Microphones;Maximum likelihood estimation;Reverberation;Dereverberation;spectral suppression;blocking matrix;channel equalization},\n  doi = {10.1109/EUSIPCO.2015.7362831},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104699.pdf},\n}\n\n
\n
\n\n\n
\n In many single- and multi-channel speech dereverberation methods an estimate of the late reverberant spectral variance (LRSV) is required. Contrary to LRSV estimators based on room acoustical properties, such as reverberation time, or based on isotropic models of the reverberant sound field, in this paper we propose to use acoustic channel equalization with estimated room impulse responses (RIRs) for LRSV estimation. Unlike the typical application of acoustic channel equalization, where the objective is to estimate the anechoic or the early reverberant speech component, here the late reverberant part of the estimated RIR is set as the target response. The combination of the proposed LRSV estimator with a beamformer and a spectral gain aims at a tradeoff between the performance of acoustic channel equalization and the robustness of methods based on models of the reverberant sound field. The performance, evaluated for different levels of RIR estimation error, is compared to the results obtained using a maximum likelihood estimator (MLE) of the LRSV, based on an isotropic model of the reverberant sound field, and to a state-of-the-art acoustic channel equalization method. Experimental results for different acoustic scenarios show that for medium levels of RIR estimation errors the proposed method outperforms acoustic channel equalization as well as the maximum-likelihood LRSV estimator in terms of instrumental speech quality measures.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Gradient-based approaches to learn tensor products.\n \n \n \n \n\n\n \n Rupp, M.; and Schwarz, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2486-2490, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Gradient-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362832,\n  author = {M. Rupp and S. Schwarz},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Gradient-based approaches to learn tensor products},\n  year = {2015},\n  pages = {2486-2490},\n  abstract = {Tensor algebra has become of high interest recently due to its application in the field of so-called Big Data. For signal processing a first important step is to compress a vast amount of data into a small enough set so that particular issues of interest can be investigated with todays computer methods. We propose various gradient-based methods to decompose tensors of matrix products as they appear in structured multiple-input multiple-output systems. While some methods work directly on the observed tensor, others use input-output observations to conclude to the desired decomposition. Although the algorithms are nonlinear in nature, they are being treated as linear estimators; numerical examples validate our results.},\n  keywords = {Big Data;data compression;gradient methods;matrix decomposition;signal processing;tensors;gradient-based approach;tensor algebra;Big data;signal processing;data compression;tensor matrix product decomposition;multiple input multiple output system;linear estimator;Tensile stress;Matrix decomposition;MIMO;Context;Least squares approximations;Signal processing;Signal processing algorithms;Tensors;Decomposition;BigData},\n  doi = {10.1109/EUSIPCO.2015.7362832},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570087301.pdf},\n}\n\n
\n
\n\n\n
\n Tensor algebra has recently attracted considerable interest due to its application in the field of so-called Big Data. For signal processing, a first important step is to compress a vast amount of data into a set small enough that particular issues of interest can be investigated with today's computational methods. We propose various gradient-based methods to decompose tensors of matrix products as they appear in structured multiple-input multiple-output systems. While some methods work directly on the observed tensor, others use input-output observations to arrive at the desired decomposition. Although the algorithms are nonlinear in nature, they are treated as linear estimators; numerical examples validate our results.\n
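As a toy instance of learning a factored linear map from input-output observations, consider gradient descent on ||A B x - y||^2 over the two factors. This is only an illustrative two-factor sketch (dimensions, step size, and initialization are arbitrary choices), not the paper's tensor-structured algorithms:

```python
import numpy as np

def learn_two_factor_map(io_pairs, m, r, n, lr=1e-2, n_epochs=200, seed=None):
    """Stochastic gradient descent on 0.5 * ||A @ B @ x - y||^2 over the
    two factors, learning a factored m x n linear map from (x, y) pairs."""
    rng = np.random.default_rng(seed)
    A = 0.1 * rng.standard_normal((m, r))
    B = 0.1 * rng.standard_normal((r, n))
    for _ in range(n_epochs):
        for x, y in io_pairs:
            Bx = B @ x
            e = A @ Bx - y                  # residual
            A -= lr * np.outer(e, Bx)       # grad_A = e (Bx)^T
            B -= lr * np.outer(A.T @ e, x)  # grad_B = A^T e x^T
    return A, B
```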
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Class-specific nonlinear subspace learning based on optimized class representation.\n \n \n \n \n\n\n \n Iosifidis, A.; Tefas, A.; and Pitas, I.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2491-2495, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Class-specificPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362833,\n  author = {A. Iosifidis and A. Tefas and I. Pitas},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Class-specific nonlinear subspace learning based on optimized class representation},\n  year = {2015},\n  pages = {2491-2495},\n  abstract = {In this paper, a new nonlinear subspace learning technique for class-specific data representation based on an optimized class representation is described. An iterative optimization scheme is formulated where both the optimal nonlinear data projection and the optimal class representation are determined at each optimization step. This approach is tested on human face and action recognition problems, where its performance is compared with that of the standard class-specific subspace learning approach, as well as other nonlinear discriminant subspace learning techniques. Experimental results denote the effectiveness of this new approach, since it consistently outperforms the standard one and outperforms other nonlinear discriminant subspace learning techniques in most cases.},\n  keywords = {data structures;iterative methods;learning (artificial intelligence);optimisation;nonlinear discriminant subspace learning techniques;standard class-specific subspace learning approach;action recognition problems;human face recognition problems;optimal nonlinear data projection;iterative optimization scheme;optimized class representation;class-specific data representation;Optimization;Kernel;Face recognition;Standards;Training data;Europe;Signal processing;Class-specific discriminant learning;Nonlinear subspace learning;Action recognition;Face recognition},\n  doi = {10.1109/EUSIPCO.2015.7362833},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570087391.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, a new nonlinear subspace learning technique for class-specific data representation based on an optimized class representation is described. An iterative optimization scheme is formulated in which both the optimal nonlinear data projection and the optimal class representation are determined at each optimization step. This approach is tested on human face and action recognition problems, where its performance is compared with that of the standard class-specific subspace learning approach, as well as other nonlinear discriminant subspace learning techniques. Experimental results demonstrate the effectiveness of the new approach: it consistently outperforms the standard approach and, in most cases, the other nonlinear discriminant subspace learning techniques.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Does diversity improve deep learning?.\n \n \n \n \n\n\n \n Alvear-Sandoval, R. F.; and Figueiras-Vidal, A. R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2496-2500, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DoesPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362834,\n  author = {R. F. Alvear-Sandoval and A. R. Figueiras-Vidal},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Does diversity improve deep learning?},\n  year = {2015},\n  pages = {2496-2500},\n  abstract = {In this work, we carry out a first exploration of the possibility of increasing the performance of Deep Neural Networks (DNNs) by applying diversity techniques to them. Since DNNs are usually very strong, weakening them can be important for this purpose. This paper includes experimental evidence of the effectiveness of binarizing multi-class problems to make beneficial the application of bagging to Denoising Auto-Encoding-Based DNNs for solving the classical MNIST problem. Many research opportunities appear following the diversification idea: We mention some of the most relevant lines at the end of this contribution.},\n  keywords = {database management systems;learning (artificial intelligence);neural nets;deep learning;deep neural network;diversity technique;multiclass problem binarization;bagging application;auto-encoding-based DNN denoising application;MNIST problem;Training;Bagging;Error analysis;Standards;Europe;Signal processing;Neural networks;Auto-encoding;classification;depth;diversity},\n  doi = {10.1109/EUSIPCO.2015.7362834},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096013.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we carry out a first exploration of the possibility of increasing the performance of Deep Neural Networks (DNNs) by applying diversity techniques to them. Since DNNs are usually very strong, weakening them can be important for this purpose. This paper includes experimental evidence that binarizing multi-class problems makes the application of bagging to denoising auto-encoding-based DNNs beneficial for solving the classical MNIST problem. Many research opportunities appear following the diversification idea; we mention some of the most relevant lines at the end of this contribution.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The class of generalized Hampel filters.\n \n \n \n \n\n\n \n Pearson, R. K.; Neuvo, Y.; Astola, J.; and Gabbouj, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2501-2505, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362835,\n  author = {R. K. Pearson and Y. Neuvo and J. Astola and M. Gabbouj},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {The class of generalized hampel filters},\n  year = {2015},\n  pages = {2501-2505},\n  abstract = {The standard median filter has only one tuning parameter - the width of the moving window on which it is based - and this has led to the development of a number of extremely useful extensions, including the recursive median filter, weighted median filters, and recursive weighted median filters. The Hampel filter is a member of the class of decision filters that, as we note here, may be viewed as another generalization of the median filter. This paper exploits this relationship, defining and briefly exploring the class of generalized Hampel filters, obtained by applying the median filter extensions listed above.},\n  keywords = {median filters;recursive filters;generalized Hampel filter;recursive weighted median filter;decision filter;Standards;Europe;Distortion;Filtering theory;Tuning;White noise},\n  doi = {10.1109/EUSIPCO.2015.7362835},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096433.pdf},\n}\n\n
\n
\n\n\n
\n The standard median filter has only one tuning parameter - the width of the moving window on which it is based - and this has led to the development of a number of extremely useful extensions, including the recursive median filter, weighted median filters, and recursive weighted median filters. The Hampel filter is a member of the class of decision filters that, as we note here, may be viewed as another generalization of the median filter. This paper exploits this relationship, defining and briefly exploring the class of generalized Hampel filters, obtained by applying the median filter extensions listed above.\n
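For reference, the basic (non-recursive, unweighted) Hampel filter that the paper generalizes replaces a sample by the window median whenever it deviates from that median by more than a few scaled-MAD units. A compact sketch; the window size and threshold below are the usual defaults, not values from the paper:

```python
import numpy as np

def hampel_filter(x, half_window=3, n_sigmas=3.0):
    """Standard Hampel filter: replace x[i] by the window median when
    |x[i] - median| exceeds n_sigmas scaled MADs. The paper's
    generalizations substitute recursive/weighted medians here."""
    x = np.asarray(x, dtype=float)
    y = x.copy()
    k = 1.4826  # makes the MAD consistent with a Gaussian standard deviation
    for i in range(half_window, len(x) - half_window):
        w = x[i - half_window:i + half_window + 1]
        med = np.median(w)
        mad = k * np.median(np.abs(w - med))
        if abs(x[i] - med) > n_sigmas * mad:
            y[i] = med
    return y
```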
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Parallelization of non-linear non-Gaussian Bayesian state estimators (Particle filters).\n \n \n \n\n\n \n Jarrah, A.; Jamali, M. M.; Hosseini, S. S. S.; Astola, J.; and Gabbouj, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2506-2510, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362836,\n  author = {A. Jarrah and M. M. Jamali and S. S. S. Hosseini and J. Astola and M. Gabbouj},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Parralelization of non-linear non-Gaussian Bayesian state estimators (Particle filters)},\n  year = {2015},\n  pages = {2506-2510},\n  abstract = {Particle filter has been proven to be a very effective method for identifying targets in non-linear and non-Gaussian environment. However, particle filter is computationally intensive and may not achieve the real time requirements. So, it's desirable to implement it on parallel platforms by exploiting parallel and pipelining architecture to achieve its real time requirements. In this work, an efficient implementation of particle filter in both FPGA and GPU is proposed. Particle filter has also been implemented using MATLAB Parallel Computing Toolbox (PCT). Experimental results show that FPGA and GPU architectures can significantly outperform an equivalent sequential implementation. The results also show that FPGA implementation provides better performance than the GPU implementation. The achieved execution time on dual core and quad core Dell PC using PCT were higher than FPGAs and GPUs as was expected.},\n  keywords = {field programmable gate arrays;graphics processing units;parallel architectures;particle filtering (numerical methods);quad core Dell PC;dual core Dell PC;MATLAB PCT;MATLAB Parallel Computing Toolbox;GPU;FPGA;pipelining architecture;parallel architecture;parallel platforms;particle filter;Particle filters;Field programmable gate arrays;Graphics processing units;Parallel processing;Instruction sets;Particle measurements;MATLAB;Field Programmable Gate Array (FPGA);Graphic Processing Unit (GPU);Parallel Architecture;Particle Filter;MATLAB Parallel Computing Toolbox (PCT)},\n  doi = {10.1109/EUSIPCO.2015.7362836},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n The particle filter has proven to be a very effective method for identifying targets in non-linear and non-Gaussian environments. However, it is computationally intensive and may not meet real-time requirements, so it is desirable to implement it on parallel platforms, exploiting parallel and pipelined architectures. In this work, an efficient implementation of the particle filter on both FPGA and GPU is proposed. The particle filter has also been implemented using the MATLAB Parallel Computing Toolbox (PCT). Experimental results show that the FPGA and GPU architectures can significantly outperform an equivalent sequential implementation, and that the FPGA implementation provides better performance than the GPU implementation. As expected, the execution times achieved on dual-core and quad-core Dell PCs using the PCT were higher than those of the FPGA and GPU implementations.\n
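The structure being parallelized is the standard bootstrap particle filter: propagate particles, weight them by the likelihood, estimate, and resample. A sequential scalar-state sketch is given below (the model functions and noise levels are placeholders); the propagate/weight steps are independent across particles, which is what FPGA/GPU implementations exploit, while resampling is the main serial bottleneck:

```python
import numpy as np

def bootstrap_particle_filter(y, f, h, q_std, r_std, n_particles=1000, seed=None):
    """Scalar-state bootstrap particle filter for
        x[k] = f(x[k-1]) + q,   y[k] = h(x[k]) + r,
    with Gaussian process/measurement noise. f and h must be vectorized."""
    rng = np.random.default_rng(seed)
    particles = rng.standard_normal(n_particles)
    estimates = []
    for yk in y:
        # Propagate and weight: independent across particles (parallelizable)
        particles = f(particles) + q_std * rng.standard_normal(n_particles)
        w = np.exp(-0.5 * ((yk - h(particles)) / r_std) ** 2) + 1e-300
        w /= w.sum()
        estimates.append(np.dot(w, particles))   # MMSE state estimate
        # Multinomial resampling: the serial bottleneck
        particles = particles[rng.choice(n_particles, n_particles, p=w)]
    return np.array(estimates)
```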
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Online sketching for big data subspace learning.\n \n \n \n \n\n\n \n Mardani, M.; and Giannakis, G. B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2511-2515, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OnlinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362837,\n  author = {M. Mardani and G. B. Giannakis},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Online sketching for big data subspace learning},\n  year = {2015},\n  pages = {2511-2515},\n  abstract = {Sketching (a.k.a. subsampling) high-dimensional data is a crucial task to facilitate data acquisition process e.g., in magnetic resonance imaging, and to render affordable `Big Data' analytics. Multidimensional nature and the need for realtime processing of data however pose major obstacles. To cope with these challenges, the present paper brings forth a novel real-time sketching scheme that exploits the correlations across data stream to learn a latent subspace based upon tensor PARAFAC decomposition `on the fly.' Leveraging the online subspace updates, we introduce a notion of importance score, which is subsequently adapted into a randomization scheme to predict a minimal subset of important features to acquire in the next time instant. Preliminary tests with synthetic data corroborate the effectiveness of the novel scheme relative to uniform sampling.},\n  keywords = {Big Data;data acquisition;learning (artificial intelligence);tensors;Big Data subspace learning;data acquisition process;Big Data analytics;data processing;realtime sketching scheme;PARAFAC decomposition;latent subspace;online subspace updates;importance score;randomization scheme;Tensile stress;Real-time systems;Magnetic resonance imaging;Matrix decomposition;Big data;Europe;Signal processing;Tensor;randomization;streaming data;subspace learning},\n  doi = {10.1109/EUSIPCO.2015.7362837},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096923.pdf},\n}\n\n
\n
\n\n\n
\n Sketching (a.k.a. subsampling) high-dimensional data is a crucial task to facilitate the data acquisition process, e.g., in magnetic resonance imaging, and to render `Big Data' analytics affordable. The multidimensional nature of the data and the need for real-time processing, however, pose major obstacles. To cope with these challenges, the present paper brings forth a novel real-time sketching scheme that exploits the correlations across the data stream to learn a latent subspace based upon tensor PARAFAC decomposition `on the fly.' Leveraging the online subspace updates, we introduce a notion of importance score, which is subsequently adapted into a randomization scheme to predict a minimal subset of important features to acquire in the next time instant. Preliminary tests with synthetic data corroborate the effectiveness of the novel scheme relative to uniform sampling.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n FAμST: Speeding up linear transforms for tractable inverse problems.\n \n \n \n\n\n \n Le Magoarou, L.; Gribonval, R.; and Gramfort, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2516-2520, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362838,\n  author = {L. {Le Magoarou} and R. Gribonval and A. Gramfort},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {FAμST: Speeding up linear transforms for tractable inverse problems},\n  year = {2015},\n  pages = {2516-2520},\n  abstract = {In this paper, we propose a technique to factorize any matrix into multiple sparse factors. The resulting factorization, called Flexible Approximate MUlti-layer Sparse Transform (FAμST), yields reduced multiplication costs by the matrix and its adjoint. Such a desirable property can be used to speed up iterative algorithms commonly used to solve high dimensional linear inverse problems. The proposed approach is first motivated, introduced and related to prior art. The compromise between computational efficiency and data fidelity is then investigated, and finally the relevance of the approach is demonstrated on a problem of brain source localization using simulated magnetoencephalography (MEG) signals.},\n  keywords = {bioelectric potentials;compressed sensing;inverse problems;inverse transforms;iterative methods;magnetoencephalography;sparse matrices;flexible approximate multilayer sparse transform;FAμST;linear transforms;inverse problems;multiple sparse factors;iterative algorithms;brain source localization;magnetoencephalography signals;MEG signals;Sparse matrices;Complexity theory;Inverse problems;Signal processing algorithms;Transforms;Approximation algorithms;Europe;Inverse problems;Deconvolution;Matrix factorization;Fast algorithms;Brain source localization},\n  doi = {10.1109/EUSIPCO.2015.7362838},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a technique to factorize any matrix into multiple sparse factors. The resulting factorization, called Flexible Approximate MUlti-layer Sparse Transform (FAμST), yields reduced multiplication costs by the matrix and its adjoint. Such a desirable property can be used to speed up iterative algorithms commonly used to solve high dimensional linear inverse problems. The proposed approach is first motivated, introduced and related to prior art. The compromise between computational efficiency and data fidelity is then investigated, and finally the relevance of the approach is demonstrated on a problem of brain source localization using simulated magnetoencephalography (MEG) signals.\n
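The computational benefit of such multi-layer sparse factorizations is easy to see: applying a dense n x n matrix costs O(n^2) per product, whereas applying J sparse factors with O(n) nonzeros each costs O(Jn). A sketch of applying a FAμST-style operator (the factor count and density are illustrative; obtaining good factors is the paper's actual contribution):

```python
import numpy as np
from scipy.sparse import random as sparse_random

n, J = 512, 4                       # operator size and number of factors
density = 4.0 / n                   # ~4 nonzeros per row in each factor
factors = [sparse_random(n, n, density=density, format="csr", random_state=0)
           for _ in range(J)]

def apply_faust(factors, x):
    """Multiply x by the implicit product factors[0] @ ... @ factors[-1],
    one sparse factor at a time, in O(J * nnz) instead of O(n^2)."""
    for S in reversed(factors):
        x = S @ x
    return x

y = apply_faust(factors, np.random.randn(n))
```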
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Efficient algorithms for ‘universally’ constrained matrix and tensor factorization.\n \n \n \n\n\n \n Huang, K.; Sidiropoulos, N. D.; and Liavas, A. P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2521-2525, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362839,\n  author = {K. Huang and N. D. Sidiropoulos and A. P. Liavas},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Efficient algorithms for ‘universally’ constrained matrix and tensor factorization},\n  year = {2015},\n  pages = {2521-2525},\n  abstract = {We propose a general algorithmic framework for constrained matrix and tensor factorization, which is widely used in unsupervised learning. The new framework is a hybrid between alternating optimization (AO) and the alternating direction method of multipliers (ADMM): each matrix factor is updated in turn, using ADMM. This combination can naturally accommodate a great variety of constraints on the factor matrices, hence the term `universal'. Computation caching and warm start strategies are used to ensure that each update is evaluated efficiently, while the outer AO framework guarantees that the algorithm converges monotonically. Simulations on synthetic data show significantly improved performance relative to state-of-the-art algorithms.},\n  keywords = {matrix decomposition;signal processing;tensors;unsupervised learning;matrix factorization;tensor factorization;algorithmic framework;unsupervised learning;alternating optimization;multipliers alternating direction method;computation caching;warm start strategies;Signal processing algorithms;Yttrium;Tensile stress;Optimization;Convergence;Complexity theory;Matrix decomposition},\n  doi = {10.1109/EUSIPCO.2015.7362839},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n We propose a general algorithmic framework for constrained matrix and tensor factorization, which is widely used in unsupervised learning. The new framework is a hybrid between alternating optimization (AO) and the alternating direction method of multipliers (ADMM): each matrix factor is updated in turn, using ADMM. This combination can naturally accommodate a great variety of constraints on the factor matrices, hence the term `universal'. Computation caching and warm start strategies are used to ensure that each update is evaluated efficiently, while the outer AO framework guarantees that the algorithm converges monotonically. Simulations on synthetic data show significantly improved performance relative to state-of-the-art algorithms.\n
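The inner ADMM update can be illustrated on the nonnegativity-constrained least-squares subproblem solved for one factor while the others are held fixed; swapping the projection for another proximal operator accommodates other constraints, which is the sense of `universal'. A sketch under standard scaled-dual ADMM conventions (the step-size heuristic and iteration count are assumptions):

```python
import numpy as np

def admm_nnls(W, X, H0, rho=None, n_iters=50):
    """ADMM for the per-factor subproblem min_H ||X - W @ H||_F^2, H >= 0.
    The (W^T W + rho I) system is factored once and reused (computation
    caching), so each inner iteration is cheap; replacing the max(0, .)
    projection by another proximal operator handles other constraints."""
    G, F = W.T @ W, W.T @ X
    if rho is None:
        rho = np.trace(G) / G.shape[0]      # common step-size heuristic
    L = np.linalg.cholesky(G + rho * np.eye(G.shape[0]))
    H, U = H0.copy(), np.zeros_like(H0)
    for _ in range(n_iters):
        rhs = F + rho * (H - U)
        Ht = np.linalg.solve(L.T, np.linalg.solve(L, rhs))  # quadratic step
        H = np.maximum(0.0, Ht + U)                         # projection step
        U = U + Ht - H                                      # dual update
    return H
```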
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Online Bayesian low-rank subspace learning from partial observations.\n \n \n \n \n\n\n \n Giampouras, P. V.; Rontogiannis, A. A.; Themelis, K. E.; and Koutroumbas, K. D.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2526-2530, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OnlinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362840,\n  author = {P. V. Giampouras and A. A. Rontogiannis and K. E. Themelis and K. D. Koutroumbas},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Online Bayesian low-rank subspace learning from partial observations},\n  year = {2015},\n  pages = {2526-2530},\n  abstract = {Learning the underlying low-dimensional subspace from streaming incomplete high-dimensional observations data has attracted considerable attention lately. In this paper, we present a new computationally efficient Bayesian scheme for online low-rank subspace learning and matrix completion. The proposed scheme builds upon a properly defined hierarchical Bayesian model that explicitly imposes low rank to the latent subspace by assigning sparsity promoting Student-t priors to the columns of the subspace matrix. The new algorithm is fully automated and as corroborated by numerical simulations, provides higher estimation accuracy and a better estimate of the true subspace rank compared to state of the art methods.},\n  keywords = {computational complexity;learning (artificial intelligence);signal processing;variational techniques;subspace matrix;hierarchical Bayesian model;partial observations;online Bayesian low-rank subspace learning;Bayes methods;Yttrium;Estimation;Europe;Approximation methods;Signal processing;Computational modeling;Online low-rank subspace learning;matrix completion;variational Bayes;big data},\n  doi = {10.1109/EUSIPCO.2015.7362840},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104381.pdf},\n}\n\n
\n
\n\n\n
\n  Learning the underlying low-dimensional subspace from streaming, incomplete high-dimensional observation data has attracted considerable attention lately. In this paper, we present a new computationally efficient Bayesian scheme for online low-rank subspace learning and matrix completion. The proposed scheme builds upon a properly defined hierarchical Bayesian model that explicitly imposes low rank on the latent subspace by assigning sparsity-promoting Student-t priors to the columns of the subspace matrix. The new algorithm is fully automated and, as corroborated by numerical simulations, provides higher estimation accuracy and a better estimate of the true subspace rank compared to state-of-the-art methods.\n
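For orientation, a minimal sketch of the underlying problem only, low-rank matrix completion from partial observations, solved here by plain alternating least squares; the paper's online variational Bayes scheme with Student-t priors is not reproduced:

```python
import numpy as np

rng = np.random.default_rng(2)
m, n, r = 60, 50, 3
M = rng.standard_normal((m, r)) @ rng.standard_normal((r, n))  # true low-rank matrix
mask = rng.random((m, n)) < 0.4                                # 40% observed entries

U = rng.standard_normal((m, r))
V = rng.standard_normal((n, r))
lam = 1e-3
for _ in range(50):
    for i in range(m):               # update each row of U from its observed entries
        idx = mask[i]
        A = V[idx].T @ V[idx] + lam * np.eye(r)
        U[i] = np.linalg.solve(A, V[idx].T @ M[i, idx])
    for j in range(n):
        idx = mask[:, j]
        A = U[idx].T @ U[idx] + lam * np.eye(r)
        V[j] = np.linalg.solve(A, U[idx].T @ M[idx, j])

err = np.linalg.norm((U @ V.T - M)[~mask]) / np.linalg.norm(M[~mask])
print("relative error on unobserved entries:", err)
```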
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed robust subspace tracking.\n \n \n \n \n\n\n \n Kopsinis, Y.; Chouvardas, S.; and Theodoridis, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2531-2535, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362841,\n  author = {Y. Kopsinis and S. Chouvardas and S. Theodoridis},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed robust subspace tracking},\n  year = {2015},\n  pages = {2531-2535},\n  abstract = {In this paper, a distributed, set-theoretic based subspace tracking scheme is presented. In particular, each one of the agents in the network has access to a subset of data, which are not allowed to be shared among them. Moreover, the data vectors lie on a low-rank linear subspace, which is unknown and it might also be time-varying. The agents aim at estimating and tracking the unknown subspace using solely their own data and the tentative subspace estimates of their neighbours. Moreover, some of the the data might be corrupted with outlier noise. Method is evaluated in a synthetic simulation example, where the unknown subspace exhibits abrupt changes.},\n  keywords = {distributed tracking;estimation theory;set theory;signal denoising;vectors;distributed robust subspace tracking;distributed set-theoretic based subspace tracking scheme;data vector;low-rank linear subspace;unknown subspace;tentative subspace estimate;outlier noise;synthetic simulation example;Signal processing algorithms;Robustness;Europe;Signal processing;Electronic mail;Data analysis;Principal component analysis;Distributed online learning;Robust Sub-space Tracking;APSM},\n  doi = {10.1109/EUSIPCO.2015.7362841},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105397.pdf},\n}\n\n
\n
\n\n\n
\n  In this paper, a distributed, set-theoretic subspace tracking scheme is presented. In particular, each of the agents in the network has access to a subset of the data, which are not allowed to be shared among them. Moreover, the data vectors lie on a low-rank linear subspace, which is unknown and might also be time-varying. The agents aim at estimating and tracking the unknown subspace using solely their own data and the tentative subspace estimates of their neighbours. Moreover, some of the data might be corrupted with outlier noise. The method is evaluated in a synthetic simulation example, where the unknown subspace exhibits abrupt changes.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Continuous measurement of impulse responses on a circle using a uniformly moving microphone.\n \n \n \n \n\n\n \n Hahn, N.; and Spors, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2536-2540, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ContinuousPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362842,\n  author = {N. Hahn and S. Spors},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Continuous measurement of impulse responses on a circle using a uniformly moving microphone},\n  year = {2015},\n  pages = {2536-2540},\n  abstract = {We propose a continuous measurement technique which can be used to capture a large number of impulse responses within short time. The response of an acoustic system is continuously captured by a moving microphone, and the instantaneous impulse responses are computed by post-processing. The time-variance due to the movement of the microphone is compensated by employing a recently proposed system identification method. In this method, each sample of the captured signal is interpreted as the orthogonal expansion coefficient of the instantaneous impulse response. The impulse responses are computed from the interpolated orthogonal coefficients. This method is applied to the measurement on a circle. Based on the modal bandwidth of the spatio-temporal impulse response, the relation among the length of the impulse response, the angular speed of the microphone, and the effective number of measurements is revealed. The presented measurement technique was used to measure a large number of room impulse responses, and the results were compared with a conventional sequential measurement technique.},\n  keywords = {acoustic signal processing;acoustic variables measurement;transient response;impulse response continuous measurement;uniformly moving microphone;acoustic system response;time variance;interpolated orthogonal coefficient;room impulse response;Microphones;Acoustic measurements;Measurement techniques;Wavelength measurement;Temperature measurement;Bandwidth;Arrays;Continuous measurement;circular array;sound field analysis;time-variant system identification},\n  doi = {10.1109/EUSIPCO.2015.7362842},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103671.pdf},\n}\n\n
\n
\n\n\n
\n  We propose a continuous measurement technique which can be used to capture a large number of impulse responses within a short time. The response of an acoustic system is continuously captured by a moving microphone, and the instantaneous impulse responses are computed by post-processing. The time variance due to the movement of the microphone is compensated by employing a recently proposed system identification method. In this method, each sample of the captured signal is interpreted as an orthogonal expansion coefficient of the instantaneous impulse response. The impulse responses are computed from the interpolated orthogonal coefficients. This method is applied to measurement on a circle. Based on the modal bandwidth of the spatio-temporal impulse response, the relation among the length of the impulse response, the angular speed of the microphone, and the effective number of measurements is revealed. The presented measurement technique was used to measure a large number of room impulse responses, and the results were compared with a conventional sequential measurement technique.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Room impulse response estimation using perfect sequences for Legendre nonlinear filters.\n \n \n \n \n\n\n \n Carini, A.; Cecchi, S.; and Romoli, L.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2541-2545, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RoomPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362843,\n  author = {A. Carini and S. Cecchi and L. Romoli},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Room impulse response estimation using perfect sequences for Legendre nonlinear filters},\n  year = {2015},\n  pages = {2541-2545},\n  abstract = {The paper proposes a novel method for room impulse response estimation that is robust towards nonlinearities affecting the power amplifier or the loudspeaker of the measurement system. The method is based on measurements of the first order kernel of the Legendre nonlinear filter modeling the acoustic path. In the proposed approach, the first order kernel is efficiently estimated with the cross-correlation method using perfect periodic sequences for Legendre filters. Perfect sequences with period suitable for room impulse response identification are also developed within the paper. Simulation results in a realistic scenario illustrate the effectiveness and robustness towards nonlinearities of the proposed approach.},\n  keywords = {audio signal processing;correlation methods;loudspeakers;nonlinear filters;power amplifiers;room impulse response estimation;perfect periodic sequences;Legendre nonlinear filters;power amplifier;loudspeaker;measurement system;acoustic path modeling;cross-correlation method;room impulse response identification;audio processing;Kernel;Estimation;Acoustic measurements;Acoustics;Loudspeakers;Robustness;Power measurement;Room impulse response;Legendre nonlinear filters;perfect periodic sequences;cross-correlation method},\n  doi = {10.1109/EUSIPCO.2015.7362843},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104707.pdf},\n}\n\n
\n
\n\n\n
\n  The paper proposes a novel method for room impulse response estimation that is robust towards nonlinearities affecting the power amplifier or the loudspeaker of the measurement system. The method is based on measurements of the first-order kernel of the Legendre nonlinear filter modeling the acoustic path. In the proposed approach, the first-order kernel is efficiently estimated with the cross-correlation method using perfect periodic sequences for Legendre filters. Perfect sequences with periods suitable for room impulse response identification are also developed within the paper. Simulation results in a realistic scenario illustrate the effectiveness of the proposed approach and its robustness towards nonlinearities.\n
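A minimal linear toy of the cross-correlation method with a perfect periodic sequence (flat magnitude spectrum, hence a delta-like periodic autocorrelation). The paper's sequences are designed for the nonlinear Legendre case, which this sketch does not cover:

```python
import numpy as np

rng = np.random.default_rng(3)
N = 1024

# Perfect sequence: unit-magnitude spectrum => periodic autocorrelation = delta.
X = np.exp(1j * 2 * np.pi * rng.random(N))
X[0] = 1.0
X[N // 2] = 1.0
X[N // 2 + 1:] = np.conj(X[1:N // 2][::-1])   # Hermitian symmetry -> real sequence
x = np.fft.ifft(X).real

h = rng.standard_normal(64) * np.exp(-np.arange(64) / 10.0)  # unknown "room" IR
y = np.fft.ifft(np.fft.fft(x) * np.fft.fft(h, N)).real       # periodic response

# Circular cross-correlation of output with input; for a perfect sequence this
# recovers the impulse response exactly (normalized by N times the sequence power).
h_hat = np.fft.ifft(np.fft.fft(y) * np.conj(np.fft.fft(x))).real / (N * np.mean(x * x))
print("max IR estimation error:", np.abs(h_hat[:64] - h).max())
```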
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Audio phrases for audio event recognition.\n \n \n \n \n\n\n \n Phan, H.; Hertel, L.; Maass, M.; Mazur, R.; and Mertins, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2546-2550, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AudioPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362844,\n  author = {H. Phan and L. Hertel and M. Maass and R. Mazur and A. Mertins},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Audio phrases for audio event recognition},\n  year = {2015},\n  pages = {2546-2550},\n  abstract = {The bag-of-audio-words approach has been widely used for audio event recognition. In these models, a local feature of an audio signal is matched to a code word according to a learned codebook. The signal is then represented by frequencies of the matched code words on the whole signal. We present in this paper an improved model based on the idea of audio phrases which are sequences of multiple audio words. By using audio phrases, we are able to capture the relationship between the isolated audio words and produce more semantic descriptors. Furthermore, we also propose an efficient approach to learn a compact codebook in a discriminative manner to deal with high-dimensionality of bag-of-audio-phrases representations. Experiments on the Freiburg-106 dataset show that the recognition performance with our proposed bag-of-audio-phrases descriptor outperforms not only the baselines but also the state-of-the-art results on the dataset.},\n  keywords = {audio signal processing;bag-of-audio-phrase descriptor;Freiburg-106 dataset;bag-of-audio-phrase representation;compact codebook;semantic descriptors;signal representation;learned codebook;audio signal local feature matching;bag-of-audio-word approach;audio event recognition;Signal processing;Kernel;Europe;Training data;Clustering methods;Histograms;Training;audio phrase;bag-of-words;audio event;recognition;human activity},\n  doi = {10.1109/EUSIPCO.2015.7362844},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103447.pdf},\n}\n\n
\n
\n\n\n
\n  The bag-of-audio-words approach has been widely used for audio event recognition. In these models, a local feature of an audio signal is matched to a code word according to a learned codebook. The signal is then represented by the frequencies of the matched code words over the whole signal. We present in this paper an improved model based on the idea of audio phrases, which are sequences of multiple audio words. By using audio phrases, we are able to capture the relationship between the isolated audio words and produce more semantic descriptors. Furthermore, we also propose an efficient approach to learn a compact codebook in a discriminative manner to deal with the high dimensionality of bag-of-audio-phrases representations. Experiments on the Freiburg-106 dataset show that our proposed bag-of-audio-phrases descriptor outperforms not only the baselines but also the state-of-the-art results on the dataset.\n
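A minimal sketch of the word vs. phrase descriptors on synthetic frame features (hypothetical data; the paper's discriminative codebook learning is omitted):

```python
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(4)
frames = rng.standard_normal((500, 13))          # e.g. MFCC frames of one recording
codebook = KMeans(n_clusters=8, n_init=10, random_state=0).fit(frames)
words = codebook.predict(frames)                 # one code word per frame

# Bag-of-words: histogram of single code words.
bow = np.bincount(words, minlength=8) / len(words)

# Bag-of-phrases: histogram of consecutive word *pairs* (phrases of length 2),
# capturing temporal relationships between the isolated audio words.
phrases = words[:-1] * 8 + words[1:]             # encode each bigram as one index
bop = np.bincount(phrases, minlength=64) / len(phrases)
print(bow.shape, bop.shape)                      # (8,) vs (64,) descriptor
```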
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-label vs. combined single-label sound event detection with deep neural networks.\n \n \n \n \n\n\n \n Cakir, E.; Heittola, T.; Huttunen, H.; and Virtanen, T.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2551-2555, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-labelPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362845,\n  author = {E. Cakir and T. Heittola and H. Huttunen and T. Virtanen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-label vs. combined single-label sound event detection with deep neural networks},\n  year = {2015},\n  pages = {2551-2555},\n  abstract = {In real-life audio scenes, many sound events from different sources are simultaneously active, which makes the automatic sound event detection challenging. In this paper, we compare two different deep learning methods for the detection of environmental sound events: combined single-label classification and multi-label classification. We investigate the accuracy of both methods on the audio with different levels of polyphony. Multi-label classification achieves an overall 62.8% accuracy, whereas combined single-label classification achieves a very close 61.9% accuracy. The latter approach offers more flexibility on real-world applications by gathering the relevant group of sound events in a single classifier with various combinations.},\n  keywords = {audio signal processing;learning (artificial intelligence);neural nets;signal classification;signal detection;multilabel sound event detection;combined single-label sound event detection;deep neural network;real-life audio scenes;deep learning method;combined single-label classification;multilabel classification;Training;Feature extraction;Signal processing;Europe;Event detection;Databases;Cost function;Sound event detection;deep neural networks;multi-label classification;binary classification;audio analysis},\n  doi = {10.1109/EUSIPCO.2015.7362845},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103605.pdf},\n}\n\n
\n
\n\n\n
\n  In real-life audio scenes, many sound events from different sources are simultaneously active, which makes automatic sound event detection challenging. In this paper, we compare two different deep learning methods for the detection of environmental sound events: combined single-label classification and multi-label classification. We investigate the accuracy of both methods on audio with different levels of polyphony. Multi-label classification achieves an overall 62.8% accuracy, whereas combined single-label classification achieves a very close 61.9% accuracy. The latter approach offers more flexibility in real-world applications by gathering the relevant group of sound events in a single classifier with various combinations.\n
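A minimal sketch of the two target encodings being compared, using toy frame-wise labels (the networks themselves are omitted):

```python
import numpy as np

events = ["speech", "door", "music"]
# Frame-wise multi-label targets: several events can be active at once.
Y_multi = np.array([[1, 0, 0],
                    [1, 1, 0],
                    [0, 0, 1],
                    [1, 1, 0]])

# Combined single-label targets: each combination of simultaneously active
# events becomes one class (label powerset).
combos, Y_single = np.unique(Y_multi, axis=0, return_inverse=True)
print("multi-label targets:\n", Y_multi)
print("single-label classes:", Y_single, "with classes =", [tuple(c) for c in combos])
# A multi-label DNN would use 3 sigmoid outputs; the combined single-label DNN
# uses one softmax over the observed combinations (here 3 classes).
```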
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimization of amplitude modulation features for low-resource acoustic scene classification.\n \n \n \n \n\n\n \n Agcaer, S.; Schlesinger, A.; Hoffmann, F.; and Martin, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2556-2560, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OptimizationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362846,\n  author = {S. Agcaer and A. Schlesinger and F. Hoffmann and R. Martin},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Optimization of amplitude modulation features for low-resource acoustic scene classification},\n  year = {2015},\n  pages = {2556-2560},\n  abstract = {We developed a new feature extraction algorithm based on the Amplitude Modulation Spectrum (AMS), which mainly consists of two filter bank stages composed of low-order recursive filters. The passband range of each filter was optimized by using the Covariance Matrix Adaptation - Evolution Strategy (CMA-ES). The classification task was accomplished by a Linear Discriminant Analysis (LDA) classifier. To evaluate the performance of the proposed acoustic scene classifier based on AMS features, we tested it with the publicly available dataset provided by the IEEE AASP Challenge 2013. Using only 9 optimized AMS features, we achieved 85 % classification accuracy, outperforming the best previously available approaches by 10 %.},\n  keywords = {acoustic signal processing;amplitude modulation;channel bank filters;feature extraction;recursive filters;IEEE AASP Challenge 2013;acoustic scene classifier;linear discriminant analysis;covariance matrix adaptation-evolution strategy;low-order recursive filters;filter bank stages;amplitude modulation spectrum;feature extraction algorithm;low-resource acoustic scene classification;Feature extraction;Acoustics;Time-domain analysis;Frequency modulation;Covariance matrices;evolutionary optimization;acoustic scene classification;acoustic feature extraction;amplitude modulation spectrum},\n  doi = {10.1109/EUSIPCO.2015.7362846},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102095.pdf},\n}\n\n
\n
\n\n\n
\n  We developed a new feature extraction algorithm based on the Amplitude Modulation Spectrum (AMS), which mainly consists of two filter bank stages composed of low-order recursive filters. The passband range of each filter was optimized by using the Covariance Matrix Adaptation Evolution Strategy (CMA-ES). The classification task was accomplished by a Linear Discriminant Analysis (LDA) classifier. To evaluate the performance of the proposed acoustic scene classifier based on AMS features, we tested it with the publicly available dataset provided by the IEEE AASP Challenge 2013. Using only 9 optimized AMS features, we achieved 85% classification accuracy, outperforming the best previously available approaches by 10%.\n
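A minimal sketch of one AMS feature under assumed band edges (the paper optimizes the passbands with CMA-ES, which is omitted here):

```python
import numpy as np
from scipy.signal import butter, lfilter

fs = 16000
t = np.arange(fs) / fs
x = (1 + 0.5 * np.sin(2 * np.pi * 4 * t)) * np.sin(2 * np.pi * 1000 * t)  # 4 Hz AM tone

# Stage 1: low-order recursive band-pass selecting an acoustic band around 1 kHz.
b1, a1 = butter(2, [800 / (fs / 2), 1200 / (fs / 2)], btype="band")
band = lfilter(b1, a1, x)
env = np.abs(band)                           # crude envelope (rectification)

# Stage 2: low-order recursive band-pass in the modulation domain (2-8 Hz).
b2, a2 = butter(2, [2 / (fs / 2), 8 / (fs / 2)], btype="band")
mod = lfilter(b2, a2, env)

feature = np.log(np.mean(mod ** 2) + 1e-12)  # one AMS feature (log modulation energy)
print("AMS feature:", feature)
```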
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust real-time PPG-based heart rate monitoring.\n \n \n \n \n\n\n \n Nowak, E.; Kraiński, M.; Rubiński, M.; Pazderska, M.; and Raczyński, S. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2561-2565, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362847,\n  author = {E. Nowak and M. Kraiński and M. Rubiński and M. Pazderska and S. A. Raczyński},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Robust real-time PPG-based heart rate monitoring},\n  year = {2015},\n  pages = {2561-2565},\n  abstract = {Many existing methods for tracking heart rate (HR) from pho-toplethysmographic (PPG) signals can be found in the literature, but they are tested only in static scenarios and fail when motion artifacts are strong. Recently, an algorithm called TROIKA was proposed for robust HR tracking, but the computational complexity of that algorithm is very high which makes it difficult to implement in small embedded devices, such as wrist-wearable HR monitors. In this article we present a new, fast family of methods robust against very strong motion artifacts using spectral subtraction or nonnegative matrix factorization (NMF) for signal enhancement and MA removal and online Viterbi decoding or particle filtering for HR tracking. On our test data set we obtain an average error of 1.3%.},\n  keywords = {cardiology;matrix decomposition;medical signal processing;particle filtering (numerical methods);patient monitoring;photoplethysmography;Viterbi decoding;particle filtering;online Viterbi decoding;MA removal;signal enhancement;nonnegative matrix factorization;spectral subtraction;wrist-wearable HR monitors;HR tracking;TROIKA algorithm;motion artifacts;static scenarios;photoplethysmographic signals;robust real-time PPG-based heart rate monitoring;Heart rate;Yttrium;Viterbi algorithm;Monitoring;Matrix decomposition;Spectrogram;Robustness;heart rate;PPG;photoplethysmography;particle filter;Viterbi algorithm;spectral subtraction;NMF},\n  doi = {10.1109/EUSIPCO.2015.7362847},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105041.pdf},\n}\n\n
\n
\n\n\n
\n  Many existing methods for tracking heart rate (HR) from photoplethysmographic (PPG) signals can be found in the literature, but they are tested only in static scenarios and fail when motion artifacts are strong. Recently, an algorithm called TROIKA was proposed for robust HR tracking, but its computational complexity is very high, which makes it difficult to implement in small embedded devices such as wrist-wearable HR monitors. In this article we present a new, fast family of methods robust against very strong motion artifacts, using spectral subtraction or nonnegative matrix factorization (NMF) for signal enhancement and motion artifact (MA) removal, and online Viterbi decoding or particle filtering for HR tracking. On our test data set we obtain an average error of 1.3%.\n
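A minimal sketch of the spectral-subtraction step on synthetic PPG and accelerometer signals; the temporal tracking (Viterbi decoding or particle filtering) is omitted:

```python
import numpy as np

fs, T = 125, 8                        # 8 s analysis window
t = np.arange(fs * T) / fs
ppg = np.sin(2 * np.pi * 1.75 * t) + 1.5 * np.sin(2 * np.pi * 2.5 * t)  # HR 105 bpm + MA
acc = np.sin(2 * np.pi * 2.5 * t)     # accelerometer sees only the motion component

f = np.fft.rfftfreq(len(t), 1 / fs)
P = np.abs(np.fft.rfft(ppg))
A = np.abs(np.fft.rfft(acc))

clean = np.maximum(P - (P.max() / A.max()) * A, 0)  # spectral subtraction
band = (f >= 0.7) & (f <= 3.5)                       # plausible HR range, 42-210 bpm
hr = 60 * f[band][np.argmax(clean[band])]
print("estimated heart rate: %.0f bpm" % hr)
```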
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Estimation of the difference between peak airway and tracheal pressures during HFPV.\n \n \n \n \n\n\n \n Ajcevic, M.; Lucangelo, U.; and Accardo, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2566-2570, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"EstimationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362848,\n  author = {M. Ajcevic and U. Lucangelo and A. Accardo},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Estimation of the difference between peak airway and tracheal pressures during HFPV},\n  year = {2015},\n  pages = {2566-2570},\n  abstract = {High frequency percussive ventilation (HFPV) is an advanced ventilatory strategy which has proven very effective in patients with acute respiratory failure. The airway pressure measured by HFPV ventilator represents the sum of the endotracheal tube pressure drop and the tracheal pressure dissipated to inflate a lung. The estimation of the difference between the peak airway and tracheal pressure APp may be very useful to the clinician to avoid lung injury. The aim of this study is to provide an in vitro estimation of APp based only on the ventilator set parameters (i.e. peak pressures, pulsatile frequencies) and the patient's respiratory system resistance and compliance. The model for the estimation of APp was determined by using the Least Absolute Shrinkage and Selection Operator (LASSO) regularized least-squares regression technique. The identified model was successively assessed on test data set.},\n  keywords = {least mean squares methods;medical signal processing;patient diagnosis;peak airway;tracheal pressures;HFPV;high frequency percussive ventilation;advanced ventilatory strategy;acute respiratory;airway pressure;endotracheal tube pressure drop;respiratory system resistance;respiratory system compliance;least absolute shrinkage and selection operator;LASSO;regularized least-squares regression technique;Signal processing;Europe;Yttrium;Physiology;Indexes;Conferences;In vitro;HFPV;respiratory signal processing;model identification;LASSO;endotracheal tubes},\n  doi = {10.1109/EUSIPCO.2015.7362848},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105571.pdf},\n}\n\n
\n
\n\n\n
\n  High frequency percussive ventilation (HFPV) is an advanced ventilatory strategy which has proven very effective in patients with acute respiratory failure. The airway pressure measured by the HFPV ventilator represents the sum of the endotracheal tube pressure drop and the tracheal pressure dissipated to inflate the lung. The estimation of the difference ΔPp between the peak airway and tracheal pressures may be very useful to the clinician to avoid lung injury. The aim of this study is to provide an in vitro estimation of ΔPp based only on the ventilator set parameters (i.e. peak pressures, pulsatile frequencies) and the patient's respiratory system resistance and compliance. The model for the estimation of ΔPp was determined by using the Least Absolute Shrinkage and Selection Operator (LASSO) regularized least-squares regression technique. The identified model was subsequently assessed on a test data set.\n
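A minimal sketch of fitting such a LASSO model on synthetic data; predictor names, value ranges, and coefficients are illustrative, not the paper's protocol:

```python
import numpy as np
from sklearn.linear_model import LassoCV

rng = np.random.default_rng(5)
n = 80
X = np.column_stack([
    rng.uniform(20, 45, n),    # set peak pressure [cmH2O]
    rng.uniform(3, 9, n),      # pulsatile frequency [Hz]
    rng.uniform(5, 50, n),     # resistance R [cmH2O s/L]
    rng.uniform(10, 100, n),   # compliance C [mL/cmH2O], inactive in this toy
])
dPp = 0.4 * X[:, 0] + 1.2 * X[:, 1] + 0.15 * X[:, 2] + rng.normal(0, 0.5, n)

model = LassoCV(cv=5).fit(X, dPp)             # L1-regularized least squares
print("selected coefficients:", model.coef_)  # inactive predictor shrinks toward 0
```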
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automated scoring of rehabilitative tests with singular spectrum analysis.\n \n \n \n \n\n\n \n Lee, T. K. M.; Leo, K. H.; Sanei, S.; and Chew, E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2571-2575, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AutomatedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362849,\n  author = {T. K. M. Lee and K. H. Leo and S. Sanei and E. Chew},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Automated scoring of rehabilitative tests with singular spectrum analysis},\n  year = {2015},\n  pages = {2571-2575},\n  abstract = {In rehabilitation, continual assessment of those with disabilities is needed to determine the effectiveness of therapy and to prescribe the regimen and intensity of future treatment. Conducting assessments is challenging - there is a need to maintain objectivity and consistency across time. Also, repetitious tests can lull the assessor into lower levels of alertness. These motivate for automated scoring of rehabilitative tests. In this paper, we describe our work in automating the widely used and accepted Action Research Arm Test. We focus on the grasp subtest which employs a cube into which we embed sensors. Previously we have used live patient simulators and now the full set of patient trials have been completed. We employ Singular Spectrum Analysis on the signals, for which the resulting eigenvalues are then selected in a principled way to aid in signal filtering. The results show encouraging promise in our quest for automated scoring.},\n  keywords = {eigenvalues and eigenfunctions;filtering theory;intelligent sensors;medical signal processing;patient rehabilitation;spectral analysers;automated scoring;rehabilitative tests;singular spectrum analysis;continual assessment;action research arm test;patient trials;eigenvalues;signal filtering;Eigenvalues and eigenfunctions;Accelerometers;Spectral analysis;Instruments;Force sensors;Europe;Singular spectrum analysis;subspace analysis stroke;rehabilitation;accelerometer;instrumented objects;automatic scoring},\n  doi = {10.1109/EUSIPCO.2015.7362849},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097549.pdf},\n}\n\n
\n
\n\n\n
\n  In rehabilitation, continual assessment of those with disabilities is needed to determine the effectiveness of therapy and to prescribe the regimen and intensity of future treatment. Conducting assessments is challenging: there is a need to maintain objectivity and consistency across time, and repetitious tests can lull the assessor into lower levels of alertness. These considerations motivate the automated scoring of rehabilitative tests. In this paper, we describe our work in automating the widely used and accepted Action Research Arm Test. We focus on the grasp subtest, which employs a cube into which we embed sensors. Previously we have used live patient simulators, and now the full set of patient trials has been completed. We employ Singular Spectrum Analysis on the signals, and the resulting eigenvalues are then selected in a principled way to aid signal filtering. The results show encouraging promise in our quest for automated scoring.\n
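A minimal singular spectrum analysis sketch on a synthetic sensor signal: trajectory-matrix embedding, SVD, selection of leading eigentriples, and diagonal averaging. Here the two largest eigenvalues are simply kept, whereas the paper selects them in a principled way:

```python
import numpy as np

rng = np.random.default_rng(6)
n, L = 200, 50                                   # signal length, window length
t = np.arange(n)
x = np.sin(2 * np.pi * t / 25) + 0.4 * rng.standard_normal(n)

K = n - L + 1
traj = np.column_stack([x[i:i + L] for i in range(K)])   # L x K trajectory matrix
U, s, Vt = np.linalg.svd(traj, full_matrices=False)

rank = 2                                         # keep the leading eigentriples
approx = (U[:, :rank] * s[:rank]) @ Vt[:rank]

# Diagonal averaging (Hankelization) maps the low-rank matrix back to a series.
filtered = np.zeros(n)
counts = np.zeros(n)
for i in range(L):
    for j in range(K):
        filtered[i + j] += approx[i, j]
        counts[i + j] += 1
filtered /= counts
clean = np.sin(2 * np.pi * t / 25)
print("RMS error vs clean: %.3f raw -> %.3f filtered"
      % (np.std(x - clean), np.std(filtered - clean)))
```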
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A tensor decomposition approach to noninvasive atrial activity extraction in atrial fibrillation ECG.\n \n \n \n \n\n\n \n Ribeiro, L. N.; Hidalgo-Muñoz, A. R.; Favier, G.; Mota, J. C. M.; de Almeida , A. L. F.; and Zarzoso, V.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2576-2580, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362850,\n  author = {L. N. Ribeiro and A. R. Hidalgo-Muñoz and G. Favier and J. C. M. Mota and A. L. F. {de Almeida} and V. Zarzoso},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A tensor decomposition approach to noninvasive atrial activity extraction in atrial fibrillation ECG},\n  year = {2015},\n  pages = {2576-2580},\n  abstract = {Atrial fibrillation (AF), the most common arrhythmia in adults, is still considered as the last great frontier of cardiac electrophysiology, since its mechanisms are not completely understood. Analysis of the atrial activity (AA) signal contained in electrocardiograms during AF episodes is a noninvasive and inexpensive solution for obtaining useful information about AF. This work presents tensor decompositions as an alternative to classic blind source separation methods based on matrix decompositions due to their appealing uniqueness properties and considers in particular the block term decomposition (BTD). The practical usefulness of BTD is evaluated by comparing its AA estimation quality, measured by spectral concentration, to those oftwo benchmark methods, revealing that BTD presents a better performance. The results presented in this work motivate further investigation oftensor decompositions for AF analysis.},\n  keywords = {electrocardiography;matrix algebra;medical signal processing;tensors;BTD;block term decomposition;matrix decompositions;AF episodes;atrial activity signal;cardiac electrophysiology;ECG;atrial fibrillation;noninvasive atrial activity extraction;tensor decomposition approach;Tensile stress;Mathematical model;Electrocardiography;Yttrium;Matrix decomposition;Numerical models;Estimation;Atrial fibrillation;blind source separation;tensor decompositions;electrocardiogram},\n  doi = {10.1109/EUSIPCO.2015.7362850},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103891.pdf},\n}\n\n
\n
\n\n\n
\n  Atrial fibrillation (AF), the most common arrhythmia in adults, is still considered as the last great frontier of cardiac electrophysiology, since its mechanisms are not completely understood. Analysis of the atrial activity (AA) signal contained in electrocardiograms during AF episodes is a noninvasive and inexpensive solution for obtaining useful information about AF. This work presents tensor decompositions as an alternative to classic blind source separation methods based on matrix decompositions, due to their appealing uniqueness properties, and considers in particular the block term decomposition (BTD). The practical usefulness of BTD is evaluated by comparing its AA estimation quality, measured by spectral concentration, to those of two benchmark methods, revealing that BTD presents better performance. The results presented in this work motivate further investigation of tensor decompositions for AF analysis.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Empirical mode decomposition for noninvasive atrial fibrillation dominant frequency estimation.\n \n \n \n \n\n\n \n Hidalgo-Muñoz, A. R.; Tomé, A. M.; and Zarzoso, V.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2581-2585, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"EmpiricalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362851,\n  author = {A. R. Hidalgo-Muñoz and A. M. Tomé and V. Zarzoso},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Empirical mode decomposition for noninvasive atrial fibrillation dominant frequency estimation},\n  year = {2015},\n  pages = {2581-2585},\n  abstract = {The dominant frequency (DF) of the atrial activity signal is arguably one of the most relevant features characterizing atrial fibrillation (AF), the most common cardiac arrhythmia. Its accurate estimation from noninvasive acquisition modalities such as the electrocardiogram (ECG) can avoid risks of potential complications to patients in a cost-effective manner. However, the approximation of the underlying intracardiac atrial activity by noninvasive techniques such as average beat subtraction or blind source separation has not always been satisfactory. In the present work, a new approach based on the ensemble empirical mode decomposition (EEMD) is proposed for AF DF estimation. Our results suggest that EEMD provides more accurate estimates of intracardiac AF DF than alternative noninvasive methods. In addition, the empirical nature of EEMD overcomes important drawbacks of other techniques, simplifying its implementation in automatic tools for diagnosis aid.},\n  keywords = {bioelectric potentials;blind source separation;electrocardiography;frequency estimation;medical disorders;medical signal processing;intracardiac atrial activity;diagnosis aid;alternative noninvasive methods;intracardiac AF DF;AF DF estimation;EEMD;ensemble empirical mode decomposition;blind source separation;average beat subtraction;ECG;electrocardiogram;noninvasive acquisition modality;cardiac arrhythmia;noninvasive atrial fibrillation dominant frequency estimation;Electrocardiography;Heart beat;Signal processing;Databases;Signal processing algorithms;Lead;Robustness;Atrial fibrillation;dominant frequency;electrocardiogram;empirical mode decomposition},\n  doi = {10.1109/EUSIPCO.2015.7362851},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103463.pdf},\n}\n\n
\n
\n\n\n
\n The dominant frequency (DF) of the atrial activity signal is arguably one of the most relevant features characterizing atrial fibrillation (AF), the most common cardiac arrhythmia. Its accurate estimation from noninvasive acquisition modalities such as the electrocardiogram (ECG) can avoid risks of potential complications to patients in a cost-effective manner. However, the approximation of the underlying intracardiac atrial activity by noninvasive techniques such as average beat subtraction or blind source separation has not always been satisfactory. In the present work, a new approach based on the ensemble empirical mode decomposition (EEMD) is proposed for AF DF estimation. Our results suggest that EEMD provides more accurate estimates of intracardiac AF DF than alternative noninvasive methods. In addition, the empirical nature of EEMD overcomes important drawbacks of other techniques, simplifying its implementation in automatic tools for diagnosis aid.\n
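A minimal sketch of the dominant-frequency step alone, on a synthetic atrial-activity-like waveform; the EEMD extraction itself is not reproduced here:

```python
import numpy as np
from scipy.signal import welch, sawtooth

fs = 250
t = np.arange(10 * fs) / fs
# Synthetic stand-in for an extracted atrial activity signal: 6 Hz sawtooth + noise.
aa = sawtooth(2 * np.pi * 6.0 * t) + 0.3 * np.random.default_rng(7).standard_normal(len(t))

f, pxx = welch(aa, fs=fs, nperseg=1024)
band = (f >= 3) & (f <= 12)                 # typical AF dominant-frequency range
df = f[band][np.argmax(pxx[band])]
print("estimated dominant frequency: %.2f Hz" % df)
```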
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive identification of oscillatory bands from subcortical neural data.\n \n \n \n \n\n\n \n Özkurt, T. E.; Butz, M.; Hirschmann, J.; and Schnitzler, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2586-2590, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362852,\n  author = {T. E. Özkurt and M. Butz and J. Hirschmann and A. Schnitzler},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive identification of oscillatory bands from subcortical neural data},\n  year = {2015},\n  pages = {2586-2590},\n  abstract = {Neural oscillations in various distinct frequency bands and their interrelations yield high temporal resolution signatures of the human brain activity. This study demonstrates solutions to some of the common challenges in the analysis of neurophysiological data by means of subthalamic local field potentials (LFP) acquired form patients with Parkinson's Disease (PD) undergoing deep brain stimulation therapy. Multivariate empirical mode decomposition (MEMD), being a data-driven method suitable for multichannel data, is employed. This method allows identification of oscillatory bands without the requirement of fixed a priori basis functions. Our study focuses on two issues: (i) Determination of data specific frequency bands and revealing the weak inconspicuous high frequency components in the data and (ii) validation of the biological meaningfulness of the MEMD oscillatory components via phase-amplitude coupling as previously shown to be inherent in subcortical PD LFP data.},\n  keywords = {brain;diseases;neurophysiology;patient treatment;adaptive identification;oscillatory bands;subcortical neural data;neural oscillations;human brain activity;neurophysiological data;subthalamic local field potentials;LFP;Parkinson's disease;deep brain stimulation therapy;multivariate empirical mode decomposition;data driven method;multichannel data;MEMD oscillatory components;phase-amplitude coupling;Hafnium oxide;Couplings;Time-frequency analysis;Europe;Signal processing;Oscillators;Empirical mode decomposition;local field potentials;oscillations;Parkinson's disease;coupling;multivariate empirical mode decomposition},\n  doi = {10.1109/EUSIPCO.2015.7362852},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104151.pdf},\n}\n\n
\n
\n\n\n
\n  Neural oscillations in various distinct frequency bands and their interrelations yield high temporal resolution signatures of human brain activity. This study demonstrates solutions to some of the common challenges in the analysis of neurophysiological data by means of subthalamic local field potentials (LFP) acquired from patients with Parkinson's Disease (PD) undergoing deep brain stimulation therapy. Multivariate empirical mode decomposition (MEMD), being a data-driven method suitable for multichannel data, is employed. This method allows identification of oscillatory bands without the requirement of fixed a priori basis functions. Our study focuses on two issues: (i) determination of data-specific frequency bands and revealing the weak, inconspicuous high frequency components in the data, and (ii) validation of the biological meaningfulness of the MEMD oscillatory components via phase-amplitude coupling, as previously shown to be inherent in subcortical PD LFP data.\n
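A minimal sketch of issue (ii): a Canolty-style mean-vector-length phase-amplitude coupling index on a synthetic signal, with assumed band edges; the MEMD pipeline itself is not reproduced:

```python
import numpy as np
from scipy.signal import hilbert, butter, filtfilt

fs = 1000
t = np.arange(20 * fs) / fs
rng = np.random.default_rng(8)
slow = np.sin(2 * np.pi * 6 * t)                      # theta-like component
fast = (1 + 0.8 * slow) * np.sin(2 * np.pi * 80 * t)  # gamma amplitude tied to theta phase
x = slow + fast + 0.5 * rng.standard_normal(len(t))

def bandpass(sig, lo, hi):
    b, a = butter(3, [lo / (fs / 2), hi / (fs / 2)], btype="band")
    return filtfilt(b, a, sig)

phase = np.angle(hilbert(bandpass(x, 4, 8)))      # phase of the slow band
amp = np.abs(hilbert(bandpass(x, 70, 90)))        # envelope of the fast band
mi = np.abs(np.mean(amp * np.exp(1j * phase)))    # mean vector length
print("modulation index: %.3f" % mi)
```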
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ultrasonic fatty liver imaging.\n \n \n \n \n\n\n \n Deng, Y.; Jago, J.; and Gong, Y.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2591-2595, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"UltrasonicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362853,\n  author = {Y. Deng and J. Jago and Y. Gong},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Ultrasonic fatty liver imaging},\n  year = {2015},\n  pages = {2591-2595},\n  abstract = {Fatty liver disease is a prevalent condition which may result in serious liver complications and is currently lack of an effective and efficient approach for its quantification. In the paper, we propose to directly image the fat content distribution in liver based on ultrasound echo radio-frequency signals. In the proposed method, spectral difference is utilized to represent the small pieces of liver tissues. Then the connection between the data representation and liver tissues is directly established by an elaborately designed learning process in the high-dimensional feature space, which includes comprehensive hyperparameter learning and model learning. Experimental results demonstrate the effectiveness of the proposed method which is able to visualize the fat distribution and has a 0.93 correlation coefficient with the fat-percentage quantification results of doctor's pathological analysis.},\n  keywords = {biological tissues;biomedical ultrasonics;diseases;feature extraction;learning (artificial intelligence);liver;medical image processing;ultrasonic fatty liver imaging;ultrasound echo radiofrequency signals;liver tissues;high-dimensional feature space;hyperparameter learning process;Ultrasonic imaging;Liver diseases;Imaging;Attenuation;Radio frequency;RF signals;Fatty liver;Quantification;Imaging;Ultrasound echo signal;Machine learning},\n  doi = {10.1109/EUSIPCO.2015.7362853},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570095721.pdf},\n}\n\n
\n
\n\n\n
\n  Fatty liver disease is a prevalent condition which may result in serious liver complications and currently lacks an effective and efficient approach for its quantification. In this paper, we propose to directly image the fat content distribution in the liver based on ultrasound echo radio-frequency signals. In the proposed method, spectral difference is utilized to represent small pieces of liver tissue. The connection between the data representation and liver tissues is then directly established by an elaborately designed learning process in the high-dimensional feature space, which includes comprehensive hyperparameter learning and model learning. Experimental results demonstrate the effectiveness of the proposed method, which is able to visualize the fat distribution and achieves a 0.93 correlation coefficient with the fat-percentage quantification results of the doctors' pathological analysis.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Cortical network modulation during paced arm movements.\n \n \n \n \n\n\n \n Storti, S. F.; Formaggio, E.; Manganotti, P.; and Menegaz, G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2596-2600, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CorticalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362854,\n  author = {S. F. Storti and E. Formaggio and P. Manganotti and G. Menegaz},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Cortical network modulation during paced arm movements},\n  year = {2015},\n  pages = {2596-2600},\n  abstract = {In this paper we investigate task-related changes in brain functional connectivity (FC) by applying different methods namely event-related desynchronization (ERD), coherence and graph-theoretical analysis to electroencephalographic (EEG) recordings. While ERD provides an estimate of the differences in power spectral densities between task and rest conditions, coherence allows assessing the level of synchronization between the recorded signals and graph analysis enables the estimation of the functional network topology. EEGs were recorded on 10 subjects during left/right arm movements. Conventional analysis showed a significant ERD in both alpha and beta bands over the sensorimotor cortex. Connectivity assessment highlighted that stronger connections are those involving the motor regions for which graph analysis revealed reduced accessibility and an increased cen-trality during the movement. This highlights that network analysis brings complementary knowledge with respect to established approaches for modeling motor-induced FC.},\n  keywords = {biomechanics;electroencephalography;motor-induced functional connectivity modeling;sensorimotor cortex;functional network topology;power spectral density;EEG recording;electroencephalographic recording;graph-theoretical analysis;event-related desynchronization;brain functional connectivity;paced arm movement;cortical network modulation;Electroencephalography;Coherence;Random access memory;Electrodes;Brain modeling;Europe;Signal processing;EEG power;ERD;functional connectivity;coherence;graph analysis},\n  doi = {10.1109/EUSIPCO.2015.7362854},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103699.pdf},\n}\n\n
\n
\n\n\n
\n  In this paper we investigate task-related changes in brain functional connectivity (FC) by applying different methods, namely event-related desynchronization (ERD), coherence and graph-theoretical analysis, to electroencephalographic (EEG) recordings. While ERD provides an estimate of the differences in power spectral densities between task and rest conditions, coherence allows assessing the level of synchronization between the recorded signals, and graph analysis enables the estimation of the functional network topology. EEGs were recorded on 10 subjects during left/right arm movements. Conventional analysis showed a significant ERD in both alpha and beta bands over the sensorimotor cortex. Connectivity assessment highlighted that the stronger connections are those involving the motor regions, for which graph analysis revealed reduced accessibility and an increased centrality during the movement. This highlights that network analysis brings complementary knowledge with respect to established approaches for modeling motor-induced FC.\n
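A minimal sketch of the connectivity-plus-graph step: band-averaged coherence between synthetic channels, thresholding into a graph, and degree centrality (threshold and band are assumptions; the ERD analysis is omitted):

```python
import numpy as np
from scipy.signal import coherence

fs = 256
rng = np.random.default_rng(9)
common = rng.standard_normal(30 * fs)
eeg = np.vstack([
    common + 0.5 * rng.standard_normal(30 * fs),   # ch0 and ch1 share a source
    common + 0.5 * rng.standard_normal(30 * fs),
    rng.standard_normal(30 * fs),                  # ch2, ch3 independent
    rng.standard_normal(30 * fs),
])

n_ch = eeg.shape[0]
C = np.eye(n_ch)
for i in range(n_ch):
    for j in range(i + 1, n_ch):
        f, coh = coherence(eeg[i], eeg[j], fs=fs, nperseg=512)
        band = (f >= 8) & (f <= 13)                # alpha band
        C[i, j] = C[j, i] = coh[band].mean()

adj = (C > 0.5) & ~np.eye(n_ch, dtype=bool)        # threshold to a binary graph
degree_centrality = adj.sum(axis=1) / (n_ch - 1)
print(np.round(C, 2))
print("degree centrality:", degree_centrality)
```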
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A multilevel memory-assisted lossless compression algorithm for medical images.\n \n \n \n \n\n\n \n Hesabi, Z. R.; Kazimipour, B.; Deriche, M.; and Navarro, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2601-2605, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362855,\n  author = {Z. R. Hesabi and B. Kazimipour and M. Deriche and A. Navarro},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A multilevel memory-assisted lossless compression algorithm for medical images},\n  year = {2015},\n  pages = {2601-2605},\n  abstract = {As medical imaging facilities move towards film-less imaging technology, robust image compression systems are starting to play a key role. Conventional storage and transmission of large-scale raw medical image datasets can be very expensive and time-consuming. Recently, we proposed a memory-assisted lossless image compression algorithm based on Principal Component Analysis(PCA). In this paper, we further improve the performance of the algorithm in two different directions: Firstly, we replace PC A with NMF (Non Negative Matrix Factorization). NMF has several advantages in representing images with an image-like basis, results in sparse factors, and provides better user control over iterations. Secondly, we expand the single-level model with a new multilevel decomposition/projection framework to further reduce entropy of residual images. Our experimental results on X-ray images confirm that both modifications provide significant improvements over the single level PCA based algorithm as well as existing non-memory based techniques.},\n  keywords = {data compression;image coding;image representation;matrix decomposition;medical image processing;principal component analysis;X-ray imaging;multilevel memory-assisted lossless image compression algorithm;medical image;principal component analysis;PCA;nonnegative matrix factorization;NMF;image representation;multilevel decomposition-projection framework;residual image entropy reduction;X-ray image;Europe;Signal processing;Conferences;Lossless Compression;Medical Imaging;Non-negative Matrix Factorization;Unsupervised Learning},\n  doi = {10.1109/EUSIPCO.2015.7362855},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105531.pdf},\n}\n\n
\n
\n\n\n
\n  As medical imaging facilities move towards film-less imaging technology, robust image compression systems are starting to play a key role. Conventional storage and transmission of large-scale raw medical image datasets can be very expensive and time-consuming. Recently, we proposed a memory-assisted lossless image compression algorithm based on Principal Component Analysis (PCA). In this paper, we further improve the performance of the algorithm in two different directions: Firstly, we replace PCA with NMF (Non-negative Matrix Factorization). NMF has several advantages: it represents images with an image-like basis, results in sparse factors, and provides better user control over iterations. Secondly, we expand the single-level model with a new multilevel decomposition/projection framework to further reduce the entropy of residual images. Our experimental results on X-ray images confirm that both modifications provide significant improvements over the single-level PCA-based algorithm as well as existing non-memory-based techniques.\n
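A minimal sketch of the single-level idea: approximate the image with NMF, then code the lower-entropy residual losslessly. The stand-in image is synthetic, the entropy is a simple zeroth-order estimate, and the multilevel extension and the actual entropy coder are omitted:

```python
import numpy as np
from sklearn.decomposition import NMF

def entropy_bits(a):
    vals, counts = np.unique(a, return_counts=True)
    p = counts / counts.sum()
    return -(p * np.log2(p)).sum()

rng = np.random.default_rng(10)
# Hypothetical stand-in for an X-ray image: smooth ramp plus Poisson noise.
img = np.clip(rng.poisson(60, (64, 64)) + np.outer(np.arange(64), np.ones(64)), 0, 255)

model = NMF(n_components=8, init="nndsvda", max_iter=500, random_state=0)
W = model.fit_transform(img)
approx = np.rint(W @ model.components_)
residual = (img - approx).astype(int)       # to be entropy-coded losslessly

print("raw image entropy: %.2f bits/px" % entropy_bits(img.astype(int)))
print("residual entropy : %.2f bits/px" % entropy_bits(residual))
```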
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Design of a new high-energy concentration kernel quadratic TFD for EEG spike signal.\n \n \n \n \n\n\n \n Ben-Jabeur, T.; and Kadri, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2606-2610, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DesignPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362856,\n  author = {T. Ben-Jabeur and A. Kadri},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Design of a new high-energy concentration kernel quadratic TFD for EEG spike signal},\n  year = {2015},\n  pages = {2606-2610},\n  abstract = {In this paper, the design of a novel high-energy concentration kernel quadratic TFD for EEG spike signal analysis is presented. Firstly, we show that the suppression of the negative frequency of the signal due of the use of Hilbert transform causes low Time-Frequency Distribution (TFD) resolution in the very low frequency band. To remedy this artifact, a frequency shifting of the signal to the mid frequency band is used so that the negative and positive frequencies are taken into account in the time-frequency domain. This process enhances the TFD resolution in the very low frequency band. Secondly, we derived a new separable kernel TFD with a high auto-terms energy concentration based on the localization of the auto-terms and cross-terms of the EEG spike signal in the ambiguity domain. The proposed kernel uses only two parameters and offers high TFD resolution compared to the existing ones.},\n  keywords = {electroencephalography;Hilbert transforms;medical signal processing;EEG spike signal analysis;time-frequency distribution;high-energy concentration kernel quadratic TFD;negative frequency suppression;Hilbert transform;time-frequency domain;ambiguity domain;electroencephalography;Kernel;Electroencephalography;Signal resolution;Time-frequency analysis;Doppler effect;Transforms;EEG Spike signal;Quadratic TFD;Auto-terms and cross-terms},\n  doi = {10.1109/EUSIPCO.2015.7362856},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105443.pdf},\n}\n\n
\n
\n\n\n
\n  In this paper, the design of a novel high-energy-concentration kernel quadratic TFD for EEG spike signal analysis is presented. Firstly, we show that the suppression of the negative frequencies of the signal, due to the use of the Hilbert transform, causes low Time-Frequency Distribution (TFD) resolution in the very low frequency band. To remedy this artifact, a frequency shifting of the signal to the mid frequency band is used so that the negative and positive frequencies are taken into account in the time-frequency domain. This process enhances the TFD resolution in the very low frequency band. Secondly, we derive a new separable kernel TFD with a high auto-term energy concentration based on the localization of the auto-terms and cross-terms of the EEG spike signal in the ambiguity domain. The proposed kernel uses only two parameters and offers high TFD resolution compared to existing ones.\n
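A minimal sketch of the frequency-shifting step alone: modulating the real signal to a mid band keeps both spectral halves in view instead of discarding the negative half via the Hilbert transform (the proposed separable kernel TFD itself is not reproduced):

```python
import numpy as np

fs = 256
t = np.arange(2 * fs) / fs
x = np.sin(2 * np.pi * 3 * t)                  # very low-frequency content

f0 = fs / 4                                    # shift to the mid band
x_shift = x * np.exp(1j * 2 * np.pi * f0 * t)  # spectrum moves from {-3, +3} Hz to {61, 67} Hz

f = np.fft.fftfreq(len(t), 1 / fs)
S = np.abs(np.fft.fft(x_shift))
# Both the former negative- and positive-frequency components now sit mid-band.
print("dominant frequencies after shift:", np.sort(f[np.argsort(S)[-2:]]))
```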
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the effect of random snapshot timing jitter on the covariance matrix for JADE estimation.\n \n \n \n \n\n\n \n Bazzi, A.; Slock, D. T. M.; and Meilhac, L.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2611-2615, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362857,\n  author = {A. Bazzi and D. T. M. Slock and L. Meilhac},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {On the effect of random snapshot timing jitter on the covariance matrix for JADE estimation},\n  year = {2015},\n  pages = {2611-2615},\n  abstract = {In this paper, we focus on joint multipath angle and delay estimation (JADE) in an OFDM communication setting. We analyse the effect of Gaussian random snapshot (OFDM symbol) timing jitter on the spatio-frequency sample covariance matrix containing delay and direction information. This sample covariance matrix is an input to the JADE and many other algorithms for signal parameter estimation. The analysis suggests a simple way to compensate for the jitter in the sample covariance matrix. We also present two simple methods for estimating the jitter variance, allowing its compensation. These techniques attempt to restore the low rank nature or other structure in the signal contribution. We then finally present some simulations for the resulting estimation quality of the multipath delays (ToAs) and angles (AoAs) of the incoming signals.},\n  keywords = {covariance matrices;delay estimation;direction-of-arrival estimation;jitter;OFDM modulation;signal restoration;time-of-arrival estimation;JADE estimation;onjoint multipath angle and delay estimation;OFDM communication;Gaussian random snapshot timing jitter;spatiofrequency sample covariance matrix;signal parameter estimation;signal contribution;ToA;AoA;time-of-arrival estimation;angle-of-arrival estimation;Covariance matrices;OFDM;IEEE 802.11 Standard;Timing jitter;Europe;Snapshot timing jitter;perturbed sample covariance matrix;JADE;ToA;DoA;matrix rank minimization},\n  doi = {10.1109/EUSIPCO.2015.7362857},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104921.pdf},\n}\n\n
@InProceedings{7362858,
  author = {C. Delestre and A. Ferréol and P. Larzabal and C. Germond},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {TARGET: A direct AOA-TDOA estimation for blind broadband geolocalization},
  year = {2015},
  pages = {2616-2620},
  abstract = {In this paper, a new robust and computationally efficient algorithm is proposed for broadband geolocalization. Recent work has demonstrated the superiority of 1-step geolocalization over 2-step algorithms. However, this superiority is obtained at the price of a bandwidth slicing which is unfortunately limited for computational reasons and leads to an asymptotic bias due to the residual broadband effect. In this paper we propose an alternative approach that fully exploits the total bandwidth and consequently suppresses the slicing drawbacks. The proposed method is named TARGET and exploits the rank deficiency of a temporal-shift-dependent covariance matrix after a multichannel synchronization. Our analysis and simulations prove the performance advantage of the proposed method over recently introduced ones.},
  keywords = {broadband networks;navigation;synchronisation;telecommunication channels;time-of-arrival estimation;direct AOA-TDOA estimation;blind broadband geolocalization;TARGET;residual broadband effect;temporal shift dependent covariance matrix;multichannel synchronization;Covariance matrices;Broadband communication;Geology;Antenna arrays;Broadband antennas;Estimation;Signal processing algorithms;joint AoA and TDoA estimation;broad band geolocalization;direct geolocalization},
  doi = {10.1109/EUSIPCO.2015.7362858},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101199.pdf},
}
@InProceedings{7362859,
  author = {A. Saucan and T. Chonavel and C. Sintes and J. {Le Caillec}},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Marked Poisson point process PHD filter for DOA tracking},
  year = {2015},
  pages = {2621-2625},
  abstract = {In this paper we propose a Track Before Detect (TBD) filter for Direction Of Arrival (DOA) tracking of multiple targets from phased-array observations. The phased-array model poses a new problem since each target emits a signal, called the source signal. Existing methods consider the source signal as part of the system state. This is inefficient, especially for particle approximations of posteriors, where samples are drawn from the higher-dimensional posterior of the extended state. To address this problem, we propose a novel Marked Poisson Point Process (MPPP) model and derive the Probability Hypothesis Density (PHD) filter that adaptively estimates target DOAs. The PPP models variations of both the number and the location of points representing targets. The mark of a point represents the source signal, without the need of an extended state. Recursive formulas for the MPPP PHD filter are derived, with simulations showcasing improved performance over state-of-the-art methods.},
  keywords = {adaptive signal processing;array signal processing;direction-of-arrival estimation;filtering theory;stochastic processes;target tracking;marked Poisson point process PHD filter;DOA tracking;track-before-detect filter;TBD filter;direction-of-arrival tracking;phased-array model;source signal;posterior particle approximation;higher-dimensional posterior;MPPP model;probability hypothesis density filter;adaptive target DOA estimation;MPPP PHD filter;multiple-target tracking;Target tracking;Direction-of-arrival estimation;Radar tracking;Approximation methods;Kinematics;Array signal processing;Europe;DOA tracking;marked Poisson point process;PHD filter;track before detect;DBSCAN},
  doi = {10.1109/EUSIPCO.2015.7362859},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096449.pdf},
}
@InProceedings{7362860,
  author = {W. Suleiman and M. Pesavento and A. M. Zoubir},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Decentralized cooperative DOA tracking using non-Hermitian generalized eigendecomposition},
  year = {2015},
  pages = {2626-2630},
  abstract = {The problem of direction-of-arrival (DOA) estimation using partly calibrated arrays composed of multiple identically oriented subarrays is considered. The subarrays are assumed to possess the shift-invariance property, which is exploited to develop a distributed search-free DOA estimation algorithm based on the generalized eigendecomposition (GED) of a pair of covariance matrices. We propose a fully decentralized adaptive algorithm which tracks the generalized eigenvalues (GEVs) of a non-Hermitian pair of covariance matrices, from which the DOAs are estimated. Moreover, to enforce the amplitude property of the nominal source GEVs, we propose a suitable measurement weighting scheme. We demonstrate the estimation performance of our algorithm with simulations and confirm that our algorithm is able to identify more sources than each subarray individually can.},
  keywords = {adaptive estimation;adaptive signal processing;array signal processing;covariance matrices;direction-of-arrival estimation;eigenvalues and eigenfunctions;matrix decomposition;decentralized cooperative DOA tracking;non-Hermitian generalized eigendecomposition;direction-of-arrival estimation;partly calibrated array;multiple identically oriented subarray;shift-invariance property;distributed search-free DOA estimation algorithm;GED;covariance matrix;decentralized adaptive algorithm;generalized eigenvalue tracking;GEV tracking;nominal source GED amplitude property;measurement weighting scheme;Direction-of-arrival estimation;Estimation;Eigenvalues and eigenfunctions;Yttrium;Signal processing algorithms;Covariance matrices;Transmission line matrix methods;partly calibrated arrays;cooperative DOA tracking;decentralized generalized eigendecomposition},
  doi = {10.1109/EUSIPCO.2015.7362860},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103965.pdf},
}
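The shift-invariance idea behind the GED can be sketched in a centralized, ESPRIT-flavoured form (the paper's contribution is the decentralized, adaptive tracking of these eigenvalues; the array geometry, SNR, and source count below are assumptions):

# Illustrative sketch: DOAs read off from the generalized eigenvalues of a
# non-Hermitian pair of covariance matrices from two identical, shifted subarrays.
import numpy as np
from scipy.linalg import eig

rng = np.random.default_rng(0)
M, d_src, N = 8, 2, 5000                 # sensors per subarray, sources, snapshots
doa = np.deg2rad([-10.0, 25.0])          # true DOAs
spacing = 0.5                            # subarray shift in wavelengths
A = np.exp(2j * np.pi * spacing * np.outer(np.arange(M), np.sin(doa)))
phi = np.exp(2j * np.pi * spacing * np.sin(doa))   # shift-invariance phases

S = rng.standard_normal((d_src, N)) + 1j * rng.standard_normal((d_src, N))
X1 = A @ S + 0.05 * (rng.standard_normal((M, N)) + 1j * rng.standard_normal((M, N)))
X2 = A @ (phi[:, None] * S) + 0.05 * (rng.standard_normal((M, N)) + 1j * rng.standard_normal((M, N)))

R1 = X1 @ X1.conj().T / N                # Hermitian covariance of subarray 1
R12 = X2 @ X1.conj().T / N               # non-Hermitian cross-covariance
gev = eig(R12, R1, right=False)          # generalized eigenvalues of the pair
sig = gev[np.argsort(-np.abs(gev))[:d_src]]        # signal GEVs lie near |z| = 1
est = np.rad2deg(np.arcsin(np.angle(sig) / (2 * np.pi * spacing)))
print(np.sort(est))                      # approximately [-10, 25]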
@InProceedings{7362861,
  author = {M. W. Morency and S. A. Vorobyov},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Partially adaptive transmit beamforming for search free 2D DOA estimation in MIMO radar},
  year = {2015},
  pages = {2631-2635},
  abstract = {In this paper, a partially adaptive two dimensional (2D) transmit beamforming approach is proposed to enable search-free azimuth and elevation direction of arrival (DOA) estimation in MIMO radar. Specifically, the 2D transmit array is non-adaptively partitioned into a number of subarrays. Then, a beamspace matrix is adaptively designed for each subarray, such that the beampatterns corresponding to each matrix have the exact same magnitude. By constraining the beams to be transmitted from different subarrays, multiple data invariances are enforced independently of the receive array geometry. The invariances are then exploited by search-free DOA estimation methods. Simulations validate the proposed approach.},
  keywords = {array signal processing;direction-of-arrival estimation;matrix algebra;MIMO radar;radar signal processing;search free 2D DOA estimation;MIMO radar;adaptive two dimensional transmit beamforming approach;search-free azimuth;elevation direction of arrival estimation;2D transmit array;beamspace matrix;receive array geometry;Arrays;Direction-of-arrival estimation;Receivers;Yttrium;Array signal processing;Estimation;MIMO radar;Adaptive beamforming;MIMO radar;search-free DOA estimation methods},
  doi = {10.1109/EUSIPCO.2015.7362861},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100913.pdf},
}
@InProceedings{7362862,
  author = {T. Dao and T. Le and D. Harle and P. Murray and C. Tachtatzis and S. Marshall and C. Michie and I. Andonovic},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Automatic cattle location tracking using image processing},
  year = {2015},
  pages = {2636-2640},
  abstract = {Behavioural scientists track animal behaviour patterns through the construction of ethograms which detail the activities of cattle over time. To achieve this, scientists currently view video footage from multiple cameras located in and around a pen, which houses the animals, to extract their location and determine their activity. This is a time consuming, laborious task, which could be automated. In this paper we extend the well-known Real-Time Compressive Tracking algorithm to automatically determine the location of dairy and beef cows from multiple video cameras in the pen. Several optimisations are introduced to improve algorithm accuracy. An automatic approach for updating the bounding box which discourages the algorithm from learning the background is presented. We also dynamically weight the location estimates from multiple cameras using boosting to avoid errors introduced by occlusion and by the tracked animal moving in and out of the field of view.},
  keywords = {farming;feature extraction;image processing;optimisation;target tracking;video cameras;video signal processing;cattle location tracking;image processing;animal behaviour patterns;ethograms;real-time compressive tracking algorithm;video cameras;Cameras;Detectors;Cows;Feature extraction;Signal processing algorithms;Streaming media;Image and video processing;Object tracking in crowded environments;Cattle localisation},
  doi = {10.1109/EUSIPCO.2015.7362862},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102933.pdf},
}
@InProceedings{7362863,
  author = {F. Bertholon and O. Harant and L. Foan and S. Vignoud and C. Jutten and P. Grangeat},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Chromatographic signal processing for PAH in methanol solution},
  year = {2015},
  pages = {2641-2645},
  abstract = {In this paper we describe two methods to estimate the concentration of polycyclic aromatic hydrocarbons (PAHs) in a methanol solution, from a gas chromatography analysis. We present an innovative stochastic forward model based on a molecular random walk. To infer the PAH concentration profiles, we use two inversion methods. The first one is a Bayesian estimator using an MCMC algorithm and Gibbs sampling. The second one is a sparse representation method with a non-negativity constraint on the mixture vector, based on the decomposition of the signal on a dictionary of chromatographic impulse response functions as defined by the forward model. Results provided by these two methods are finally shown, with a comparison of their computational and quantification performances.},
  keywords = {Bayes methods;chemical engineering computing;chromatography;Markov processes;Monte Carlo methods;organic compounds;signal representation;transient response;chromatographic signal processing;methanol solution;polycyclic aromatic hydrocarbon;gas chromatography analysis;innovative stochastic forward model;inversion method;Bayesian estimator;MCMC algorithm;Gibbs sampling;sparse representation method;nonnegativity constraint;signal decomposition;chromatographic impulse response function;Monte Carlo Markov chain;Signal processing algorithms;Bayes methods;Dictionaries;Computational modeling;Signal processing;Europe;Markov processes;Gas chromatography;Bayesian estimation;Monte Carlo Markov Chain (MCMC);Sparse Representation;Dictionary;FOCUSS Algorithm},
  doi = {10.1109/EUSIPCO.2015.7362863},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104761.pdf},
}
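The dictionary-based inversion described above can be sketched with non-negative least squares (the Gaussian peak shapes below are a simplifying assumption; the paper derives its impulse responses from the random-walk forward model, and plain NNLS stands in for its sparse solver):

# Illustrative sketch: sparse non-negative decomposition of a chromatogram
# on a dictionary of candidate impulse responses.
import numpy as np
from scipy.optimize import nnls

t = np.linspace(0.0, 10.0, 500)                  # retention time axis
centers = np.linspace(0.5, 9.5, 90)              # candidate elution times
D = np.exp(-((t[:, None] - centers[None, :]) ** 2) / (2 * 0.05))  # dictionary

truth = 1.2 * D[:, 20] + 0.7 * D[:, 55]          # two analytes
y = truth + 0.01 * np.random.default_rng(1).standard_normal(t.size)

c, _ = nnls(D, y)                                # non-negative mixture vector
print(np.nonzero(c > 0.1)[0])                    # peaks recovered near columns 20, 55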
@InProceedings{7362864,
  author = {A. {Bacà} and G. Biagetti and M. Camilletti and P. Crippa and L. Falaschetti and S. Orcioni and L. Rossini and D. Tonelli and C. Turchetti},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {CARMA: A robust motion artifact reduction algorithm for heart rate monitoring from PPG signals},
  year = {2015},
  pages = {2646-2650},
  abstract = {Photoplethysmography (PPG) is a non-invasive measurement of blood flow that can be used instead of electrocardiography to estimate heart rate (HR). Most existing techniques used for HR monitoring in fitness with PPG focus on slow running alone, while those suitable for intensive physical exercise need an initialization stage in which wearers are required to stand still for several seconds. This paper presents a novel algorithm for HR estimation from PPG signals based on motion artifact removal (MAR) and adaptive tracking (AT) that overcomes the limitations of the previous techniques. Experimental evaluations performed on datasets recorded from several subjects during running show an average absolute error of HR estimation of 2.26 beats per minute, demonstrating the validity of the presented technique to monitor HR using wearable devices which use PPG signals.},
  keywords = {blood flow measurement;body sensor networks;gait analysis;medical signal processing;photoplethysmography;signal denoising;CARMA;motion artifact reduction algorithm;heart rate monitoring;PPG signals;photoplethysmography;blood flow measurement;electrocardiography;HR monitoring;intensive physical exercise;initialization stage;HR estimation;motion artifact removal;MAR;adaptive tracking;AT;running;wearable devices;Heart rate;Signal processing algorithms;Monitoring;Tracking;Frequency estimation;Accelerometers;Heart rate monitoring;photoplethysmography (PPG);motion artifact;SVD decomposition},
  doi = {10.1109/EUSIPCO.2015.7362864},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096797.pdf},
}
@InProceedings{7362865,
  author = {A. Rammal and H. Fenniri and A. Goupil and B. Chabbert and I. Bertrand and V. Vrabie},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Features' selection based on weighted distance minimization, application to biodegradation process evaluation},
  year = {2015},
  pages = {2651-2655},
  abstract = {Infrared spectroscopy can provide useful information about biomass composition and has been extensively used in several domains such as biology, food science, pharmaceutical, petrochemical, and agricultural applications. However, not all spectral information is valuable for biomarker construction or for applying regression or classification models, and by identifying interesting wavenumbers a better processing and interpretation can be achieved. The selection of optimal subsets has been addressed through several variable or feature selection methods, including genetic algorithms. Some of them are not adapted to large data; others require additional information, such as concentrations, or are difficult to tune. This paper proposes an alternative approach by considering a weighted Euclidean distance. We show on real mid-infrared spectra that this constrained nonlinear optimizer allows identifying the wavenumbers that best highlight the discrimination within the periods of the biodegradation process of the ligno-cellulosic biomass. These results are compared with previous ones obtained by a genetic algorithm.},
  keywords = {biotechnology;environmental degradation;feature selection;genetic algorithms;infrared spectroscopy;minimisation;regression analysis;feature selection;weighted distance minimization;biodegradation process evaluation;infrared spectroscopy;biomass composition information;classification model;regression model;genetic algorithm;weighted Euclidean distance;mid infrared spectra;constrained nonlinear optimizer;wavenumber identification;lignocellulosic biomass biodegradation process;Genetic algorithms;Biomass;Biodegradation;Indexes;Biological cells;Degradation;Euclidean distance;Weighted Euclidean distance;feature selection method;genetic algorithm;infrared spectra;biodegradation process;lignocellulosic biomass},
  doi = {10.1109/EUSIPCO.2015.7362865},
  issn = {2076-1465},
  month = {Aug},
}
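As a rough illustration of selecting wavenumbers through constrained weighted-distance optimization (the objective below is an invented stand-in, not the paper's criterion, and the data are synthetic):

# Illustrative sketch: learn non-negative per-wavenumber weights w, with
# sum(w) = 1, that maximise between-class separation of spectra.
import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(2)
X1 = rng.normal(0.0, 1.0, (20, 50)); X1[:, 10] += 3.0   # class 1, informative bin 10
X2 = rng.normal(0.0, 1.0, (20, 50)); X2[:, 10] -= 3.0   # class 2
m1, m2 = X1.mean(axis=0), X2.mean(axis=0)

def neg_sep(w):                         # negative weighted squared distance
    return -np.sum(w * (m1 - m2) ** 2)

w0 = np.full(50, 1.0 / 50)
res = minimize(neg_sep, w0, bounds=[(0, 1)] * 50,
               constraints={"type": "eq", "fun": lambda w: w.sum() - 1.0})
print(res.x.argmax())                   # weight concentrates on wavenumber 10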
@InProceedings{7362866,
  author = {H. A. B. Ahmed and A. Komaty and D. Dare and A. Boudraa},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {On signal denoising by EMD in the frequency domain},
  year = {2015},
  pages = {2656-2660},
  abstract = {In this work a new denoising scheme based on the empirical mode decomposition associated with a frequency analysis is introduced. Compared to classical approaches, where the extracted modes are thresholded in the time domain, in the proposed strategy the thresholding is done in the frequency domain. Each mode is divided into blocks of equal length and the frequency content of each block is analyzed. Relevant modes are identified using energy and frequency thresholds obtained by training. The denoised signal is obtained by the superposition of the thresholded modes. The effectiveness of the proposed scheme is illustrated on synthetic and real signals, and the results are compared to those of recently reported methods.},
  keywords = {signal denoising;signal denoising;EMD;frequency domain;denoising scheme;empirical mode decomposition;frequency analysis;time domain;frequency domain;frequency thresholds;denoised signal;thresholded modes;Doppler effect;Noise reduction;Frequency-domain analysis;Noise measurement;Bandwidth;Signal processing;Europe;Empirical Mode Decomposition;Intrinsic Mode Function;Denoising},
  doi = {10.1109/EUSIPCO.2015.7362866},
  issn = {2076-1465},
  month = {Aug},
}
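The block-wise frequency-domain thresholding can be sketched with the PyEMD package (the energy and frequency thresholds below are fixed by hand, whereas the paper obtains them by training):

# Illustrative sketch: decompose with EMD, analyse each mode in equal-length
# blocks, zero blocks that fail assumed energy/frequency tests, then sum.
import numpy as np
from PyEMD import EMD                    # pip install EMD-signal

fs, n_blk = 1000.0, 125
t = np.arange(0, 1.0, 1.0 / fs)
clean = np.sin(2 * np.pi * 50 * t)
noisy = clean + 0.5 * np.random.default_rng(3).standard_normal(t.size)

imfs = EMD().emd(noisy)                  # intrinsic mode functions (+ residue)
den = np.zeros_like(noisy)
for imf in imfs:
    kept = imf.copy()
    for s in range(0, imf.size, n_blk):              # equal-length blocks
        blk = imf[s:s + n_blk]
        f_peak = np.abs(np.fft.rfft(blk)).argmax() * fs / blk.size
        if blk.var() < 0.01 or f_peak > 200.0:       # assumed thresholds
            kept[s:s + n_blk] = 0.0
    den += kept
print(np.mean((noisy - clean) ** 2), np.mean((den - clean) ** 2))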
@InProceedings{7362867,
  author = {E. Eqlimi and B. Makkiabadi},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {An efficient K-SCA based underdetermined channel identification algorithm for online applications},
  year = {2015},
  pages = {2661-2665},
  abstract = {In a sparse component analysis problem, under some non-strict conditions on the sparsity of the sources, called k-SCA, we are able to estimate both the mixing system (A) and the sparse sources (S) uniquely. Based on k-SCA assumptions, if each column of the source matrix has at most Nx-1 nonzero components, where Nx is the number of sensors, the observed signal lies on a hyperplane spanned by active columns of the mixing matrix. Here, we propose an efficient algorithm to recover the mixing matrix under k-SCA assumptions. Compared to current approaches, the proposed method has advantages in two aspects: it is able to reject outliers within the subspace estimation process, and it detects the number of existing subspaces automatically. Furthermore, to accelerate the process, we integrate the {"}subspaces clustering{"} and {"}channel clustering{"} stages in an online scenario to estimate the mixing matrix columns as the mixture vectors are received sequentially.},
  keywords = {array signal processing;channel estimation;compressed sensing;estimation theory;sensor arrays;vectors;underdetermined channel identification algorithm;online applications;sparse component analysis problem;mixing system;sparse sources;K-SCA assumptions;source matrix;subspace estimation process;subspaces clustering;channel clustering;mixing matrix columns;mixture vectors;5G mobile communication;Europe;Signal processing;Conferences;Underdetermined Blind Identification;Sparse Component Analysis (SCA);k-SCA and Subspace Clustering},
  doi = {10.1109/EUSIPCO.2015.7362867},
  issn = {2076-1465},
  month = {Aug},
}
@InProceedings{7362868,
  author = {T. Schäck and C. Sledz and M. Muma and A. M. Zoubir},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {A new method for heart rate monitoring during physical exercise using photoplethysmographic signals},
  year = {2015},
  pages = {2666-2670},
  abstract = {Accurate and reliable estimation of the heart rate using wearable devices, especially during physical exercise, must deal with noisy signals that contain motion artifacts. We present an approach that is based on photoplethysmographic (PPG) signals which are measured with two wrist-type pulse oximeters. The heart rate is related to intensity changes of the reflected light. Our proposed method suppresses the motion artifacts by adaptively estimating the transfer functions of each of the three-axis acceleration signals that produce the artifacts in the PPG signals. We combine the output of the six adaptive filters into a single enhanced time-frequency domain signal based on which we track the heart rate with high accuracy. Our approach is real-time capable and computationally efficient, and real data results for a benchmark data set illustrate the superior performance compared to a recently proposed approach.},
  keywords = {adaptive filters;body sensor networks;medical signal processing;oximetry;patient monitoring;photoplethysmography;signal denoising;time-domain analysis;heart rate monitoring;physical exercise;photoplethysmographic signals;wearable devices;noisy signals;motion artifacts;wrist-type pulse oximeters;reflected light;transfer functions;three-axis acceleration signals;PPG signals;adaptive filters;time-frequency domain signal;Heart rate;Acceleration;Monitoring;Spectrogram;Biomedical monitoring;Photoplethysmography (PPG);Heart Rate Monitoring;Adaptive Filters;Accelerometer;Time-Frequency;Noise Reduction;Motion Artifacts},
  doi = {10.1109/EUSIPCO.2015.7362868},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102009.pdf},
}
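A simplified stand-in for the adaptive artifact-cancellation stage (the paper combines six adaptive filters across two sensors; here one NLMS canceller per acceleration axis runs on synthetic data, with filter order and step size assumed):

# Illustrative sketch: each accelerometer channel drives an adaptive FIR
# filter whose output is subtracted from the PPG signal (NLMS updates).
import numpy as np

def nlms_cancel(ppg, refs, order=8, mu=0.5, eps=1e-6):
    """Subtract adaptively filtered reference channels from ppg."""
    W = np.zeros((refs.shape[0], order))
    out = np.zeros_like(ppg)
    for n in range(order, ppg.size):
        xs = [r[n - order:n][::-1] for r in refs]
        e = ppg[n] - sum(w @ x for w, x in zip(W, xs))   # artifact removed
        for w, x in zip(W, xs):                          # NLMS update per channel
            w += mu * e * x / (eps + x @ x)
        out[n] = e
    return out

rng = np.random.default_rng(4)
n = np.arange(4000)
hr = np.sin(2 * np.pi * 1.5 * n / 125.0)       # 1.5 Hz pulse at fs = 125 Hz
acc = rng.standard_normal((3, n.size))         # 3-axis acceleration
artifact = sum(np.convolve(a, [0.6, -0.3, 0.1], mode="same") for a in acc)
cleaned = nlms_cancel(hr + artifact, acc)      # ≈ hr after convergence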
@InProceedings{7362869,
  author = {M. Meller},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Multistage generalized adaptive notch filter with improved accuracy},
  year = {2015},
  pages = {2671-2675},
  abstract = {Generalized adaptive notch filters (GANFs) are estimators of coefficients of quasi-periodically time-varying systems. Current state-of-the-art GANFs can deliver highly accurate estimates of the frequency of system variations, but underperform in terms of accuracy of the coefficient estimates. The paper proposes a novel multistage GANF with accuracy improved in this aspect. The processing pipeline consists of three stages. The preliminary (pilot) frequency estimates are obtained first, then treated with a specially designed linear filter and used to guide the coefficient-tracking GANF, which works out the estimates of the system coefficients. The proposed solution has considerably better performance than a single-stage GANF or a simple two-stage approach consisting of the pilot frequency estimator and the amplitude-tracking GANF only.},
  keywords = {adaptive filters;frequency estimation;notch filters;multistage generalized adaptive notch filter;time-varying systems;linear filter;frequency estimator;amplitude tracking;Frequency estimation;Transfer functions;Europe;Yttrium;Signal processing algorithms;Estimation;generalized adaptive notch filters;estimation algorithms;adaptive signal processing},
  doi = {10.1109/EUSIPCO.2015.7362869},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570092901.pdf},
}
@InProceedings{7362870,
  author = {Y. Sugiura and K. Usukura and N. Aikawa},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Instantaneous frequency estimation for a sinusoidal signal combining DESA-2 and notch filter},
  year = {2015},
  pages = {2676-2680},
  abstract = {In this paper, we propose a new frequency estimation method for a sinusoidally modulated signal. The proposed method consists of three processing stages: noise reduction, frequency estimation, and updating of the noise reduction filter. In the noise reduction, we utilize an adaptive IIR bandpass filter which can extract only a specific frequency component. In the frequency estimation, we employ DESA-2, which is a non-linear operator with low computational complexity. The extraction frequency of the bandpass filter is effectively updated by integrating the estimation results of DESA-2 and the gradient estimation method. Through estimation simulations on a noisy fluctuating sinusoidal signal, we show that the proposed method can estimate the instantaneous frequency accurately.},
  keywords = {adaptive filters;band-pass filters;computational complexity;frequency estimation;gradient methods;IIR filters;notch filters;instantaneous frequency estimation;notch filter;noise reduction;adaptive IIR bandpass filter;nonlinear operator;computational complexity;gradient estimation method;DESA-2;noisy fluctuating sinusoidal signal;Frequency estimation;Estimation;Signal to noise ratio;Noise reduction;Convergence;Europe;Frequency Estimation;Sinusoidal Signal;Bandpass Filter},
  doi = {10.1109/EUSIPCO.2015.7362870},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104841.pdf},
}
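DESA-2 itself is the classical discrete energy separation algorithm and can be stated compactly (the adaptive IIR bandpass stage and the gradient-based update of the extraction frequency are omitted here):

# DESA-2 sketch: instantaneous frequency from the Teager-Kaiser energy
# operator, Omega = 0.5 * arccos(1 - Psi[x(n+1) - x(n-1)] / (2 * Psi[x(n)])).
import numpy as np

def tkeo(x):
    """Teager-Kaiser energy: x[n]^2 - x[n-1] * x[n+1]."""
    return x[1:-1] ** 2 - x[:-2] * x[2:]

def desa2(x, fs):
    y = x[2:] - x[:-2]                   # central difference of the signal
    psi_x = tkeo(x)[1:-1]                # trimmed to align with tkeo(y)
    psi_y = tkeo(y)
    arg = np.clip(1.0 - psi_y / (2.0 * psi_x + 1e-12), -1.0, 1.0)
    return 0.5 * np.arccos(arg) * fs / (2.0 * np.pi)

fs = 1000.0
t = np.arange(0, 1.0, 1.0 / fs)
x = np.cos(2 * np.pi * (60 + 10 * t) * t)    # chirp: 60 Hz up to 80 Hz
print(desa2(x, fs)[::200])                   # ramp of instantaneous frequencies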
@InProceedings{7362871,
  author = {R. Dallinger and M. Rupp},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Stability of adaptive filters with linearly interfering update errors},
  year = {2015},
  pages = {2681-2685},
  abstract = {We provide a time-domain analysis of the stability of two adaptive algorithms of gradient type that interfere with each other via their update errors. Such coupling can occur naturally as well as by design. In particular, system identification algorithms that combine two adaptive schemes can often be described by such a structure. We derive precise statements on local contracting/expanding behaviour that in turn allow us to deduce bounds ensuring Lyapunov stability. The application of our findings to a specific example shows how these bounds are obtained and how they outperform our previous results that were based on the small gain theorem.},
  keywords = {adaptive filters;Lyapunov methods;stability;time-domain analysis;adaptive filter;linearly interfering update errors;time-domain analysis;system identification algorithms;adaptive schemes;local contracting-expanding behaviour;Lyapunov stability;Couplings;Stability criteria;Convergence;Signal processing algorithms;Europe;Signal processing;gradient type algorithms;system identification;Lyapunov stability;convergence;contraction mapping},
  doi = {10.1109/EUSIPCO.2015.7362871},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096989.pdf},
}
@InProceedings{7362872,
  author = {K. Nishikawa and F. Albu},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Implementation method of kernel adaptive filter as an add-on for a linear adaptive filter},
  year = {2015},
  pages = {2686-2690},
  abstract = {In this paper, we propose a novel structure for implementing a kernel adaptive filter as an add-on component for a linear adaptive filter. The kernel adaptive filter has been proposed as a solution to non-linear adaptive problems and its effectiveness has been demonstrated. However, it is not intended to replace linear adaptive filters; rather, we expect it to complement the performance of linear ones in non-linear environments. We therefore consider a novel structure which enables us to implement a kernel adaptive filter as an add-on for a linear adaptive filter. The proposed structure performs as a linear adaptive filter in linear-dominant environments; in non-linear environments, a kernel adaptive filter can be added without any modification to the operation of the linear one. The effectiveness of the proposed method is confirmed through computer simulations.},
  keywords = {adaptive filters;nonlinear filters;Kernel adaptive filter implementation method;linear adaptive filter;add-on component;nonlinear adaptive problem;Kernel;Signal processing algorithms;Convergence;Signal processing;Dictionaries;Mathematical model;Europe;Kernel adaptive filter;non-linear system identification;normalized LMS algorithm},
  doi = {10.1109/EUSIPCO.2015.7362872},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570097227.pdf},
}
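The add-on structure suggested by the abstract can be sketched as a linear NLMS filter left untouched, with a Gaussian-kernel LMS stage trained on its residual (step sizes, kernel width, and the growing-dictionary strategy are assumptions, not the paper's design):

# Illustrative sketch: linear NLMS runs unmodified; a KLMS stage learns only
# the residual, so in linear environments it stays near zero.
import numpy as np

def gauss(u, C, s=1.0):
    # Gaussian kernel between u and every stored centre in C
    return np.exp(-np.sum((C - u) ** 2, axis=1) / (2 * s * s))

rng = np.random.default_rng(5)
order, mu_lin, mu_ker = 4, 0.5, 0.2
x = rng.standard_normal(1500)
d = np.convolve(x, [1.0, 0.5, -0.2, 0.1], mode="full")[:x.size]
d += 0.3 * np.tanh(d)                            # mild nonlinearity

w = np.zeros(order)
centers, alphas = [], np.zeros(0)                # kernel dictionary
for n in range(order, x.size):
    u = x[n - order:n][::-1]
    e_lin = d[n] - w @ u                         # linear stage, unmodified
    w += mu_lin * e_lin * u / (1e-6 + u @ u)
    y_ker = alphas @ gauss(u, np.asarray(centers)) if centers else 0.0
    centers.append(u)
    alphas = np.append(alphas, mu_ker * (e_lin - y_ker))  # KLMS on residual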
@InProceedings{7362873,
  author = {M. M. U. Faiz and A. Zerguine},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {On the convergence, steady-state, and tracking analysis of the SRLMMN algorithm},
  year = {2015},
  pages = {2691-2695},
  abstract = {In this work, a novel sign regressor least mean mixed-norm (SRLMMN) algorithm is proposed as an alternative to the well-known least mean mixed-norm (LMMN) algorithm. The SRLMMN algorithm is a hybrid version of the sign regressor least mean square (SRLMS) and sign regressor least mean fourth (SRLMF) algorithms. Analytical expressions are derived to describe the convergence, steady-state, and tracking behavior of the proposed SRLMMN algorithm. To validate our theoretical findings, a system identification problem is considered. It is shown that there is a very close correspondence between theory and simulation. Finally, it is also shown that the SRLMMN algorithm is robust enough in tracking the variations in the channel.},
  keywords = {adaptive filters;convergence of numerical methods;least mean squares methods;regression analysis;tracking;SRLMMN algorithm convergence;steady-state analysis;tracking analysis;sign regressor least mean mixed norm algorithm;sign regressor least mean square algorithm;hybrid SRLMS-SRLMF algorithm;sign regressor least mean fourth algorithm;Signal processing algorithms;Steady-state;Algorithm design and analysis;Convergence;Mathematical model;Europe;Signal processing;LMS;LMF;LMMN;SRLMS;SRLMF;SRLMMN;sign regressor;mixed-norm;convergence;steady-state;tracking},
  doi = {10.1109/EUSIPCO.2015.7362873},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104791.pdf},
}
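The update implied by the abstract combines the LMS and LMF error terms with a sign-regressor input; a sketch of this rule (the mixing parameter delta and step size are assumptions, and the exact normalisation may differ from the paper's):

# Illustrative SRLMMN-style update:
#   w <- w + mu * e * (delta + (1 - delta) * e^2) * sign(u)
import numpy as np

rng = np.random.default_rng(6)
h = np.array([0.8, -0.4, 0.2, 0.1])                # unknown channel
x = rng.standard_normal(5000)
d = np.convolve(x, h, mode="full")[:x.size] + 0.01 * rng.standard_normal(x.size)

mu, delta = 0.01, 0.7                              # assumed parameters
w = np.zeros(h.size)
for n in range(h.size, x.size):
    u = x[n - h.size:n][::-1]
    e = d[n] - w @ u
    w += mu * e * (delta + (1.0 - delta) * e ** 2) * np.sign(u)
print(w)                                           # ≈ h after convergence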
@InProceedings{7362874,
  author = {S. Khawatmi and A. M. Zoubir and A. H. Sayed},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Decentralized clustering over adaptive networks},
  year = {2015},
  pages = {2696-2700},
  abstract = {Cooperation among agents across the network leads to better estimation accuracy. However, in many network applications the agents infer and track different models of interest in an environment where agents do not know beforehand which models are being observed by their neighbors. In this work, we propose an adaptive and distributed clustering technique that allows agents to learn and form clusters from streaming data in a robust manner. Once clusters are formed, cooperation among agents with similar objectives then enhances the performance of the inference task. The performance of the proposed clustering algorithm is discussed by commenting on the behavior of the probabilities of erroneous decisions. We validate the performance of the algorithm by numerical simulations, which show how the clustering process enhances the mean-square-error performance of the agents across the network.},
  keywords = {error statistics;mean square error methods;network theory (graphs);pattern clustering;signal processing;decentralized clustering;cooperation adaptive network;distributed clustering technique;adaptive clustering technique;erroneous decision probabilities;numerical simulation;mean-square-error performance;Data models;Adaptation models;Signal processing;Europe;Clustering algorithms;Adaptive systems;Estimation;Decentralized clustering;multitask networks;self-organization;diffusion adaptation;adaptive networks},
  doi = {10.1109/EUSIPCO.2015.7362874},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101987.pdf},
}
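A toy version of adaptive clustering over a network (the fixed closeness threshold and full-averaging combination below are crude stand-ins for the paper's probabilistic decision rule; network size and models are assumptions):

# Illustrative sketch: agents run LMS on local streams from one of two models
# and average only with agents whose current estimates are close, so clusters
# form around the two underlying models.
import numpy as np

rng = np.random.default_rng(9)
K, M = 10, 3                                     # agents, model order
models = np.array([[1.0, -0.5, 0.3], [-0.8, 0.4, 0.9]])
member = np.array([0] * 5 + [1] * 5)             # which model each agent observes
W = rng.standard_normal((K, M))                  # local estimates

for _ in range(2000):
    for k in range(K):                           # adapt: one LMS step per agent
        u = rng.standard_normal(M)
        d = u @ models[member[k]] + 0.05 * rng.standard_normal()
        W[k] += 0.05 * (d - u @ W[k]) * u
    dist = np.linalg.norm(W[:, None] - W[None, :], axis=2)
    mask = dist < 0.5                            # cooperate only with close agents
    W = np.array([W[mask[k]].mean(axis=0) for k in range(K)])

print(np.round(W, 2))                            # two groups, near the two models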
@InProceedings{7362875,
  author = {S. Werner and J. Lunden},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title = {Event-triggered real-time metering in smart grids},
  year = {2015},
  pages = {2701-2705},
  abstract = {This paper introduces an event-triggered approach to real-time metering that significantly reduces the amount of reported measurements. The proposed method reports measurements only when the load profile changes due to a triggering event, and ensures that the maximum difference between reported and true measurement values is always bounded in magnitude. Moreover, it employs a change-detection-based filter to resolve the estimation-tracking conflict associated with non-stationary signals containing abrupt changes. Simulation results show that the proposed reporting method provides a convenient tradeoff between the average reporting error and the reporting frequency.},
  keywords = {power system measurement;smart meters;smart power grids;event-triggered real-time metering;smart grids;load profile;triggering event;change detection based filter;estimation-tracking conflict;nonstationary signals;reporting frequency;reporting error;Home appliances;Signal processing;Real-time systems;Atmospheric measurements;Particle measurements;Market research;Europe;Change detection;load profile;event-triggered communication;smart grid;smart metering},
  doi = {10.1109/EUSIPCO.2015.7362875},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570092727.pdf},
}
\n
\n\n\n
\n This paper introduces an event-triggered approach to real-time metering that significantly reduces the number of reported measurements. The proposed method reports measurements only when the load profile changes due to a triggering event, and ensures that the maximum difference between reported and true measurement values is always bounded in magnitude. Moreover, it employs a change detection based filter to resolve the estimation-tracking conflict associated with non-stationary signals containing abrupt changes. Simulation results show that the proposed reporting method provides a convenient tradeoff between the average reporting error and the reporting frequency.\n
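The bounded-error reporting idea in this abstract can be illustrated with a minimal send-on-delta sketch in Python: a sample is transmitted only when it deviates from the last reported value by more than a chosen bound, so the reporting error stays bounded by construction. This only illustrates the triggering rule; the paper's change-detection filter is not reproduced, and the `delta` value and toy load profile are invented for illustration.

```python
import numpy as np

def event_triggered_report(samples, delta):
    """Send-on-delta reporting: transmit a sample only when it deviates
    from the last reported value by more than `delta`, so the difference
    between reported and true values stays bounded by `delta`."""
    reports = []                 # (index, value) pairs actually transmitted
    last = None
    for i, x in enumerate(samples):
        if last is None or abs(x - last) > delta:
            reports.append((i, float(x)))
            last = x
    return reports

# Toy load profile: flat consumption with one abrupt change plus noise.
rng = np.random.default_rng(0)
load = np.r_[np.full(50, 1.0), np.full(50, 3.0)] + 0.01 * rng.standard_normal(100)
print(len(event_triggered_report(load, delta=0.1)), "of", len(load), "samples reported")
```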
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive linear prediction filters based on maximum a posteriori estimation.\n \n \n \n \n\n\n \n Andersen, K. T.; van Waterschoot , T.; and Moonen, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2706-2710, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362876,\n  author = {K. T. Andersen and T. {van Waterschoot} and M. Moonen},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive linear prediction filters based on maximum a posteriori estimation},\n  year = {2015},\n  pages = {2706-2710},\n  abstract = {In this paper, we develop adaptive linear prediction filters in the framework of maximum a posteriori (MAP) estimation. It is shown how priors can be used to regularize the solution and references to known algorithms are made. The adaptive filters are suitable for implementation in real-time and by simulation with an adaptive line enhancer (ALE), it is shown how the parameters of the estimation problem affect the convergence of the adaptive filter. The adaptive line enhancer (ALE) is a widely used adaptive filter to separate periodic signals from additive background noise where it has traditionally been implemented using the least-mean-square (LMS) or recursive-least-square (RLS) filter. The derived algorithms can generally be used in any adaptive filter application with a desired target signal.},\n  keywords = {adaptive filters;convergence of numerical methods;least mean squares methods;linear phase filters;maximum likelihood estimation;prediction theory;recursive estimation;recursive filters;source separation;adaptive linear prediction filter convergence;maximum a posteriori estimation;adaptive line enhancer;MAP estimation problem;periodic signal separation;additive background noise;least-mean-square filter;recursive-least-square filter;RLS filter;LMS filter;Gaussian distribution;Covariance matrices;Optimization;Estimation;Prediction algorithms;Signal processing algorithms;Europe;Maximum a posteriori;adaptive filters;linear prediction;regularization;adaptive line enhancer},\n  doi = {10.1109/EUSIPCO.2015.7362876},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104721.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we develop adaptive linear prediction filters in the framework of maximum a posteriori (MAP) estimation. It is shown how priors can be used to regularize the solution, and references to known algorithms are made. The adaptive filters are suitable for real-time implementation, and simulations with an adaptive line enhancer (ALE) show how the parameters of the estimation problem affect the convergence of the adaptive filter. The ALE is a widely used adaptive filter for separating periodic signals from additive background noise, and it has traditionally been implemented using the least-mean-square (LMS) or recursive-least-square (RLS) filter. The derived algorithms can generally be used in any adaptive filter application with a desired target signal.\n
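For readers unfamiliar with the ALE mentioned above, here is a minimal NLMS-based adaptive line enhancer in Python. It is the classic baseline the abstract refers to, not the paper's MAP-regularized algorithm; the filter order, delay and step size below are illustrative choices.

```python
import numpy as np

def adaptive_line_enhancer(x, order=32, delay=1, mu=0.5):
    """NLMS-based adaptive line enhancer: predict x[n] from delayed past
    samples. The prediction retains the periodic (predictable) component;
    the prediction error retains the broadband noise."""
    w = np.zeros(order)
    y = np.zeros(len(x))                     # enhanced periodic component
    for n in range(order + delay, len(x)):
        u = x[n - delay - order + 1 : n - delay + 1][::-1]  # delayed regressor
        y[n] = w @ u
        e = x[n] - y[n]                      # prediction error
        w += mu * e * u / (u @ u + 1e-8)     # normalized LMS update
    return y

t = np.arange(2000) / 1000.0
x = np.sin(2 * np.pi * 50 * t) + 0.5 * np.random.default_rng(1).standard_normal(len(t))
enhanced = adaptive_line_enhancer(x)         # mostly the 50 Hz sinusoid
```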
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Approximate Bayesian filtering using stabilized forgetting.\n \n \n \n \n\n\n \n Azizi, S.; and Quinn, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2711-2715, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ApproximatePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362877,\n  author = {S. Azizi and A. Quinn},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Approximate Bayesian filtering using stabilized forgetting},\n  year = {2015},\n  pages = {2711-2715},\n  abstract = {In this paper, we relax the modeling assumptions under which Bayesian filtering is tractable. In order to restore tractability, we adopt the stabilizing forgetting (SF) operator, which replaces the explicit time evolution model of Bayesian filtering. The principal contribution of the paper is to define a rich class of conditional observation models for which recursive, invariant, finite-dimensional statistics result from SF-based Bayesian filtering. We specialize the result to the mixture Kalman filter, verifying that the exact solution is available in this case. This allows us to consider the quality of the SF-based approximate solution. Finally, we assess SF-based tracking of the time-varying rate parameter (state) in data modelled as a mixture of exponential components.},\n  keywords = {Bayes methods;Kalman filters;stability;time-varying filters;approximate Bayesian filtering;stabilizing forgetting operator;time evolution model;finite-dimensional statistics result;mixture Kalman filter;time-varying rate parameter;Approximation methods;Computational modeling;Bayes methods;Kalman filters;Europe;Indexes;Approximate Bayesian filtering;stabilized forgetting;exponential family;mixture Kalman filter;exponential mixture},\n  doi = {10.1109/EUSIPCO.2015.7362877},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105243.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we relax the modeling assumptions under which Bayesian filtering is tractable. In order to restore tractability, we adopt the stabilizing forgetting (SF) operator, which replaces the explicit time evolution model of Bayesian filtering. The principal contribution of the paper is to define a rich class of conditional observation models for which recursive, invariant, finite-dimensional statistics result from SF-based Bayesian filtering. We specialize the result to the mixture Kalman filter, verifying that the exact solution is available in this case. This allows us to consider the quality of the SF-based approximate solution. Finally, we assess SF-based tracking of the time-varying rate parameter (state) in data modelled as a mixture of exponential components.\n
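As a toy illustration of the forgetting operator described above, consider tracking a time-varying Gaussian mean: stabilized (exponential) forgetting flattens the previous posterior before each conjugate update, replacing an explicit state-evolution model. This scalar sketch assumes a flat alternative distribution and an illustrative forgetting factor `lam`; it shows only the operator itself, not the paper's general class of observation models or the mixture Kalman filter.

```python
import numpy as np

def sf_track_mean(y, obs_prec=1.0, lam=0.95):
    """Track a time-varying Gaussian mean with stabilized forgetting:
    flatten the previous posterior (scale its precision by lam < 1)
    instead of using an explicit time evolution model, then perform
    the usual conjugate Gaussian update."""
    m, prec = 0.0, 1e-3            # vague initial posterior N(m, 1/prec)
    est = np.empty(len(y))
    for t, yt in enumerate(y):
        prec *= lam                # forgetting step replaces time evolution
        m = (prec * m + obs_prec * yt) / (prec + obs_prec)
        prec += obs_prec           # conjugate update of the precision
        est[t] = m
    return est

rng = np.random.default_rng(0)
y = np.r_[rng.normal(0.0, 1.0, 100), rng.normal(5.0, 1.0, 100)]
print(sf_track_mean(y)[[99, 119, 199]].round(2))   # adapts after the jump
```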
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Normalized recursive least adaptive threshold nonlinear errors algorithm.\n \n \n \n \n\n\n \n Koike, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2716-2720, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"NormalizedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362878,\n  author = {S. Koike},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Normalized recursive least adaptive threshold nonlinear errors algorithm},\n  year = {2015},\n  pages = {2716-2720},\n  abstract = {This paper proposes a new adaptation algorithm named Normalized Recursive Least Adaptive Threshold Nonlinear Errors (NRLATNE) algorithm for complex-domain adaptive filters which makes the filters fast convergent for correlated filter inputs and robust against two types of impulse noise: one is found in additive observation noise and another at filter input. Analysis of the proposed NRLATNE algorithm is fully developed to theoretically calculate filter convergence behavior. Through experiments with some examples, we demonstrate the effectiveness of the proposed algorithm in improving the filter performance. Good agreement is observed between simulated and theoretically calculated filter convergence that shows the validity of the analysis.},\n  keywords = {adaptive filters;impulse noise;additive observation noise;impulse noise;correlated filter input;complex-domain adaptive filter;NRLATNE algorithm;normalized recursive least adaptive threshold nonlinear error algorithm;Adaptive filters;Signal processing algorithms;Filtering algorithms;Algorithm design and analysis;Convergence;Mathematical model;Robustness;Adaptive filter;recursive least squares;impulse noise;adaptive threshold;nonlinear error;normalization},\n  doi = {10.1109/EUSIPCO.2015.7362878},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570091835.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a new adaptation algorithm named Normalized Recursive Least Adaptive Threshold Nonlinear Errors (NRLATNE) for complex-domain adaptive filters, which makes the filters converge fast for correlated filter inputs and remain robust against two types of impulse noise: one found in the additive observation noise and another at the filter input. Analysis of the proposed NRLATNE algorithm is fully developed to theoretically calculate the filter convergence behavior. Through experiments with some examples, we demonstrate the effectiveness of the proposed algorithm in improving the filter performance. Good agreement is observed between simulated and theoretically calculated filter convergence, which shows the validity of the analysis.\n
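The adaptive-threshold error nonlinearity can be illustrated with a simpler, real-valued NLMS variant: the error driving the update is clipped at a multiple of a recursively estimated error scale, limiting the damage any impulsive sample can do. This is a hedged sketch of the general idea only; NRLATNE itself is a recursive-least-squares-type, complex-domain algorithm, and the constants `mu`, `lam` and `k` here are illustrative.

```python
import numpy as np

def robust_nlms(x, d, order=16, mu=0.5, lam=0.99, k=2.0, eps=1e-8):
    """NLMS with an adaptive error threshold: the error used in the
    weight update is clipped at k times a recursive estimate of its
    scale, which limits the influence of impulsive noise samples."""
    w = np.zeros(order)
    scale = 1.0                                   # recursive scale of |e|
    for n in range(order, len(d)):
        u = x[n - order + 1 : n + 1][::-1]
        e = d[n] - w @ u
        scale = lam * scale + (1 - lam) * abs(e)  # adaptive threshold
        e_clip = np.clip(e, -k * scale, k * scale)
        w += mu * e_clip * u / (u @ u + eps)      # normalized update
    return w

rng = np.random.default_rng(2)
x = rng.standard_normal(5000)
h = np.array([1.0, 0.5, -0.3, 0.1])
d = np.convolve(x, h)[: len(x)] + 0.01 * rng.standard_normal(len(x))
d[::500] += 50 * rng.standard_normal(10)          # impulsive outliers
print(robust_nlms(x, d, order=4).round(2))        # close to h despite impulses
```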
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Averaging covariance matrices for EEG signal classification based on the CSP: An empirical study.\n \n \n \n \n\n\n \n Yger, F.; Lotte, F.; and Sugiyama, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2721-2725, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AveragingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362879,\n  author = {F. Yger and F. Lotte and M. Sugiyama},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Averaging covariance matrices for EEG signal classification based on the CSP: An empirical study},\n  year = {2015},\n  pages = {2721-2725},\n  abstract = {This paper presents an empirical comparison of covariance matrix averaging methods for EEG signal classification. Indeed, averaging EEG signal covariance matrices is a key step in designing brain-computer interfaces (BCI) based on the popular common spatial pattern (CSP) algorithm. BCI paradigms are typically structured into trials and we argue that this structure should be taken into account. Moreover, the non-Euclidean structure of covariance matrices should be taken into consideration as well. We review several approaches from the literature for averaging covariance matrices in CSP and compare them empirically on three publicly available datasets. Our results show that using Riemannian geometry for averaging covariance matrices improves performance for small-dimensional problems, but they also reveal the limits of this approach when the dimensionality increases.},\n  keywords = {brain-computer interfaces;covariance matrices;electroencephalography;medical signal processing;signal classification;EEG signal classification;CSP;empirical comparison;covariance matrix averaging methods;averaging EEG signal covariance matrices;brain-computer interfaces;BCI;common spatial pattern algorithm;non-Euclidean structure;Riemannian geometry;small dimensional problems;Covariance matrices;Electroencephalography;Geometry;Symmetric matrices;Feature extraction;Europe;Signal processing;common spatial pattern;SPD matrices;robust averaging;Riemannian geometry;EEG signal classification;brain-computer interface (BCI)},\n  doi = {10.1109/EUSIPCO.2015.7362879},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102435.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents an empirical comparison of covariance matrix averaging methods for EEG signal classification. Indeed, averaging EEG signal covariance matrices is a key step in designing brain-computer interfaces (BCI) based on the popular common spatial pattern (CSP) algorithm. BCI paradigms are typically structured into trials and we argue that this structure should be taken into account. Moreover, the non-Euclidean structure of covariance matrices should be taken into consideration as well. We review several approaches from the literature for averaging covariance matrices in CSP and compare them empirically on three publicly available datasets. Our results show that using Riemannian geometry for averaging covariance matrices improves performance for small-dimensional problems, but they also reveal the limits of this approach when the dimensionality increases.\n
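One concrete way to average covariance matrices while respecting their non-Euclidean geometry, as discussed above, is the log-Euclidean mean: average the matrix logarithms, then map back with the matrix exponential. The Python sketch below shows this one choice; it is one of several means compared in such studies (the affine-invariant Riemannian mean is another) and is not claimed to be the paper's recommended estimator. The random SPD "trial covariances" are synthetic.

```python
import numpy as np
from scipy.linalg import expm, logm

def log_euclidean_mean(covs):
    """Log-Euclidean mean of SPD matrices: average the matrix logarithms,
    then map the result back with the matrix exponential."""
    return expm(sum(logm(C) for C in covs) / len(covs))

rng = np.random.default_rng(0)
covs = []
for _ in range(5):
    A = rng.standard_normal((4, 4))
    covs.append(A @ A.T + 4.0 * np.eye(4))   # random SPD "trial covariances"
print(np.round(log_euclidean_mean(covs), 2))
```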
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Operationalization of conceptual imagery for BCIs.\n \n \n \n \n\n\n \n Kosmyna, N.; Tarpin-Bernard, F.; and Rivet, B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2726-2730, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OperationalizationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362880,\n  author = {N. Kosmyna and F. Tarpin-Bernard and B. Rivet},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Operationalization of conceptual imagery for BCIs},\n  year = {2015},\n  pages = {2726-2730},\n  abstract = {We present a Brain Computer Interface (BCI) system in an asynchronous setting that allows classifying objects in their semantic categories (e.g. a hammer is a tool). For training, we use visual cues that are representative of the concepts (e.g. a hammer image for the concept of hammer). We evaluate the system in an offline synchronous setting and in an online asynchronous setting. We consider two scenarios: the first one, where concepts are in close semantic families (10 subjects) and the second where concepts are from distinctly different categories (10 subjects). We find that both have classification accuracies of 70% and above, although more distant conceptual categories lead to 5% more in classification accuracy.},\n  keywords = {brain-computer interfaces;image classification;online asynchronous setting;offline synchronous setting;brain computer interface system;BCI;conceptual imagery;Semantics;Visualization;Training;Electrodes;Electroencephalography;Signal processing;Calibration;Brain Computer Interfaces;Conceptual Imagery},\n  doi = {10.1109/EUSIPCO.2015.7362880},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103701.pdf},\n}\n\n
\n
\n\n\n
\n We present a Brain Computer Interface (BCI) system in an asynchronous setting that allows classifying objects into their semantic categories (e.g. a hammer is a tool). For training, we use visual cues that are representative of the concepts (e.g. a hammer image for the concept of hammer). We evaluate the system in an offline synchronous setting and in an online asynchronous setting. We consider two scenarios: the first, where concepts are in close semantic families (10 subjects), and the second, where concepts are from distinctly different categories (10 subjects). We find that both achieve classification accuracies of 70% and above, with the more distant conceptual categories yielding about 5% higher classification accuracy.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An ocular artefacts correction method for discriminative EEG analysis based on logistic regression.\n \n \n \n \n\n\n \n Li, X.; Guan, C.; Ang, K. K.; Wang, C.; Chin, Z. Y.; Zhang, H.; Lim, C. G.; and Lee, T. S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2731-2735, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362881,\n  author = {X. Li and C. Guan and K. K. Ang and C. Wang and Z. Y. Chin and H. Zhang and C. G. Lim and T. S. Lee},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An ocular artefacts correction method for discriminative EEG analysis based on logistic regression},\n  year = {2015},\n  pages = {2731-2735},\n  abstract = {Electrooculogram (EOG) contamination is a common critical issue in general EEG studies as well as in building high-performance brain computer interfaces (BCI). Existing regression or independent component analysis based artefact correction methods are usually not applicable when EOG is not available or when there are very few EEG channels. In this paper, we propose a novel ocular artefacts correction method for processing EEG without using dedicated EOG channels. The method constructs estimates of the ocular components through artefact detection in the EEG. Then, an optimization based on logistic regression is introduced to remove the components from EEG. Specifically, the optimization ensures that the discriminative information is maintained in the corrected EEG signals. The proposed method is evaluated offline with a large EEG data set containing 68 subjects. Experimental results show that, through the artefact removal performed by the proposed method, EEG classification accuracy can be improved with statistical significance.},\n  keywords = {brain-computer interfaces;electroencephalography;electro-oculography;independent component analysis;medical signal detection;optimisation;regression analysis;signal classification;signal denoising;ocular artefact correction method;discriminative EEG analysis;logistic regression;electrooculogram contamination;high-performance brain computer interfaces;BCI;independent component analysis based artefact correction methods;EEG channels;dedicated EOG channels;ocular components;artefact detection;optimization;EEG signals;artefact removal correction;EEG classification accuracy;Electroencephalography;Electrooculography;Logistics;Europe;Signal processing;Training;Channel estimation;EEG;ocular artefacts correction;brain computer interface},\n  doi = {10.1109/EUSIPCO.2015.7362881},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103999.pdf},\n}\n\n
\n
\n\n\n
\n Electrooculogram (EOG) contamination is a common critical issue in general EEG studies as well as in building high-performance brain computer interfaces (BCI). Existing regression or independent component analysis based artefact correction methods are usually not applicable when EOG is not available or when there are very few EEG channels. In this paper, we propose a novel ocular artefacts correction method for processing EEG without using dedicated EOG channels. The method constructs estimates of the ocular components through artefact detection in the EEG. Then, an optimization based on logistic regression is introduced to remove the components from EEG. Specifically, the optimization ensures that the discriminative information is maintained in the corrected EEG signals. The proposed method is evaluated offline with a large EEG data set containing 68 subjects. Experimental results show that, through the artefact removal performed by the proposed method, EEG classification accuracy can be improved with statistical significance.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Parallel convolutional-linear neural network for motor imagery classification.\n \n \n \n \n\n\n \n Sakhavi, S.; Guan, C.; and Yan, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2736-2740, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ParallelPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362882,\n  author = {S. Sakhavi and C. Guan and S. Yan},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Parallel convolutional-linear neural network for motor imagery classification},\n  year = {2015},\n  pages = {2736-2740},\n  abstract = {Deep learning has recently been successfully applied to image classification, object recognition and speech recognition. However, the benefits of deep learning and accompanying architectures have been largely unknown for BCI applications. In motor imagery-based BCI, an energy-based feature, typically after spatial filtering, is commonly used for classification. Although this feature corresponds to the estimate of event-related synchronization/desynchronization in the brain, it neglects energy dynamics which may contain valuable discriminative information. Because traditional classification methods, such as the SVM, cannot handle this dynamical property, we propose an architecture that takes a dynamic energy representation of EEG data as input and utilizes convolutional neural networks for classification. By combining this network with a static energy network, we saw a significant increase in performance. We evaluated the proposed method and compared it with the SVM on a multi-class motor imagery dataset (BCI competition dataset IV-2a). Our method outperforms the SVM with static energy features significantly (p < 0.01).},\n  keywords = {brain-computer interfaces;electroencephalography;learning (artificial intelligence);medical signal processing;neural nets;signal classification;synchronisation;parallel convolutional-linear neural network;motor imagery classification;motor imagery-based BCI;energy-based feature;event-related synchronization-desynchronization;energy dynamics;support vector machines;dynamic energy representation;EEG data;static energy network;multiclass motor imagery dataset;BCI competition dataset IV-2a;Computer architecture;Electroencephalography;Convolution;Support vector machines;Feature extraction;Europe;Convolutional Neural Network;Deep Learning;Motor Imagery;Brain-Computer Interface;EEG},\n  doi = {10.1109/EUSIPCO.2015.7362882},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104275.pdf},\n}\n\n
\n
\n\n\n
\n Deep learning has recently been successfully applied to image classification, object recognition and speech recognition. However, the benefits of deep learning and accompanying architectures have been largely unknown for BCI applications. In motor imagery-based BCI, an energy-based feature, typically after spatial filtering, is commonly used for classification. Although this feature corresponds to the estimate of event-related synchronization/desynchronization in the brain, it neglects energy dynamics which may contain valuable discriminative information. Because traditional classification methods, such as the SVM, cannot handle this dynamical property, we propose an architecture that takes a dynamic energy representation of EEG data as input and utilizes convolutional neural networks for classification. By combining this network with a static energy network, we saw a significant increase in performance. We evaluated the proposed method and compared it with the SVM on a multi-class motor imagery dataset (BCI competition dataset IV-2a). Our method outperforms the SVM with static energy features significantly (p < 0.01).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tackling noise, artifacts and nonstationarity in BCI with robust divergences.\n \n \n \n \n\n\n \n Samek, W.; and Müller, K.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2741-2745, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"TacklingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362883,\n  author = {W. Samek and K. Müller},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Tackling noise, artifacts and nonstationarity in BCI with robust divergences},\n  year = {2015},\n  pages = {2741-2745},\n  abstract = {Although the field of Brain-Computer Interfacing (BCI) has made incredible advances in the last decade, current BCIs are still scarcely used outside laboratories. One reason is the lack of robustness to noise, artifacts and nonstationarity which are intrinsic parts of the recorded brain signal. Furthermore, out-of-lab environments imply the presence of external variables that are largely beyond the control of the user, but can severely corrupt signal quality. This paper presents a new generation of robust EEG signal processing approaches based on the information geometric notion of divergence. We show that these divergence-based methods can be used for robust spatial filtering and thus increase the systems' reliability when confronted with, e.g., environmental noise, users' motions or electrode artifacts. Furthermore, we extend the divergence-based framework to heavy-tailed distributions and investigate the advantages of a joint optimization for robustness and stationarity.},\n  keywords = {brain-computer interfaces;electroencephalography;filtering theory;medical signal processing;signal denoising;BCI noise;BCI artifacts;BCI nonstationarity;robust divergence;brain-computer interface;brain signal;robust EEG signal processing;divergence based methods;robust spatial filtering;Robustness;Electroencephalography;Electrodes;Signal processing;Linear programming;Error analysis;Europe;Brain-Computer Interfacing;Common Spatial Patterns;Nonstationarity;Robustness},\n  doi = {10.1109/EUSIPCO.2015.7362883},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105081.pdf},\n}\n\n
\n
\n\n\n
\n Although the field of Brain-Computer Interfacing (BCI) has made incredible advances in the last decade, current BCIs are still scarcely used outside laboratories. One reason is the lack of robustness to noise, artifacts and nonstationarity which are intrinsic parts of the recorded brain signal. Furthermore, out-of-lab environments imply the presence of external variables that are largely beyond the control of the user, but can severely corrupt signal quality. This paper presents a new generation of robust EEG signal processing approaches based on the information geometric notion of divergence. We show that these divergence-based methods can be used for robust spatial filtering and thus increase the systems' reliability when confronted with, e.g., environmental noise, users' motions or electrode artifacts. Furthermore, we extend the divergence-based framework to heavy-tailed distributions and investigate the advantages of a joint optimization for robustness and stationarity.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Coding and modulation techniques for high spectral efficiency transmission in 5G and satcom.\n \n \n \n \n\n\n \n Kim, H.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2746-2750, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CodingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362884,\n  author = {H. Kim},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Coding and modulation techniques for high spectral efficiency transmission in 5G and satcom},\n  year = {2015},\n  pages = {2746-2750},\n  abstract = {Achieving high spectral efficiency is the key requirement of 5G and Satcom systems because it provides a much lower cost per bit. In order to achieve high spectral efficiency, channel coding and modulation are the key parts of the physical layer. High spectral efficiency can be achieved by adopting a high-order modulation and a low code rate at high SNR. However, since the transmit power is limited in practical wireless communication systems, high-order modulation with a low code rate can only be used restrictively. Thus, the integrated version of 5G and Satcom needs a new type of channel coding scheme. In this paper, we look into 5G requirements and Satcom's role in 5G, review candidate error correction coding schemes for 5G and future Satcom in terms of spectral efficiency, and evaluate the performance of the candidate error correction codes.},\n  keywords = {5G mobile communication;channel coding;error correction codes;modulation;performance evaluation;satellite communication;channel coding technique;modulation technique;high spectral efficiency transmission;5G system;Satcom system;spectral efficiency;transmit power;wireless communication systems;performance evaluation;candidate error correction codes;5G mobile communication;Turbo codes;Modulation;Satellites;Satellite broadcasting;Parity check codes;Spectral efficiency;Error correction codes;Turbo codes;LDPC codes;5G;Satcom;etc},\n  doi = {10.1109/EUSIPCO.2015.7362884},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096695.pdf},\n}\n\n
\n
\n\n\n
\n Achieving high spectral efficiency is the key requirement of 5G and Satcom systems because it provides a much lower cost per bit. In order to achieve high spectral efficiency, channel coding and modulation are the key parts of the physical layer. High spectral efficiency can be achieved by adopting a high-order modulation and a low code rate at high SNR. However, since the transmit power is limited in practical wireless communication systems, high-order modulation with a low code rate can only be used restrictively. Thus, the integrated version of 5G and Satcom needs a new type of channel coding scheme. In this paper, we look into 5G requirements and Satcom's role in 5G, review candidate error correction coding schemes for 5G and future Satcom in terms of spectral efficiency, and evaluate the performance of the candidate error correction codes.\n
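The trade-off sketched above, between modulation order, code rate and SNR, can be made concrete with a few lines of Python that compare the spectral efficiency of some modulation-and-coding pairs against the Shannon bound. The operating points below are made-up examples for illustration, not figures from the paper.

```python
import numpy as np

def spectral_efficiency(mod_order, code_rate):
    """Spectral efficiency in bit/s/Hz of a coded modulation under ideal
    Nyquist signalling: bits per symbol times the code rate."""
    return np.log2(mod_order) * code_rate

def shannon_bound(snr_db):
    """Shannon capacity bound in bit/s/Hz at the given SNR."""
    return np.log2(1.0 + 10.0 ** (snr_db / 10.0))

# Illustrative (made-up) operating points, not figures from the paper.
for M, r in [(4, 1 / 2), (16, 3 / 4), (64, 5 / 6)]:
    print(f"{M}-ary, rate {r:.2f}: {spectral_efficiency(M, r):.2f} bit/s/Hz")
print(f"Shannon bound at 10 dB SNR: {shannon_bound(10):.2f} bit/s/Hz")
```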
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust multibeam satellite systems for underlay licensed shared access.\n \n \n \n \n\n\n \n Vázquez, M. Á.; Pérez-Neira, A.; and Lagunas, M. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2751-2755, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362885,\n  author = {M. Á. Vázquez and A. Pérez-Neira and M. A. Lagunas},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Robust multibeam satellite systems for underlay licensed shared access},\n  year = {2015},\n  pages = {2751-2755},\n  abstract = {This paper deals with the problem of multibeam satellite pre-coding design under spectrum sharing constraints. These regulation restrictions allow the coexistence with other wireless services such as terrestrial mm-wave wireless local loop systems. This work focuses on the case where the satellite operator can use a certain frequency band whenever the signal power strength is limited over the coverage area. The precoding design is optimized considering this restriction by means of formulating a robust optimization. Numerical results show the trade-off between the achievable rates of the satellite segment and the regulation violation outage.},\n  keywords = {optimisation;precoding;satellite communication;robust multibeam satellite systems;underlay licensed shared access;multibeam satellite precoding design;spectrum sharing constraints;wireless services;terrestrial mm-wave wireless local loop systems;satellite operator;signal power strength;precoding design;robust optimization;satellite segment;regulation violation outage;Satellites;Satellite broadcasting;Interference;Optimization;Robustness;Europe;Uncertainty;Multibeam satellite systems;precoding;spectrum sharing},\n  doi = {10.1109/EUSIPCO.2015.7362885},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096701.pdf},\n}\n\n
\n
\n\n\n
\n This paper deals with the problem of multibeam satellite precoding design under spectrum sharing constraints. These regulation restrictions allow coexistence with other wireless services such as terrestrial mm-wave wireless local loop systems. This work focuses on the case where the satellite operator can use a certain frequency band whenever the signal power strength is limited over the coverage area. The precoding design is optimized considering this restriction by means of formulating a robust optimization. Numerical results show the trade-off between the achievable rates of the satellite segment and the regulation violation outage.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The role of satellites in 5G.\n \n \n \n \n\n\n \n Evans, B.; Onireti, O.; Spathopoulos, T.; and Imran, M. A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2756-2760, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362886,\n  author = {B. Evans and O. Onireti and T. Spathopoulos and M. A. Imran},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {The role of satellites in 5G},\n  year = {2015},\n  pages = {2756-2760},\n  abstract = {The next generation of mobile radio communication systems - so-called 5G - will bring major changes compared with the generations to date. The ability to cope with huge increases in data traffic at reduced latencies and improved quality of user experience, together with a major reduction in energy usage, presents big challenges. In addition, future systems will need to embody connections to billions of objects - the so-called Internet of Things (IoT) - which raises new challenges. Visions of 5G are now available from regions across the world, and research is ongoing towards new standards. The consensus is a flatter architecture that adds a dense network of small cells operating in the millimetre-wave bands, adaptable and software controlled. But what place is there for satellites in such a vision? The paper examines several potential roles for satellites, including coverage extension, content distribution, providing resilience, improved spectrum utilisation and integrated signalling systems.},\n  keywords = {5G mobile communication;Internet of Things;quality of experience;satellite communication;telecommunication traffic;5G mobile radio communication systems;data traffic;quality of user experience;Internet of Things;IoT;millimetre wave bands;integrated signalling systems;satellite communications;Satellites;5G mobile communication;Satellite broadcasting;Resilience;Europe;5G;satellite communications;network architectures;signalling},\n  doi = {10.1109/EUSIPCO.2015.7362886},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570100777.pdf},\n}\n\n
\n
\n\n\n
\n The next generation of mobile radio communication systems - so-called 5G - will bring major changes compared with the generations to date. The ability to cope with huge increases in data traffic at reduced latencies and improved quality of user experience, together with a major reduction in energy usage, presents big challenges. In addition, future systems will need to embody connections to billions of objects - the so-called Internet of Things (IoT) - which raises new challenges. Visions of 5G are now available from regions across the world, and research is ongoing towards new standards. The consensus is a flatter architecture that adds a dense network of small cells operating in the millimetre-wave bands, adaptable and software controlled. But what place is there for satellites in such a vision? The paper examines several potential roles for satellites, including coverage extension, content distribution, providing resilience, improved spectrum utilisation and integrated signalling systems.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Spectrum awareness techniques for 5G satellite communications.\n \n \n \n \n\n\n \n Guidotti, A.; Tarchi, D.; Icolari, V.; Vanelli-Coralli, A.; and Corazza, G. E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2761-2765, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SpectrumPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362887,\n  author = {A. Guidotti and D. Tarchi and V. Icolari and A. Vanelli-Coralli and G. E. Corazza},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Spectrum awareness techniques for 5G satellite communications},\n  year = {2015},\n  pages = {2761-2765},\n  abstract = {5G communications will enable new paradigms, architectures and services, and the integration of satellite and terrestrial networks can play a key role. Cognitive radios are seen as the most promising solution for dynamic cooperation in order to exploit advanced frequency sharing techniques. To this end, efficient sensing techniques for spectrum awareness are a must. In this paper, we provide preliminary results on energy detection (ED) and cyclostationary feature detection (CFD) algorithms applied to a downlink Ka-band scenario. These results show that coexistence between satellite and terrestrial networks is possible and cognitive radios can ease their integration in future 5G communications.},\n  keywords = {5G mobile communication;cognitive radio;feature extraction;radio spectrum management;satellite communication;signal detection;spectrum awareness techniques;5G satellite communications;satellite networks;terrestrial networks;cognitive radios;frequency sharing techniques;sensing techniques;energy detection;cyclostationary feature detection;CFD algorithms;Satellites;Satellite broadcasting;Sensors;Frequency selective surfaces;Signal to noise ratio;5G mobile communication;Interference;5G;Satellite Communications;Cognitive Radio;Spectrum Sensing},\n  doi = {10.1109/EUSIPCO.2015.7362887},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104041.pdf},\n}\n\n
\n
\n\n\n
\n 5G communications will enable new paradigms, architectures and services, and the integration of satellite and terrestrial networks can play a key role. Cognitive radios are seen as the most promising solution for dynamic cooperation in order to exploit advanced frequency sharing techniques. To this end, efficient sensing techniques for spectrum awareness are a must. In this paper, we provide preliminary results on energy detection (ED) and cyclostationary feature detection (CFD) algorithms applied to a downlink Ka-band scenario. These results show that coexistence between satellite and terrestrial networks is possible and cognitive radios can ease their integration in future 5G communications.\n
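A minimal energy detector, the simpler of the two sensing schemes named above, can be sketched as follows for real-valued samples: average the received power and compare it to a threshold derived from the known noise power and a target false-alarm probability, using a Gaussian approximation of the chi-square test statistic. The scenario parameters and signals are invented for illustration and do not reproduce the paper's Ka-band setup.

```python
import numpy as np
from scipy.stats import norm

def energy_detector(y, noise_power, pfa=0.01):
    """Energy detection for real-valued samples: under noise only, the
    average power has mean sigma^2 and variance 2 sigma^4 / N, so the
    threshold is set from the target false-alarm probability pfa."""
    n = len(y)
    stat = np.mean(y ** 2)
    thr = noise_power * (1.0 + norm.ppf(1.0 - pfa) * np.sqrt(2.0 / n))
    return stat > thr

rng = np.random.default_rng(3)
noise = rng.standard_normal(1000)
signal = noise + 0.5 * np.sin(2 * np.pi * 0.1 * np.arange(1000))
print(energy_detector(noise, 1.0), energy_detector(signal, 1.0))  # typically False, True
```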
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Bayesian nonparametric approach for blind multiuser channel estimation.\n \n \n \n \n\n\n \n Valera, I.; Ruiz, F. J. R.; Svensson, L.; and Perez-Cruz, F.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2766-2770, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362888,\n  author = {I. Valera and F. J. R. Ruiz and L. Svensson and F. Perez-Cruz},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A Bayesian nonparametric approach for blind multiuser channel estimation},\n  year = {2015},\n  pages = {2766-2770},\n  abstract = {In many modern multiuser communication systems, users are allowed to enter and leave the system at any given time. Thus, the number of active users is an unknown and time-varying parameter, and the performance of the system depends on how accurately this parameter is estimated over time. We address the problem of blind joint channel parameter and data estimation in a multiuser communication channel in which the number of transmitters is not known. For that purpose, we develop a Bayesian nonparametric model based on the Markov Indian buffet process and an inference algorithm that makes use of slice sampling and particle Gibbs with ancestor sampling. Our experimental results show that the proposed approach can effectively recover the data-generating process for a wide range of scenarios.},\n  keywords = {Bayes methods;blind equalisers;channel estimation;estimation theory;inference mechanisms;Markov processes;Monte Carlo methods;multi-access systems;multiuser channels;signal sampling;blind multiuser channel estimation;multiuser communication systems;time-varying parameter;blind joint channel parameter;data estimation;multiuser communication channel;Bayesian nonparametric model;Markov Indian buffet process;inference algorithm;slice sampling;particle Gibbs;ancestor sampling;Transmitters;Hidden Markov models;Receiving antennas;Signal to noise ratio;Bayes methods;Communication systems;Bayesian nonparametric;factorial HMM;multiuser communication;machine-to-machine},\n  doi = {10.1109/EUSIPCO.2015.7362888},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096659.pdf},\n}\n\n
\n
\n\n\n
\n In many modern multiuser communication systems, users are allowed to enter and leave the system at any given time. Thus, the number of active users is an unknown and time-varying parameter, and the performance of the system depends on how accurately this parameter is estimated over time. We address the problem of blind joint channel parameter and data estimation in a multiuser communication channel in which the number of transmitters is not known. For that purpose, we develop a Bayesian nonparametric model based on the Markov Indian buffet process and an inference algorithm that makes use of slice sampling and particle Gibbs with ancestor sampling. Our experimental results show that the proposed approach can effectively recover the data-generating process for a wide range of scenarios.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Scalable Bayesian nonparametric dictionary learning.\n \n \n \n \n\n\n \n Sertoglu, S.; and Paisley, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2771-2775, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ScalablePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362889,\n  author = {S. Sertoglu and J. Paisley},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Scalable Bayesian nonparametric dictionary learning},\n  year = {2015},\n  pages = {2771-2775},\n  abstract = {We derive a stochastic EM algorithm for scalable dictionary learning with the beta-Bernoulli process, a Bayesian nonparametric prior that learns the dictionary size in addition to the sparse coding of each signal. The core EM algorithm provides a new way of performing inference in nonparametric dictionary learning models and has a close similarity to other sparse coding methods such as K-SVD. Our stochastic extension for handling large data sets is closely related to stochastic variational inference, with the stochastic update for one parameter being exactly that found using SVI. We show our algorithm compares well with K-SVD and total variation minimization on a denoising problem using several images.},\n  keywords = {Bayes methods;compressed sensing;image coding;image denoising;inference mechanisms;stochastic processes;scalable Bayesian nonparametric dictionary learning;stochastic EM algorithm;beta-Bernoulli process;Bayesian nonparametric prior;nonparametric dictionary learning models;sparse coding methods;K-SVD;stochastic variational inference;SVI;denoising problem;Dictionaries;Encoding;Stochastic processes;Signal processing algorithms;Inference algorithms;Bayes methods;Zinc},\n  doi = {10.1109/EUSIPCO.2015.7362889},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102669.pdf},\n}\n\n
\n
\n\n\n
\n We derive a stochastic EM algorithm for scalable dictionary learning with the beta-Bernoulli process, a Bayesian nonparametric prior that learns the dictionary size in addition to the sparse coding of each signal. The core EM algorithm provides a new way of performing inference in nonparametric dictionary learning models and has a close similarity to other sparse coding methods such as K-SVD. Our stochastic extension for handling large data sets is closely related to stochastic variational inference, with the stochastic update for one parameter being exactly that found using SVI. We show our algorithm compares well with K-SVD and total variation minimization on a denoising problem using several images.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Nonparametric Bayesian matrix factorization for assortative networks.\n \n \n \n \n\n\n \n Zhou, M.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2776-2780, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"NonparametricPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362890,\n  author = {M. Zhou},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Nonparametric Bayesian matrix factorization for assortative networks},\n  year = {2015},\n  pages = {2776-2780},\n  abstract = {We describe in detail the gamma process edge partition model that is well suited to analyze assortative relational networks. The model links the binary edges of an undirected and unweighted relational network with a latent factor model via the Bernoulli-Poisson link, and uses the gamma process to support a potentially infinite number of latent communities. The communities are allowed to overlap with each other, with a community's overlapping parts assumed to be more densely connected than its non-overlapping ones. The model is evaluated with synthetic data to illustrate its ability to model assortative networks and its limitations in modeling disassortative ones.},\n  keywords = {Bayes methods;matrix decomposition;social networking (online);stochastic processes;Bernoulli-Poisson link;latent factor model;unweighted relational network;binary edges;assortative relational networks;gamma process edge partition model;nonparametric Bayesian matrix factorization;Predator prey systems;Bayes methods;Computational modeling;Analytical models;Data models;Mathematical model;Europe;Gamma process;factor analysis;Bernoulli-Poisson link;overlapping community detection;link prediction},\n  doi = {10.1109/EUSIPCO.2015.7362890},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103871.pdf},\n}\n\n
\n
\n\n\n
\n We describe in detail the gamma process edge partition model that is well suited to analyze assortative relational networks. The model links the binary edges of an undirected and unweighted relational network with a latent factor model via the Bernoulli-Poisson link, and uses the gamma process to support a potentially infinite number of latent communities. The communities are allowed to overlap with each other, with a community's overlapping parts assumed to be more densely connected than its non-overlapping ones. The model is evaluated with synthetic data to illustrate its ability to model assortative networks and its limitations in modeling disassortative ones.\n
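The Bernoulli-Poisson link mentioned above has a simple generative reading: given nonnegative per-node community loadings, a latent Poisson count with rate phi_i . phi_j is thresholded at one, giving edge probability 1 - exp(-phi_i . phi_j). The Python sketch below generates a small network from this link; the loadings are made up (two overlapping communities), and the gamma process prior over the loadings is not included.

```python
import numpy as np

def sample_network(phi, rng):
    """Edge partition model with the Bernoulli-Poisson link: nonnegative
    loadings phi (nodes x communities) give edge probabilities
    p_ij = 1 - exp(-phi_i . phi_j), so overlapping community memberships
    make connections denser."""
    p = 1.0 - np.exp(-(phi @ phi.T))    # link applied to the Poisson rates
    a = (rng.random(p.shape) < p).astype(int)
    a = np.triu(a, 1)                   # undirected, no self-loops
    return a + a.T

# Made-up loadings: nodes 2 and 4 partially overlap two communities.
phi = np.array([[2.0, 0.0], [2.0, 0.5], [0.0, 2.0], [0.5, 2.0]])
print(sample_network(phi, np.random.default_rng(0)))
```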
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Numerical approximations for speeding up MCMC inference in the infinite relational model.\n \n \n \n \n\n\n \n Schmidt, M. N.; and Albers, K. J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2781-2785, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"NumericalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362891,\n  author = {M. N. Schmidt and K. J. Albers},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Numerical approximations for speeding up MCMC inference in the infinite relational model},\n  year = {2015},\n  pages = {2781-2785},\n  abstract = {The infinite relational model (IRM) is a powerful model for discovering clusters in complex networks; however, the computational speed of Markov chain Monte Carlo inference in the model can be a limiting factor when analyzing large networks. We investigate how using numerical approximations of the log-Gamma function in evaluating the likelihood of the IRM can improve the computational speed of MCMC inference, and how it affects the performance of the model. Using an ensemble of networks generated from the IRM, we compare three approximations in terms of their generalization performance measured on test data. We demonstrate that the computational time for MCMC inference can be reduced by a factor of two without affecting the performance, making it worthwhile in practical situations when on a computational budget.},\n  keywords = {approximation theory;data communication;Markov processes;Monte Carlo methods;numerical approximations;MCMC inference;infinite relational model;complex networks;Markov chain Monte Carlo inference;log-Gamma function;generalization performance;computational budget;complex network data;Function approximation;Computational modeling;Numerical models;Complex networks;Data models;Europe;Nonparametric Bayesian modeling;Infinite Relational Model;Numerical approximation},\n  doi = {10.1109/EUSIPCO.2015.7362891},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104865.pdf},\n}\n\n
\n
\n\n\n
\n The infinite relational model (IRM) is a powerful model for discovering clusters in complex networks; however, the computational speed of Markov chain Monte Carlo inference in the model can be a limiting factor when analyzing large networks. We investigate how using numerical approximations of the log-Gamma function in evaluating the likelihood of the IRM can improve the computational speed of MCMC inference, and how it affects the performance of the model. Using an ensemble of networks generated from the IRM, we compare three approximations in terms of their generalization performance measured on test data. We demonstrate that the computational time for MCMC inference can be reduced by a factor of two without affecting the performance, making it worthwhile in practical situations when on a computational budget.\n
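As a flavour of the kind of numerical shortcut evaluated above, here is a first-order Stirling-series approximation of the log-Gamma function compared against scipy's exact gammaln. This specific formula is a common textbook choice and is offered as an assumption; it is not necessarily one of the three approximations tested in the paper.

```python
import numpy as np
from scipy.special import gammaln

def gammaln_stirling(x):
    """First terms of the Stirling series for log Gamma(x):
    (x - 1/2) log x - x + (1/2) log(2 pi) + 1/(12 x);
    cheap and accurate for moderately large arguments."""
    return (x - 0.5) * np.log(x) - x + 0.5 * np.log(2.0 * np.pi) + 1.0 / (12.0 * x)

x = np.array([5.0, 50.0, 500.0])
print(np.abs(gammaln_stirling(x) - gammaln(x)))  # error shrinks as x grows
```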
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On generative models for sequential formation of clusters.\n \n \n \n \n\n\n \n Djurić, P. M.; and Yu, K.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2786-2790, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362892,\n  author = {P. M. Djurić and K. Yu},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {On generative models for sequential formation of clusters},\n  year = {2015},\n  pages = {2786-2790},\n  abstract = {In the literature of machine learning, a class of unsupervised approaches is based on Dirichlet process mixture models. These approaches fall into the category of nonparametric Bayesian methods, and they find a wide range of applications, including biology, computer science, engineering, and finance. An important assumption of the Dirichlet process mixture models is that the data are exchangeable. This is a restriction for many types of data whose structures vary over time or space or some other independent variables. In this paper, we address generative models that remove the restriction of exchangeability of the Dirichlet process model, which allows for the creation of mixtures with time-varying structures. We also address how these models can be applied to sequential estimation of clusters.},\n  keywords = {Bayes methods;learning (artificial intelligence);mixture models;pattern clustering;signal processing;statistical analysis;generative models;clusters sequential formation;machine learning;unsupervised approaches;Dirichlet process mixture models;nonparametric Bayesian methods;biology;computer science;Manganese;Europe;Signal processing;Computational modeling;Mixture models;Bayes methods;Estimation;machine learning;Dirichlet processes;time-varying clustering;Chinese restaurant processes with finite capacities},\n  doi = {10.1109/EUSIPCO.2015.7362892},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105199.pdf},\n}\n\n
\n
\n\n\n
\n In the literature of machine learning, a class of unsupervised approaches is based on Dirichlet process mixture models. These approaches fall into the category of nonparametric Bayesian methods, and they find a wide range of applications, including biology, computer science, engineering, and finance. An important assumption of the Dirichlet process mixture models is that the data are exchangeable. This is a restriction for many types of data whose structures vary over time or space or some other independent variables. In this paper, we address generative models that remove the restriction of exchangeability of the Dirichlet process model, which allows for the creation of mixtures with time-varying structures. We also address how these models can be applied to sequential estimation of clusters.\n
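The exchangeable baseline that this paper generalizes is the Chinese restaurant process underlying Dirichlet process mixtures: each new observation joins an existing cluster with probability proportional to that cluster's size, or opens a new cluster with probability proportional to a concentration parameter alpha. Here is a minimal sampler of the standard CRP; it does not implement the paper's finite-capacity, time-varying variant.

```python
import numpy as np

def crp_assignments(n, alpha, rng):
    """Chinese restaurant process: observation i joins an existing
    cluster with probability proportional to the cluster size, or opens
    a new cluster with probability proportional to alpha."""
    counts, z = [], []
    for _ in range(n):
        probs = np.array(counts + [alpha], dtype=float)
        probs /= probs.sum()
        k = rng.choice(len(probs), p=probs)
        if k == len(counts):
            counts.append(1)       # a new cluster is born
        else:
            counts[k] += 1
        z.append(int(k))
    return z

print(crp_assignments(20, alpha=1.0, rng=np.random.default_rng(0)))
```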
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Ultrasound compressive deconvolution with ℓP-Norm prior.\n \n \n \n\n\n \n Chen, Z.; Zhao, N.; Basarab, A.; and Kouamé, D.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2791-2795, Aug 2015. \n \n\n\n\n
\n
@InProceedings{7362893,\n  author = {Z. Chen and N. Zhao and A. Basarab and D. Kouamé},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Ultrasound compressive deconvolution with ℓP-Norm prior},\n  year = {2015},\n  pages = {2791-2795},\n  abstract = {It has been recently shown that compressive sampling is an interesting perspective for fast ultrasound imaging. This paper addresses the problem of compressive deconvolution for ultrasound imaging systems under the assumption of a generalized Gaussian distributed tissue reflectivity function. The benefit of compressive deconvolution is the joint reduction of the acquired data volume and improvement of the image resolution. The main contribution of this work is to apply the framework of compressive deconvolution to ultrasound imaging and to propose a novel ℓp-norm (1 ≤ p ≤ 2) algorithm based on the Alternating Direction Method of Multipliers. The performance of the proposed algorithm is tested on simulated data and compared with that obtained by a more intuitive sequential compressive deconvolution method.},\n  keywords = {biomedical ultrasonics;compressed sensing;deconvolution;Gaussian processes;image resolution;sequential decoding;ultrasonic imaging;ultrasound compressive deconvolution;compressive sampling;ultrasound imaging systems;Gaussian distributed tissue reflectivity function;joint volume reduction;image resolution;alternating direction method of multipliers;sequential compressive deconvolution method;Deconvolution;Image coding;Imaging;Ultrasonic imaging;Radio frequency;Signal processing algorithms;Minimization;ultrasound imaging;compressive sampling;deconvolution;generalized Gaussian distribution;alternating direction method of multipliers},\n  doi = {10.1109/EUSIPCO.2015.7362893},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n It has been recently shown that compressive sampling is an interesting perspective for fast ultrasound imaging. This paper addresses the problem of compressive deconvolution for ultrasound imaging systems under the assumption of a generalized Gaussian distributed tissue reflectivity function. The benefit of compressive deconvolution is the joint reduction of the acquired data volume and improvement of the image resolution. The main contribution of this work is to apply the framework of compressive deconvolution to ultrasound imaging and to propose a novel ℓp-norm (1 ≤ p ≤ 2) algorithm based on the Alternating Direction Method of Multipliers. The performance of the proposed algorithm is tested on simulated data and compared with that obtained by a more intuitive sequential compressive deconvolution method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The role of the image phase in cardiac strain imaging.\n \n \n \n \n\n\n \n Alessandrini, M.; Basarab, A.; De Craene, M.; Sermesant, M.; Liebgott, H.; Bernard, O.; and D'hooge, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2796-2800, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362894,\n  author = {M. Alessandrini and A. Basarab and M. {De Craene} and M. Sermesant and H. Liebgott and O. Bernard and J. D'hooge},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {The role of the image phase in cardiac strain imaging},\n  year = {2015},\n  pages = {2796-2800},\n  abstract = {This paper reviews our most recent contributions in the field of cardiac deformation imaging, which includes a motion estimation framework based on the conservation of the image phase over time and an open pipeline to benchmark algorithms for cardiac strain imaging in 2D and 3D ultrasound. The paper also shows an original evaluation of the proposed motion estimation technique based on the new benchmarking pipeline.},\n  keywords = {biomechanics;deformation;echocardiography;medical image processing;motion estimation;cardiac strain imaging;cardiac deformation imaging;image phase conservation;2D ultrasound;3D ultrasound;motion estimation technique;Three-dimensional displays;Pipelines;Tracking;Magnetic resonance imaging;Ultrasonic imaging;Motion estimation;medical imaging;heart;deformation imaging;echocardiography;MRI;quality assurance;simulations},\n  doi = {10.1109/EUSIPCO.2015.7362894},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102187.pdf},\n}\n\n
\n
\n\n\n
\n This paper reviews our most recent contributions in the field of cardiac deformation imaging, which include a motion estimation framework based on the conservation of the image phase over time and an open pipeline to benchmark algorithms for cardiac strain imaging in 2D and 3D ultrasound. The paper also presents an original evaluation of the proposed motion estimation technique based on the new benchmarking pipeline.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n RJMCMC-based tracking of vesicles in fluorescence time-lapse microscopy.\n \n \n \n \n\n\n \n Nam, D.; Arkill, K.; Eales, R.; Hodgson, L.; Verkade, P.; and Achim, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2801-2805, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"RJMCMC-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362895,\n  author = {D. Nam and K. Arkill and R. Eales and L. Hodgson and P. Verkade and A. Achim},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {RJMCMC-based tracking of vesicles in fluorescence time-lapse microscopy},\n  year = {2015},\n  pages = {2801-2805},\n  abstract = {Vesicles are a key component for the transport of materials throughout the cell. To manually analyze the behaviors of vesicles in fluorescence time-lapse microscopy images would be almost impossible. This is also true for the identification of key events, such as merging and splitting. In order to automate and increase the reliability of this processes we introduce a Reversible Jump Markov chain Monte Carlo method for tracking vesicles and identifying merging/splitting events, based on object interactions. We evaluate our method on a series of synthetic videos with varying degrees of noise. We show that our method compares well with other state-of-the-art techniques and well-known microscopy tracking tools. The robustness of our method is also demonstrated on real microscopy videos.},\n  keywords = {biomedical optical imaging;cellular transport;fluorescence;Markov processes;medical image processing;Monte Carlo methods;object tracking;optical microscopy;RJMCMC-based tracking;fluorescence time-lapse microscopy;material transport;vesicle merging;vesicle splitting;Reversible Jump Markov chain Monte Carlo method;microscopy tracking tools;real microscopy videos;Microscopy;Target tracking;Merging;Proposals;Monte Carlo methods;Videos;Light microscopy;biomedical imaging;MCMC;merging;splitting},\n  doi = {10.1109/EUSIPCO.2015.7362895},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104803.pdf},\n}\n\n
\n
\n\n\n
\n Vesicles are a key component for the transport of materials throughout the cell. Manually analyzing the behavior of vesicles in fluorescence time-lapse microscopy images would be almost impossible, as would the identification of key events such as merging and splitting. In order to automate these processes and increase their reliability, we introduce a Reversible Jump Markov chain Monte Carlo method for tracking vesicles and identifying merging/splitting events, based on object interactions. We evaluate our method on a series of synthetic videos with varying degrees of noise. We show that our method compares well with other state-of-the-art techniques and well-known microscopy tracking tools. The robustness of our method is also demonstrated on real microscopy videos.\n
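Reversible Jump MCMC is the tool that lets the number of tracked objects itself be a random variable. The sketch below shows only the core birth/death mechanics on a much simpler problem, an unknown number of Gaussian blobs in a 1-D signal with a Poisson prior on the count and uniform positions; the paper's tracker adds motion, object interactions, and merge/split moves on top of this, and the boundary case of an empty state is handled loosely here.

```python
import numpy as np

rng = np.random.default_rng(1)

def model(positions, t, width=2.0, amp=1.0):
    """Superposition of Gaussian blobs at the given positions."""
    out = np.zeros_like(t)
    for p in positions:
        out += amp * np.exp(-0.5 * ((t - p) / width)**2)
    return out

def log_lik(positions, t, y, noise=0.1):
    r = y - model(positions, t)
    return -0.5 * np.sum(r**2) / noise**2

def rjmcmc(t, y, lam=3.0, n_iter=5000, noise=0.1):
    """Birth/death RJMCMC over a variable-size set of blob positions.
    Acceptance ratios follow the standard point-process form for a
    Poisson(lam) prior on the count and uniform positions."""
    T = t[-1] - t[0]
    pos, ll = [], log_lik([], t, y, noise)
    for _ in range(n_iter):
        if rng.random() < 0.5 or not pos:              # birth move
            new = pos + [t[0] + T * rng.random()]
            ll_new = log_lik(new, t, y, noise)
            if np.log(rng.random()) < ll_new - ll + np.log(lam / len(new)):
                pos, ll = new, ll_new
        else:                                          # death move
            idx = rng.integers(len(pos))
            new = pos[:idx] + pos[idx + 1:]
            ll_new = log_lik(new, t, y, noise)
            if np.log(rng.random()) < ll_new - ll + np.log(len(pos) / lam):
                pos, ll = new, ll_new
    return pos

t = np.linspace(0, 50, 200)
y = model([12.0, 30.0], t) + 0.1 * rng.standard_normal(t.size)
print(sorted(np.round(rjmcmc(t, y), 1)))  # typically two values near 12, 30
```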
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Compressive computed tomography image reconstruction with denoising message passing algorithms.\n \n \n \n \n\n\n \n Perelli, A.; and Davies, M. E.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2806-2810, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"CompressivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362896,\n  author = {A. Perelli and M. E. Davies},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Compressive computed tomography image reconstruction with denoising message passing algorithms},\n  year = {2015},\n  pages = {2806-2810},\n  abstract = {In this paper we address the compressive reconstruction of images from a limited number of projections in order to reduce the X-ray radiation dose in Computed Tomography (CT) while achieving high diagnostic performances. Our objective is to study the feasibility of applying message passing Compressive Sensing (CS) imaging algorithms to CT image reconstruction extending the algorithm from its theoretical domain of i.i.d. random matrices. Exploiting the intuition described in [1] of employing a generic denoiser in a CS reconstruction algorithm, we propose a denoising-based Turbo CS algorithm (D-Turbo) and we extend the application of the de-noising approximate message passing (D-AMP) algorithm to partial Radon Projection data with a Gaussian approximation of the Poisson noise model. The proposed CS message passing approaches have been tested on simulated CT data using the BM3D denoiser [2] yielding an improvement in the reconstruction quality compared to existing direct and iterative methods. The promising results show the effectiveness of the idea to employ a generic denoiser Turbo CS or message passing algorithm for reduced number of views CT reconstruction.},\n  keywords = {approximation theory;compressed sensing;computerised tomography;Gaussian processes;image denoising;image reconstruction;iterative methods;matrix algebra;message passing;Radon transforms;turbo codes;iterative method;BM3D denoiser;Poisson noise model;Gaussian approximation;partial Radon Projection data;D-AMP algorithm;denoising approximate message passing algorithm;D-turbo;denoising-based turbo CS algorithm;IID random matrix;CT image reconstruction;CS imaging algorithm;message passing compressive sensing imaging algorithm;X-ray radiation dose;compressive computed tomography image reconstruction;Computed tomography;Signal processing algorithms;Radon;Message passing;Image reconstruction;X-ray imaging;Approximation algorithms;Computed Tomography;Radon Transform;Approximate Message Passing;Turbo Compressed Sensing},\n  doi = {10.1109/EUSIPCO.2015.7362896},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104845.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we address the compressive reconstruction of images from a limited number of projections in order to reduce the X-ray radiation dose in Computed Tomography (CT) while achieving high diagnostic performance. Our objective is to study the feasibility of applying message passing Compressive Sensing (CS) imaging algorithms to CT image reconstruction, extending the algorithm beyond its theoretical domain of i.i.d. random matrices. Exploiting the intuition described in [1] of employing a generic denoiser in a CS reconstruction algorithm, we propose a denoising-based Turbo CS algorithm (D-Turbo) and we extend the application of the denoising approximate message passing (D-AMP) algorithm to partial Radon projection data with a Gaussian approximation of the Poisson noise model. The proposed CS message passing approaches have been tested on simulated CT data using the BM3D denoiser [2], yielding an improvement in reconstruction quality compared to existing direct and iterative methods. The promising results show the effectiveness of employing a generic denoiser in a Turbo CS or message passing algorithm for CT reconstruction from a reduced number of views.\n
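The D-AMP idea is to run the approximate message passing iteration with an arbitrary denoiser in place of the usual thresholding, while keeping the Onsager correction term. The sketch below uses a soft-threshold denoiser, for which the divergence is simply the number of nonzeros divided by m, on an i.i.d. Gaussian matrix; plugging in a BM3D-style denoiser would require a Monte Carlo divergence estimate, and the paper's CT/Radon setting is not reproduced here.

```python
import numpy as np

def amp_recover(y, A, n_iter=30, thresh_scale=1.5):
    """AMP with a plug-in denoiser (here: soft threshold, so the
    Onsager term is ||x||_0 / m). Sketch of the D-AMP structure."""
    m, n = A.shape
    x = np.zeros(n)
    z = y.copy()
    for _ in range(n_iter):
        sigma = np.linalg.norm(z) / np.sqrt(m)   # effective noise level
        r = x + A.T @ z                          # pseudo-data to denoise
        x = np.sign(r) * np.maximum(np.abs(r) - thresh_scale * sigma, 0)
        onsager = z * (np.count_nonzero(x) / m)  # divergence of the denoiser
        z = y - A @ x + onsager                  # corrected residual
    return x

rng = np.random.default_rng(0)
n, m, k = 400, 160, 15
A = rng.standard_normal((m, n)) / np.sqrt(m)
x0 = np.zeros(n); x0[rng.choice(n, k, replace=False)] = rng.standard_normal(k)
y = A @ x0
xh = amp_recover(y, A)
print("relative error:", np.linalg.norm(xh - x0) / np.linalg.norm(x0))
```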
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semi-blind joint super-resolution/segmentation of 3D trabecular bone images by a TV box approach.\n \n \n \n \n\n\n \n Peyrin, F.; Toma, A.; Sixou, B.; Denis, L.; Burghardt, A.; and Pialat, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2811-2815, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Semi-blindPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362897,\n  author = {F. Peyrin and A. Toma and B. Sixou and L. Denis and A. Burghardt and J. Pialat},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Semi-blind joint super-resolution/segmentation of 3D trabecular bone images by a TV box approach},\n  year = {2015},\n  pages = {2811-2815},\n  abstract = {The investigation of bone fragility diseases, as osteoporosis, is based on the analysis of the trabecular bone microarchitecture. The aim of this paper is to improve the in-vivo trabecular bone segmentation and quantification by increasing the resolution of bone micro-architecture images. We propose a semi-blind joint super-resolution/segmentation approach based on a Total Variation regularization with a convex constraint. A comparison with the bicubic interpolation method and the non-blind version of the proposed method is shown. The validation is performed on blurred, noisy and down-sampled 3D synchrotron micro-CT bone images. Good estimates of the blur and of the high resolution image are obtained with the semi-blind approach. Preliminary results are obtained with the semi-blind approach on real HR-pQCT images.},\n  keywords = {bone;computerised tomography;diseases;image resolution;image segmentation;interpolation;semiblind joint super-resolution;semiblind joint segmentation;3D trabecular bone images;TV box approach;bone fragility diseases;osteoporosis;trabecular bone microarchitecture;in-vivo trabecular bone segmentation;bone microarchitecture image resolution;total variation regularization;bicubic interpolation method;down-sampled 3D synchrotron microCT bone images;high-resolution image;HR-pQCT images;Bones;Spatial resolution;Signal resolution;Image segmentation;Three-dimensional displays;Kernel;Semi-blind super-resolution;segmentation;Total Variation;3D micro-CT;bone micro-architecture},\n  doi = {10.1109/EUSIPCO.2015.7362897},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104853.pdf},\n}\n\n
\n
\n\n\n
\n The investigation of bone fragility diseases, such as osteoporosis, is based on the analysis of the trabecular bone microarchitecture. The aim of this paper is to improve in-vivo trabecular bone segmentation and quantification by increasing the resolution of bone micro-architecture images. We propose a semi-blind joint super-resolution/segmentation approach based on Total Variation regularization with a convex constraint. A comparison with the bicubic interpolation method and the non-blind version of the proposed method is shown. The validation is performed on blurred, noisy and down-sampled 3D synchrotron micro-CT bone images. Good estimates of the blur and of the high-resolution image are obtained with the semi-blind approach. Preliminary results are also obtained with the semi-blind approach on real HR-pQCT images.\n
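For readers unfamiliar with the Total Variation prior at the heart of the method, the sketch below minimizes a smoothed-TV denoising objective by gradient descent in 2-D. It is a minimal stand-in: the paper's joint problem additionally involves a blur/downsampling operator, a box (intensity) constraint, and the blur estimation that makes the approach semi-blind; all parameters here are illustrative.

```python
import numpy as np

def tv_denoise(y, lam=0.15, n_iter=200, step=0.2, eps=1e-2):
    """Gradient descent on the smoothed-TV objective
        0.5 * ||x - y||^2 + lam * sum sqrt(|grad x|^2 + eps^2)."""
    x = y.copy()
    for _ in range(n_iter):
        dx = np.diff(x, axis=1, append=x[:, -1:])  # forward differences
        dy = np.diff(x, axis=0, append=x[-1:, :])
        mag = np.sqrt(dx**2 + dy**2 + eps**2)
        px, py = dx / mag, dy / mag
        div = np.zeros_like(x)                     # adjoint (divergence)
        div[:, 0] += px[:, 0]; div[:, 1:] += px[:, 1:] - px[:, :-1]
        div[0, :] += py[0, :]; div[1:, :] += py[1:, :] - py[:-1, :]
        x -= step * ((x - y) - lam * div)
    return x

rng = np.random.default_rng(0)
img = np.zeros((64, 64)); img[16:48, 16:48] = 1.0  # piecewise-constant truth
noisy = img + 0.3 * rng.standard_normal(img.shape)
print("residual noise in :", np.round(np.std(noisy - img), 3),
      " out:", np.round(np.std(tv_denoise(noisy) - img), 3))
```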
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Properties of Ramanujan filter banks.\n \n \n \n \n\n\n \n Vaidyanathan, P. P.; and Tenneti, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2816-2820, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"PropertiesPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362898,\n  author = {P. P. Vaidyanathan and S. Tenneti},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Properties of Ramanujan filter banks},\n  year = {2015},\n  pages = {2816-2820},\n  abstract = {This paper studies a class of filter banks called the Ramanujan filter banks which are based on Ramanujan-sums. It is shown that these filter banks have some important mathematical properties which allow them to reveal localized hidden periodicities in real-time data. These are also compared with traditional comb filters which are sometimes used to identify periodicities. It is shown that non-adaptive comb filters cannot in general reveal periodic components in signals unless they are restricted to be Ramanujan filters. The paper also shows how Ramanujan filter banks can be used to generate time-period plane plots which track the presence of time varying, localized, periodic components.},\n  keywords = {channel bank filters;comb filters;Ramanujan filter banks;Ramanujan-sums;mathematical properties;localized hidden periodicities;comb filters;nonadaptive comb filters;time-period plane plots;periodic components;Discrete Fourier transforms;Indexes;Harmonic analysis;Passband;Europe;Filter banks;Ramanujan filter banks;Ramanujan-sum;periodicity;comb filter banks;coprime frequencies},\n  doi = {10.1109/EUSIPCO.2015.7362898},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570091833.pdf},\n}\n\n
\n
\n\n\n
\n This paper studies a class of filter banks, called Ramanujan filter banks, which are based on Ramanujan sums. It is shown that these filter banks have some important mathematical properties which allow them to reveal localized hidden periodicities in real-time data. They are also compared with traditional comb filters, which are sometimes used to identify periodicities. It is shown that non-adaptive comb filters cannot in general reveal periodic components in signals unless they are restricted to be Ramanujan filters. The paper also shows how Ramanujan filter banks can be used to generate time-period plane plots which track the presence of time-varying, localized, periodic components.\n
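A Ramanujan filter bank is built from the Ramanujan sums c_q(n); a simple FIR realization repeats c_q over a few periods and measures the output energy for each q. In the toy sketch below (filter length, normalization, and the period range are all illustrative choices), hidden periods show up as energy peaks at the corresponding q.

```python
import numpy as np
from math import gcd

def ramanujan_sum(q, n):
    """c_q(n) = sum over k coprime to q of exp(2*pi*i*k*n/q); real-valued."""
    return sum(np.cos(2 * np.pi * k * n / q)
               for k in range(1, q + 1) if gcd(k, q) == 1)

def ramanujan_periodogram(x, qmax):
    """Energy at the output of each Ramanujan filter; the impulse
    response is c_q repeated over three periods (a crude FIR choice)."""
    energies = []
    for q in range(1, qmax + 1):
        h = np.array([ramanujan_sum(q, n) for n in range(3 * q)])
        yq = np.convolve(x, h, mode='valid')
        energies.append(np.sum(yq**2) / len(yq) / q)  # rough normalization
    return np.array(energies)

t = np.arange(200)
x = np.sin(2 * np.pi * t / 7) + 0.5 * np.sin(2 * np.pi * t / 10)
e = ramanujan_periodogram(x, 15)
print("strongest periods:", np.argsort(e)[::-1][:4] + 1)  # 7 and 10 lead
```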
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Spark under 2-D fourier sampling.\n \n \n \n \n\n\n \n Biswas, S.; Dasgupta, S.; Jacob, M.; and Mudumbai, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2821-2824, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SparkPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362899,\n  author = {S. Biswas and S. Dasgupta and M. Jacob and R. Mudumbai},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Spark under 2-D fourier sampling},\n  year = {2015},\n  pages = {2821-2824},\n  abstract = {We consider the spark of submatrices of 2D-DFT matrices obtained by removing certain rows and relate it to the spark of associated 1D-DFT submatrices. A matrix has spark m if its smallest number of linearly dependent columns equals m. To recover an arbitrary fc-sparse vector, the spark of an observation matrix must exceed 2fc. We consider how to choose the rows of the 2D-DFT matrix so that it is full spark, i.e. its spark equals one more than its row dimension. We consider submatrices resulting from two sets of sampling patterns in frequency space: On a straight line and on a rectangular grid. We show that in the latter case full spark is rarely obtainable, though vectors with certain sparsity patterns can still be recovered. In the former case we provide a necessary and sufficient condition for full spark, and show that lines with integer slopes cannot attain it.},\n  keywords = {compressed sensing;discrete Fourier transforms;sampling methods;2D Fourier sampling;2D-DFT matrices;1D-DFT submatrices;arbitrary fc-sparse vector;observation matrix;frequency space;full spark;compressed sensing;Sparks;Discrete Fourier transforms;Magnetic resonance imaging;Sparse matrices;Europe;Signal processing;Jacobian matrices;Coprime sensing;full spark;compressed sensing;two dimensional;Fourier Sampling},\n  doi = {10.1109/EUSIPCO.2015.7362899},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103923.pdf},\n}\n\n
\n
\n\n\n
\n We consider the spark of submatrices of 2D-DFT matrices obtained by removing certain rows and relate it to the spark of associated 1D-DFT submatrices. A matrix has spark m if its smallest number of linearly dependent columns equals m. To recover an arbitrary k-sparse vector, the spark of an observation matrix must exceed 2k. We consider how to choose the rows of the 2D-DFT matrix so that it is full spark, i.e. its spark equals one more than its row dimension. We consider submatrices resulting from two sets of sampling patterns in frequency space: on a straight line and on a rectangular grid. We show that in the latter case full spark is rarely obtainable, though vectors with certain sparsity patterns can still be recovered. In the former case we provide a necessary and sufficient condition for full spark, and show that lines with integer slopes cannot attain it.\n
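Since the spark is defined combinatorially, a tiny brute-force check is a useful companion to conditions like these. The sketch below computes the spark of a row-sampled DFT matrix by exhaustive search; the example pattern (consecutive rows of an 8-point DFT, a standard full-spark Vandermonde case in 1-D) is an illustrative assumption, and the search is exponential in the number of columns, so it is for tiny matrices only.

```python
import numpy as np
from itertools import combinations

def spark(A, tol=1e-10):
    """Smallest number of linearly dependent columns of A (brute force)."""
    m, n = A.shape
    for size in range(1, n + 1):
        for cols in combinations(range(n), size):
            if np.linalg.matrix_rank(A[:, cols], tol=tol) < size:
                return size
    return n + 1  # all columns independent

# Rows {0,1,2,3} of the 8-point DFT: a Vandermonde submatrix.
N, rows = 8, [0, 1, 2, 3]
F = np.exp(-2j * np.pi * np.outer(rows, np.arange(N)) / N)
print(spark(F))  # full spark = number of rows + 1 = 5
```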
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the robustness of co-prime sampling.\n \n \n \n \n\n\n \n Koochakzadeh, A.; and Pal, P.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2825-2829, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362900,\n  author = {A. Koochakzadeh and P. Pal},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {On the robustness of co-prime sampling},\n  year = {2015},\n  pages = {2825-2829},\n  abstract = {Coprime sampling has been shown to be an effective deterministic sub-Nyquist sampling scheme for estimating the power spectrum of wide sense stationary signals without any loss of information. In contrast to the existing results in coprime sampling which only assume an ideal setting, this paper considers both additive perturbation on the sampled signal, as well as sampling jitter, and analyzes their effect on the quality of the estimated correlation sequence. A variety of bounds on the error introduced by such non ideal sampling schemes are computed by considering a statistical model for the perturbations. They indicate that coprime sampling leads to stable estimation of the autocorrelation sequence, in presence of small perturbations. Additional results on identifiability in spatial spectrum estimation are derived using the Fisher Information Matrix, which indicate that with high probability, it is still possible to identify O(M2) sources with M sensors, with a perturbed coprime array.},\n  keywords = {correlation methods;jitter;signal detection;signal sampling;coprime sampling;deterministic subNyquist sampling scheme;power spectrum estimation;wide sense stationary signals;additive perturbation;sampling jitter;estimated correlation sequence;fisher information matrix;Correlation;Jitter;Sensor arrays;Estimation;Robustness;Coprime Sampling;Spectrum Estimation;Co-Array;Jitter;Line Spectrum;Fisher Information Matrix},\n  doi = {10.1109/EUSIPCO.2015.7362900},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104429.pdf},\n}\n\n
\n
\n\n\n
\n Coprime sampling has been shown to be an effective deterministic sub-Nyquist sampling scheme for estimating the power spectrum of wide sense stationary signals without any loss of information. In contrast to existing results in coprime sampling, which only assume an ideal setting, this paper considers both additive perturbation on the sampled signal and sampling jitter, and analyzes their effect on the quality of the estimated correlation sequence. A variety of bounds on the error introduced by such non-ideal sampling schemes are computed by considering a statistical model for the perturbations. They indicate that coprime sampling leads to stable estimation of the autocorrelation sequence in the presence of small perturbations. Additional results on identifiability in spatial spectrum estimation are derived using the Fisher Information Matrix, which indicate that with high probability, it is still possible to identify O(M²) sources with M sensors, with a perturbed coprime array.\n
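The reason coprime sampling preserves second-order information is that the difference set {M·n1 − N·n2} of the two branches covers most integer lags even though neither branch samples at the Nyquist rate. The toy estimator below makes that mechanism explicit for an AR(1) input; the block structure, normalization, and all parameters are illustrative assumptions, and the paper's perturbation analysis is not reproduced.

```python
import numpy as np

def coprime_autocorr(x, M, N, n_blocks):
    """Estimate r[k] from two branches keeping samples at multiples of
    M and of N (coprime). Cross-products within each length-M*N block
    supply lags |M*n1 - N*n2|; lags never hit keep a zero count."""
    L = M * N
    sums, counts = np.zeros(L), np.zeros(L)
    for b in range(n_blocks):
        base = b * L
        branch1 = [(M * i, x[base + M * i]) for i in range(N)]
        branch2 = [(N * j, x[base + N * j]) for j in range(M)]
        for t1, v1 in branch1:
            for t2, v2 in branch2:
                k = abs(t1 - t2)
                sums[k] += v1 * v2
                counts[k] += 1
    return sums / np.maximum(counts, 1)

rng = np.random.default_rng(0)
x = np.zeros(200 * 15)
for n in range(1, x.size):            # AR(1): r[k] proportional to 0.8**k
    x[n] = 0.8 * x[n - 1] + rng.standard_normal()
r = coprime_autocorr(x, 3, 5, 200)
print(np.round(r[:6] / r[0], 2))      # should decay roughly as 0.8**k
```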
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n DOA estimation with co-prime arrays in the presence of mutual coupling.\n \n \n \n \n\n\n \n BouDaher, E.; Ahmad, F.; Amin, M.; and Hoorfar, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2830-2834, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"DOAPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362901,\n  author = {E. BouDaher and F. Ahmad and M. Amin and A. Hoorfar},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {DOA estimation with co-prime arrays in the presence of mutual coupling},\n  year = {2015},\n  pages = {2830-2834},\n  abstract = {In this paper, we present a method for performing direction-of-arrival (DOA) estimation using co-prime arrays in the presence of mutual coupling. The effects of mutual coupling are first examined for extended co-prime arrays configurations using the Receiving-Mutual-Impedance Method (RMIM). DOA estimation is then achieved by performing a joint estimation of the angles of arrival and the mutual coupling matrix, using the mixed-parameter covariance matrix adaptation evolution strategy. Simulation results demonstrating the effectiveness of the proposed method are provided.},\n  keywords = {covariance matrices;direction-of-arrival estimation;DOA estimation;direction-of-arrival estimation;extended coprime arrays;receiving mutual impedance method;mutual coupling matrix;mixed parameter covariance matrix adaptation evolution strategy;Mutual coupling;Direction-of-arrival estimation;Estimation;Covariance matrices;Correlation;Antenna arrays;Co-prime arrays;DOA estimation;mutual coupling},\n  doi = {10.1109/EUSIPCO.2015.7362901},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104891.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we present a method for performing direction-of-arrival (DOA) estimation using co-prime arrays in the presence of mutual coupling. The effects of mutual coupling are first examined for extended co-prime array configurations using the Receiving-Mutual-Impedance Method (RMIM). DOA estimation is then achieved by jointly estimating the angles of arrival and the mutual coupling matrix, using the mixed-parameter covariance matrix adaptation evolution strategy. Simulation results demonstrating the effectiveness of the proposed method are provided.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Wideband DOA estimation for uniform linear arrays based on the co-array concept.\n \n \n \n \n\n\n \n Shen, Q.; Liu, W.; Cui, W.; Wu, S.; Zhang, Y. D.; and Amin, M. G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2835-2839, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"WidebandPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362902,\n  author = {Q. Shen and W. Liu and W. Cui and S. Wu and Y. D. Zhang and M. G. Amin},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Wideband DOA estimation for uniform linear arrays based on the co-array concept},\n  year = {2015},\n  pages = {2835-2839},\n  abstract = {A novel design for wideband uniform linear arrays (ULAs) with the associated group-sparsity based direction-of-arrival (DOA) estimation method is proposed. This design allows the number of source signals to significantly exceed the number of sensors. Linear frequency modulated continuous wave (LFMCW) is used as the transmitted signal to ensure the required correlation property among different frequencies. The received echo signals from multiple targets are decomposed into different frequencies by discrete Fourier transform (DFT). Then these frequency bins are divided into several pairs to increase the degrees of freedom (DOFs) based on the co-array concept in the spatio-spectral domain. Group sparsity based signal reconstruction method is employed to jointly estimate the DOA results across multiple frequency pairs. Simulation results demonstrate a significantly improved performance achieved by the proposed method.},\n  keywords = {array signal processing;direction-of-arrival estimation;discrete Fourier transforms;frequency modulation;signal reconstruction;group sparsity-based signal reconstruction method;spatiospectral domain;degree-of-freedom;frequency bins;DFT;discrete Fourier transform;received echo signals;correlation property;LFMCW;linear frequency-modulated continuous wave;source signals;direction-of-arrival estimation;associated group-sparsity-based DOA estimation method;wideband ULA;wideband uniform linear arrays;co-array concept;wideband DOA estimation;Sensor arrays;Direction-of-arrival estimation;Estimation;Wideband;Frequency-domain analysis;Frequency estimation},\n  doi = {10.1109/EUSIPCO.2015.7362902},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105123.pdf},\n}\n\n
\n
\n\n\n
\n A novel design for wideband uniform linear arrays (ULAs), with an associated group-sparsity-based direction-of-arrival (DOA) estimation method, is proposed. This design allows the number of source signals to significantly exceed the number of sensors. A linear frequency modulated continuous wave (LFMCW) is used as the transmitted signal to ensure the required correlation property among different frequencies. The received echo signals from multiple targets are decomposed into different frequencies by the discrete Fourier transform (DFT). These frequency bins are then divided into several pairs to increase the degrees of freedom (DOFs), based on the co-array concept in the spatio-spectral domain. A group-sparsity-based signal reconstruction method is employed to jointly estimate the DOAs across multiple frequency pairs. Simulation results demonstrate the significantly improved performance achieved by the proposed method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Angular information resolution from co-prime arrays in radar.\n \n \n \n \n\n\n \n Pribic, R.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2840-2844, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AngularPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362903,\n  author = {R. Pribic},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Angular information resolution from co-prime arrays in radar},\n  year = {2015},\n  pages = {2840-2844},\n  abstract = {Angular resolution can be improved by using co-prime arrays instead of uniform linear arrays (ULA) with the same number of elements. We investigate how the possible co-prime angle resolution is related to the angle resolution from a full ULA of the size equal to the virtual size of co-prime arrays. We take into account not only the resulting beam width but also the fact that fewer measurements are acquired by co-prime arrays. This fact is especially relevant in compressive acquisition typical for compressive sensing. This angular resolution is called angular information resolution as it is computed from the intrinsic geometrical structure of data models that is characterized by the Fisher information. Based on this information-geometry approach, we compare angular information resolution from co-prime arrays and from the two ULAs. This novel resolution analysis is applied in a one-dimensional azimuth case. Numerical results demonstrate the suitability in radar-resolution analysis.},\n  keywords = {compressed sensing;radar applications;angular information resolution;co-prime arrays;co-prime angle resolution;compressive acquisition;compressive sensing;intrinsic geometrical structure;Fisher information;information geometry approach;one-dimensional azimuth case;radar-resolution analysis;Radar;Signal resolution;Sensors;Array signal processing;Radar antennas;Antenna arrays;Arrays;resolution;information geometry;co-prime arrays;compressive sensing;radar},\n  doi = {10.1109/EUSIPCO.2015.7362903},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570096213.pdf},\n}\n\n
\n
\n\n\n
\n Angular resolution can be improved by using co-prime arrays instead of uniform linear arrays (ULAs) with the same number of elements. We investigate how the attainable co-prime angle resolution relates to the angle resolution from a full ULA whose size equals the virtual size of the co-prime arrays. We take into account not only the resulting beam width but also the fact that fewer measurements are acquired by co-prime arrays. This fact is especially relevant in the compressive acquisition typical of compressive sensing. This angular resolution is called angular information resolution, as it is computed from the intrinsic geometrical structure of data models characterized by the Fisher information. Based on this information-geometry approach, we compare the angular information resolution from co-prime arrays and from the two ULAs. This novel resolution analysis is applied in a one-dimensional azimuth case. Numerical results demonstrate its suitability for radar-resolution analysis.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Signal processing challenges from audio-video coding to telecommunication: A living piece of history.\n \n \n \n \n\n\n \n Parladori, G.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2845-2848, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SignalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362904,\n  author = {G. Parladori},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Signal processing challenges from audio-video coding to telecommunication: A living piece of history},\n  year = {2015},\n  pages = {2845-2848},\n  abstract = {When I was invited to contribute to the celebrations in honour of Prof. Giovanni Sicuranza, I accepted with en thusiasm. Giovanni is a prominent figure in Italian DSP (Digital Signal Processing) research history and I am fortunate enough to have been acquainted with him since the beginning of my career. When I joined Telettra and began working in Audio-Video Coding research, Giovan ni had already started his long-lasting and fruitful cooperation with the Telettra DSP Laboratory. This highly constructive relationship continued after Telettra was acquired by Alcatel, and I enjoyed the invaluable experience to share with Giovanni an exciting research season. Not only. In 2000, the DSP Lab started working in the area of Telecommunication and also in this research field the studies conducted with Giovanni deeply influenced the activity developed by Alcatel, first, and by Alcatel-Lucent later on, and prompted their Italian branch to produce outstanding results.},\n  keywords = {audio coding;video coding;signal processing;audio-video coding;Italian DSP research history;digital signal processing research history;Telettra DSP laboratory;Alcatel Lucent;Digital signal processing;Signal processing algorithms;Discrete cosine transforms;Video coding;Encoding;Communications technology;Signal Processing;Telecommunication;History;DSP;ASIC},\n  doi = {10.1109/EUSIPCO.2015.7362904},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570101989.pdf},\n}\n\n
\n
\n\n\n
\n When I was invited to contribute to the celebrations in honour of Prof. Giovanni Sicuranza, I accepted with enthusiasm. Giovanni is a prominent figure in Italian DSP (Digital Signal Processing) research history and I am fortunate enough to have been acquainted with him since the beginning of my career. When I joined Telettra and began working in Audio-Video Coding research, Giovanni had already started his long-lasting and fruitful cooperation with the Telettra DSP Laboratory. This highly constructive relationship continued after Telettra was acquired by Alcatel, and I enjoyed the invaluable experience of sharing an exciting research season with Giovanni. And not only that: in 2000, the DSP Lab started working in the area of Telecommunication, and in this research field too the studies conducted with Giovanni deeply influenced the activity developed first by Alcatel and later by Alcatel-Lucent, and prompted their Italian branch to produce outstanding results.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Single image super-resolution via BM3D sparse coding.\n \n \n \n \n\n\n \n Egiazarian, K.; and Katkovnik, V.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2849-2853, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"SinglePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362905,\n  author = {K. Egiazarian and V. Katkovnik},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Single image super-resolution via BM3D sparse coding},\n  year = {2015},\n  pages = {2849-2853},\n  abstract = {In this paper, a novel single image super-resolution (SISR) algorithm is proposed. It is based on the BM3D (Block-Matching and 3D filtering) paradigm, where both sparsity and nonlocal patch self-similarity priors are utilized. The algorithm is derived from a variational formulation of the problem and has a structure typical for iterative back-projection super-resolution methods. They are characterized by updating high-resolution image which is calculated using the previous estimate and upsampled low-resolution error. The developed method is thoroughly compared with the state-of-the-art SISR both for noiseless and noisy data, demonstrating superior performance objectively and subjectively.},\n  keywords = {compressed sensing;image coding;image filtering;image resolution;iterative methods;variational techniques;block-matching and 3D filtering;BM3D sparse coding;single image super resolution algorithm;SISR algorithm;variational formulation;iterative back-projection super-resolution methods;Signal processing algorithms;Transforms;Image resolution;Signal resolution;Dictionaries;Europe;Single image super-resolution;sparse non-local imaging;image upsampling;image resizing},\n  doi = {10.1109/EUSIPCO.2015.7362905},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570102479.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, a novel single image super-resolution (SISR) algorithm is proposed. It is based on the BM3D (Block-Matching and 3D filtering) paradigm, where both sparsity and nonlocal patch self-similarity priors are utilized. The algorithm is derived from a variational formulation of the problem and has a structure typical of iterative back-projection super-resolution methods, which update the high-resolution image using the previous estimate and the upsampled low-resolution error. The developed method is thoroughly compared with the state of the art in SISR for both noiseless and noisy data, demonstrating superior performance objectively and subjectively.\n
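The back-projection structure mentioned in the abstract can be shown in a few lines: degrade the current high-resolution estimate, compare it with the observed low-resolution image, and push the upsampled error back, with a denoising step acting as the prior. The sketch below uses a box blur as both the degradation filter and a crude stand-in for the BM3D prior; it illustrates only the shape of the iteration, not the paper's algorithm.

```python
import numpy as np

def box_blur(img, k=2):
    """Separable box blur: anti-alias filter and stand-in denoiser."""
    kernel = np.ones(2 * k + 1) / (2 * k + 1)
    out = np.apply_along_axis(lambda r: np.convolve(r, kernel, 'same'), 1, img)
    return np.apply_along_axis(lambda c: np.convolve(c, kernel, 'same'), 0, out)

def degrade(hr, s=2):
    return box_blur(hr)[::s, ::s]          # blur, then decimate

def upsample(lr, s=2):
    return np.kron(lr, np.ones((s, s)))    # nearest-neighbour upsampling

def ibp_super_resolve(lr, s=2, n_iter=20, step=1.0):
    """Iterative back-projection with a mild denoising step each pass."""
    hr = upsample(lr, s)
    for _ in range(n_iter):
        err = lr - degrade(hr, s)          # low-resolution residual
        hr = hr + step * upsample(err, s)  # back-project the error
        hr = 0.7 * hr + 0.3 * box_blur(hr) # denoising as regularization
    return hr

rng = np.random.default_rng(0)
truth = np.zeros((32, 32)); truth[8:24, 8:24] = 1.0
lr = degrade(truth) + 0.02 * rng.standard_normal((16, 16))
print("plain upsampling error:", np.round(np.linalg.norm(upsample(lr) - truth) / 32, 3))
print("after back-projection :", np.round(np.linalg.norm(ibp_super_resolve(lr) - truth) / 32, 3))
```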
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hyperspectral imaging for food applications.\n \n \n \n \n\n\n \n Marshall, S.; Kelman, T.; Qiao, T.; Murray, P.; and Zabalza, J.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2854-2858, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"HyperspectralPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362906,\n  author = {S. Marshall and T. Kelman and T. Qiao and P. Murray and J. Zabalza},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Hyperspectral imaging for food applications},\n  year = {2015},\n  pages = {2854-2858},\n  abstract = {Food quality analysis is a key area where reliable, nondestructive and accurate measures are required. Hyperspectral imaging is a technology which meets all of these requirements but only if appropriate signal processing techniques are implemented. In this paper, a discussion of some of these state-of-the-art processing techniques is followed by an explanation of four different applications of hyperspectral imaging for food quality analysis: shelf life estimation of baked sponges; beef quality prediction; classification of Chinese tea leaves; and classification of rice grains. The first two of these topics investigate the use of hyperspectral imaging to produce an objective measure about the quality of the food sample. The final two studies are classification problems, where an unknown sample is assigned to one of a previously defined set of classes.},\n  keywords = {food safety;hyperspectral imaging;image classification;hyperspectral imaging;food applications;food quality analysis;signal processing techniques;shelf life estimation;baked sponges;beef quality prediction;Chinese tea leave classification;rice grain classification;Principal component analysis;Covariance matrices;Feature extraction;Signal processing;Aging;Support vector machines;Europe;Signal Processing;Image Processing;Classifiers;Spectral Imaging},\n  doi = {10.1109/EUSIPCO.2015.7362906},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103039.pdf},\n}\n\n
\n
\n\n\n
\n Food quality analysis is a key area where reliable, nondestructive and accurate measures are required. Hyperspectral imaging is a technology which meets all of these requirements but only if appropriate signal processing techniques are implemented. In this paper, a discussion of some of these state-of-the-art processing techniques is followed by an explanation of four different applications of hyperspectral imaging for food quality analysis: shelf life estimation of baked sponges; beef quality prediction; classification of Chinese tea leaves; and classification of rice grains. The first two of these topics investigate the use of hyperspectral imaging to produce an objective measure about the quality of the food sample. The final two studies are classification problems, where an unknown sample is assigned to one of a previously defined set of classes.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An overview of robust compressive sensing of sparse signals in impulsive noise.\n \n \n \n \n\n\n \n Ramirez, A. B.; Carrillo, R. E.; Arce, G.; Barner, K. E.; and Sadler, B.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2859-2863, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362907,\n  author = {A. B. Ramirez and R. E. Carrillo and G. Arce and K. E. Barner and B. Sadler},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {An overview of robust compressive sensing of sparse signals in impulsive noise},\n  year = {2015},\n  pages = {2859-2863},\n  abstract = {While compressive sensing (CS) has traditionally relied on l2 as an error norm, a broad spectrum of applications has emerged where robust estimators are required. Among those, applications where the sampling process is performed in the presence of impulsive noise, or where the sampling of the high-dimensional sparse signals requires the preservation of a distance different than l2. This article overviews robust sampling and nonlinear reconstruction strategies for sparse signals based on the Cauchy distribution and the Lorentzian norm for the data fidelity. The derived methods outperform existing compressed sensing techniques in impulsive environments, thus offering a robust framework for CS.},\n  keywords = {compressed sensing;estimation theory;impulse noise;signal reconstruction;signal sampling;sparse signal compressive sensing;impulsive noise;CS;robust estimators;high-dimensional sparse signal sampling process;nonlinear reconstruction strategy;Cauchy distribution;Lorentzian norm;data fidelity;Robustness;Signal processing algorithms;Noise measurement;Signal processing;Compressed sensing;Europe;Compressed sensing;sampling methods;robust signal reconstruction;nonlinear estimation;impulsive noise},\n  doi = {10.1109/EUSIPCO.2015.7362907},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570103607.pdf},\n}\n\n
\n
\n\n\n
\n While compressive sensing (CS) has traditionally relied on l2 as an error norm, a broad spectrum of applications has emerged where robust estimators are required. Among these are applications where the sampling process is performed in the presence of impulsive noise, or where the sampling of high-dimensional sparse signals requires the preservation of a distance other than l2. This article overviews robust sampling and nonlinear reconstruction strategies for sparse signals based on the Cauchy distribution and the Lorentzian norm for the data fidelity. The derived methods outperform existing compressed sensing techniques in impulsive environments, thus offering a robust framework for CS.\n
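The Lorentzian norm replaces the quadratic data fit with log(1 + r²/γ²), which grows so slowly for large residuals that gross impulses are effectively ignored. The following proximal-gradient sketch combines that data term with an ℓ1 prior on a synthetic sparse-recovery problem; γ, the step size, and the regularization weight are untuned illustrative choices rather than the algorithms surveyed in the paper.

```python
import numpy as np

def lorentzian_ista(y, A, gamma=0.5, lam=0.05, step=0.01, n_iter=500):
    """Proximal-gradient sketch for
        min_x  sum_i log(1 + (y - A x)_i^2 / gamma^2) + lam * ||x||_1,
    a saturating (Cauchy-derived) data fit plus an l1 sparsity prior."""
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        r = y - A @ x
        grad = -A.T @ (2 * r / (gamma**2 + r**2))  # Lorentzian gradient
        x = x - step * grad
        x = np.sign(x) * np.maximum(np.abs(x) - step * lam, 0)  # l1 prox
    return x

rng = np.random.default_rng(3)
n, m, k = 200, 80, 8
A = rng.standard_normal((m, n)) / np.sqrt(m)
x0 = np.zeros(n); x0[rng.choice(n, k, replace=False)] = 1.0
y = A @ x0
y[rng.choice(m, 5, replace=False)] += 20 * rng.standard_normal(5)  # impulses
xh = lorentzian_ista(y, A)
# error should stay modest despite the gross outliers in y
print("relative error:", np.round(np.linalg.norm(xh - x0) / np.linalg.norm(x0), 2))
```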
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Virtual unrolling using X-ray computed tomography.\n \n \n \n \n\n\n \n Allegra, D.; Ciliberto, E.; Ciliberto, P.; Milotta, F. L. M.; Petrillo, G.; Stanco, F.; and Trombato, C.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2864-2868, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"VirtualPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362908,\n  author = {D. Allegra and E. Ciliberto and P. Ciliberto and F. L. M. Milotta and G. Petrillo and F. Stanco and C. Trombato},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Virtual unrolling using X-ray computed tomography},\n  year = {2015},\n  pages = {2864-2868},\n  abstract = {In recent years the virtual restoration of ancient papyri has become an important research challenge. This is because the papyrus degradation is often very serious, so physical analysis could damage the artifact. In this paper we address the problem of virtual unrolling to read papyrus scroll by avoiding a dangerous physical unrolling. To this aim we propose a virtual restoration method based on software manipulation of X-ray tomographic images. To test the proposed approach, a realistic papyrus model has been made using the ancient method and pigments compatible with the Egyptian use. The stack of 259 slices, obtained through X-Ray Tomography device, has been processed in order to obtain a digital unrolled papyrus that is quite similar to the hypothetical unrolled sheet.},\n  keywords = {computerised tomography;history;image restoration;text detection;X-ray microscopy;X-ray computed tomography;virtual unrolling;ancient papyrus;papyrus degradation;physical analysis;virtual restoration method;software manipulation;X-ray tomographic images;digital unrolled papyrus;Computed tomography;X-ray imaging;Skeleton;Spirals;Signal processing algorithms;Image resolution;Europe;Virtual unrolling;X-ray CT;Papyrus;Scroll},\n  doi = {10.1109/EUSIPCO.2015.7362908},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104373.pdf},\n}\n\n
\n
\n\n\n
\n In recent years the virtual restoration of ancient papyri has become an important research challenge, because papyrus degradation is often so serious that physical analysis could damage the artifact. In this paper we address the problem of virtually unrolling a papyrus scroll so that it can be read without a dangerous physical unrolling. To this aim we propose a virtual restoration method based on software manipulation of X-ray tomographic images. To test the proposed approach, a realistic papyrus model was made using the ancient method and pigments compatible with Egyptian use. The stack of 259 slices, obtained with an X-ray tomography device, was processed to obtain a digitally unrolled papyrus that is quite similar to the hypothetical unrolled sheet.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A nonlinear architecture involving a combination of proportionate functional link adaptive filters.\n \n \n \n \n\n\n \n Comminiello, D.; Scarpiniti, M.; Azpicueta-Ruiz, L. A.; Arenas-García, J.; and Uncini, A.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2869-2873, Aug 2015. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362909,\n  author = {D. Comminiello and M. Scarpiniti and L. A. Azpicueta-Ruiz and J. Arenas-García and A. Uncini},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {A nonlinear architecture involving a combination of proportionate functional link adaptive filters},\n  year = {2015},\n  pages = {2869-2873},\n  abstract = {In this paper, we consider a functional link-based architecture that separates the linear and nonlinear filterings and exploits any sparse representation of functional links. We focus our attention on the nonlinear path in order to improve the modeling performance of the overall architecture. To this end, we propose a new scheme that involves the adaptive combination of filters downstream of the nonlinear expansion. This combination enhances the sparse representation of functional links according to how much distorted the input signal is, thus improving the nonlinear modeling performance in case of time-varying nonlinear systems. Experimental results show the performance improvement produced by the proposed model.},\n  keywords = {adaptive filters;nonlinear filters;time-varying systems;nonlinear architecture;functional link adaptive filters;functional link-based architecture;nonlinear filterings;time-varying nonlinear systems;Adaptation models;Nonlinear systems;Signal processing algorithms;Europe;Adaptive filters;Indexes;Nonlinear Adaptive Filtering;Functional Links;Linear-in-the-Parameters Nonlinear Filters;Sparse Representations;Adaptive Combination of Filters},\n  doi = {10.1109/EUSIPCO.2015.7362909},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570104661.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we consider a functional link-based architecture that separates the linear and nonlinear filtering paths and exploits any sparse representation of the functional links. We focus our attention on the nonlinear path in order to improve the modeling performance of the overall architecture. To this end, we propose a new scheme that involves the adaptive combination of filters downstream of the nonlinear expansion. This combination enhances the sparse representation of the functional links according to how distorted the input signal is, thus improving the nonlinear modeling performance in the case of time-varying nonlinear systems. Experimental results show the performance improvement produced by the proposed model.\n
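A minimal version of the combination idea: run a linear LMS path and a functional-link (trigonometric expansion) LMS path in parallel, and learn a convex mixing weight from the combined error. The sketch below follows the classic adaptive-combination update; it omits memory, the proportionate updates, and the sparsity mechanisms of the paper's architecture, and all step sizes are illustrative.

```python
import numpy as np

def flaf_combination(x, d, mu=0.05, mu_a=1.0, order=2):
    """Linear LMS + functional-link LMS, mixed by a learned sigmoid weight."""
    def links(u):  # trigonometric functional-link expansion of a sample
        feats = [u]
        for p in range(1, order + 1):
            feats += [np.sin(np.pi * p * u), np.cos(np.pi * p * u)]
        return np.array(feats)

    w_lin, w_fl, a = 0.0, np.zeros(1 + 2 * order), 0.0
    y_out = np.zeros(len(x))
    for n in range(len(x)):
        u = x[n]
        z = links(u)
        y1, y2 = w_lin * u, w_fl @ z          # the two path outputs
        lam = 1.0 / (1.0 + np.exp(-a))        # convex mixing weight
        y = lam * y1 + (1 - lam) * y2
        e = d[n] - y
        w_lin += mu * (d[n] - y1) * u         # each path adapts on its own error
        w_fl += mu * (d[n] - y2) * z
        a += mu_a * e * (y1 - y2) * lam * (1 - lam)  # mixing update
        a = np.clip(a, -4.0, 4.0)             # keep the sigmoid responsive
        y_out[n] = y
    return y_out

rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, 5000)
d = np.tanh(2 * x)                            # memoryless nonlinear target
e = d - flaf_combination(x, d)
print("MSE first/last 500:", np.round(np.mean(e[:500]**2), 4),
      np.round(np.mean(e[-500:]**2), 4))      # error should drop markedly
```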
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Robust regression in RKHS — An overview.\n \n \n \n\n\n \n Papageorgiou, G.; Bouboulis, P.; and Theodoridis, S.\n\n\n \n\n\n\n In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 2874-2878, Aug 2015. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{7362910,\n  author = {G. Papageorgiou and P. Bouboulis and S. Theodoridis},\n  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n  title = {Robust regression in RKHS — An overview},\n  year = {2015},\n  pages = {2874-2878},\n  abstract = {The paper deals with the task of robust nonlinear regression in the presence of outliers. The problem is dealt in the context of reproducing kernel Hilbert spaces (RKHS). In contrast to more classical approaches, a recent trend is to model the outliers as a sparse vector noise component and mobilize tools from the sparsity-aware/compressed sensing theory to impose sparsity on it. In this paper, three of the most popular approaches are considered and compared. These represent three major directions in sparsity-aware learning context; that is, a) a greedy approach b) a convex relaxation of the sparsity-promoting task via the l\\ norm-based regularization of the least-squares cost and c) a Bayesian approach making use of appropriate priors, associated with the involved parameters.},\n  keywords = {belief networks;compressed sensing;Hilbert spaces;regression analysis;robust nonlinear regression;RKHS;reproducing kernel Hilbert spaces;sparse vector noise component;sparsity-aware-compressed sensing theory;greedy approach;convex relaxation;sparsity-promoting task;least-squares cost;Bayesian approach;Robustness;Kernel;Estimation;Europe;Signal processing;Bayes methods;Training;Robust regression in RKHS;learning with kernels;kernel greedy algorithm for robust denoising — (KGARD);robust non-linear regression},\n  doi = {10.1109/EUSIPCO.2015.7362910},\n  issn = {2076-1465},\n  month = {Aug},\n}\n
\n
\n\n\n
\n The paper deals with the task of robust nonlinear regression in the presence of outliers. The problem is dealt with in the context of reproducing kernel Hilbert spaces (RKHS). In contrast to more classical approaches, a recent trend is to model the outliers as a sparse vector noise component and mobilize tools from the sparsity-aware/compressed sensing theory to impose sparsity on it. In this paper, three of the most popular approaches are considered and compared. These represent three major directions in the sparsity-aware learning context, namely: a) a greedy approach; b) a convex relaxation of the sparsity-promoting task via ℓ1 norm-based regularization of the least-squares cost; and c) a Bayesian approach making use of appropriate priors associated with the involved parameters.\n
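Direction (b), the convex relaxation, has a particularly compact form: model the observations as y = Kc + u + noise with u sparse, and alternate a kernel ridge solve for c with a soft threshold on the residual to re-estimate u. The sketch below implements that alternation for an RBF kernel on synthetic data; the kernel width, thresholds, and the alternating scheme itself are illustrative assumptions in the spirit of this direction, not the specific algorithms compared in the paper.

```python
import numpy as np

def robust_kernel_regression(X, y, lam=1.0, thresh=1.0, sigma=0.5, n_iter=10):
    """Robust RKHS regression with an explicit sparse outlier vector:
        y = K c + u + noise,  u sparse.
    Alternates a kernel ridge solve for c (outlier estimate removed)
    with a soft threshold on the residual to update u."""
    def rbf(A, B):
        d2 = ((A[:, None, :] - B[None, :, :])**2).sum(-1)
        return np.exp(-d2 / (2 * sigma**2))

    K = rbf(X, X)
    u = np.zeros(len(y))
    for _ in range(n_iter):
        c = np.linalg.solve(K + lam * np.eye(len(y)), y - u)  # ridge step
        r = y - K @ c
        u = np.sign(r) * np.maximum(np.abs(r) - thresh, 0.0)  # l1 prox on u
    return c, u

rng = np.random.default_rng(0)
X = np.linspace(0, 4, 60)[:, None]
y = np.sin(2 * X[:, 0]) + 0.05 * rng.standard_normal(60)
y[[10, 30, 50]] += np.array([4.0, -5.0, 6.0])       # three gross outliers
c, u = robust_kernel_regression(X, y)
print("largest |u| at:", np.sort(np.argsort(np.abs(u))[-3:]))  # expect 10 30 50
```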
\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);