2020 (499)
One or Two Frequencies? The Scattering Transform Answers.
Lostanlen, V.; Cohen-Hadria, A.; and Pablo Bello, J.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2205-2209, Aug 2020.

@InProceedings{9287216,
  author = {V. Lostanlen and A. Cohen-Hadria and J. {Pablo Bello}},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {One or Two Frequencies? The Scattering Transform Answers},
  year = {2020},
  pages = {2205-2209},
  abstract = {With the aim of constructing a biologically plausible model of machine listening, we study the representation of a multicomponent stationary signal by a wavelet scattering network. First, we show that renormalizing second-order nodes by their first-order parents gives a simple numerical criterion to assess whether two neighboring components will interfere psychoacoustically. Secondly, we run a manifold learning algorithm (Isomap) on scattering coefficients to visualize the similarity space underlying parametric additive synthesis. Thirdly, we generalize the “one or two components” framework to three sine waves or more, and prove that the effective scattering depth of a Fourier series grows in logarithmic proportion to its bandwidth.},
  keywords = {Wavelet transforms;Visualization;Scattering;Signal processing algorithms;Bandwidth;Signal processing;Fourier series;Audio systems;Amplitude modulation;Continuous wavelet transform;Fourier series;Multi-layer neural network},
  doi = {10.23919/Eusipco47968.2020.9287216},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002205.pdf},
}
With the aim of constructing a biologically plausible model of machine listening, we study the representation of a multicomponent stationary signal by a wavelet scattering network. First, we show that renormalizing second-order nodes by their first-order parents gives a simple numerical criterion to assess whether two neighboring components will interfere psychoacoustically. Secondly, we run a manifold learning algorithm (Isomap) on scattering coefficients to visualize the similarity space underlying parametric additive synthesis. Thirdly, we generalize the “one or two components” framework to three sine waves or more, and prove that the effective scattering depth of a Fourier series grows in logarithmic proportion to its bandwidth.
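A toy illustration of the phenomenon behind the title question: two nearby sinusoids beat, so their amplitude envelope oscillates at the difference frequency, which is what second-order scattering coefficients respond to. The sketch below uses a plain Hilbert envelope rather than a scattering network, and all signal parameters are made up for illustration.

```python
import numpy as np
from scipy.signal import hilbert

fs = 8000
t = np.arange(0, 2.0, 1 / fs)
x = np.sin(2 * np.pi * 440 * t) + np.sin(2 * np.pi * 452 * t)   # two close components

env = np.abs(hilbert(x))                          # first-order-like amplitude envelope
spec = np.abs(np.fft.rfft(env - env.mean()))
freqs = np.fft.rfftfreq(env.size, 1 / fs)
print(freqs[np.argmax(spec)])                     # ~12 Hz = |452 - 440|
```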
An optimisation geometry framework for the Rayleigh quotient.
Lefevre, J.; Manton, J. H.; and Le Bihan, N.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 955-959, Aug 2020.

@InProceedings{9287217,
  author = {J. Lefevre and J. H. Manton and N. {Le Bihan}},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {An optimisation geometry framework for the Rayleigh quotient},
  year = {2020},
  pages = {955-959},
  abstract = {This paper briefly introduces optimisation geometry, a method based on family of functions that proposes to solve complex optimisation problems with continuation methods and pre-computed points. As an illustration, the problem of tracking eigenvectors is presented, based on the Rayleigh quotient, and conditions under which the proposed approach is operational are detailed.},
  keywords = {Geometry;Symmetric matrices;Optimization methods;Europe;Signal processing;Probabilistic logic;Eigenvalues and eigenfunctions;global optimisation;optimisation geometry;homotopy method;Rayleigh quotient},
  doi = {10.23919/Eusipco47968.2020.9287217},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000955.pdf},
}
This paper briefly introduces optimisation geometry, a method based on family of functions that proposes to solve complex optimisation problems with continuation methods and pre-computed points. As an illustration, the problem of tracking eigenvectors is presented, based on the Rayleigh quotient, and conditions under which the proposed approach is operational are detailed.
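For readers unfamiliar with the underlying object, here is a minimal NumPy sketch of the classical Rayleigh quotient iteration for a symmetric matrix; it is not the optimisation-geometry method proposed in the paper, just the standard eigenpair-refinement scheme the title refers to.

```python
import numpy as np

def rayleigh_quotient_iteration(A, v0, iters=20):
    v = v0 / np.linalg.norm(v0)
    mu = v @ A @ v
    for _ in range(iters):
        mu = v @ A @ v                             # Rayleigh quotient of current iterate
        try:
            w = np.linalg.solve(A - mu * np.eye(len(A)), v)
        except np.linalg.LinAlgError:
            break                                  # (A - mu I) singular: eigenpair reached
        v = w / np.linalg.norm(w)
    return mu, v

rng = np.random.default_rng(0)
M = rng.standard_normal((5, 5))
A = (M + M.T) / 2                                  # symmetric test matrix
mu, v = rayleigh_quotient_iteration(A, rng.standard_normal(5))
print(mu, np.linalg.norm(A @ v - mu * v))          # residual close to zero
```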
Analysis of Race and Gender Bias in Deep Age Estimation Models.
Puc, A.; Štruc, V.; and Grm, K.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 830-834, Aug 2020.

@InProceedings{9287219,
  author = {A. Puc and V. {Štruc} and K. Grm},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Analysis of Race and Gender Bias in Deep Age Estimation Models},
  year = {2020},
  pages = {830-834},
  abstract = {Due to advances in deep learning and convolutional neural networks (CNNs) there has been significant progress in the field of visual age estimation from face images over recent years. While today’s models are able to achieve considerable age estimation accuracy, their behaviour, especially with respect to specific demographic groups is still not well understood. In this paper, we take a deeper look at CNN-based age estimation models and analyze their performance across different race and gender groups. We use two publicly available off-the-shelf age estimation models, i.e., FaceNet and WideResNet, for our study and analyze their performance on the UTKFace and APPA-REAL datasets. We partition face images into sub-groups based on race, gender and combinations of race and gender. We then compare age estimation results and find that there are noticeable differences in performance across demographics. Specifically, our results show that age estimation accuracy is consistently higher for men than for women, while race does not appear to have consistent effects on the tested models across different test datasets.},
  keywords = {Training;Analytical models;Visualization;Estimation;Signal processing;Faces;Testing},
  doi = {10.23919/Eusipco47968.2020.9287219},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000830.pdf},
}
Due to advances in deep learning and convolutional neural networks (CNNs) there has been significant progress in the field of visual age estimation from face images over recent years. While today’s models are able to achieve considerable age estimation accuracy, their behaviour, especially with respect to specific demographic groups is still not well understood. In this paper, we take a deeper look at CNN-based age estimation models and analyze their performance across different race and gender groups. We use two publicly available off-the-shelf age estimation models, i.e., FaceNet and WideResNet, for our study and analyze their performance on the UTKFace and APPA-REAL datasets. We partition face images into sub-groups based on race, gender and combinations of race and gender. We then compare age estimation results and find that there are noticeable differences in performance across demographics. Specifically, our results show that age estimation accuracy is consistently higher for men than for women, while race does not appear to have consistent effects on the tested models across different test datasets.
UAV Mapping for Multiple Primary Users Localization.
Li, Z.; Giorgetti, A.; and Kandeepan, S.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1787-1791, Aug 2020.

@InProceedings{9287220,
  author = {Z. Li and A. Giorgetti and S. Kandeepan},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {UAV Mapping for Multiple Primary Users Localization},
  year = {2020},
  pages = {1787-1791},
  abstract = {The unique features of unmanned aerial vehicles (UAVs) extend a large number of existing technologies into environments that are not suitable for on-site operations. Localization, a critical basis of many applications such as cognitive radio and first response networks, can benefit UAV technology as well. In such scenarios, an underinvestigated problem is the non-collaborative localization of multiple primary users (PUs). Therefore, this work proposes a data-driven multiple PU localization algorithm based on the angular and power measurements performed by a UAV equipped with an antenna array. The measured data firstly generate a score map, then a threshold and a hierarchical clustering method are applied to the score map to both detect the number of PUs and estimate their location. The performance of the algorithm is assessed by numerical results in terms of probability of detecting the number of PUs, and root-mean-square-error of position estimation. The proposed solution exhibit remarkable performance considering that the approach requires only the knowledge of the PUs frequency band.},
  keywords = {Antenna measurements;Uncertainty;Signal processing algorithms;Clustering algorithms;Signal processing;Unmanned aerial vehicles;Antenna arrays},
  doi = {10.23919/Eusipco47968.2020.9287220},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001787.pdf},
}
The unique features of unmanned aerial vehicles (UAVs) extend a large number of existing technologies into environments that are not suitable for on-site operations. Localization, a critical basis of many applications such as cognitive radio and first response networks, can benefit UAV technology as well. In such scenarios, an underinvestigated problem is the non-collaborative localization of multiple primary users (PUs). Therefore, this work proposes a data-driven multiple PU localization algorithm based on the angular and power measurements performed by a UAV equipped with an antenna array. The measured data firstly generate a score map, then a threshold and a hierarchical clustering method are applied to the score map to both detect the number of PUs and estimate their location. The performance of the algorithm is assessed by numerical results in terms of probability of detecting the number of PUs, and root-mean-square-error of position estimation. The proposed solution exhibit remarkable performance considering that the approach requires only the knowledge of the PUs frequency band.
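A minimal sketch of the counting-and-localisation step described in the abstract, on a synthetic score map: threshold the map, hierarchically cluster the surviving cells, and read off the number of clusters and their centroids. The map, threshold, and linkage distance are illustrative assumptions, not the authors' settings.

```python
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(0)
xx, yy = np.meshgrid(np.linspace(0, 1, 100), np.linspace(0, 1, 100))
score = (np.exp(-((xx - 0.3) ** 2 + (yy - 0.7) ** 2) / 0.005)        # two synthetic PUs
         + np.exp(-((xx - 0.8) ** 2 + (yy - 0.2) ** 2) / 0.005)
         + 0.05 * rng.random(xx.shape))                               # measurement noise

mask = score > 0.5                                                    # threshold the score map
pts = np.column_stack([xx[mask], yy[mask]])
labels = fcluster(linkage(pts, method="single"), t=0.1, criterion="distance")
print(len(np.unique(labels)))                                         # estimated number of PUs -> 2
print(np.round([pts[labels == k].mean(axis=0) for k in np.unique(labels)], 2))
```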
Sensor placement in arbitrarily restricted region for field estimation based on Gaussian process.
Nishida, T.; Ueno, N.; Koyama, S.; and Saruwatari, H.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2289-2293, Aug 2020.

@InProceedings{9287222,
  author = {T. Nishida and N. Ueno and S. Koyama and H. Saruwatari},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Sensor placement in arbitrarily restricted region for field estimation based on Gaussian process},
  year = {2020},
  pages = {2289-2293},
  abstract = {A sensor placement method that enables us to arbitrarily set a region of candidate positions independently of a region of estimation is proposed. Field estimation aims to estimate and interpolate the physical quantities of fields, e.g., temperature and sound pressure, in an entire region of interest, where Gaussian processes are typically used for modeling. Although a number of sensor placement methods are proposed in the literature, in most of the methods, an optimization criterion is evaluated only at the candidate positions of the sensors. However, a region in which sensors are placed is sometimes restricted in practical applications of field estimation. To overcome this issue, we formulate a cost function on the basis of the expected squared error inside the target region for field estimation, which is derived by Gaussian process regression. We also propose two algorithms, the greedy algorithm and convex relaxation method, to efficiently solve this optimization problem. Numerical simulation results indicated that our proposed method achieves accurate field estimation even when the placement region of sensor candidates is restricted.},
  keywords = {Greedy algorithms;Sensor placement;Estimation;Signal processing algorithms;Gaussian processes;Numerical simulation;Relaxation methods;sensor placement;Gaussian process;field estimation;greedy algorithm;convex optimization},
  doi = {10.23919/Eusipco47968.2020.9287222},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002289.pdf},
}
A sensor placement method that enables us to arbitrarily set a region of candidate positions independently of a region of estimation is proposed. Field estimation aims to estimate and interpolate the physical quantities of fields, e.g., temperature and sound pressure, in an entire region of interest, where Gaussian processes are typically used for modeling. Although a number of sensor placement methods are proposed in the literature, in most of the methods, an optimization criterion is evaluated only at the candidate positions of the sensors. However, a region in which sensors are placed is sometimes restricted in practical applications of field estimation. To overcome this issue, we formulate a cost function on the basis of the expected squared error inside the target region for field estimation, which is derived by Gaussian process regression. We also propose two algorithms, the greedy algorithm and convex relaxation method, to efficiently solve this optimization problem. Numerical simulation results indicated that our proposed method achieves accurate field estimation even when the placement region of sensor candidates is restricted.
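A small sketch of greedy sensor placement under a Gaussian-process prior, in the spirit of the abstract: each step adds the candidate position (drawn from a restricted region) that most reduces the average posterior variance over the estimation region. The RBF kernel, regions, and noise level are toy assumptions rather than the paper's exact cost function.

```python
import numpy as np

def rbf(X, Y, ell=0.3):
    d = np.abs(X[:, None] - Y[None, :])
    return np.exp(-0.5 * (d / ell) ** 2)

target = np.linspace(0.0, 1.0, 200)          # region where the field must be estimated
candidates = np.linspace(0.6, 1.0, 40)       # restricted region where sensors may go
noise = 1e-3

def avg_posterior_var(selected):
    S = np.array(selected)
    K_ss = rbf(S, S) + noise * np.eye(len(S))
    K_ts = rbf(target, S)
    cov = rbf(target, target) - K_ts @ np.linalg.solve(K_ss, K_ts.T)
    return np.mean(np.diag(cov))

chosen = []
for _ in range(5):                           # place 5 sensors greedily
    best = min(candidates, key=lambda c: avg_posterior_var(chosen + [c]))
    chosen.append(best)
print(np.round(sorted(chosen), 3))
```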
Fast Block Size Decision for HEVC Encoders with On-the-Fly Trained Classifiers.
Correa, G.; Dall’Oglio, P.; Palomino, D.; and Agostini, L.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 540-544, Aug 2020.

@InProceedings{9287223,
  author = {G. Correa and P. Dall’Oglio and D. Palomino and L. Agostini},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Fast Block Size Decision for HEVC Encoders with On-the-Fly Trained Classifiers},
  year = {2020},
  pages = {540-544},
  abstract = {High Efficiency Video Coding (HEVC) introduced flexible block partitioning structures that increased significantly compression rates in comparison to previous standards. However, such features resulted in a non-negligible increase in computational cost as well. To accelerate this complex partitioning process, this paper proposes a method that halts the usual rate-distortion optimization employed in Coding Unit size decision by a set of decision tree classifiers, which are trained on the fly according to the current video sequence characteristics. The classifiers are built during the encoding process by the C5 machine learning algorithm, which was chosen based on an extensive analysis that compared several algorithms in terms of decision accuracy and training complexity. Experimental results show that the strategy is capable of building accurate models and decreases the HEVC encoding time in 34.4% on average, at the cost of a compression efficiency loss of only 0.2%.},
  keywords = {Machine learning algorithms;Signal processing algorithms;Encoding;Computational efficiency;Decision trees;Acceleration;High efficiency video coding;video coding;HEVC;complexity reduction;decision trees;machine learning},
  doi = {10.23919/Eusipco47968.2020.9287223},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000540.pdf},
}
High Efficiency Video Coding (HEVC) introduced flexible block partitioning structures that increased significantly compression rates in comparison to previous standards. However, such features resulted in a non-negligible increase in computational cost as well. To accelerate this complex partitioning process, this paper proposes a method that halts the usual rate-distortion optimization employed in Coding Unit size decision by a set of decision tree classifiers, which are trained on the fly according to the current video sequence characteristics. The classifiers are built during the encoding process by the C5 machine learning algorithm, which was chosen based on an extensive analysis that compared several algorithms in terms of decision accuracy and training complexity. Experimental results show that the strategy is capable of building accurate models and decreases the HEVC encoding time in 34.4% on average, at the cost of a compression efficiency loss of only 0.2%.
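A toy sketch of the "train a classifier during encoding, then use it to halt the CU-size search" idea, using a scikit-learn decision tree. The features and labels below are synthetic placeholders, not real HEVC encoder statistics, and the authors' C5 algorithm is replaced here by CART for convenience.

```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(1)
# Pretend features collected while encoding the first frames:
# [block variance, RD cost of the 2Nx2N mode, neighbouring-CU split flag]
X_train = rng.standard_normal((500, 3))
y_train = (X_train[:, 0] + 0.5 * X_train[:, 2] > 0).astype(int)   # 1 = CU was split

clf = DecisionTreeClassifier(max_depth=4).fit(X_train, y_train)    # on-the-fly model

def evaluate_smaller_cus(features):
    """True if the rate-distortion search should descend to smaller CU sizes."""
    return bool(clf.predict(np.asarray(features, dtype=float).reshape(1, -1))[0])

print(evaluate_smaller_cus([1.2, -0.3, 1.0]))
```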
Exploiting Attention-based Sequence-to-Sequence Architectures for Sound Event Localization.
Schymura, C.; Ochiai, T.; Delcroix, M.; Kinoshita, K.; Nakatani, T.; Araki, S.; and Kolossa, D.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 231-235, Aug 2020.

@InProceedings{9287224,
  author = {C. Schymura and T. Ochiai and M. Delcroix and K. Kinoshita and T. Nakatani and S. Araki and D. Kolossa},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Exploiting Attention-based Sequence-to-Sequence Architectures for Sound Event Localization},
  year = {2020},
  pages = {231-235},
  abstract = {Sound event localization frameworks based on deep neural networks have shown increased robustness with respect to reverberation and noise in comparison to classical parametric approaches. In particular, recurrent architectures that incorporate temporal context into the estimation process seem to be well-suited for this task. This paper proposes a novel approach to sound event localization by utilizing an attention-based sequence-to-sequence model. These types of models have been successfully applied to problems in natural language processing and automatic speech recognition. In this work, a multi-channel audio signal is encoded to a latent representation, which is subsequently decoded to a sequence of estimated directions-of-arrival. Herein, attentions allow for capturing temporal dependencies in the audio signal by focusing on specific frames that are relevant for estimating the activity and direction-of-arrival of sound events at the current time-step. The framework is evaluated on three publicly available datasets for sound event localization. It yields superior localization performance compared to state-of-the-art methods in both anechoic and reverberant conditions.},
  keywords = {Neural networks;Focusing;Signal processing;Robustness;Natural language processing;Reverberation;Task analysis;sound event localization;recurrent neural network;sequence-to-sequence model},
  doi = {10.23919/Eusipco47968.2020.9287224},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000231.pdf},
}
Sound event localization frameworks based on deep neural networks have shown increased robustness with respect to reverberation and noise in comparison to classical parametric approaches. In particular, recurrent architectures that incorporate temporal context into the estimation process seem to be well-suited for this task. This paper proposes a novel approach to sound event localization by utilizing an attention-based sequence-to-sequence model. These types of models have been successfully applied to problems in natural language processing and automatic speech recognition. In this work, a multi-channel audio signal is encoded to a latent representation, which is subsequently decoded to a sequence of estimated directions-of-arrival. Herein, attentions allow for capturing temporal dependencies in the audio signal by focusing on specific frames that are relevant for estimating the activity and direction-of-arrival of sound events at the current time-step. The framework is evaluated on three publicly available datasets for sound event localization. It yields superior localization performance compared to state-of-the-art methods in both anechoic and reverberant conditions.
DNN-Based Frequency Component Prediction for Frequency-Domain Audio Source Separation.
Watanabe, R.; Kitamura, D.; Saruwatari, H.; Takahashi, Y.; and Kondo, K.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 805-809, Aug 2020.

@InProceedings{9287225,
  author = {R. Watanabe and D. Kitamura and H. Saruwatari and Y. Takahashi and K. Kondo},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {DNN-Based Frequency Component Prediction for Frequency-Domain Audio Source Separation},
  year = {2020},
  pages = {805-809},
  abstract = {Multichannel audio source separation (MASS) plays an important role in various audio applications. Frequency-domain MASS algorithms such as multichannel nonnegative matrix factorization achieve better separation quality. However, they require a considerable computational cost for estimating the frequency-wise separation filter. To solve this problem, we propose a new framework combining the MASS algorithms and a simple deep neural network (DNN). In the proposed framework, frequency-domain MASS is performed only in narrowband frequency bins. Then, DNN predicts the separated source components in other frequency bins, where both the observed mixture of all frequency bins and the separated narrowband source components are used as DNN inputs. Our experimental results show the validity of the proposed MASS framework in terms of computational efficiency.},
  keywords = {Source separation;Frequency-domain analysis;Neural networks;Signal processing algorithms;Prediction algorithms;Computational efficiency;Narrowband;audio source separation;deep neural networks;frequency component prediction},
  doi = {10.23919/Eusipco47968.2020.9287225},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000805.pdf},
}
Multichannel audio source separation (MASS) plays an important role in various audio applications. Frequency-domain MASS algorithms such as multichannel nonnegative matrix factorization achieve better separation quality. However, they require a considerable computational cost for estimating the frequency-wise separation filter. To solve this problem, we propose a new framework combining the MASS algorithms and a simple deep neural network (DNN). In the proposed framework, frequency-domain MASS is performed only in narrowband frequency bins. Then, DNN predicts the separated source components in other frequency bins, where both the observed mixture of all frequency bins and the separated narrowband source components are used as DNN inputs. Our experimental results show the validity of the proposed MASS framework in terms of computational efficiency.
Theoretical Tuning of the Autoencoder Bottleneck Layer Dimension: A Mutual Information-based Algorithm.
Boquet, G.; Macias, E.; Morell, A.; Serrano, J.; and Vicario, J. L.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1512-1516, Aug 2020.

@InProceedings{9287226,
  author = {G. Boquet and E. Macias and A. Morell and J. Serrano and J. L. Vicario},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Theoretical Tuning of the Autoencoder Bottleneck Layer Dimension: A Mutual Information-based Algorithm},
  year = {2020},
  pages = {1512-1516},
  abstract = {Under the transportation field, the literature states that forecasting with excessive number of features can be computational inefficient and undertakes the risk of over-fitting. Because of that, several authors proposed the use of autoencoders (AE) as a way of learning fewer but useful features to enhance the road traffic forecast. Notably, the adequacy of the bottleneck layer dimension of the AE has not been addressed, thus there is no standard way for automatic selection of the dimensionality. We address the problem from an information theory perspective as the reconstruction error is not a reliable indicator of the performance of the subsequent supervised learning algorithm. Hence, we propose an algorithm based on how mutual information and entropy of data evolve during training of the AE. We validate it against two real-world traffic datasets and provide discussion why the entropy of codes is a reliable performance indicator. Compared to the tendency found in the literature, based on trial-and-error methods, the advantage of our proposal is that a practitioner can efficiently find said dimension guaranteeing maximal data compression and reliable traffic forecast.},
  keywords = {Training;Signal processing algorithms;Transportation;Reliability theory;Entropy;Forecasting;Standards;intelligent transportation systems;traffic forecasting;autoencoder;mutual information;entropy},
  doi = {10.23919/Eusipco47968.2020.9287226},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001512.pdf},
}
Under the transportation field, the literature states that forecasting with excessive number of features can be computational inefficient and undertakes the risk of over-fitting. Because of that, several authors proposed the use of autoencoders (AE) as a way of learning fewer but useful features to enhance the road traffic forecast. Notably, the adequacy of the bottleneck layer dimension of the AE has not been addressed, thus there is no standard way for automatic selection of the dimensionality. We address the problem from an information theory perspective as the reconstruction error is not a reliable indicator of the performance of the subsequent supervised learning algorithm. Hence, we propose an algorithm based on how mutual information and entropy of data evolve during training of the AE. We validate it against two real-world traffic datasets and provide discussion why the entropy of codes is a reliable performance indicator. Compared to the tendency found in the literature, based on trial-and-error methods, the advantage of our proposal is that a practitioner can efficiently find said dimension guaranteeing maximal data compression and reliable traffic forecast.
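A rough sketch of the kind of quantity the abstract says is monitored: an entropy estimate of the bottleneck codes, computed here with a simple histogram estimator. The codes below are random placeholders standing in for activations recorded during training; the authors' actual estimator and selection rule may differ.

```python
import numpy as np

def code_entropy(codes, bins=32):
    """Average per-dimension entropy (bits) of bottleneck activations, histogram estimate."""
    total = 0.0
    for d in range(codes.shape[1]):
        counts, _ = np.histogram(codes[:, d], bins=bins)
        p = counts / counts.sum()
        p = p[p > 0]
        total += -(p * np.log2(p)).sum()
    return total / codes.shape[1]

codes = np.random.default_rng(0).standard_normal((10_000, 8))   # stand-in for recorded codes
print(code_entropy(codes))
```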
Stochasticity and Skip Connection Improve Knowledge Transfer.
Nguyen, L. T.; Lee, K.; and Shim, B.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1537-1541, Aug 2020.

@InProceedings{9287227,
  author = {L. T. Nguyen and K. Lee and B. Shim},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Stochasticity and Skip Connection Improve Knowledge Transfer},
  year = {2020},
  pages = {1537-1541},
  abstract = {Deep neural networks have achieved state-of-the-art performance in various fields. However, DNNs need to be scaled down to fit real-word applications where memory and computation resources are limited. As a means to compress the network yet still maintain the performance of the network, knowledge distillation has brought a lot of attention. This technique is based on the idea to train a student network using the provided output of a teacher network. Deploying multiple teacher networks facilitates learning of the student network, however, it causes to some extent waste of resources. In the proposed approach, we generate multiple teacher networks from a teacher network by exploiting stochastic block and skip connection. Thus, they can play the role of multiple teacher networks and provide sufficient knowledge to the student network without additional resources. We observe the improved performance of student network with the proposed approach using several dataset.},
  keywords = {Knowledge engineering;Neural networks;Europe;Object detection;Signal processing;Task analysis;Knowledge transfer;convolutional neural network;Knowledge transfer;image classification;multiple teacher networks},
  doi = {10.23919/Eusipco47968.2020.9287227},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001537.pdf},
}
Deep neural networks have achieved state-of-the-art performance in various fields. However, DNNs need to be scaled down to fit real-word applications where memory and computation resources are limited. As a means to compress the network yet still maintain the performance of the network, knowledge distillation has brought a lot of attention. This technique is based on the idea to train a student network using the provided output of a teacher network. Deploying multiple teacher networks facilitates learning of the student network, however, it causes to some extent waste of resources. In the proposed approach, we generate multiple teacher networks from a teacher network by exploiting stochastic block and skip connection. Thus, they can play the role of multiple teacher networks and provide sufficient knowledge to the student network without additional resources. We observe the improved performance of student network with the proposed approach using several dataset.
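For context, a minimal PyTorch sketch of the standard knowledge-distillation loss (temperature-softened teacher probabilities plus hard labels) that multi-teacher schemes such as this one build on; the stochastic-block and skip-connection machinery proposed in the paper is not shown.

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels, T=4.0, alpha=0.7):
    # Soft term: student matches the teacher's temperature-softened distribution.
    soft = F.kl_div(F.log_softmax(student_logits / T, dim=1),
                    F.softmax(teacher_logits / T, dim=1),
                    reduction="batchmean") * (T * T)
    hard = F.cross_entropy(student_logits, labels)       # usual supervised term
    return alpha * soft + (1 - alpha) * hard

student = torch.randn(8, 10, requires_grad=True)          # placeholder student logits
teacher = torch.randn(8, 10)                               # placeholder teacher logits
labels = torch.randint(0, 10, (8,))
print(distillation_loss(student, teacher, labels).item())
```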
Data-driven Kernel-based Probabilistic SAX for Time Series Dimensionality Reduction.
Bountrogiannis, K.; Tzagkarakis, G.; and Tsakalides, P.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2343-2347, Aug 2020.

@InProceedings{9287311,
  author = {K. Bountrogiannis and G. Tzagkarakis and P. Tsakalides},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Data-driven Kernel-based Probabilistic SAX for Time Series Dimensionality Reduction},
  year = {2020},
  pages = {2343-2347},
  abstract = {The ever-increasing volume and complexity of time series data, emerging in various application domains, necessitate efficient dimensionality reduction for facilitating data mining tasks. Symbolic representations, among them symbolic aggregate approximation (SAX), have proven very effective in compacting the information content of time series while exploiting the wealth of search algorithms used in bioinformatics and text mining communities. However, typical SAX-based techniques rely on a Gaussian assumption for the underlying data statistics, which often deteriorates their performance in practical scenarios. To overcome this limitation, this work introduces a method that negates any assumption on the probability distribution of time series. Specifically, a data-driven kernel density estimator is first applied on the data, followed by Lloyd-Max quantization to determine the optimal horizontal segmentation breakpoints. Experimental evaluation on distinct datasets demonstrates the superiority of our method, in terms of reconstruction accuracy and tightness of lower bound, when compared against the conventional and a modified SAX method.},
  keywords = {Dimensionality reduction;Text mining;Quantization (signal);Time series analysis;Signal processing algorithms;Probabilistic logic;Task analysis;Data-driven probabilistic SAX;kernel density estimation;symbolic representations;Lloyd-Max quantization},
  doi = {10.23919/Eusipco47968.2020.9287311},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002343.pdf},
}
The ever-increasing volume and complexity of time series data, emerging in various application domains, necessitate efficient dimensionality reduction for facilitating data mining tasks. Symbolic representations, among them symbolic aggregate approximation (SAX), have proven very effective in compacting the information content of time series while exploiting the wealth of search algorithms used in bioinformatics and text mining communities. However, typical SAX-based techniques rely on a Gaussian assumption for the underlying data statistics, which often deteriorates their performance in practical scenarios. To overcome this limitation, this work introduces a method that negates any assumption on the probability distribution of time series. Specifically, a data-driven kernel density estimator is first applied on the data, followed by Lloyd-Max quantization to determine the optimal horizontal segmentation breakpoints. Experimental evaluation on distinct datasets demonstrates the superiority of our method, in terms of reconstruction accuracy and tightness of lower bound, when compared against the conventional and a modified SAX method.
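A minimal sketch of the two steps named in the abstract: fit a kernel density estimate to the series values, then run Lloyd-Max iterations against that density to obtain quantization breakpoints. The alphabet size and the non-Gaussian test data are illustrative assumptions, not the paper's experimental setup.

```python
import numpy as np
from scipy.stats import gaussian_kde

def lloyd_max_breakpoints(x, n_symbols=8, iters=100):
    density = gaussian_kde(x)                                   # data-driven density estimate
    grid = np.linspace(x.min(), x.max(), 2048)
    p = density(grid)
    p /= p.sum()
    levels = np.quantile(x, (np.arange(n_symbols) + 0.5) / n_symbols)   # initial codebook
    for _ in range(iters):
        breaks = (levels[:-1] + levels[1:]) / 2                 # nearest-level boundaries
        cell = np.searchsorted(breaks, grid)
        levels = np.array([np.average(grid[cell == k], weights=p[cell == k])
                           if p[cell == k].sum() > 0 else levels[k]
                           for k in range(n_symbols)])          # conditional means per cell
    return (levels[:-1] + levels[1:]) / 2, levels

x = np.random.default_rng(0).gamma(2.0, 1.0, 5000)              # clearly non-Gaussian series
breaks, levels = lloyd_max_breakpoints(x)
print(np.round(breaks, 3))
```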
Spectrogram-based fundamental frequency tracking of spontaneous cries in preterm newborns.
Met-Montot, B.; Cabon, S.; Carrault, G.; and Porée, F.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1185-1189, Aug 2020.

@InProceedings{9287312,
  author = {B. Met-Montot and S. Cabon and G. Carrault and F. Porée},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Spectrogram-based fundamental frequency tracking of spontaneous cries in preterm newborns},
  year = {2020},
  pages = {1185-1189},
  abstract = {Cry analysis of preterm newborns has proven to be relevant for prediction of pathologies or for comparison with full-term newborns. In this paper we propose a new approach for the automated detection and tracking of the fundamental frequency in cries, based on the processing of the spectrogram. A first step automatically detects the frequency bounds including the fundamental frequency along each cry. Then, the tracking of the fundamental frequency is obtained after a contour detection. Results showed that this new approach allows to process efficiently all types of cries. This whole procedure applied to a database including 1889 cries from 14 babies, at term-equivalent age, highlighted differences between extremely, very and late preterm as well as full-term newborns. In addition, we observed a decrease of the mean fundamental frequency with increasing gestational age, a result in accordance with the literature.},
  keywords = {Pediatrics;Pathology;Databases;Europe;Spectrogram;prematurity;monitoring;cry analysis;fundamental frequency tracking;melody},
  doi = {10.23919/Eusipco47968.2020.9287312},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001185.pdf},
}
Cry analysis of preterm newborns has proven to be relevant for prediction of pathologies or for comparison with full-term newborns. In this paper we propose a new approach for the automated detection and tracking of the fundamental frequency in cries, based on the processing of the spectrogram. A first step automatically detects the frequency bounds including the fundamental frequency along each cry. Then, the tracking of the fundamental frequency is obtained after a contour detection. Results showed that this new approach allows to process efficiently all types of cries. This whole procedure applied to a database including 1889 cries from 14 babies, at term-equivalent age, highlighted differences between extremely, very and late preterm as well as full-term newborns. In addition, we observed a decrease of the mean fundamental frequency with increasing gestational age, a result in accordance with the literature.
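A heavily simplified illustration of spectrogram-based fundamental-frequency tracking: pick, in each time frame, the strongest spectral peak inside an assumed f0 search band. The band limits and the synthetic gliding tone are placeholders, not the cry recordings or the contour-detection step described above.

```python
import numpy as np
from scipy.signal import spectrogram

fs = 16000
t = np.arange(0, 1.0, 1 / fs)
f0_true = 400 + 100 * np.sin(2 * np.pi * 2 * t)                 # gliding synthetic pitch
x = np.sin(2 * np.pi * np.cumsum(f0_true) / fs)                 # phase-integrated tone

f, frames, S = spectrogram(x, fs=fs, nperseg=1024, noverlap=768)
band = (f >= 200) & (f <= 800)                                   # assumed f0 search band
f0_track = f[band][np.argmax(S[band, :], axis=0)]                # strongest bin per frame
print(np.round(f0_track[:10], 1))
```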
Study on the Influence of Multiple Image Inputs of a Multi-View Fusion Neural Network Based on Grad-CAM and Masked Image Inputs.
Tilgner, S.; Wagner, D.; Kalischewski, K.; Schmitz, J.-C.; and Kummert, A.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1427-1431, Aug 2020.

@InProceedings{9287315,
  author = {S. Tilgner and D. Wagner and K. Kalischewski and J. -C. Schmitz and A. Kummert},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Study on the Influence of Multiple Image Inputs of a Multi-View Fusion Neural Network Based on Grad-CAM and Masked Image Inputs},
  year = {2020},
  pages = {1427-1431},
  abstract = {Neural network models are used successfully in many applications like traffic sign recognition in the automotive context, cancer detection in medicine engineering, machine monitoring in the manufacturing industry, et cetera. However, the decisions of a neural network model for a particular input sample in a classification task are mostly nontransparent. We propose techniques to determine which input image of a Multi-View Fusion Neural Network has the most influence on the prediction of the model for a particular image sample pair and which regions in the input images are important. In addition, a trained Multi-View Fusion Neural Network is studied regarding the question of influence. The results are convincing and show that the studied model learned similar concepts like a human.},
  keywords = {Visualization;Neural networks;Signal processing;Predictive models;Observers;Reliability;Task analysis;Influence Visualization;Multi-View Fusion Neural Network;Grad-CAM;Convolutional Neural Network},
  doi = {10.23919/Eusipco47968.2020.9287315},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001427.pdf},
}
Neural network models are used successfully in many applications like traffic sign recognition in the automotive context, cancer detection in medicine engineering, machine monitoring in the manufacturing industry, et cetera. However, the decisions of a neural network model for a particular input sample in a classification task are mostly nontransparent. We propose techniques to determine which input image of a Multi-View Fusion Neural Network has the most influence on the prediction of the model for a particular image sample pair and which regions in the input images are important. In addition, a trained Multi-View Fusion Neural Network is studied regarding the question of influence. The results are convincing and show that the studied model learned similar concepts like a human.
Fast Multilevel Quantization for Distributed Detection Based on Gaussian Approximation.
Gül, G.; and Baßler, M.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2433-2437, Aug 2020.

@InProceedings{9287316,
  author = {G. Gül and M. Baßler},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Fast Multilevel Quantization for Distributed Detection Based on Gaussian Approximation},
  year = {2020},
  pages = {2433-2437},
  abstract = {An iterative algorithm is derived for multilevel quantization of sensor observations in distributed sensor networks, where each sensor transmits a summary of its observation to the fusion center and the fusion center makes the final decision. The proposed scheme is composed of a person-by-person optimum quantization at each sensor and a Gaussian approximation to the distribution of the test statistic at the fusion center. The complexity of the algorithm is linear both for identically and non-identically distributed independent sensors. Experimental results indicate that the proposed scheme is promising in comparison to the current state-of-the-art.},
  keywords = {Quantization (signal);Error probability;Signal processing algorithms;Approximation algorithms;Iterative methods;Gaussian approximation;Optimization;Distributed detection;quantization;multisensor systems;cognitive radio;wireless sensor networks;signal detection},
  doi = {10.23919/Eusipco47968.2020.9287316},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002433.pdf},
}
An iterative algorithm is derived for multilevel quantization of sensor observations in distributed sensor networks, where each sensor transmits a summary of its observation to the fusion center and the fusion center makes the final decision. The proposed scheme is composed of a person-by-person optimum quantization at each sensor and a Gaussian approximation to the distribution of the test statistic at the fusion center. The complexity of the algorithm is linear both for identically and non-identically distributed independent sensors. Experimental results indicate that the proposed scheme is promising in comparison to the current state-of-the-art.
Wavelets in the Deep Learning Era.
Ramzi, Z.; Starck, J.-L.; Moreau, T.; and Ciuciu, P.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1417-1421, Aug 2020.

@InProceedings{9287317,
  author = {Z. Ramzi and J. -L. Starck and T. Moreau and P. Ciuciu},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Wavelets in the Deep Learning Era},
  year = {2020},
  pages = {1417-1421},
  abstract = {Sparsity based methods, such as wavelets, have been state-of-the-art for more than 20 years for inverse problems before being overtaken by neural networks. In particular, U-nets have proven to be extremely effective. Their main ingredients are a highly non-linear processing, a massive learning made possible by the flourishing of optimization algorithms with the power of computers (GPU) and the use of large available data sets for training. While the many stages of non-linearity are intrinsic to deep learning, the usage of learning with training data could also be exploited by sparsity based approaches. The aim of our study is to push the limits of sparsity with learning, and comparing the results with U-nets. We present a new network architecture, which conserves the properties of sparsity based methods such as exact reconstruction and good generalization properties, while fostering the power of neural networks for learning and fast calculation. We evaluate the model on image denoising tasks and show it is competitive with learning-based models.},
  keywords = {Deep learning;Maximum likelihood detection;Wavelet domain;Neural networks;Training data;Nonlinear filters;Image reconstruction;Machine Learning;Deep Learning;Neural Networks;Wavelets;Denoising;Image restoration},
  doi = {10.23919/Eusipco47968.2020.9287317},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001417.pdf},
}
Sparsity based methods, such as wavelets, have been state-of-the-art for more than 20 years for inverse problems before being overtaken by neural networks. In particular, U-nets have proven to be extremely effective. Their main ingredients are a highly non-linear processing, a massive learning made possible by the flourishing of optimization algorithms with the power of computers (GPU) and the use of large available data sets for training. While the many stages of non-linearity are intrinsic to deep learning, the usage of learning with training data could also be exploited by sparsity based approaches. The aim of our study is to push the limits of sparsity with learning, and comparing the results with U-nets. We present a new network architecture, which conserves the properties of sparsity based methods such as exact reconstruction and good generalization properties, while fostering the power of neural networks for learning and fast calculation. We evaluate the model on image denoising tasks and show it is competitive with learning-based models.
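As a reminder of the sparsity baseline the paper starts from, here is the classical wavelet soft-thresholding denoiser (using PyWavelets on a 1-D toy signal); it is not the learned architecture proposed by the authors.

```python
import numpy as np
import pywt

def wavelet_denoise(x, wavelet="db4", level=4):
    coeffs = pywt.wavedec(x, wavelet, level=level)
    sigma = np.median(np.abs(coeffs[-1])) / 0.6745               # MAD noise estimate
    thr = sigma * np.sqrt(2 * np.log(len(x)))                    # universal threshold
    coeffs = [coeffs[0]] + [pywt.threshold(c, thr, mode="soft") for c in coeffs[1:]]
    return pywt.waverec(coeffs, wavelet)[: len(x)]

t = np.linspace(0, 1, 1024)
clean = np.sin(2 * np.pi * 5 * t)
noisy = clean + 0.3 * np.random.default_rng(0).standard_normal(t.size)
denoised = wavelet_denoise(noisy)
print(np.mean((denoised - clean) ** 2) < np.mean((noisy - clean) ** 2))   # True
```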
A Deep Double-Q Learning-based Scheme for Anti-Jamming Communications.
Nguyen, P. K. H.; Nguyen, V. H.; and Do, V. L.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1566-1570, Aug 2020.

@InProceedings{9287318,
  author = {P. K. H. Nguyen and V. H. Nguyen and V. L. Do},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {A Deep Double-Q Learning-based Scheme for Anti-Jamming Communications},
  year = {2020},
  pages = {1566-1570},
  abstract = {Cognitive radio has become an emerging advanced wireless communication technology to achieve maximal spectrum efficiency. In cognitive radio networks, the threat of radio jamming attack arises as a big issue due to the vulnerability of radio transmission. Therefore, anti-jamming is an active research topic for a long time. Recently, with the success of deep learning, deep reinforcement learning algorithms have been applied to solve the dynamic spectrum access and anti-jamming problem. In this paper, we propose a Deep Double-Q learning-based method to learn an efficient communication policy including channel access and transmission power for tackling different jamming scenarios. The proposed scheme uses observed spectral information as input and Q-function is approximated by a neural network. Simulation results show that Double-Q learning algorithm with Convolutional Neural Network achieves effective communication strategies to avoid various jamming patterns compared with other traditional methods.},
  keywords = {Heuristic algorithms;Simulation;Neural networks;Signal processing algorithms;Reinforcement learning;Cognitive radio;Jamming;Cognitive radio;anti-jamming;spectrum sensing;reinforcement learning;deep double-Q learning},
  doi = {10.23919/Eusipco47968.2020.9287318},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001566.pdf},
}
Cognitive radio has become an emerging advanced wireless communication technology to achieve maximal spectrum efficiency. In cognitive radio networks, the threat of radio jamming attack arises as a big issue due to the vulnerability of radio transmission. Therefore, anti-jamming is an active research topic for a long time. Recently, with the success of deep learning, deep reinforcement learning algorithms have been applied to solve the dynamic spectrum access and anti-jamming problem. In this paper, we propose a Deep Double-Q learning-based method to learn an efficient communication policy including channel access and transmission power for tackling different jamming scenarios. The proposed scheme uses observed spectral information as input and Q-function is approximated by a neural network. Simulation results show that Double-Q learning algorithm with Convolutional Neural Network achieves effective communication strategies to avoid various jamming patterns compared with other traditional methods.
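A sketch of the double-Q update rule that the paper's deep variant is built on, shown in tabular form on a toy single-state channel-selection problem; the convolutional network, spectrum observations, and jamming patterns used by the authors are not modelled here.

```python
import numpy as np

n_channels, alpha, gamma, eps = 4, 0.1, 0.9, 0.1
QA = np.zeros(n_channels)                     # two independent value estimates
QB = np.zeros(n_channels)
rng = np.random.default_rng(0)
jammed = 2                                    # fixed jammer on channel 2 (toy assumption)

for step in range(5000):
    Q = QA + QB
    a = rng.integers(n_channels) if rng.random() < eps else int(np.argmax(Q))
    r = 0.0 if a == jammed else 1.0           # reward: transmission was not jammed
    if rng.random() < 0.5:                    # update A, evaluating with B
        QA[a] += alpha * (r + gamma * QB[int(np.argmax(QA))] - QA[a])
    else:                                     # update B, evaluating with A
        QB[a] += alpha * (r + gamma * QA[int(np.argmax(QB))] - QB[a])

print(int(np.argmax(QA + QB)))                # a channel that avoids the jammer
```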
CSP-based discriminative capacity index from EEG supporting ADHD diagnosis.
Galindo-Noreña, S.; Cárdenas-Peña, D.; and Orozco-Gutierrez, A. A.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1343-1347, Aug 2020.

@InProceedings{9287319,
  author = {S. Galindo-Noreña and D. Cárdenas-Peña and A. A. Orozco-Gutierrez},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {CSP-based discriminative capacity index from EEG supporting ADHD diagnosis},
  year = {2020},
  pages = {1343-1347},
  abstract = {The Attention-Deficit/Hyperactivity Disorder (ADHD) is a childhood-onset neurological disorder that can persist in adolescence and adult life, reducing concentration, memory, and productivity. Although biomarkers as TBR and P300 rely on ADHD physiology, differences between ADHD and control lack significance. In this work, we propose a feature extraction approach based on the common spatial patterns (CSP) from EEG signals to support the ADHD diagnosis. Our features quantify the channel-wise discriminative capacity from the resulting spatial patterns and eigenvalues. We validated the proposed methodology using synthetic and real EEG signal. In the former, the proposed index suitably identifies the spatial location of differentiating sources, while attenuates the common activity. In the latter, the resulting subject-wise features fed a linear discriminant analysis as the supported-diagnosis tool. Achieved 87% accuracy rate proves that the discriminative index identifies outperforms conventional biomarkers in the ADHD diagnosis.},
  keywords = {brain-computer interfaces;eigenvalues and eigenfunctions;electroencephalography;feature extraction;medical disorders;medical signal processing;neurophysiology;signal classification;CSP-based discriminative capacity index;ADHD diagnosis;childhood-onset neurological disorder;adolescence;ADHD physiology;feature extraction approach;channel-wise discriminative capacity;eigenvalues;synthetic EEG signal;real EEG signal;linear discriminant analysis;supported-diagnosis tool;discriminative index;Productivity;Biomarkers;Tools;Signal processing;Electroencephalography;Physiology;Indexes},
  doi = {10.23919/Eusipco47968.2020.9287319},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001343.pdf},
}
The Attention-Deficit/Hyperactivity Disorder (ADHD) is a childhood-onset neurological disorder that can persist in adolescence and adult life, reducing concentration, memory, and productivity. Although biomarkers as TBR and P300 rely on ADHD physiology, differences between ADHD and control lack significance. In this work, we propose a feature extraction approach based on the common spatial patterns (CSP) from EEG signals to support the ADHD diagnosis. Our features quantify the channel-wise discriminative capacity from the resulting spatial patterns and eigenvalues. We validated the proposed methodology using synthetic and real EEG signal. In the former, the proposed index suitably identifies the spatial location of differentiating sources, while attenuates the common activity. In the latter, the resulting subject-wise features fed a linear discriminant analysis as the supported-diagnosis tool. Achieved 87% accuracy rate proves that the discriminative index identifies outperforms conventional biomarkers in the ADHD diagnosis.
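An illustrative computation of common spatial patterns (CSP), the building block from which the paper's discriminative index is derived: solve the generalized eigenvalue problem between the two class covariance matrices. The EEG trials below are random surrogates, not the ADHD recordings used by the authors.

```python
import numpy as np
from scipy.linalg import eigh

def csp(trials_a, trials_b):
    """trials_*: arrays of shape (n_trials, n_channels, n_samples)."""
    def avg_cov(trials):
        return np.mean([np.cov(t) for t in trials], axis=0)
    Ca, Cb = avg_cov(trials_a), avg_cov(trials_b)
    evals, evecs = eigh(Ca, Ca + Cb)          # generalized eigendecomposition
    order = np.argsort(evals)[::-1]
    return evals[order], evecs[:, order]      # columns are spatial filters

rng = np.random.default_rng(0)
a = rng.standard_normal((30, 8, 256))          # surrogate class-A trials
b = 1.5 * rng.standard_normal((30, 8, 256))    # surrogate class-B trials
evals, W = csp(a, b)
print(np.round(evals, 2))                      # values far from 0.5 mark discriminative filters
```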
Go-selfies: A Fast Selfies Background Removal Method Using ResU-Net Deep Learning.
Wu, Y.
In 2020 28th European Signal Processing Conference (EUSIPCO), pages 615-619, Aug 2020.

@InProceedings{9287320,
  author = {Y. Wu},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Go-selfies: A Fast Selfies Background Removal Method Using ResU-Net Deep Learning},
  year = {2020},
  pages = {615-619},
  abstract = {The selfies play an important role in recording meaningful moment in human’s daily life. In most cases, before sharing photos, people often synthesis attractive images on some phone applications, such as Photoshop. While these kinds of software have reached good performance nowadays, they are too complex for simple life usage. In this work, we proposed an automatic segmentation model unique to segment human selfies photos. We first constructed a large photo segmentation database and built 8 different models based on resolution, image size and whether or not to use transfer learning and picked the best one among them. We then applied cyclical learning rate method and pre-trained encoder network to fine tune our models. Finally, our best model tested on Google images demonstrated satisfying promising results on both accuracy scores and losses, which will be the precondition in real-time segmentation. We named this lovely web product as {"}Go Selfies{"}.},
  keywords = {Deep learning;Image segmentation;Signal processing;Software;Real-time systems;Internet;Software development management;background removal;deep learning;selfies;ResU-Net},
  doi = {10.23919/Eusipco47968.2020.9287320},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000615.pdf},
}

Demographic Bias in Presentation Attack Detection of Iris Recognition Systems. Fang, M.; Damer, N.; Kirchbuchner, F.; and Kuijper, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 835-839, Aug 2020.
@InProceedings{9287321,\n  author = {M. Fang and N. Damer and F. Kirchbuchner and A. Kuijper},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Demographic Bias in Presentation Attack Detection of Iris Recognition Systems},\n  year = {2020},\n  pages = {835-839},\n  abstract = {With the widespread use of biometric systems, the demographic bias problem raises more attention. Although many studies addressed bias issues in biometric verification, there are no works that analyze the bias in presentation attack detection (PAD) decisions. Hence, we investigate and analyze the demographic bias in iris PAD algorithms in this paper. To enable a clear discussion, we adapt the notions of differential performance and differential outcome to the PAD problem. We study the bias in iris PAD using three baselines (hand-crafted, transfer-learning, and training from scratch) using the NDCLD-2013 [18] database. The experimental results point out that female users will be significantly less protected by the PAD, in comparison to males.},\n  keywords = {Training;Databases;Training data;Signal processing algorithms;Europe;Signal processing;Iris recognition;PAD bias;iris PAD;differential performance},\n  doi = {10.23919/Eusipco47968.2020.9287321},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000835.pdf},\n}\n\n
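To make the notion of differential PAD performance concrete, a minimal sketch of computing attack and bona fide error rates separately per demographic group. The score convention, threshold, and synthetic labels are illustrative assumptions, not the paper's data or baselines.

import numpy as np

def pad_error_rates(scores, is_attack, threshold=0.5):
    """APCER: attacks accepted as bona fide; BPCER: bona fide samples rejected."""
    decisions = scores >= threshold                     # True = classified as attack (assumed convention)
    apcer = np.mean(~decisions[is_attack])              # missed attacks
    bpcer = np.mean(decisions[~is_attack])              # rejected genuine samples
    return apcer, bpcer

# Hypothetical PAD scores with a gender label per sample.
rng = np.random.default_rng(1)
scores = rng.random(1000)
is_attack = rng.random(1000) < 0.3
group = rng.choice(["female", "male"], size=1000)

for g in ("female", "male"):
    sel = group == g
    apcer, bpcer = pad_error_rates(scores[sel], is_attack[sel])
    print(f"{g}: APCER={apcer:.3f}  BPCER={bpcer:.3f}")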

Evaluation of Zero Frequency Filtering based Method for Multi-pitch Streaming of Concurrent Speech Signals. Bouafif Mansali, M.; Bäckström, T.; and Lachiri, Z. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 286-290, Aug 2020.
@InProceedings{9287322,\n  author = {M. {Bouafif Mansali} and T. Bäckström and Z. Lachiri},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Evaluation of Zero Frequency Filtering based Method for Multi-pitch Streaming of Concurrent Speech Signals},\n  year = {2020},\n  pages = {286-290},\n  abstract = {Multiple pitch streaming from a mixture is a challenging problem for signal processing and especially for speech separation. In this paper, we use a Zero frequency filtering (ZFF) based new system to stream pitch of multiple concurrent speakers. We propose a workflow to estimate pitch values of all sources in each single frame then streaming them into trajectories, each corresponding to a distinct source. The method consists of detecting and localizing the involved speakers in a mixture, followed by a ZFF based approach where involved speakers’ pitches are iteratively streamed from the observed mixture. The robustness of the proposed system is tested over two, and three overlapping speech mixtures collected in reverberant environment. The results indicate that our proposal brings ZFF to a competitive level with another recently proposed streaming approach.},\n  keywords = {Filtering;Signal processing algorithms;Signal processing;Robustness;Trajectory;Proposals;Speech processing;Pitch estimation;Zero Frequency Filtering;Epochs;Multipitch;Streaming},\n  doi = {10.23919/Eusipco47968.2020.9287322},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000286.pdf},\n}\n\n

Robust End-to-End Speaker Verification Using EEG. Han, Y.; Krishna, G.; Tran, C.; Carnahan, M.; and Tewfik, A. H. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1170-1174, Aug 2020.
@InProceedings{9287323,\n  author = {Y. Han and G. Krishna and C. Tran and M. Carnahan and A. H. Tewfik},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust End-to-End Speaker Verification Using EEG},\n  year = {2020},\n  pages = {1170-1174},\n  abstract = {In this paper we demonstrate that performance of a speaker verification system can be improved by concatenating electroencephalography (EEG) signal features with speech signal features or only using EEG signal features. We use state-of-the-art end-to-end deep learning model for performing speaker verification and we demonstrate our results for noisy speech. Our results indicate that EEG signals can improve the robustness of speaker verification systems, especially in noiser environment.},\n  keywords = {electroencephalography;feature extraction;learning (artificial intelligence);medical signal processing;speaker recognition;robust end-to-end speaker verification;speaker verification system;electroencephalography signal features;speech signal features;state-of-the-art end-to-end deep learning model;EEG signals;Europe;Signal processing;Electroencephalography;Robustness;Noise measurement;Mel frequency cepstral coefficient;Speech processing;Electroencephalography (EEG);Speaker Verification;Deep Learning;Bio-metrics},\n  doi = {10.23919/Eusipco47968.2020.9287323},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001170.pdf},\n}\n\n

Monitoring the Rehabilitation Progress Using a DCNN and Kinematic Data for Digital Healthcare. Alcaraz, J. C.; Moghaddamnia, S.; Penner, M.; and Peissig, J. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1333-1337, Aug 2020.
@InProceedings{9287324,\n  author = {J. C. Alcaraz and S. Moghaddamnia and M. Penner and J. Peissig},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Monitoring the Rehabilitation Progress Using a DCNN and Kinematic Data for Digital Healthcare},\n  year = {2020},\n  pages = {1333-1337},\n  abstract = {Monitoring the progress of patients during the rehabilitation process after an operation is beneficial for adjusting care and medical treatment in order to improve the patient’s quality of life. The supervised methods used for this in the literature need data labeling, which is a time and cost-intensive procedure. In this paper, we propose Deep Convolutional Neural Network (DCNN) for monitoring the progress of the rehabilitation, utilizing the kinematic data from a Wearable Sensor System (WSS). The WSS provides three-dimensional linear acceleration and angular velocity from multiple body parts such as the lower back and lower limbs during walking at any speed on level ground. Twelve patients with hip unilateral arthroplasty completed two weeks of gait training after the operation. The classification results of different Inertial Measurement Unit (IMU) placements revealed that the IMU placed at thigh achieved the highest accuracy. The proposed DCNN achieved up to 98% classification accuracy for the rehabilitation progress monitoring. This approach provides an objective and evidence-based way of understanding clinically important changes in human movement patterns in response to exercise therapy.},\n  keywords = {Thigh;Medical treatment;Kinematics;Biomedical monitoring;Monitoring;Wearable sensors;Hip;CNN;Gait Rehabilitation;Progress Monitoring;IMU;Machine Learning;Digital Healthcare and Therapy Control},\n  doi = {10.23919/Eusipco47968.2020.9287324},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001333.pdf},\n}\n\n
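A minimal PyTorch sketch of a 1D convolutional classifier over windowed IMU signals, of the general kind the entry above trains; the channel count (tri-axial accelerometer plus gyroscope), window length, and class count are assumptions, and the paper's exact DCNN architecture is not reproduced.

import torch
import torch.nn as nn

class ImuCnn(nn.Module):
    def __init__(self, in_channels=6, n_classes=3):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv1d(in_channels, 32, kernel_size=7, padding=3), nn.ReLU(),
            nn.MaxPool1d(2),
            nn.Conv1d(32, 64, kernel_size=5, padding=2), nn.ReLU(),
            nn.AdaptiveAvgPool1d(1),                     # global pooling over time
        )
        self.classifier = nn.Linear(64, n_classes)

    def forward(self, x):                                # x: (batch, channels, samples)
        return self.classifier(self.features(x).squeeze(-1))

# Hypothetical batch: 8 gait windows of 200 samples from 6 IMU channels.
model = ImuCnn()
logits = model(torch.randn(8, 6, 200))
print(logits.shape)                                      # torch.Size([8, 3])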

Incorporating User Feedback Into One-Class Support Vector Machines for Anomaly Detection. Lesouple, J.; and Tourneret, J.-Y. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1608-1612, Aug 2020.
@InProceedings{9287325,\n  author = {J. Lesouple and J. -Y. Tourneret},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Incorporating User Feedback Into One-Class Support Vector Machines for Anomaly Detection},\n  year = {2020},\n  pages = {1608-1612},\n  abstract = {Machine learning and data-driven algorithms have gained a growth of interest during the past decades due to the computation capability of the computers which has increased and the quantity of data available in various domains. One possible application of machine learning is to perform unsupervised anomaly detection. Indeed, among all available data, the anomalies are supposed to be very sparse and the expert might not have the time to label all the data as nominal or not. Many solutions exist to this unsupervised problem, but are known to provide many false alarms, because some scarce nominal modes might not be included in the training dataset and thus will be detected as anomalies. To tackle this issue, we propose to present an existing iterative algorithm, which presents potential anomaly to the expert at each iteration, and compute a new boundary according to this feedback using One Class Support Vector Machine.},\n  keywords = {Support vector machines;Training;Machine learning algorithms;Signal processing algorithms;Machine learning;Signal processing;Anomaly detection;Machine learning;Semi-supervised Learning;Active Learning;User Feedback;Anomaly Detection;One-Class Support Vector Machines},\n  doi = {10.23919/Eusipco47968.2020.9287325},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001608.pdf},\n}\n\n
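A rough sketch of the feedback loop the entry above describes, built on scikit-learn's OneClassSVM: at each iteration the most suspicious unlabeled sample is shown to an "expert", and the boundary is recomputed. The refit strategy and the simulated expert are assumptions made for illustration; the paper derives a dedicated boundary update instead.

import numpy as np
from sklearn.svm import OneClassSVM

rng = np.random.default_rng(2)
X = np.vstack([rng.normal(0, 1, (500, 2)),        # nominal data
               rng.normal(5, 0.5, (10, 2))])      # a few true anomalies

labels = np.full(len(X), -1)                      # -1 = unlabeled, 1 = nominal, 0 = anomaly
model = OneClassSVM(nu=0.05, gamma="scale").fit(X)

for _ in range(5):
    scores = model.decision_function(X)
    # Present the most suspicious unlabeled sample to the expert.
    candidates = np.where(labels == -1)[0]
    query = candidates[np.argmin(scores[candidates])]
    labels[query] = int(np.linalg.norm(X[query]) < 3)    # stand-in for expert feedback
    # Illustrative update: refit on samples confirmed nominal plus everything still unlabeled.
    keep = (labels == 1) | (labels == -1)
    model = OneClassSVM(nu=0.05, gamma="scale").fit(X[keep])

print("flagged anomalies:", np.sum(model.predict(X) == -1))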

One-Class based learning for Hybrid Spectrum Sensing in Cognitive Radio. Jaber, M.; Nasser, A.; Charara, N.; Mansour, A.; and Yao, K. C. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1683-1686, Aug 2020.
@InProceedings{9287326,\n  author = {M. Jaber and A. Nasser and N. Charara and A. Mansour and K. C. Yao},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {One-Class based learning for Hybrid Spectrum Sensing in Cognitive Radio},\n  year = {2020},\n  pages = {1683-1686},\n  abstract = {The main aim of the Spectrum Sensing (SS) in a Cognitive Radio system is to distinguish between the binary hypotheses H0: Primary User (PU) is absent and H1: PU is active. In this paper, Machine Learning (ML)-based hybrid Spectrum Sensing (SS) scheme is proposed. The scattering of the Test Statistics (TSs) of two detectors is used in the learning and prediction phases. As the SS decision is binary, the proposed scheme requires the learning of only the boundaries of H0-class in order to make a decision on the PU status: active or idle. Thus, a set of data generated under H0 hypothesis is used to train the detection system. Accordingly, unlike the existing ML-based schemes of the literature, no PU statistical parameters are required. In order to discriminate between H0-class and elsewhere, we used a one-class classification approach that is inspired by the Isolation Forest algorithm. Extensive simulations are done in order to investigate the efficiency of such hybrid SS and the impact of the novelty detection model parameters on the detection performance. Indeed, these simulations corroborate the efficiency of the proposed one-class learning of the hybrid SS system.},\n  keywords = {Signal processing algorithms;Vegetation;Detectors;Signal processing;Sensors;Cognitive radio;Signal to noise ratio},\n  doi = {10.23919/Eusipco47968.2020.9287326},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001683.pdf},\n}\n\n
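A minimal illustration of one-class learning on H0-only data with scikit-learn's IsolationForest, in the spirit of the entry above; the two toy detector statistics and the signal model are placeholders, not the paper's detectors.

import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.default_rng(3)

def test_statistics(n, pu_active, snr=1.0):
    """Two toy detector statistics (an energy score and a lag-1 correlation score)."""
    x = rng.standard_normal((n, 64))
    if pu_active:
        x += np.sqrt(snr) * rng.standard_normal((n, 64))
    energy = np.mean(x**2, axis=1)
    corr = np.abs(np.mean(x[:, :-1] * x[:, 1:], axis=1))
    return np.column_stack([energy, corr])

# Train only on data generated under H0 (primary user absent).
clf = IsolationForest(contamination=0.05, random_state=0)
clf.fit(test_statistics(2000, pu_active=False))

# At sensing time, samples falling outside the learned H0 region are declared H1.
h1_predictions = clf.predict(test_statistics(200, pu_active=True))
print("fraction of H1 frames detected as active PU:", np.mean(h1_predictions == -1))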

Techniques Improving the Robustness of Deep Learning Models for Industrial Sound Analysis. Johnson, D. S.; and Grollmisch, S. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 81-85, Aug 2020.
@InProceedings{9287327,\n  author = {D. S. Johnson and S. Grollmisch},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Techniques Improving the Robustness of Deep Learning Models for Industrial Sound Analysis},\n  year = {2020},\n  pages = {81-85},\n  abstract = {The field of Industrial Sound Analysis (ISA) aims to automatically identify faults in production machinery or manufactured goods by analyzing audio signals. Publications in this field have shown that the surface condition of metal balls and different types of bulk materials (screws, nuts, etc.) sliding down a tube can be classified with a high accuracy using audio signals and deep neural networks. However, these systems suffer from domain shift, or dataset bias, due to minor changes in the recording setup which may easily happen in real-world production lines. This paper aims at finding methods to increase robustness of existing detection systems to domain shift, ideally without the need to record new data or retrain the models. Through five experiments, we implement a convolutional neural network (CNN) for two publicly available ISA datasets and evaluate transfer learning, data normalization and data augmentation as approaches to deal with domain shift. Our results show that while supervised methods with additional labeled data are the best approach, an unsupervised method that implements data augmentation with adaptive normalization is able to improve the performance by a large margin without the need of retraining neural networks.},\n  keywords = {Adaptation models;Neural networks;Training data;Production;Robustness;Data models;Task analysis;industrial sound analysis;neural networks;data augmentation;data normalization;transfer learning},\n  doi = {10.23919/Eusipco47968.2020.9287327},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000081.pdf},\n}\n\n
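A small sketch of per-recording (adaptive) feature normalization, one of the unsupervised remedies for domain shift evaluated above; the spectrogram shapes and the simulated gain/offset shift are assumptions, and the paper's exact augmentation pipeline is not reproduced.

import numpy as np

def adaptive_normalize(spectrograms, eps=1e-8):
    """Normalize each recording by its own per-bin mean and std, so that a shifted
    recording setup maps onto similar feature statistics."""
    out = []
    for spec in spectrograms:                       # spec: (freq_bins, frames)
        mu = spec.mean(axis=1, keepdims=True)
        sigma = spec.std(axis=1, keepdims=True)
        out.append((spec - mu) / (sigma + eps))
    return out

# Hypothetical: the same machine recorded with a different microphone gain and offset.
rng = np.random.default_rng(4)
original = rng.random((64, 100))
shifted = 1.7 * original + 0.3                      # simulated domain shift
a, b = adaptive_normalize([original, shifted])
print(np.max(np.abs(a - b)))                        # ~0: the affine shift is removed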

Decision-Feedback Differential Detection with Optimum Detection Order Metric for Noncoherent Massive MIMO Systems. Yammine, G.; and Fischer, R. F. H. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1658-1662, Aug 2020.
@InProceedings{9287328,\n  author = {G. Yammine and R. F. H. Fischer},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Decision-Feedback Differential Detection with Optimum Detection Order Metric for Noncoherent Massive MIMO Systems},\n  year = {2020},\n  pages = {1658-1662},\n  abstract = {Noncoherent detection in massive MIMO systems is an attractive alternative to channel-estimation-based detection. So far, receivers employing ordered decision-feedback differential detection based on the phase quantization error of the candidate symbols of the correlation matrix have been proposed. This approach however ignores reliability information in certain cases. In this paper, we analytically derive the maximum-likelihood decision metric used to search for the optimum detection order. We compare the proposed metric to the phase-quantization-based one in terms symbol error rate performance.},\n  keywords = {Maximum likelihood estimation;Maximum likelihood detection;Error analysis;Receivers;Massive MIMO;Linear antenna arrays;Sorting},\n  doi = {10.23919/Eusipco47968.2020.9287328},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001658.pdf},\n}\n\n

Graph-based Denoising of EEG Signals in Impulsive Environments. Pentari, A.; Tzagkarakis, G.; Marias, K.; and Tsakalides, P. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1095-1099, Aug 2020.
@InProceedings{9287329,\n  author = {A. Pentari and G. Tzagkarakis and K. Marias and P. Tsakalides},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Graph-based Denoising of EEG Signals in Impulsive Environments},\n  year = {2020},\n  pages = {1095-1099},\n  abstract = {As the fields of brain-computer interaction and digital monitoring of mental health are rapidly evolving, there is an increasing demand to improve the signal processing module of such systems. Specifically, the employment of electroencephalogram (EEG) signals is among the best non-invasive modalities for collecting brain signals. However, in practice, the quality of the recorded EEG signals is often deteriorated by impulsive noise, which hinders the accuracy of any decision-making process. Previous methods for denoising EEG signals primarily rely on second order statistics for the additive noise, which is not a valid assumption when operating in impulsive environments. To alleviate this issue, this work proposes a new method for suppressing the effects of heavy-tailed noise in EEG recordings. To this end, the spatio-temporal interdependence between the electrodes is first modelled by means of graph representations. Then, the family of alpha-stable models is employed to fit the distribution of the noisy graph signals and design an appropriate adjacency matrix. The denoised signals are obtained by solving iteratively a regularized optimization problem based on fractional lower-order moments. Experimental evaluation with real data reveals the improved denoising performance of our algorithm against well-established techniques.},\n  keywords = {electroencephalography;graph theory;impulse noise;medical signal processing;signal denoising;impulsive noise;decision-making process;order statistics;additive noise;impulsive environments;heavy-tailed noise;EEG recordings;spatio-temporal interdependence;graph representations;alpha-stable models;noisy graph signals;denoised signals;improved denoising performance;graph-based denoising;brain-computer interaction;digital monitoring;mental health;employment;electroencephalogram signals;noninvasive modalities;brain signals;recorded EEG signals;Noise reduction;Signal processing algorithms;Signal processing;Brain modeling;Electroencephalography;Noise measurement;Optimization;Graph signal denoising;alpha-stable models;fractional lower order moments;impulsive noise;EEG signals},\n  doi = {10.23919/Eusipco47968.2020.9287329},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001095.pdf},\n}\n\n
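For orientation, a minimal sketch of graph-regularized denoising with a quadratic (Tikhonov) smoothness term, the simplest relative of the approach above; the paper instead iterates a fractional lower-order criterion matched to alpha-stable noise, and the 4-node electrode graph and signal here are toy assumptions.

import numpy as np

def graph_laplacian(W):
    return np.diag(W.sum(axis=1)) - W

def denoise(y, W, lam=2.0):
    """Solve min_x ||x - y||^2 + lam * x^T L x  ->  x = (I + lam*L)^{-1} y."""
    L = graph_laplacian(W)
    return np.linalg.solve(np.eye(len(y)) + lam * L, y)

# Hypothetical 4-electrode adjacency (from spatial proximity) and one noisy sample vector.
W = np.array([[0, 1, 1, 0],
              [1, 0, 1, 0],
              [1, 1, 0, 1],
              [0, 0, 1, 0]], dtype=float)
y = np.array([1.0, 1.2, 0.9, 8.0])     # last electrode hit by an impulsive artifact
print(denoise(y, W))                   # the outlying value is pulled toward its neighbors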

Jointly Leveraging Decorrelation and Sparsity for Improved Feedback Cancellation in Hearing Aids. Chen, K.-L.; Lee, C.-H.; Rao, B. D.; and Garudadri, H. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 121-125, Aug 2020.
@InProceedings{9287330,\n  author = {K. -L. Chen and C. -H. Lee and B. D. Rao and H. Garudadri},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Jointly Leveraging Decorrelation and Sparsity for Improved Feedback Cancellation in Hearing Aids},\n  year = {2020},\n  pages = {121-125},\n  abstract = {We propose a new adaptive feedback cancellation (AFC) system in hearing aids (HAs) based on a well-posed optimization criterion that jointly considers both decorrelation of the signals and sparsity of the underlying channel. We show that the least squares criterion on subband errors regularized by a p-norm-like diversity measure can be used to simultaneously decorrelate the speech signals and exploit sparsity of the acoustic feedback path impulse response. Compared with traditional subband adaptive filters that are not appropriate for incorporating sparsity due to shorter sub-filters, our proposed framework is suitable for promoting sparse characteristics, as the update rule utilizing subband information actually operates in the fullband. Simulation results show that the normalized misalignment, added stable gain, and other objective metrics of the AFC are significantly improved by choosing a proper sparsity promoting factor and a suitable number of subbands. More importantly, the results indicate that the benefits of subband decomposition and sparsity promoting are complementary and additive for AFC in HAs.},\n  keywords = {Simulation;Measurement uncertainty;Adaptive filters;Hearing aids;Decorrelation;System identification;Optimization;hearing aids;feedback cancellation;whitening;decorrelation;sparsity;adaptive filter},\n  doi = {10.23919/Eusipco47968.2020.9287330},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000121.pdf},\n}\n\n

Constrained Clustering using Gaussian Processes. Traganitis, P. A.; and Giannakis, G. B. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1457-1461, Aug 2020.
@InProceedings{9287331,\n  author = {P. A. Traganitis and G. B. Giannakis},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Constrained Clustering using Gaussian Processes},\n  year = {2020},\n  pages = {1457-1461},\n  abstract = {Constrained clustering is an important machine learning, signal processing and data mining tool, for discovering clusters in data, in the presence of additional domain information. The present work introduces a probabilistic scheme for constrained clustering based on the popular Gaussian Process framework. The proposed scheme accommodates pairwise, must-and cannot-link constraints between data, does not require hyperparameter tuning, and enables assessment of the reliability of obtained results. Preliminary results on real data showcase the potential of the proposed approach.},\n  keywords = {Gaussian processes;Machine learning;Signal processing;Tools;Probabilistic logic;Reliability;Tuning;Constrained clustering;clustering;Gaussian process},\n  doi = {10.23919/Eusipco47968.2020.9287331},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001457.pdf},\n}\n\n

Beam Coordination Via Diffusion Reduced-Rank Adaptation Over Array Networks. Li, J.; and Xia, W. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1822-1826, Aug 2020.
@InProceedings{9287332,\n  author = {J. Li and W. Xia},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Beam Coordination Via Diffusion Reduced-Rank Adaptation Over Array Networks},\n  year = {2020},\n  pages = {1822-1826},\n  abstract = {In this work, we consider a distributed reduced-rank beam coordination problem over array networks. We develop an inherently adaptive combination scheme based on combination matrix for beam coordination problem. Two adaptive efficient implementation strategies for diffusion reduced-rank beamforming are proposed. Illustrative simulations validate that the proposed distributed reduced-rank adaptive algorithms could remarkably improve the convergence speed in comparison with the existing techniques under the condition of small samples.},\n  keywords = {Array signal processing;Simulation;Adaptive arrays;Signal processing algorithms;Europe;Steady-state;Convergence;beam coordination;diffusion beamforming;reduced-rank;adaptive combination matrix;distributed strategies},\n  doi = {10.23919/Eusipco47968.2020.9287332},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001822.pdf},\n}\n\n

Combining acoustic features and medical data in deep learning networks for voice pathology classification. Miliaresi, I.; Poutos, K.; and Pikrakis, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1190-1194, Aug 2020.
@InProceedings{9287333,\n  author = {I. Miliaresi and K. Poutos and A. Pikrakis},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Combining acoustic features and medical data in deep learning networks for voice pathology classification},\n  year = {2020},\n  pages = {1190-1194},\n  abstract = {In this paper, we present a study on the efficiency of neural networks for the hard problem of automatically classifying voice disorders. To this end, convolutional architectures combined with feed-forward neural networks are used for the classification of four types of voice disorders. Speech signals and data from medical records, collected by the Far Eastern Memorial Hospital (FEMH), involving four speech pathologies, (functional dysphonia, phonotrauma, laryngeal neoplasm and unilateral vocal paralysis), were analyzed and the proposed method participated at the FEMH Voice Data challenge 2019. The respective classification accuracy at the challenge’s testing dataset was 57% and the method ranked fifth with a small performance margin from the leading method.},\n  keywords = {Pathology;Neural networks;Training data;Paralysis;Speech processing;Neoplasms;Testing;FEMH;Voice disorders;Neoplasm;Phonotrauma;Functional Dysphonia;Vocal Palsy;Neural networks},\n  doi = {10.23919/Eusipco47968.2020.9287333},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001190.pdf},\n}\n\n

Hidden Markov Model Based Data-driven Calibration of Non-dispersive Infrared Gas Sensor. You, Y.; and Oechtering, T. J. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1717-1721, Aug 2020.
@InProceedings{9287334,\n  author = {Y. You and T. J. Oechtering},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Hidden Markov Model Based Data-driven Calibration of Non-dispersive Infrared Gas Sensor},\n  year = {2020},\n  pages = {1717-1721},\n  abstract = {Non-dispersive infrared gas sensing is one of the best gas measurement method for air quality monitoring. However, sensors drift over time due to sensor aging and environmental factors, which makes calibration necessary. In this paper, we propose a hidden Markov model approach for sensor self-calibration, which builds on the physical model of gas sensors based on the Beer-Lambert law. We focus on the statistical dependency between a calibration coefficient and the temperature change. Supervised and unsupervised learning algorithms to learn the stochastic parameters of the hidden Markov model are derived and numerically tested. The true calibration coefficient at each time instant is estimated using the Viterbi algorithm. The numerical experiments using CO2 sensor data show excellent initial results which confirms that data-driven calibration of non-dispersive infrared gas sensors is possible. Meanwhile, the challenge in the practical design is to find an appropriate quantization scheme to keep the computation burden reasonable while achieving good performance.},\n  keywords = {Temperature sensors;Temperature dependence;Hidden Markov models;Sensors;Calibration;Gas detectors;Unsupervised learning;Non-dispersive infrared gas sensor;drift;self calibration;data-driven modeling;hidden Markov model;statistical inference},\n  doi = {10.23919/Eusipco47968.2020.9287334},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001717.pdf},\n}\n\n
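Since the entry above estimates the calibration coefficient with the Viterbi algorithm, a minimal numpy sketch of Viterbi decoding over a discrete hidden state; the transition and emission matrices are toy values, not learned sensor parameters.

import numpy as np

def viterbi(obs, pi, A, B):
    """Most likely hidden-state path; pi: initial probs, A: transitions, B: emissions."""
    n_states, T = A.shape[0], len(obs)
    logd = np.log(pi) + np.log(B[:, obs[0]])
    back = np.zeros((T, n_states), dtype=int)
    for t in range(1, T):
        cand = logd[:, None] + np.log(A)              # previous state x next state
        back[t] = np.argmax(cand, axis=0)
        logd = cand[back[t], np.arange(n_states)] + np.log(B[:, obs[t]])
    path = [int(np.argmax(logd))]
    for t in range(T - 1, 0, -1):                     # backtrack
        path.append(back[t, path[-1]])
    return path[::-1]

# Toy example: 3 quantized calibration states, 4 observation symbols.
pi = np.array([0.6, 0.3, 0.1])
A = np.array([[0.8, 0.15, 0.05], [0.1, 0.8, 0.1], [0.05, 0.15, 0.8]])
B = np.array([[0.7, 0.2, 0.05, 0.05], [0.1, 0.6, 0.2, 0.1], [0.05, 0.05, 0.2, 0.7]])
print(viterbi([0, 0, 1, 3, 3], pi, A, B))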

Novel Algorithms for Lp-Quasi-Norm Principal-Component Analysis. Chachlakis, D. G.; and Markopoulos, P. P. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1045-1049, Aug 2020.
@InProceedings{9287335,\n  author = {D. G. Chachlakis and P. P. Markopoulos},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Novel Algorithms for Lp-Quasi-Norm Principal-Component Analysis},\n  year = {2020},\n  pages = {1045-1049},\n  abstract = {We consider outlier-resistant Lp-quasi-norm (p ≤ 1) Principal-Component Analysis (Lp-PCA) of a D-by-N matrix. It was recently shown that Lp-PCA (p ≤ 1) admits an exact solution by means of combinatorial optimization with computational cost exponential in N. To date, apart from the exact solution to Lp-PCA (p ≤ 1), there exists no converging algorithm of lower cost that approximates its exact solution. In this work, we (i) propose a novel and converging algorithm that approximates the exact solution to Lp-PCA with significantly lower computational cost than that of the exact solver, (ii) conduct formal complexity and convergence analyses, and (iii) propose a multi-component solver based on subspace-deflation. Numerical studies on matrix reconstruction and medical-data classification illustrate the outlier resistance of Lp-PCA.},\n  keywords = {Signal processing algorithms;Signal processing;Approximation algorithms;Classification algorithms;Computational efficiency;Immune system;Principal component analysis;Principal-Component Analysis;PCA;L1-PCA;Lp-norm;quasi-norm;Lp-quasi-norm;outliers;robustness},\n  doi = {10.23919/Eusipco47968.2020.9287335},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001045.pdf},\n}\n\n

Experimental Analysis of EM and MU Algorithms for Optimizing Full-rank Spatial Covariance Model. Sawada, H.; Ikeshita, R.; and Nakatani, T. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 885-889, Aug 2020.
@InProceedings{9287336,\n  author = {H. Sawada and R. Ikeshita and T. Nakatani},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Experimental Analysis of EM and MU Algorithms for Optimizing Full-rank Spatial Covariance Model},\n  year = {2020},\n  pages = {885-889},\n  abstract = {Full-rank spatial covariance analysis (FCA) is based on a flexible source model, and achieves high-quality results for blind source separation. An expectation-maximization (EM) algorithm as well as a multiplicative update (MU) algorithm are known to optimize the FCA model parameters. In this paper, we first investigate the behaviors of both algorithms. We observed that the MU algorithm minimizes the FCA objective function faster than the EM algorithm, but the separation performance at the converged point is better by the EM algorithm than the MU algorithm. We found that the MU algorithm tends to push the covariance matrices towards rank deficient. To mitigate this tendency, we propose a modified FCA model where the tempo-ral parameters are shared within a time block. Experimental results show that the modified model provides better separation performance not only by the MU algorithm but also by the EM algorithm.},\n  keywords = {Analytical models;Signal processing algorithms;Europe;Signal processing;Linear programming;Blind source separation;Covariance matrices;blind source separation (BSS);full-rank spatial covariance analysis (FCA);expectation-maximization (EM) algorithm;multiplicative update (MU) algorithm;rank deficient},\n  doi = {10.23919/Eusipco47968.2020.9287336},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000885.pdf},\n}\n\n

Distributed combined acoustic echo cancellation and noise reduction using GEVD-based distributed adaptive node specific signal estimation with prior knowledge. Ruiz, S.; van Waterschoot, T.; and Moonen, M. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 206-210, Aug 2020.
@InProceedings{9287337,\n  author = {S. Ruiz and T. {van Waterschoot} and M. Moonen},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed combined acoustic echo cancellation and noise reduction using GEVD-based distributed adaptive node specific signal estimation with prior knowledge},\n  year = {2020},\n  pages = {206-210},\n  abstract = {Distributed combined acoustic echo cancellation (AEC) and noise reduction (NR) in a wireless acoustic sensor network (WASN) is tackled by using a specific version of the PK-GEVD-DANSE algorithm (cfr. [1]). Although this algorithm was initially developed for distributed NR with partial prior knowledge of the desired speech steering vector, it is shown that it can also be used for AEC combined with NR. Simulations have been carried out using centralized and distributed batch-mode implementations to verify the performance of the algorithm in terms of AEC quantified with the echo return loss enhancement (ERLE), as well as in terms of the NR quantified with the signal- to-noise ratio (SNR).},\n  keywords = {Wireless communication;Wireless sensor networks;Echo cancellers;Noise reduction;Signal processing algorithms;Acoustic sensors;Acoustics;Distributed signal processing;wireless acoustic sensor networks;acoustic echo cancellation;noise reduction},\n  doi = {10.23919/Eusipco47968.2020.9287337},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000206.pdf},\n}\n\n

Regularized DFA to study the gaze position of an airline pilot. Berthelot, B.; Grivel, É.; Legrand, P.; André, J.-M.; and Mazoyer, P. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2403-2407, Aug 2020.
@InProceedings{9287338,\n  author = {B. Berthelot and É. Grivel and P. Legrand and J. -M. André and P. Mazoyer},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Regularized DFA to study the gaze position of an airline pilot},\n  year = {2020},\n  pages = {2403-2407},\n  abstract = {To estimate the Hurst exponent of a mono-fractal process, the detrended fluctuation analysis (DFA) is based on the estimation of the trend of the integrated process. The latter is subtracted from the integrated process. The power of the residual is then computed and corresponds to the square of the fluctuation function. Its logarithm is proportional to the Hurst exponent. In the last few years, a few variants of this method have been proposed and differ in the way of estimating the trend. Our contribution in this paper is threefold. First, we introduce a new variant of the DFA, based on a regularized least-square criterion to estimate the trend. Then, the influence of the regularization parameter on the fluctuation function is analyzed in two cases: when the process is wide sense stationary and when it is not. Finally, an application is presented in the field of aeronautics to characterize an attentional impairment: the visual tunneling.},\n  keywords = {fluctuations;fractals;regularization parameter;fluctuation function;regularized DFA;gaze position;airline pilot;Hurst exponent;detrended fluctuation analysis;regularized least-square criterion;monofractal process;visual tunneling;Visualization;Fluctuations;Time series analysis;Estimation;Tunneling;Tools;Market research;filter;interpretation;Hurst;DFA},\n  doi = {10.23919/Eusipco47968.2020.9287338},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002403.pdf},\n}\n\n
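For context, a minimal sketch of the classical least-squares-detrended DFA that the regularized variant above modifies; the scales and white-noise test signal are arbitrary choices.

import numpy as np

def dfa(x, scales):
    """Classical DFA-1: fluctuation F(s) vs. scale s; the log-log slope estimates the
    scaling exponent (approximately the Hurst exponent for fractional Gaussian noise)."""
    y = np.cumsum(x - np.mean(x))                    # integrated (profile) signal
    F = []
    for s in scales:
        n_seg = len(y) // s
        segs = y[:n_seg * s].reshape(n_seg, s)
        t = np.arange(s)
        rms = []
        for seg in segs:
            coef = np.polyfit(t, seg, 1)             # local linear trend
            rms.append(np.mean((seg - np.polyval(coef, t)) ** 2))
        F.append(np.sqrt(np.mean(rms)))
    slope, _ = np.polyfit(np.log(scales), np.log(F), 1)
    return slope

rng = np.random.default_rng(5)
white_noise = rng.standard_normal(4096)
print(dfa(white_noise, scales=[16, 32, 64, 128, 256]))   # close to 0.5 for white noise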

3D Feature Detector-Descriptor Pair Evaluation on Point Clouds. Stancelova, P.; Sikudova, E.; and Cernekova, Z. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 590-594, Aug 2020.
@InProceedings{9287339,\n  author = {P. Stancelova and E. Sikudova and Z. Cernekova},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {3D Feature Detector-Descriptor Pair Evaluation on Point Clouds},\n  year = {2020},\n  pages = {590-594},\n  abstract = {In recent years, computer vision research has focused on extracting features from 3D data. In this work, we reviewed methods of extracting local features from objects represented in the form of point clouds. The goal of the work was to make theoretical overview and evaluation of selected point cloud detectors and descriptors. We performed an experimental assessment of the repeatability and computational efficiency of individual methods using the well known Stanford 3D Scanning Repository database with the aim of identifying a method which is computationally-efficient in finding good corresponding points between two point clouds. We also compared the efficiency of detector-descriptor pairing showing that the choice of a descriptor affects the performance of the object recognition based on the descriptor matching. We summarized the results into graphs and described them with respect to the individual tested properties of the methods.},\n  keywords = {Three-dimensional displays;Detectors;Signal processing;Feature extraction;Time measurement;Computational efficiency;Object recognition;3D detector;3D descriptor;point cloud;feature extraction},\n  doi = {10.23919/Eusipco47968.2020.9287339},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
@InProceedings{9287340,\n  author = {N. Passalis and A. Iosifidis and M. Gabbouj and A. Tefas},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust Hypersphere-based Weight Imprinting for Few-Shot Learning},\n  year = {2020},\n  pages = {1392-1396},\n  abstract = {Performing fast few-shot learning is increasingly important in a number of embedded applications. Among them, a form of gradient-descent-free learning known as Weight Imprinting (WI) was recently established as an efficient way to perform few-shot learning on Deep Learning (DL) accelerators that do not support back-propagation, such as Edge Tensor Processing Units (Edge TPUs). Despite its efficiency, WI comes with a number of critical limitations. For example, WI cannot effectively handle multimodal novel categories, while it is especially prone to overfitting that can have devastating effects on the accuracy of the models on novel categories. To overcome these limitations, in this paper we propose a robust hypersphere-based WI approach that allows for regularizing the training process in an imprinting-aware way. At the same time, the proposed formulation provides a natural way to handle multimodal novel categories. Indeed, as demonstrated through the conducted experiments, the proposed method leads to significant improvements over the baseline WI approach.},\n  keywords = {Training;Deep learning;Tensors;Neural networks;Stochastic processes;Signal processing;Task analysis;Weight Imprinting;Few-shot Learning;Edge TPU;Embedded Deep Learning},\n  doi = {10.23919/Eusipco47968.2020.9287340},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001392.pdf},\n}\n\n
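As background for this entry, the baseline weight-imprinting mechanism it improves upon can be sketched in a few lines: average the L2-normalized embeddings of the few support samples per novel class and use the re-normalized result as that class's classifier weight, with cosine similarity at inference. This is a generic illustration on toy data, not the hypersphere-regularized formulation proposed in the paper.

import numpy as np

def imprint_weights(embeddings, labels, n_classes):
    """Imprint one classifier weight vector per novel class from few-shot embeddings."""
    W = np.zeros((n_classes, embeddings.shape[1]))
    for c in range(n_classes):
        z = embeddings[labels == c]
        z = z / np.linalg.norm(z, axis=1, keepdims=True)    # unit-norm support embeddings
        W[c] = z.mean(axis=0)
    return W / np.linalg.norm(W, axis=1, keepdims=True)      # weights back on the unit hypersphere

def predict(W, query):
    q = query / np.linalg.norm(query, axis=1, keepdims=True)
    return (q @ W.T).argmax(axis=1)                           # cosine-similarity classifier

rng = np.random.default_rng(1)
emb = rng.standard_normal((10, 64)) + np.repeat([[0.0], [2.0]], 5, axis=0)  # 5 shots x 2 toy classes
lab = np.repeat([0, 1], 5)
W = imprint_weights(emb, lab, 2)
print(predict(W, emb))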
@InProceedings{9287341,\n  author = {M. A. Irfan and E. Magli},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {3D Point Cloud Denoising Using a Joint Geometry and Color k-NN Graph},\n  year = {2020},\n  pages = {585-589},\n  abstract = {Many point cloud acquisition methods, e.g. multi-viewpoint image stereo matching and acquisition of depth data from active light sensors, suffer from significant geometry noise in the data. In the existing literature, denoising of this geometry noise has been performed using only geometry information. In this paper, based on the notion that color attributes are correlated with the geometry, we propose a novel geometry denoising technique that takes advantage of this correlation via a graph-based optimization process. In particular, we construct a graph based on both color and geometry information, and use it for graph-based Tikhonov regularization. Results on synthetic and real-world point clouds show that the proposed denoising method significantly outperforms existing geometry-only techniques.},\n  keywords = {Geometry;Three-dimensional displays;Image color analysis;Noise reduction;Signal processing algorithms;Signal processing;Colored noise;convex optimization;graph signal processing;point cloud denoising},\n  doi = {10.23919/Eusipco47968.2020.9287341},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000585.pdf},\n}\n\n
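A rough illustration of the kind of graph-based Tikhonov smoothing this abstract refers to, with the affinity graph built on concatenated geometry and color: solve (I + gamma*L) X_hat = X for the denoised coordinates, where L is the combinatorial Laplacian of a k-NN graph. The neighbourhood size k, the Gaussian affinity and gamma are illustrative choices, not the paper's exact construction or weighting.

import numpy as np
from scipy.spatial import cKDTree

def denoise_point_cloud(xyz, rgb, k=8, gamma=2.0):
    """Tikhonov-smooth noisy geometry on a k-NN graph built from joint geometry+color features."""
    feats = np.hstack([xyz, rgb])                       # joint feature used to find neighbours
    _, idx = cKDTree(feats).query(feats, k=k + 1)       # first neighbour is the point itself
    n = xyz.shape[0]
    W = np.zeros((n, n))
    for i in range(n):
        for j in idx[i, 1:]:
            w = np.exp(-np.sum((feats[i] - feats[j]) ** 2))
            W[i, j] = W[j, i] = max(W[i, j], w)         # symmetric Gaussian affinity
    L = np.diag(W.sum(axis=1)) - W                      # combinatorial graph Laplacian
    return np.linalg.solve(np.eye(n) + gamma * L, xyz)  # (I + gamma L) X_hat = X

rng = np.random.default_rng(2)
pts, cols = rng.random((200, 3)), rng.random((200, 3))
noisy = pts + 0.01 * rng.standard_normal((200, 3))
print(denoise_point_cloud(noisy, cols).shape)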
@InProceedings{9287343,\n  author = {E. Macias and J. Serrano and J. L. Vicario and A. Morell},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Novel Imputation Method Using Average Code from Autoencoders in Clinical Data},\n  year = {2020},\n  pages = {1576-1579},\n  abstract = {It is possible to improve the reconstruction of clinical data by combining codes from autoencoders (AE). The extracted information can be used to enhance existing imputation methods for this type of data. In the proposed approach, the encoder and decoder functions of a trained autoencoder are first extracted. Then, imputers equally spaced over the normalized distribution of the variables generate codes that are combined into an average code, which is finally used to reconstruct the original information. The proposed method is compared against imputation by the mean values of the variables and against reconstruction with a single AE. The proposed approach performs very well at recovering the original information, and even better when values are missing in more than one variable. The error is at least 70% lower than that of the other methods when imputing one variable, and the proposed approach is also highly recommended when values are missing in more than one variable.},\n  keywords = {Europe;Signal processing;Decoding;Data mining;Imputation;deep learning;autoencoder;health-care},\n  doi = {10.23919/Eusipco47968.2020.9287343},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001576.pdf},\n}\n\n
@InProceedings{9287344,\n  author = {D. Krause and A. Politis and K. Kowalczyk},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Comparison of Convolution Types in CNN-based Feature Extraction for Sound Source Localization},\n  year = {2020},\n  pages = {820-824},\n  abstract = {This paper presents an overview of several approaches to convolutional feature extraction in the context of deep neural network (DNN) based sound source localization. Different ways of processing multichannel audio data in the time-frequency domain using convolutional neural networks (CNNs) are described and tested with the aim to provide a comparative study of their performance. In most considered approaches, models are trained with phase and magnitude components of the Short-Time Fourier Transform (STFT). In addition to state-of-the-art 2D convolutional layers, we investigate several solutions for the processing of 3D matrices containing multichannel complex representation of the microphone signals. The first two proposed approaches are the 3D convolutions and depthwise separable convolutions in which two types of filters are used to exploit information within and between the channels. Note that this paper presents the first application of depthwise separable convolutions in a task of sound source localization. The third approach is based on complex-valued neural networks which allows for performing convolutions directly on complex signal representations. Experiments are conducted using two synthetic datasets containing noise and speech signals recorded using a tetrahedral microphone array. The paper presents the results obtained using all investigated model types and discusses the resulting accuracy and computational complexity in DNN-based source localization.},\n  keywords = {Solid modeling;Three-dimensional displays;Convolution;Computational modeling;Two dimensional displays;Feature extraction;Task analysis;sound source localization;sound feature extraction;convolutional neural networks;complex convolutions;depthwise convolutions},\n  doi = {10.23919/Eusipco47968.2020.9287344},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000820.pdf},\n}\n\n
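Of the convolution types compared in this entry, the depthwise separable variant is the least standard for multichannel audio front-ends; a minimal PyTorch sketch is given below. The channel counts, kernel size and the real/imaginary channel stacking are illustrative assumptions, not the paper's configuration.

import torch
import torch.nn as nn

class DepthwiseSeparableConv2d(nn.Module):
    """Depthwise (per-channel) spatial convolution followed by a pointwise 1x1 convolution."""
    def __init__(self, in_ch, out_ch, kernel_size=3):
        super().__init__()
        self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size,
                                   padding=kernel_size // 2, groups=in_ch)
        self.pointwise = nn.Conv2d(in_ch, out_ch, kernel_size=1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))

# toy STFT-like input: batch x (mic channels * {real, imag}) x time x frequency
x = torch.randn(4, 8, 100, 257)
y = DepthwiseSeparableConv2d(8, 64)(x)
print(y.shape)          # torch.Size([4, 64, 100, 257])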
@InProceedings{9287345,\n  author = {H. Cai and M. F. Kaloorazi and J. Chen and W. Chen and C. Richard},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Online Dominant Generalized Eigenvectors Extraction Via A Randomized Method},\n  year = {2020},\n  pages = {2353-2357},\n  abstract = {The generalized Hermitian eigendecomposition problem is ubiquitous in signal and machine learning applications. Considering the need of processing streaming data in practice and restrictions of existing methods, this paper is concerned with fast and efficient generalized eigenvectors tracking. We first present a computationally efficient algorithm based on randomization termed alternate-projections randomized eigenvalue decomposition (APR-EVD) to solve a standard eigenvalue problem. By exploiting rank-1 strategy, two online algorithms based on APR-EVD are developed for the dominant generalized eigenvectors extraction. Numerical examples show the practical applicability and efficacy of the proposed online algorithms.},\n  keywords = {Machine learning algorithms;Signal processing algorithms;Signal processing;Eigenvalues and eigenfunctions;Computational efficiency;Standards;Numerical stability;Randomized algorithms;dominant generalized eigenvectors;online algorithms;fast subspace tracking},\n  doi = {10.23919/Eusipco47968.2020.9287345},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002353.pdf},\n}\n\n
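The randomized building block mentioned in this abstract addresses a standard symmetric eigenproblem. The sketch below shows a generic randomized subspace-iteration approximation of the dominant eigenpairs, which conveys the idea but is not necessarily the authors' alternate-projections (APR-EVD) scheme nor their rank-1 online updates; the rank, oversampling and iteration counts are illustrative.

import numpy as np

def randomized_evd(A, r, n_iter=4, oversample=10, seed=0):
    """Approximate the r dominant eigenpairs of a symmetric matrix via randomized subspace iteration."""
    rng = np.random.default_rng(seed)
    Q, _ = np.linalg.qr(A @ rng.standard_normal((A.shape[0], r + oversample)))
    for _ in range(n_iter):                  # power iterations sharpen the captured subspace
        Q, _ = np.linalg.qr(A @ Q)
    w, V = np.linalg.eigh(Q.T @ A @ Q)       # small compressed eigenproblem
    return w[::-1][:r], (Q @ V[:, ::-1])[:, :r]

rng = np.random.default_rng(3)
X = rng.standard_normal((500, 50))
C = X.T @ X / 500                            # sample covariance (symmetric PSD)
vals, vecs = randomized_evd(C, r=5)
print(np.round(vals, 3))
print(np.round(np.linalg.eigvalsh(C)[::-1][:5], 3))   # exact top-5 for comparison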
@InProceedings{9287346,\n  author = {N. Ahmed and C. Bhattacharyya and A. Ghose},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Novel Non-Parametric Approach Of Tremor Detection Using Wrist-Based Photoplethysmograph},\n  year = {2020},\n  pages = {1150-1154},\n  abstract = {Pervasive detection and quantification of tremor for Parkinson's Disease (PD) patients, using a Commercial Off-the-shelf (COTS) wrist-wearable device, is an important problem to investigate. Parkinsonian tremor is one of the earliest and most important surrogate biomarkers indicating the progress or status of the disease for patients under treatment with drugs or deep brain stimulation (DBS) therapy. However, it is a challenging issue: in some cases, such as the pill-rolling symptom, tremor occurs at minor extremities like the fingers, and its effect on a wrist-worn motion sensor system is not significant enough to be captured. In this paper, we explore the possibility of using wrist-based photoplethysmography (PPG) as a novel sensor modality for detecting tremor at rest. Our preliminary results, gathered from healthy cohorts performing simulations of Parkinsonian tremor, elucidate the merit of the proposed method. Also, since PPG acquisition is power-hungry, we leverage a conceptual method of compressive sensing to reduce the overall power requirement of the application.},\n  keywords = {biomedical electronics;body sensor networks;brain;diseases;medical disorders;medical signal processing;neurophysiology;patient monitoring;patient treatment;photoplethysmography;novel nonparametric approach;tremor detection;wrist-based photoplethysmograph;Parkinson disease;Parkinsonian tremor;surrogate biomarker;deep brain stimulation therapy;pill-rolling symptom;wrist-worn motion sensor system;wrist-based photoplethysmography;commercial off-the-shelf wrist-wearable device;PPG acquisition;compressive sensing;Satellite broadcasting;Pipelines;Signal processing;Photoplethysmography;Motion detection;Sensors;Diseases;Parkinson’s Disease;Tremor;Photoplethysmogram (PPG);SSA;Compressive Sensing},\n  doi = {10.23919/Eusipco47968.2020.9287346},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001150.pdf},\n}\n\n
@InProceedings{9287347,\n  author = {O. K. Cura and M. A. Ozdemir and A. Akan},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Epileptic EEG Classification Using Synchrosqueezing Transform with Machine and Deep Learning Techniques},\n  year = {2020},\n  pages = {1210-1214},\n  abstract = {Epilepsy is a neurological disease that is very common worldwide. In the literature, patient’s electroencephalography (EEG) signals are frequently used for an epilepsy diagnosis. However, the success of epileptic examination procedures from quantitative EEG signals is limited. In this paper, a high-resolution time-frequency (TF) representation called Synchrosqueezed Transform (SST) is used to classify epileptic EEG signals. The SST matrices of seizure and pre-seizure EEG data of 16 epilepsy patients are calculated. Two approaches based on machine learning and deep learning are proposed to classify pre-seizure and seizure signals. In the machine learning-based approach, the various features like higher-order joint moments are calculated and these features are classified by Support Vector Machine (SVM), k-Nearest Neighbor (kNN) and Naive Bayes (NB) classifiers. In the deep learning-based approach, the SST matrix was recorded as an image and a Convolutional Neural Network (CNN)-based architecture was used to classify these images. Simulation results demonstrate that both approaches achieved promising validation accuracy rates. While the maximum (90.2%) validation accuracy is achieved for the machine learning-based approach, (90.3%) validation accuracy is achieved for the deep learning-based approach.},\n  keywords = {Support vector machines;Deep learning;Image segmentation;Machine learning algorithms;Epilepsy;Transforms;Electroencephalography;CNN;EEG;SST;SVM;Time-Frequency Analysis},\n  doi = {10.23919/Eusipco47968.2020.9287347},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001210.pdf},\n}\n\n
@InProceedings{9287348,\n  author = {Y. Laufer and S. Gannot},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Bayesian Hierarchical Model for Blind Audio Source Separation},\n  year = {2020},\n  pages = {276-280},\n  abstract = {This paper presents a fully Bayesian hierarchical model for blind audio source separation in a noisy environment. Our probabilistic approach is based on Gaussian priors for the speech signals, Gamma hyperpriors for the speech precisions and a Gamma prior for the noise precision. The time-varying acoustic channels are modelled with a linear-Gaussian state-space model. The inference is carried out using a variational Expectation-Maximization (VEM) algorithm, leading to a variant of the multi-speaker multichannel Wiener filter (MCWF) to separate and enhance the audio sources, and a Kalman smoother to infer the acoustic channels. The VEM speech estimator can be decomposed into two stages: A multi-speaker linearly constrained minimum variance (LCMV) beamformer followed by a variational multi-speaker postfilter. The proposed algorithm is evaluated in a static scenario using recorded room impulse responses (RIRs) with two reverberation levels, showing superior performance compared to competing methods.},\n  keywords = {Source separation;Wiener filters;Signal processing algorithms;Inference algorithms;Bayes methods;Reverberation;Noise measurement;Audio source separation;Variational EM},\n  doi = {10.23919/Eusipco47968.2020.9287348},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000276.pdf},\n}\n\n
@InProceedings{9287349,\n  author = {S. A. Mousavi and F. Heidari and E. Tahami and M. Azarnoosh},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Fall detection system via smart phone and send people location},\n  year = {2020},\n  pages = {1605-1607},\n  abstract = {Due to falling birth rates and industrialized societies, the number of people over 65 is increasing, and by 2050 almost 16% of the population will be elderly. Falls in the elderly are the second leading cause of severe injuries leading to death. The aim of this study is to provide a high-speed method for detecting falls in the elderly so that they can be diagnosed and treated quickly. We propose a method that uses the smartphone and its acceleration signal to detect falls. The proposed system detects a fall using the smartphone sensor and reports the location of the person. The results show that with the mean, skewness, and kurtosis features and a support vector machine classifier, the accuracy of fall detection is 96.33%. An important point is the calculation time, which is less than 1 second.},\n  keywords = {Support vector machines;Senior citizens;Sociology;Signal processing;Feature extraction;Statistics;Smart phones;mobile health;fall detection;classification;feature extraction},\n  doi = {10.23919/Eusipco47968.2020.9287349},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001605.pdf},\n}\n\n
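The feature/classifier pipeline named in this abstract (per-window mean, skewness and kurtosis of the acceleration magnitude fed to an SVM) is straightforward to sketch; the synthetic signals, window length and labels below are purely illustrative and not the authors' data.

import numpy as np
from scipy.stats import skew, kurtosis
from sklearn.svm import SVC

def window_features(acc_mag, win=128):
    """Mean, skewness and kurtosis per window of the acceleration magnitude."""
    n = len(acc_mag) // win
    segs = acc_mag[:n * win].reshape(n, win)
    return np.column_stack([segs.mean(axis=1), skew(segs, axis=1), kurtosis(segs, axis=1)])

rng = np.random.default_rng(4)
adl = rng.standard_normal(128 * 50) * 0.2 + 1.0        # toy "daily activity" magnitude (~1 g)
fall = rng.standard_normal(128 * 50) * 0.2 + 1.0
fall[::128] += 4.0                                      # inject one impact spike per window
X = np.vstack([window_features(adl), window_features(fall)])
y = np.concatenate([np.zeros(50), np.ones(50)])
clf = SVC(kernel="rbf").fit(X, y)
print(clf.score(X, y))                                  # training accuracy on the toy data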
@InProceedings{9287350,\n  author = {A. Jiang and W. Cheng and J. Shang and X. Miao and Y. Zhu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Graph Regularized Subspace Clustering via Low-Rank Decomposition},\n  year = {2020},\n  pages = {2165-2169},\n  abstract = {Subspace clustering (SC) is able to identify low-dimensional subspace structures embedded in high-dimensional data. Recently, graph-regularized approaches aim to tackle this problem by learning a linear representation of data samples and also a graph structure in a unified framework. However, previous approaches exploit a graph embedding term based on representation matrix, which could over-smooth the graph structure and thus adversely affect the clustering performance. In this paper, we present a novel algorithm based on joint low-rank decomposition and graph learning from data samples. In graph learning, only a low-rank component of the representation matrix is employed to construct the graph embedding term. An alternating direction method of multipliers (ADMM) is further developed to tackle the resulting nonconvex problem. Experimental results on both synthetic data and real benchmark databases validate the effectiveness of the proposed SC algorithm.},\n  keywords = {Databases;Signal processing algorithms;Clustering algorithms;Benchmark testing;Signal processing;Convex functions;Matrix decomposition;Affinity matrix;alternating direction method of multipliers (ADMM);graph Laplacian;low-rank decomposition;subspace clustering},\n  doi = {10.23919/Eusipco47968.2020.9287350},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002165.pdf},\n}\n\n
@InProceedings{9287351,\n  author = {G. Mazurek and R. Rytel-Andrianik},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Pilot-based calibration of dual-tuner SDR receivers},\n  year = {2020},\n  pages = {1971-1975},\n  abstract = {Multi-channel RF signal acquisition is a typical requirement in modern communications and radar applications. The use of multiple general-purpose Software-Defined-Radio receivers, connected to a host via USB interface, seems to be an attractive solution since it allows a high number of channels to be obtained with relatively low hardware cost and system weight. Unfortunately, such an architecture usually introduces some inter-channel phase-shifts and time-delays due to hardware limitations, and thus requires calibration routines. In this paper we present a calibration method that is based on pilot signals generated in additional hardware part and injected into the input RF signals. The system extended with such a calibration circuit was used during a field PCL measurement campaign and hours of the recorded signals have been processed offline with the proposed algorithm. The obtained results prove that it is possible to estimate the time delays and phase shifts in practice using the described solution even in the case of low SNR values and time-varying carrier frequency of the pilot signals.},\n  keywords = {Matched filters;Delay effects;RF signals;Signal processing algorithms;Frequency estimation;Hardware;Calibration;array calibration;software-defined radio;RF signal acquisition;passive coherent location},\n  doi = {10.23919/Eusipco47968.2020.9287351},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001971.pdf},\n}\n\n
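A toy sketch of the core estimation step behind pilot-based inter-channel calibration: cross-correlate the pilot as observed on two receiver channels to find the sample delay, then read the residual phase offset after delay compensation. The pilot frequency, noise level and delays below are illustrative, and the hardware injection circuit and PCL processing described in the paper are out of scope.

import numpy as np

fs, f_pilot, n = 1_000_000, 100_000.0, 4096
t = np.arange(n) / fs
pilot = np.exp(2j * np.pi * f_pilot * t)

true_delay, true_phase = 3, 0.7                      # samples, radians
ch1 = pilot + 0.05 * (np.random.randn(n) + 1j * np.random.randn(n))
ch2 = np.roll(pilot, true_delay) * np.exp(1j * true_phase) \
      + 0.05 * (np.random.randn(n) + 1j * np.random.randn(n))

# integer delay from the peak of the circular cross-correlation (computed via FFTs)
xcorr = np.fft.ifft(np.fft.fft(ch2) * np.conj(np.fft.fft(ch1)))
delay = int(np.argmax(np.abs(xcorr)))
# residual phase offset after delay compensation, measured against the reference channel
phase = np.angle(np.vdot(ch1, np.roll(ch2, -delay)))
print(delay, round(float(phase), 3))                 # expect ~3 and ~0.7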
@InProceedings{9287352,\n  author = {S. I. Mimilakis and K. Drossos and G. Schuller},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Unsupervised Interpretable Representation Learning for Singing Voice Separation},\n  year = {2020},\n  pages = {1412-1416},\n  abstract = {In this work, we present a method for learning interpretable music signal representations directly from waveform signals. Our method can be trained using unsupervised objectives and relies on the denoising auto-encoder model that uses a simple sinusoidal model as its decoding function to reconstruct the singing voice. To demonstrate the benefits of our method, we employ the obtained representations to the task of informed singing voice separation via binary masking, and measure the obtained separation quality by means of scale-invariant signal to distortion ratio. Our findings suggest that our method is capable of learning meaningful representations for singing voice separation, while preserving convenient properties of the short-time Fourier transform, such as non-negativity, smoothness, and reconstruction subject to time-frequency masking, that are desired in audio and music source separation.},\n  keywords = {Time-frequency analysis;Source separation;Fourier transforms;Noise reduction;Multiple signal classification;Signal representation;Task analysis;representation learning;unsupervised learning;denoising auto-encoders;singing voice separation},\n  doi = {10.23919/Eusipco47968.2020.9287352},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001412.pdf},\n}\n\n
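Independent of the learned representation proposed in this paper, the informed binary-masking evaluation it relies on can be sketched with an ordinary STFT: keep the time-frequency bins where the (known) target dominates the mixture and resynthesize. The toy signals and the dominance rule below are illustrative placeholders, not the paper's setup.

import numpy as np
from scipy.signal import stft, istft

fs = 16_000
t = np.arange(fs * 2) / fs
voice = np.sin(2 * np.pi * 440 * t) * (1 + 0.3 * np.sin(2 * np.pi * 3 * t))  # toy "voice"
accomp = 0.8 * np.sin(2 * np.pi * 196 * t)                                   # toy accompaniment
mix = voice + accomp

_, _, V = stft(voice, fs, nperseg=1024)          # informed: target spectrogram is known
_, _, M = stft(mix, fs, nperseg=1024)
mask = (np.abs(V) ** 2) > 0.5 * (np.abs(M) ** 2) # binary mask: keep target-dominant bins
_, voice_hat = istft(mask * M, fs, nperseg=1024)

n = min(len(voice), len(voice_hat))
print(np.corrcoef(voice[:n], voice_hat[:n])[0, 1])   # rough similarity of the reconstruction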
@InProceedings{9287353,\n  author = {M. Gerstmair and M. Gschwandtner and R. Findenig and A. Melzer and M. Huemer},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Lego Radar Train — An Educational Workshop on Radar-based Advanced Driver Assistance Systems},\n  year = {2020},\n  pages = {1981-1985},\n  abstract = {In modern vehicles a variety of sensors like radar, camera and lidar are combined in order to precisely sense the environment. This data is utilized by advanced driver assistance systems (ADAS) to provide comfort features and increased safety for the occupants. To cope with the demands of this growing market, it is necessary to attract young people to so-called science, technology, engineering and mathematics (STEM) studies and encourage them to contribute to this highly innovative topic. Therefore, we developed a workshop on radar-based ADAS to motivate pre-university students for STEM education and careers. In this paper we will present the individual hardware and software components as well as the basic structure of this practical hands-on training. Finally, we will shortly outline the spin-off projects resulting from the continuous development.},\n  keywords = {Training;Conferences;Software;Sensors;Advanced driver assistance systems;STEM;Vehicles;advanced driver assistance systems;FMCW radar;educational platform;experiential learning;Raspberry Pi;LEGO},\n  doi = {10.23919/Eusipco47968.2020.9287353},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
@InProceedings{9287354,\n  author = {Q. Lyu and X. Fu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Nonlinear Dependent Component Analysis: Identifiability and Algorithm},\n  year = {2020},\n  pages = {1010-1014},\n  abstract = {This work studies the model identification problem of a class of post-nonlinear mixture models in the presence of dependent latent components. Particularly, our interest lies in latent components that are nonnegative and sum-to-one. This problem is motivated by applications such as hyperspectral unmixing under nonlinear distortion effects. Many prior works tackled nonlinear mixture analysis using statistical independence among the latent components, which is not applicable in our case. A recent work by Yang et al. put forth a solution for this problem leveraging functional equations. However, the identifiability conditions derived there are somewhat restrictive. The associated implementation also has difficulties—the function approximator used in their work may not be able to represent general nonlinear distortions and the formulated constrained neural network optimization problem may be challenging to handle. In this work, we advance both the theoretical and practical aspects of the problem of interest. On the theory side, we offer a new identifiability condition that circumvents a series of stringent assumptions in Yang et al.’s work. On the algorithm side, we propose an easy-to-implement unconstrained neural network-based algorithm—without sacrificing function approximation capabilities. Numerical experiments are employed to support our design.},\n  keywords = {Sufficient conditions;Nonlinear distortion;Signal processing algorithms;Signal processing;Approximation algorithms;Function approximation;Optimization;post-nonlinear mixture;dependent component analysis;identifiability;neural networks;nonnegative matrix factorization},\n  doi = {10.23919/Eusipco47968.2020.9287354},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001010.pdf},\n}\n\n
@InProceedings{9287355,\n  author = {D. Yao and Y. Altmann and S. McLaughlin and M. E. Davies},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Joint Robust Linear Regression and Anomaly Detection in Poisson noise using Expectation-Propagation},\n  year = {2020},\n  pages = {2463-2467},\n  abstract = {In this paper, we propose a new Expectation-Propagation (EP) algorithm to address the problem of joint robust linear regression and sparse anomaly detection from data corrupted by Poisson noise. Adopting an approximate Bayesian approach, an EP method is derived to approximate the posterior distribution of interest. The method accounts not only for additive anomalies, but also for destructive anomalies, i.e., anomalies that can lead to observations with amplitudes lower than the expected signals. Experiments conducted with both synthetic and real data illustrate the potential benefits of the proposed EP method in joint spectral unmixing and anomaly detection in the photon-starved regime of a Lidar system.},\n  keywords = {Additives;Computational modeling;Linear regression;Signal processing algorithms;Data models;Bayes methods;Anomaly detection;Linear regression;Poisson noise;Anomaly detection;Approximate Bayesian inference;Expectation-Propagation},\n  doi = {10.23919/Eusipco47968.2020.9287355},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002463.pdf},\n}\n\n
@InProceedings{9287356,\n  author = {V. Roizman and M. Jonckheere and F. Pascal},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust clustering and outlier rejection using the Mahalanobis distance distribution},\n  year = {2020},\n  pages = {2448-2452},\n  abstract = {Both clustering and outlier detection tasks have a wide range of applications in signal processing. We focus here on the case where the data is corrupted with outliers and samples are relatively small. We study approximations of the distribution of the Mahalanobis distance when using robust estimators for the mean and the scatter matrix. We develop clustering and outlier rejection methods in the context of robust mixture modelling. We leverage on robust clustering and parameter estimations on a portion of the data, and we perform outlier detection on the rest of the data. We illustrate the importance of our method with synthetic simulations where we compare the theoretical asymptotic distribution and an approximated distribution to the empirical distribution. We conclude with an application using the well-known data set MNIST contaminated with noise.},\n  keywords = {Parameter estimation;Estimation;Mixture models;Signal processing;Proposals;Task analysis;Anomaly detection;clustering;outlier rejection;Mahalanobis distance;robust estimation},\n  doi = {10.23919/Eusipco47968.2020.9287356},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002448.pdf},\n}\n\n
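The outlier-rejection step described in this abstract can be illustrated with a robust location/scatter estimate followed by a chi-square cut on squared Mahalanobis distances. The sketch below uses scikit-learn's MinCovDet rather than the estimators analyzed in the paper, and the contamination level and 0.975 quantile are illustrative choices.

import numpy as np
from scipy.stats import chi2
from sklearn.covariance import MinCovDet

rng = np.random.default_rng(5)
inliers = rng.multivariate_normal(np.zeros(2), [[1.0, 0.5], [0.5, 1.0]], size=450)
outliers = rng.uniform(-8, 8, size=(50, 2))
X = np.vstack([inliers, outliers])

mcd = MinCovDet(random_state=0).fit(X)          # robust mean and scatter estimates
d2 = mcd.mahalanobis(X)                         # squared Mahalanobis distances
threshold = chi2.ppf(0.975, df=X.shape[1])      # asymptotic chi-square(0.975) cut-off
is_outlier = d2 > threshold
print(is_outlier[:450].mean(), is_outlier[450:].mean())   # false-alarm vs detection rate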
@InProceedings{9287357,\n  author = {G. Mijatovic and T. Loncar-Turukalo and N. Bozanic and L. Faes},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Information – theoretic characterization of concurrent activity of neural spike trains},\n  year = {2020},\n  pages = {925-929},\n  abstract = {The analysis of massively parallel spike train recordings facilitates investigation of communications and synchronization in neural networks. In this work we develop and evaluate a measure of concurrent neural activity, which is based on intrinsic firing properties of the recorded neural units. The overall single-neuron activity is unfolded in time and decomposed into a working and a non-firing state, providing a coarse, binary representation of the neuron's functional state. We propose a modified measure of mutual information to reflect the degree of simultaneous activation and concurrency in neural firing patterns. The measure is shown to be sensitive to both correlations and anti-correlations, and it is normalized to attain a fixed bounded index which makes it interpretable. Finally, the measure is compared with widely used indexes of spike train correlation. All measures are estimated in controlled experiments with synthetic Poisson spike trains and their corresponding surrogate datasets to assess their statistical significance.},\n  keywords = {Correlation;Sensitivity;Firing;Neurons;Signal processing;Synchronization;Mutual information;spike trains;neural synchrony;concurrent activity;firing patterns;mutual information},\n  doi = {10.23919/Eusipco47968.2020.9287357},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
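As a rough companion to this entry, a plain normalized mutual information between two spike trains reduced to binary firing/non-firing states per time bin can be computed as below. This is a generic index, not the modified, intrinsically parameterized measure proposed in the paper, and the bin width and firing rates are illustrative.

import numpy as np

def binary_mi(a, b):
    """Mutual information (bits) between two binary state sequences, normalized by the smaller entropy."""
    a, b = np.asarray(a, bool), np.asarray(b, bool)
    p = np.array([[np.mean(~a & ~b), np.mean(~a & b)],
                  [np.mean(a & ~b),  np.mean(a & b)]])          # joint 2x2 histogram
    pa, pb = p.sum(axis=1), p.sum(axis=0)
    mi = sum(p[i, j] * np.log2(p[i, j] / (pa[i] * pb[j]))
             for i in range(2) for j in range(2) if p[i, j] > 0)
    h = lambda q: -sum(x * np.log2(x) for x in q if x > 0)      # marginal entropy
    return mi / min(h(pa), h(pb))                               # bounded index in [0, 1]

rng = np.random.default_rng(6)
unit1 = rng.random(10_000) < 0.2                                # firing in ~20% of bins
unit2 = unit1 ^ (rng.random(10_000) < 0.05)                     # mostly synchronous partner
unrelated = rng.random(10_000) < 0.2
print(round(binary_mi(unit1, unit2), 3), round(binary_mi(unit1, unrelated), 3))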
@InProceedings{9287358,\n  author = {M. A. Nuhoglu and Y. {Kemal Alp} and A. Bayri and H. A. Cirpany},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A New Iterative Method for Passive Doppler Geolocation Based on Semi-Definite Programming},\n  year = {2020},\n  pages = {1812-1816},\n  abstract = {In this work, we propose a new iterative method, running on a receiver located on a moving platform, for uncooperative radar geolocation. The method uses Doppler-shifted measurements of the stationary radar signals due to the platform motion. The carrier frequency and the position of the radar are estimated jointly in each iteration by solving a semi-definite program. Conducted experiments show that a few iterations are enough for convergence to stable estimates. Hence, the proposed method has a significant computational advantage compared to traditional techniques, which require an extensive grid search on either position or carrier frequency parameter space.},\n  keywords = {Radar measurements;Geology;Radar;Frequency estimation;Doppler radar;Iterative methods;Signal to noise ratio;Emitter geolocation;semi-definite programming;Doppler-shifted frequency;computational complexity},\n  doi = {10.23919/Eusipco47968.2020.9287358},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001812.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we propose a new iterative method running on a receiver located on a moving platform for uncooperative radar geolocation. The method uses Doppler-shifted measurements of the stationary radar signals due to the platform motion. The carrier frequency and the position of the radar are estimated jointly in each iteration by solving a semi-definite program. Conducted experiments show that a few iterations are enough for convergence to stable estimates. Hence, the proposed method has a significant computational advantage compared to traditional techniques, which require an extensive grid search over either the position or the carrier frequency parameter space.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Kalman-based nested hybrid filters for recursive inference in state-space models.\n \n \n \n \n\n\n \n Pérez-Vieites, S.; and Míguez, J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2468-2472, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Kalman-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287359,\n  author = {S. Pérez-Vieites and J. Míguez},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Kalman-based nested hybrid filters for recursive inference in state-space models},\n  year = {2020},\n  pages = {2468-2472},\n  abstract = {We introduce a new sequential methodology to calibrate the fixed parameters and trace the stochastic dynamical variables of a state-space system. The proposed framework is based on the nested hybrid filters (NHF) of [1], that combine two layers of filters, one inside the other, to compute the joint posterior probability distribution of the static parameters and the state variables. In particular, we explore the use of deterministic sampling techniques in the first layer of the algorithm, instead of Monte Carlo methods, which reduces computational cost and so makes the algorithms potentially better-suited for high-dimensional state and parameter spaces. We present numerical results for a stochastic Lorenz 63 model.},\n  keywords = {Monte Carlo methods;Computational modeling;Stochastic processes;Signal processing algorithms;Filtering algorithms;Probability distribution;Numerical models;filtering;Kalman;Monte Carlo;Bayesian inference},\n  doi = {10.23919/Eusipco47968.2020.9287359},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002468.pdf},\n}\n\n
\n
\n\n\n
\n We introduce a new sequential methodology to calibrate the fixed parameters and trace the stochastic dynamical variables of a state-space system. The proposed framework is based on the nested hybrid filters (NHF) of [1], which combine two layers of filters, one inside the other, to compute the joint posterior probability distribution of the static parameters and the state variables. In particular, we explore the use of deterministic sampling techniques in the first layer of the algorithm, instead of Monte Carlo methods, which reduces the computational cost and so makes the algorithms potentially better suited for high-dimensional state and parameter spaces. We present numerical results for a stochastic Lorenz 63 model.\n
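The NHF machinery is beyond a short snippet, but the stochastic Lorenz 63 test model mentioned in the abstract is easy to reproduce. A minimal Euler-Maruyama sketch follows; the parameter values are the classical ones and the noise level is a placeholder, not necessarily those used in the paper.

```python
import numpy as np

def stochastic_lorenz63(x0, n_steps, dt=0.01, sigma=10.0, rho=28.0,
                        beta=8.0 / 3.0, noise_std=0.1, rng=None):
    """Euler-Maruyama integration of the Lorenz 63 system with additive state noise."""
    rng = rng or np.random.default_rng(0)
    x = np.empty((n_steps + 1, 3))
    x[0] = x0
    for k in range(n_steps):
        x1, x2, x3 = x[k]
        drift = np.array([sigma * (x2 - x1),
                          x1 * (rho - x3) - x2,
                          x1 * x2 - beta * x3])
        x[k + 1] = x[k] + dt * drift + np.sqrt(dt) * noise_std * rng.standard_normal(3)
    return x

trajectory = stochastic_lorenz63(x0=[1.0, 1.0, 1.0], n_steps=5000)
print(trajectory[-1])  # final state on the (noisy) attractor
```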
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Detection of Obstructive Sleep Apnoea by ECG signals using Deep Learning Architectures.\n \n \n \n \n\n\n \n Almutairi, H.; Hassan, G. M.; and Datta, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1382-1386, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DetectionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287360,\n  author = {H. Almutairi and G. M. Hassan and A. Datta},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Detection of Obstructive Sleep Apnoea by ECG signals using Deep Learning Architectures},\n  year = {2020},\n  pages = {1382-1386},\n  abstract = {Obstructive Sleep Apnoea (OSA) is a breathing disorder that happens during sleep and general anaesthesia. This disorder can affect human life considerably. Early detection of OSA can protect human health from different diseases including cardiovascular diseases which may lead to sudden death. OSA is examined by physicians using Electrocardiography (ECG) signals, Electromyogram (EMG), Electroencephalogram (EEG), Electrooculography (EOG) and oxygen saturation. Previous studies of detecting OSA are focused on using feature engineering where a specific number of features from ECG signals are selected as an input to the machine learning model. In this study, we focus on detecting OSA from ECG signals where our proposed machine learning methods automatically extract the input as features from ECG signals. We proposed three architectures of deep learning approaches in this study: CNN, CNN with LSTM and CNN with GRU. These architectures utilized consecutive R interval and QRS complex amplitudes as inputs. Thirty-five recordings from PhysioNet Apnea-ECG database have been used to evaluate our models. Experimental results show that our architecture of CNN with LSTM performed best for OSA detection. The average classification accuracy, sensitivity and specificity achieved in this study are 89.11%, 89.91% and 87.78% respectively.},\n  keywords = {cardiovascular system;diseases;electrocardiography;electroencephalography;electromyography;electro-oculography;learning (artificial intelligence);medical disorders;medical signal detection;medical signal processing;pneumodynamics;signal classification;sleep;ECG signals;deep learning approaches;CNN;OSA detection;obstructive sleep apnoea;breathing disorder;electrocardiography signals;oxygen saturation;machine learning model;physionet apnea-ECG database;electromyogram;LSTM;Deep learning;Electrooculography;Sensitivity;Electrocardiography;Feature extraction;Brain modeling;Sleep apnea;Obstructive Sleep Apnoea;ECG;Deep Learning;Convolutional Neural Networks;Long Short Term Memory;Gated Recurrent Unit},\n  doi = {10.23919/Eusipco47968.2020.9287360},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001382.pdf},\n}\n\n
\n
\n\n\n
\n Obstructive Sleep Apnoea (OSA) is a breathing disorder that occurs during sleep and general anaesthesia and can affect human life considerably. Early detection of OSA can protect against other conditions, including cardiovascular diseases, which may lead to sudden death. Physicians examine OSA using Electrocardiography (ECG), Electromyogram (EMG), Electroencephalogram (EEG), Electrooculography (EOG) and oxygen saturation signals. Previous studies on OSA detection have focused on feature engineering, where a specific set of features is selected from the ECG signal as input to a machine learning model. In this study, we focus on detecting OSA from ECG signals with methods that extract the input features automatically. We propose three deep learning architectures: a CNN, a CNN with LSTM, and a CNN with GRU, all using consecutive R-R intervals and QRS complex amplitudes as inputs. Thirty-five recordings from the PhysioNet Apnea-ECG database were used to evaluate the models. Experimental results show that the CNN with LSTM architecture performed best for OSA detection, achieving an average classification accuracy, sensitivity and specificity of 89.11%, 89.91% and 87.78%, respectively.\n
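The abstract does not give the exact layer configuration, so the Keras sketch below only illustrates the general CNN-with-LSTM shape it describes for per-segment apnoea/normal classification; the sequence length, filter counts and unit sizes are placeholders.

```python
import tensorflow as tf
from tensorflow.keras import layers

def build_cnn_lstm(seq_len=900, n_features=2):
    """1-D CNN front-end followed by an LSTM and a binary (apnoea / normal) output.
    seq_len and n_features are placeholders, e.g. per-segment sequences of
    R-R intervals and QRS amplitudes resampled to a fixed length."""
    model = tf.keras.Sequential([
        layers.Conv1D(32, 5, activation="relu", padding="same",
                      input_shape=(seq_len, n_features)),
        layers.MaxPooling1D(2),
        layers.Conv1D(64, 5, activation="relu", padding="same"),
        layers.MaxPooling1D(2),
        layers.LSTM(64),
        layers.Dropout(0.3),
        layers.Dense(1, activation="sigmoid"),
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    return model

model = build_cnn_lstm()
model.summary()
```

Swapping the LSTM layer for a GRU layer gives the third variant mentioned in the abstract.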
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Designing CNNs for Multimodal Image Super-Resolution via the Method of Multipliers.\n \n \n \n \n\n\n \n Marivani, I.; Tsiligianni, E.; Cornelis, B.; and Deligiannis, N.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 780-783, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DesigningPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287361,\n  author = {I. Marivani and E. Tsiligianni and B. Cornelis and N. Deligiannis},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Designing CNNs for Multimodal Image Super-Resolution via the Method of Multipliers},\n  year = {2020},\n  pages = {780-783},\n  abstract = {Multimodal alias, guided, image super-resolution (SR) refers to the reconstruction of a high-resolution (HR) version of a low-resolution (LR) image with the aid of an HR image from another image modality. Common approaches for the SR problem include analytical methods which are computationally expensive. Deep learning methods are capable of learning a nonlinear mapping between LR and HR images from data, delivering high reconstruction accuracy at a low-computational cost during inference; however, these methods do not incorporate any prior knowledge about the problem, with the neural network model behaving like a black box. In this paper, we formulate multimodal image SR as a coupled convolutional sparse coding problem. To solve the corresponding minimization problem, we adopt the Method of Multipliers (MM). We then design a convolutional neural network (CNN) that unfolds the obtained MM algorithm. The proposed CNN accepts as input the LR image from the main modality and the HR image from the guidance modality to reconstruct the desired HR image. Unlike existing deep learning methods, our CNN provides an efficient and structured way to fuse information at different stages of the network and achieves high reconstruction accuracy. We evaluate the performance of the proposed model for the super-resolution of multi-spectral images guided by their high resolution RGB counterparts.},\n  keywords = {Convolutional codes;Deep learning;Image coding;Convolution;Signal processing algorithms;Image reconstruction;Signal resolution;Method of multipliers;deep unfolding;multimodal image super-resolution;multimodal CNN},\n  doi = {10.23919/Eusipco47968.2020.9287361},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000780.pdf},\n}\n\n
\n
\n\n\n
\n Multimodal, alias guided, image super-resolution (SR) refers to the reconstruction of a high-resolution (HR) version of a low-resolution (LR) image with the aid of an HR image from another image modality. Common approaches for the SR problem include analytical methods which are computationally expensive. Deep learning methods are capable of learning a nonlinear mapping between LR and HR images from data, delivering high reconstruction accuracy at a low computational cost during inference; however, these methods do not incorporate any prior knowledge about the problem, with the neural network model behaving like a black box. In this paper, we formulate multimodal image SR as a coupled convolutional sparse coding problem. To solve the corresponding minimization problem, we adopt the Method of Multipliers (MM). We then design a convolutional neural network (CNN) that unfolds the obtained MM algorithm. The proposed CNN accepts as input the LR image from the main modality and the HR image from the guidance modality to reconstruct the desired HR image. Unlike existing deep learning methods, our CNN provides an efficient and structured way to fuse information at different stages of the network and achieves high reconstruction accuracy. We evaluate the performance of the proposed model for the super-resolution of multi-spectral images guided by their high-resolution RGB counterparts.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Progressive Training Of Convolutional Neural Networks For Acoustic Events Classification.\n \n \n \n \n\n\n \n Colangelo, F.; Battisti, F.; and Neri, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 26-30, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ProgressivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287362,\n  author = {F. Colangelo and F. Battisti and A. Neri},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Progressive Training Of Convolutional Neural Networks For Acoustic Events Classification},\n  year = {2020},\n  pages = {26-30},\n  abstract = {Convolutional neural networks represent the state of the art in multiple fields. Techniques that improve the training of these models are of prime interest since they have the capability to improve performances on a large variety of tasks. In this paper, we investigate the performance of progressive resizing, originally introduced in computer vision, when applied to the training of convolutional neural networks for audio events classification. We evaluate the original resizing algorithm and introduce a novel one, comparing the performances against a baseline system. Two of the most relevant audio datasets are used for assessing the performances of the proposed approach. Experimental results suggest that progressive resizing methods improves the performances of audio events classification models. The novel approach introduces a complimentary gain in performances with respect to the original technique.},\n  keywords = {Training;Computer vision;Signal processing algorithms;Europe;Signal processing;Convolutional neural networks;Task analysis;Deep learning;acoustic events;classification;tagging},\n  doi = {10.23919/Eusipco47968.2020.9287362},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000026.pdf},\n}\n\n
\n
\n\n\n
\n Convolutional neural networks represent the state of the art in multiple fields. Techniques that improve the training of these models are of prime interest since they can improve performance on a large variety of tasks. In this paper, we investigate the performance of progressive resizing, originally introduced in computer vision, when applied to the training of convolutional neural networks for audio event classification. We evaluate the original resizing algorithm and introduce a novel one, comparing their performance against a baseline system. Two of the most relevant audio datasets are used for assessing the performance of the proposed approach. Experimental results suggest that progressive resizing improves the performance of audio event classification models. The novel approach introduces a complementary performance gain with respect to the original technique.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Recognition of Actions and Subjects from Inertial and FSR Sensors Attached to Objects.\n \n \n \n \n\n\n \n Peng, Y.; Jančovič, P.; and Russell, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2006-2010, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RecognitionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287363,\n  author = {Y. Peng and P. Jančovič and M. Russell},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Recognition of Actions and Subjects from Inertial and FSR Sensors Attached to Objects},\n  year = {2020},\n  pages = {2006-2010},\n  abstract = {This paper describes automatic systems for human activity recognition and identification of subjects based on sensorised objects. The action recognition system is based on a deep neural network – hidden Markov model (DNN–HMM) with augmentation of feature vectors by the i-vector of a given recording to deal with the subject variability. The subject identification system is based on i-vectors. The sensors, comprising an accelerometer, gyroscope, magnetometer and force-sensitive resistors (FSRs), are packaged in a coaster attached to the base of an object, here a jug. Evaluations are performed using nearly 11 hours of data recordings from 26 subjects, containing actions involved in manipulating a jug to make cups of tea. We demonstrate the performance of the DNN–HMM action recognition system in a subject-dependent and subject-independent case and in controlled and natural scenarios. While the subject-dependent system achieved error rate of 0.15% in controlled scenario, this increased up to 15.33% for subject-independent system in natural scenario. The proposed i-vector augmentation provided 26% relative error rate reduction. The subject identification system based on i-vectors and cosine similarity calculation achieved over 68% recognition accuracy in natural scenario.},\n  keywords = {Training;Resistors;Error analysis;Neural networks;Signal processing;Markov processes;Object recognition;Activity recognition;subject identification;deep neural networks;hidden Markov models;sensors;feature augmentation;i-vector},\n  doi = {10.23919/Eusipco47968.2020.9287363},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002006.pdf},\n}\n\n
\n
\n\n\n
\n This paper describes automatic systems for human activity recognition and identification of subjects based on sensorised objects. The action recognition system is based on a deep neural network – hidden Markov model (DNN–HMM) with augmentation of feature vectors by the i-vector of a given recording to deal with the subject variability. The subject identification system is based on i-vectors. The sensors, comprising an accelerometer, gyroscope, magnetometer and force-sensitive resistors (FSRs), are packaged in a coaster attached to the base of an object, here a jug. Evaluations are performed using nearly 11 hours of data recordings from 26 subjects, containing actions involved in manipulating a jug to make cups of tea. We demonstrate the performance of the DNN–HMM action recognition system in subject-dependent and subject-independent cases and in controlled and natural scenarios. While the subject-dependent system achieved an error rate of 0.15% in the controlled scenario, this increased to 15.33% for the subject-independent system in the natural scenario. The proposed i-vector augmentation provided a 26% relative error rate reduction. The subject identification system based on i-vectors and cosine similarity calculation achieved over 68% recognition accuracy in the natural scenario.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Differentiable Max-Directivity Beamforming Normalization for Independent Vector Analysis.\n \n \n \n \n\n\n \n Takeda, S.; Niwa, K.; and Shimizu, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 296-300, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DifferentiablePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287364,\n  author = {S. Takeda and K. Niwa and S. Shimizu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Differentiable Max-Directivity Beamforming Normalization for Independent Vector Analysis},\n  year = {2020},\n  pages = {296-300},\n  abstract = {Independent vector analysis (IVA) minimizes an objective function to estimate separation filters that separate mixture signals into individual source signals. Unfortunately, IVA often suffers from the well-known block permutation problem. To mitigate that problem, the use of geometry knowledge has been studied, but two crucial issues remain: the necessity of non-differential processes outside the minimization and of high-level geometrical clues such as the directions of arrival (DOAs) of the source signals. This paper thus presents a novel IVA method whose objective function has a differentiable max-directivity beamforming normalization (MDBN) term. This term uses geometry knowledge from only a low-level geometrical clue (the positions of the microphone array) via the traditional beamforming (BF) concept that each separation filter should have a maximum gain for each specific DOA across all frequency bins. Thus, our overall objective function can be minimized by gradient descent, and the MDBN term encourages the separation filters to focus on specific directions, which implicitly estimates the most reasonable DOAs of the source signals at each iteration. Therefore, our method uses geometry knowledge while avoiding the above two issues and estimates good separation filters mitigating the permutation problem. Several experiments show that our method outperforms the conventional BF and IVA methods.},\n  keywords = {Geometry;Direction-of-arrival estimation;Array signal processing;Linear programming;Minimization;Microphone arrays;Reverberation;independent vector analysis;geometry knowledge;beamforming;maximum a posterior;normalization term;chain rule},\n  doi = {10.23919/Eusipco47968.2020.9287364},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000296.pdf},\n}\n\n
\n
\n\n\n
\n Independent vector analysis (IVA) minimizes an objective function to estimate separation filters that separate mixture signals into individual source signals. Unfortunately, IVA often suffers from the well-known block permutation problem. To mitigate that problem, the use of geometry knowledge has been studied, but two crucial issues remain: the necessity of non-differential processes outside the minimization and of high-level geometrical clues such as the directions of arrival (DOAs) of the source signals. This paper thus presents a novel IVA method whose objective function has a differentiable max-directivity beamforming normalization (MDBN) term. This term uses geometry knowledge from only a low-level geometrical clue (the positions of the microphone array) via the traditional beamforming (BF) concept that each separation filter should have a maximum gain for each specific DOA across all frequency bins. Thus, our overall objective function can be minimized by gradient descent, and the MDBN term encourages the separation filters to focus on specific directions, which implicitly estimates the most reasonable DOAs of the source signals at each iteration. Therefore, our method uses geometry knowledge while avoiding the above two issues and estimates good separation filters mitigating the permutation problem. Several experiments show that our method outperforms the conventional BF and IVA methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Fully Convolutional Sequence Learning Approach for Cued Speech Recognition from Videos.\n \n \n \n \n\n\n \n Papadimitriou, K.; and Potamianos, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 326-330, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287365,\n  author = {K. Papadimitriou and G. Potamianos},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Fully Convolutional Sequence Learning Approach for Cued Speech Recognition from Videos},\n  year = {2020},\n  pages = {326-330},\n  abstract = {Cued Speech constitutes a sign-based communication variant for the speech and hearing impaired, which involves visual information from lip movements combined with hand positional and gestural cues. In this paper, we consider its automatic recognition in videos, introducing a deep sequence learning approach that consists of two separately trained components: an image learner based on convolutional neural networks (CNNs) and a fully convolutional encoder-decoder. Specifically, handshape and lip visual features extracted from a 3D-CNN feature learner, as well as hand position embeddings obtained by a 2D-CNN, are concatenated and fed to a time-depth separable (TDS) block structure, followed by a multi-step attention-based convolutional decoder for phoneme prediction. To our knowledge, this is the first work where recognition of cued speech is addressed using a common modeling approach based entirely on CNNs. The introduced model is evaluated on a French and a British English cued speech dataset in terms of phoneme error rate, and it is shown to significantly outperform alternative modeling approaches.},\n  keywords = {Visualization;Convolution;Lips;Speech recognition;Feature extraction;Decoding;Videos;Cued speech;3D-CNN;TDS encoder;attention-based convolutional decoder},\n  doi = {10.23919/Eusipco47968.2020.9287365},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000326.pdf},\n}\n\n
\n
\n\n\n
\n Cued Speech constitutes a sign-based communication variant for the speech and hearing impaired, which involves visual information from lip movements combined with hand positional and gestural cues. In this paper, we consider its automatic recognition in videos, introducing a deep sequence learning approach that consists of two separately trained components: an image learner based on convolutional neural networks (CNNs) and a fully convolutional encoder-decoder. Specifically, handshape and lip visual features extracted from a 3D-CNN feature learner, as well as hand position embeddings obtained by a 2D-CNN, are concatenated and fed to a time-depth separable (TDS) block structure, followed by a multi-step attention-based convolutional decoder for phoneme prediction. To our knowledge, this is the first work where recognition of cued speech is addressed using a common modeling approach based entirely on CNNs. The introduced model is evaluated on a French and a British English cued speech dataset in terms of phoneme error rate, and it is shown to significantly outperform alternative modeling approaches.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Deep Learning Method with CRF for Instance Segmentation of Metal-Organic Frameworks in Scanning Electron Microscopy Images.\n \n \n \n \n\n\n \n Batatia, I.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 625-629, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287366,\n  author = {I. Batatia},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Deep Learning Method with CRF for Instance Segmentation of Metal-Organic Frameworks in Scanning Electron Microscopy Images},\n  year = {2020},\n  pages = {625-629},\n  abstract = {This paper proposes an integrated method for recognizing special crystals, called metal-organic frameworks (MOF), in scanning electron microscopy images (SEM). The proposed approach combines two deep learning networks and a dense conditional random field (CRF) to perform image segmentation. A modified Unet-like convolutional neural network (CNN), incorporating dilatation techniques using atrous convolution, is designed to segment cluttered objects in the SEM image. The dense CRF is tailored to enhance object boundaries and recover small objects. The unary energy of the CRF is obtained from the CNN. And the pairwise energy is estimated using mean field approximation. The resulting segmented regions are fed to a fully connected CNN that performs instance recognition. The method has been trained on a dataset of 500 images with 3200 objects from 3 classes. Testing achieves an overall accuracy of 95.7% MOF recognition. The proposed method opens up the possibility for developing automated chemical process monitoring that allows researchers to optimize conditions of MOF synthesis.},\n  keywords = {Deep learning;Image segmentation;Scanning electron microscopy;Image recognition;Convolution;Semantics;Crystals;metal-organic frameworks;semantic segmentation;deep learning;conditional random fields},\n  doi = {10.23919/Eusipco47968.2020.9287366},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000625.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes an integrated method for recognizing special crystals, called metal-organic frameworks (MOF), in scanning electron microscopy (SEM) images. The proposed approach combines two deep learning networks and a dense conditional random field (CRF) to perform image segmentation. A modified Unet-like convolutional neural network (CNN), incorporating dilation via atrous convolution, is designed to segment cluttered objects in the SEM image. The dense CRF is tailored to enhance object boundaries and recover small objects. The unary energy of the CRF is obtained from the CNN, and the pairwise energy is estimated using a mean-field approximation. The resulting segmented regions are fed to a fully connected CNN that performs instance recognition. The method has been trained on a dataset of 500 images with 3200 objects from 3 classes. Testing achieves an overall MOF recognition accuracy of 95.7%. The proposed method opens up the possibility of developing automated chemical process monitoring that allows researchers to optimize the conditions of MOF synthesis.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-Band Multi-Resolution Fully Convolutional Neural Networks for Singing Voice Separation.\n \n \n \n \n\n\n \n Grais, E. M.; Zhao, F.; and Plumbley, M. D.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 261-265, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-BandPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287367,\n  author = {E. M. Grais and F. Zhao and M. D. Plumbley},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-Band Multi-Resolution Fully Convolutional Neural Networks for Singing Voice Separation},\n  year = {2020},\n  pages = {261-265},\n  abstract = {Deep neural networks with convolutional layers usually process the entire spectrogram of an audio signal with the same time-frequency resolutions, number of filters, and dimensionality reduction scale. According to the constant-Q transform, good features can be extracted from audio signals if the low frequency bands are processed with high frequency resolution filters and the high frequency bands with high time resolution filters. In the spectrogram of a mixture of singing voices and music signals, there is usually more information about the voice in the low frequency bands than the high frequency bands. These raise the need for processing each part of the spectrogram differently. In this paper, we propose a multi-band multi-resolution fully convolutional neural network (MBR-FCN) for singing voice separation. The MBR-FCN processes the frequency bands that have more information about the target signals with more filters and smaller dimensionality reduction scale than the bands with less information. Furthermore, the MBR-FCN processes the low frequency bands with high frequency resolution filters and the high frequency bands with high time resolution filters. Our experimental results show that the proposed MBR-FCN with very few parameters achieves better singing voice separation performance than other deep neural networks.},\n  keywords = {Dimensionality reduction;Time-frequency analysis;Information filters;High frequency;Convolutional neural networks;Signal resolution;Spectrogram;Deep learning;convolutional neural networks;singing voice separation;single channel audio source separation;feature extraction},\n  doi = {10.23919/Eusipco47968.2020.9287367},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000261.pdf},\n}\n\n
\n
\n\n\n
\n Deep neural networks with convolutional layers usually process the entire spectrogram of an audio signal with the same time-frequency resolutions, number of filters, and dimensionality reduction scale. According to the constant-Q transform, good features can be extracted from audio signals if the low frequency bands are processed with high frequency resolution filters and the high frequency bands with high time resolution filters. In the spectrogram of a mixture of singing voices and music signals, there is usually more information about the voice in the low frequency bands than in the high frequency bands. This raises the need to process each part of the spectrogram differently. In this paper, we propose a multi-band multi-resolution fully convolutional neural network (MBR-FCN) for singing voice separation. The MBR-FCN processes the frequency bands that have more information about the target signals with more filters and a smaller dimensionality reduction scale than the bands with less information. Furthermore, the MBR-FCN processes the low frequency bands with high frequency resolution filters and the high frequency bands with high time resolution filters. Our experimental results show that the proposed MBR-FCN with very few parameters achieves better singing voice separation performance than other deep neural networks.\n
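As a rough sketch of the multi-band idea (more filters for the information-rich low band, fewer for the high band, branches re-joined along frequency), here is a toy Keras model; the split point, filter counts and single-channel output mask are illustrative and do not reproduce the MBR-FCN.

```python
import tensorflow as tf
from tensorflow.keras import layers

def build_multiband_fcn(n_frames=128, n_bins=512, split_bin=128):
    """Toy multi-band network: the low-frequency band gets more filters than the
    high-frequency band, then the two branches are concatenated along frequency."""
    spec = layers.Input(shape=(n_frames, n_bins, 1))
    # Extract the low band (first split_bin bins) and the high band (the rest).
    low = layers.Cropping2D(cropping=((0, 0), (0, n_bins - split_bin)))(spec)
    high = layers.Cropping2D(cropping=((0, 0), (split_bin, 0)))(spec)
    low = layers.Conv2D(32, (3, 3), padding="same", activation="relu")(low)
    high = layers.Conv2D(8, (3, 3), padding="same", activation="relu")(high)
    low = layers.Conv2D(1, (3, 3), padding="same", activation="sigmoid")(low)
    high = layers.Conv2D(1, (3, 3), padding="same", activation="sigmoid")(high)
    mask = layers.Concatenate(axis=2)([low, high])  # re-assemble along frequency
    return tf.keras.Model(spec, mask)

model = build_multiband_fcn()
print(model.output_shape)  # (None, 128, 512, 1)
```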
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ventricular response regularity in atrial fibrillation and its relationship to successful catheter ablation.\n \n \n \n \n\n\n \n McCann, A.; Luca, A.; Pruvot, E.; Roten, L.; Sticherling, C.; and Vesin, J. -.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 910-914, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"VentricularPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287369,\n  author = {A. McCann and A. Luca and E. Pruvot and L. Roten and C. Sticherling and J. -M. Vesin},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Ventricular response regularity in atrial fibrillation and its relationship to successful catheter ablation},\n  year = {2020},\n  pages = {910-914},\n  abstract = {Atrial rate is known to modulate ventricular response during atrial fibrillation (AF). The resulting pulse irregularity translates into widely varying inter-beat intervals (IBIs) extracted from recorded surface electrocardiogram (ECG) activity. In AF, the random nature of the IBIs makes them difficult to analyze using traditional methods, and little work has investigated the relationship between ventricular response and persistent AF (persAF) disease progression. In this paper, we propose nonlinear approaches for characterizing IBI dynamics in patients undergoing catheter ablation for the treatment of persAF.},\n  keywords = {bioelectric phenomena;catheters;diseases;electrocardiography;medical signal processing;patient treatment;spatiotemporal phenomena;IBI dynamics;ventricular response regularity;atrial fibrillation;catheter ablation;atrial rate;widely varying inter-beat intervals;recorded surface electrocardiogram activity;persistent AF disease progression;pulse irregularity translates;Resistance;Atrial fibrillation;Organizations;Signal processing;Tools;Rhythm;Catheters;Atrial fibrillation;electrocardiogram;heart-rate variability;recurrence plot;nonlinear signal processing},\n  doi = {10.23919/Eusipco47968.2020.9287369},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000910.pdf},\n}\n\n
\n
\n\n\n
\n Atrial rate is known to modulate ventricular response during atrial fibrillation (AF). The resulting pulse irregularity translates into widely varying inter-beat intervals (IBIs) extracted from recorded surface electrocardiogram (ECG) activity. In AF, the random nature of the IBIs makes them difficult to analyze using traditional methods, and little work has investigated the relationship between ventricular response and persistent AF (persAF) disease progression. In this paper, we propose nonlinear approaches for characterizing IBI dynamics in patients undergoing catheter ablation for the treatment of persAF.\n
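The keywords mention recurrence plots; a minimal numpy sketch of extracting inter-beat intervals from R-peak times and building a simple thresholded-distance recurrence matrix is shown below. The threshold and the toy beat times are illustrative, and this is not the authors' specific analysis.

```python
import numpy as np

def inter_beat_intervals(r_peak_times):
    """Inter-beat intervals (seconds) from detected R-peak times (seconds)."""
    return np.diff(np.asarray(r_peak_times, dtype=float))

def recurrence_matrix(series, eps):
    """Binary recurrence plot: R[i, j] = 1 if |x_i - x_j| < eps."""
    x = np.asarray(series, dtype=float)
    return (np.abs(x[:, None] - x[None, :]) < eps).astype(int)

# Toy example: irregular beat times, as in AF the IBIs vary widely.
rng = np.random.default_rng(1)
r_peaks = np.cumsum(rng.uniform(0.4, 1.2, size=200))  # hypothetical R-peak times
ibis = inter_beat_intervals(r_peaks)
rp = recurrence_matrix(ibis, eps=0.1)
print("recurrence rate:", rp.mean())
```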
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Multi-Agent Primal-Dual Strategy for Composite Optimization over Distributed Features.\n \n \n \n \n\n\n \n Alghunaim, S. A.; Yan, M.; and Sayed, A. H.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2095-2099, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287370,\n  author = {S. A. Alghunaim and M. Yan and A. H. Sayed},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Multi-Agent Primal-Dual Strategy for Composite Optimization over Distributed Features},\n  year = {2020},\n  pages = {2095-2099},\n  abstract = {This work studies multi-agent sharing optimization problems with the objective function being the sum of smooth local functions plus a convex (possibly non-smooth) function coupling all agents. This scenario arises in many machine learning and engineering applications, such as regression over distributed features and resource allocation. We reformulate this problem into an equivalent saddle-point problem, which is amenable to decentralized solutions. We then propose a proximal primal-dual algorithm and establish its linear convergence to the optimal solution when the local functions are strongly-convex. To our knowledge, this is the first linearly convergent decentralized algorithm for multi-agent sharing problems with a general convex (possibly non-smooth) coupling function.},\n  keywords = {Couplings;Machine learning algorithms;Signal processing algorithms;Signal processing;Resource management;Optimization;Convergence;Decentralized composite optimization;primal- dual methods;linear convergence;distributed learning},\n  doi = {10.23919/Eusipco47968.2020.9287370},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002095.pdf},\n}\n\n
\n
\n\n\n
\n This work studies multi-agent sharing optimization problems with the objective function being the sum of smooth local functions plus a convex (possibly non-smooth) function coupling all agents. This scenario arises in many machine learning and engineering applications, such as regression over distributed features and resource allocation. We reformulate this problem into an equivalent saddle-point problem, which is amenable to decentralized solutions. We then propose a proximal primal-dual algorithm and establish its linear convergence to the optimal solution when the local functions are strongly-convex. To our knowledge, this is the first linearly convergent decentralized algorithm for multi-agent sharing problems with a general convex (possibly non-smooth) coupling function.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast Depth Estimation for View Synthesis.\n \n \n \n \n\n\n \n Anantrasirichai, N.; Geravand, M.; Braendler, D.; and Bull, D. R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 575-579, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"FastPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287371,\n  author = {N. Anantrasirichai and M. Geravand and D. Braendler and D. R. Bull},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Fast Depth Estimation for View Synthesis},\n  year = {2020},\n  pages = {575-579},\n  abstract = {Disparity/depth estimation from sequences of stereo images is an important element in 3D vision. Owing to occlusions, imperfect settings and homogeneous luminance, accurate estimate of depth remains a challenging problem. Targetting view synthesis, we propose a novel learning-based framework making use of dilated convolution, densely connected convolutional modules, compact decoder and skip connections. The network is shallow but dense, so it is fast and accurate. Two additional contributions - a non-linear adjustment of the depth resolution and the introduction of a projection loss, lead to reduction of estimation error by up to 20% and 25% respectively. The results show that our network outperforms state-of-the-art methods with an average improvement in accuracy of depth estimation and view synthesis by approximately 45% and 34% respectively. Where our method generates comparable quality of estimated depth, it performs 10 times faster than those methods.},\n  keywords = {Training;Three-dimensional displays;Image resolution;Convolution;Europe;Decoding;Image reconstruction;depth estimation;disparity estimation;deep learning;CNN;view synthesis},\n  doi = {10.23919/Eusipco47968.2020.9287371},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000575.pdf},\n}\n\n
\n
\n\n\n
\n Disparity/depth estimation from sequences of stereo images is an important element in 3D vision. Owing to occlusions, imperfect settings and homogeneous luminance, accurate estimation of depth remains a challenging problem. Targeting view synthesis, we propose a novel learning-based framework making use of dilated convolutions, densely connected convolutional modules, a compact decoder and skip connections. The network is shallow but dense, so it is fast and accurate. Two additional contributions, a non-linear adjustment of the depth resolution and the introduction of a projection loss, lead to reductions of the estimation error by up to 20% and 25%, respectively. The results show that our network outperforms state-of-the-art methods with an average improvement in the accuracy of depth estimation and view synthesis of approximately 45% and 34%, respectively. Where our method generates comparable quality of estimated depth, it performs 10 times faster than those methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sound Event Localization and Detection Using Convolutional Recurrent Neural Networks and Gated Linear Units.\n \n \n \n \n\n\n \n Komatsu, T.; Togami, M.; and Takahashi, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 41-45, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"SoundPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287372,\n  author = {T. Komatsu and M. Togami and T. Takahashi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Sound Event Localization and Detection Using Convolutional Recurrent Neural Networks and Gated Linear Units},\n  year = {2020},\n  pages = {41-45},\n  abstract = {This paper proposes a sound event localization and detection (SELD) method using a convolutional recurrent neural network (CRNN) with gated linear units (GLUs). The proposed method introduces to employ GLUs with convolutional neural network (CNN) layers of the CRNN to extract adequate spectral features from amplitude and phase spectra. When the CNNs extract features of high-dimensional dependencies of frequency bins, the GLUs weight the extracted features based on the importance of the bins, like attention mechanism. Extracted features from bins where sounds are absent, which is not informative and degrade the SELD performance, are weighted to 0 and ignored by GLUs. Only the features extracted from informative bins are used for the CNN output for better SELD performance. Obtained CNN outputs are fed to consecutive bi-directional gated recurrent units (GRUs), which capture temporal information. Finally, the GRU output are shared by two task-specific layers, which are sound event detection (SED) layers and direction of arrival (DoA) estimation layers, to obtain SELD results. Evaluation results using the TAU Spatial Sound Events 2019 - Ambisonic dataset show the effectiveness of GLUs in the proposed method, and it improves SELD performance up to 0.10 in F1-score, 0.15 in error rate, 16.4° in DoA estimation error comparing to a CRNN baseline method.},\n  keywords = {Estimation error;Direction-of-arrival estimation;Recurrent neural networks;Error analysis;Logic gates;Feature extraction;Convolutional neural networks;Sound Event Localization and Detection;Recurrent Convolutional Neural Network;Gated Linear Unit},\n  doi = {10.23919/Eusipco47968.2020.9287372},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000041.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a sound event localization and detection (SELD) method using a convolutional recurrent neural network (CRNN) with gated linear units (GLUs). The proposed method employs GLUs with the convolutional neural network (CNN) layers of the CRNN to extract adequate spectral features from amplitude and phase spectra. When the CNNs extract features of high-dimensional dependencies across frequency bins, the GLUs weight the extracted features based on the importance of the bins, like an attention mechanism. Features extracted from bins where sounds are absent, which are not informative and degrade SELD performance, are weighted to 0 and ignored by the GLUs. Only the features extracted from informative bins are used for the CNN output, for better SELD performance. The CNN outputs are fed to consecutive bi-directional gated recurrent units (GRUs), which capture temporal information. Finally, the GRU outputs are shared by two task-specific layers, the sound event detection (SED) layers and the direction of arrival (DoA) estimation layers, to obtain SELD results. Evaluation results using the TAU Spatial Sound Events 2019 - Ambisonic dataset show the effectiveness of GLUs in the proposed method, which improves SELD performance by up to 0.10 in F1-score, 0.15 in error rate and 16.4° in DoA estimation error compared with a CRNN baseline method.\n
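The gating mechanism is easy to show in isolation. Below is a minimal Keras sketch of a convolutional layer whose output is gated by a GLU along the channel axis; the input sizes and filter counts are illustrative and do not match the paper's CRNN.

```python
import tensorflow as tf

class GatedConv2D(tf.keras.layers.Layer):
    """Conv2D producing 2*C feature maps, split into a linear part and a gate:
    output = linear_part * sigmoid(gate), i.e. a gated linear unit per channel."""
    def __init__(self, filters, kernel_size=3, **kwargs):
        super().__init__(**kwargs)
        self.conv = tf.keras.layers.Conv2D(2 * filters, kernel_size, padding="same")

    def call(self, inputs):
        linear, gate = tf.split(self.conv(inputs), num_or_size_splits=2, axis=-1)
        return linear * tf.sigmoid(gate)

# Example: a batch of 8 spectrogram-like inputs (time, frequency, feature channels),
# e.g. amplitude and phase features from a small microphone array.
x = tf.random.normal([8, 128, 64, 4])
y = GatedConv2D(filters=32)(x)
print(y.shape)  # (8, 128, 64, 32)
```

The sigmoid gate is what lets uninformative time-frequency bins be driven towards zero, which is the behaviour the abstract attributes to the GLUs.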
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Feature Overview for Joint Modeling of Sound Event Detection and Localization Using a Microphone Array.\n \n \n \n \n\n\n \n Krause, D.; Politis, A.; and Kowalczyk, K.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 31-35, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"FeaturePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287374,\n  author = {D. Krause and A. Politis and K. Kowalczyk},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Feature Overview for Joint Modeling of Sound Event Detection and Localization Using a Microphone Array},\n  year = {2020},\n  pages = {31-35},\n  abstract = {In this paper, we present a comparative study of a number of features and time-frequency signal representations for the task of joint sound event detection and localization using a state-of-the-art model based on a convolutional recurrent neural network. Experiments are performed for a dataset consisting of the recordings made using a tetrahedral microphone array. Several feature inputs specific to the task of sound event detection and sound source localization are combined and subsequently tested, with the aim to achieve joint performance of both tasks for multiple overlapping sound events using a single model based on a deep neural network architecture. Apart from providing a comprehensive comparison of various state-of-the-art acoustic features such as generalized cross-correlation, and inter-channel level and phase differences, we propose new features that have not been used for this task before such as eigenvectors of the microphone covariance matrix or sines and cosines of phase differences between the channels. Results for all combinations of input features are analyzed and discussed, followed by conclusions.},\n  keywords = {Recurrent neural networks;Event detection;Array signal processing;Feature extraction;Microphone arrays;Task analysis;Covariance matrices;sound event detection;sound source localization;convolutional recurrent neural networks;feature extraction},\n  doi = {10.23919/Eusipco47968.2020.9287374},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000031.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we present a comparative study of a number of features and time-frequency signal representations for the task of joint sound event detection and localization using a state-of-the-art model based on a convolutional recurrent neural network. Experiments are performed for a dataset consisting of the recordings made using a tetrahedral microphone array. Several feature inputs specific to the task of sound event detection and sound source localization are combined and subsequently tested, with the aim to achieve joint performance of both tasks for multiple overlapping sound events using a single model based on a deep neural network architecture. Apart from providing a comprehensive comparison of various state-of-the-art acoustic features such as generalized cross-correlation, and inter-channel level and phase differences, we propose new features that have not been used for this task before such as eigenvectors of the microphone covariance matrix or sines and cosines of phase differences between the channels. Results for all combinations of input features are analyzed and discussed, followed by conclusions.\n
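Two of the compared features are straightforward to compute directly; a minimal numpy sketch of GCC-PHAT and of sine/cosine inter-channel phase differences for a single pair of frames follows. The frame length and the toy delay are illustrative.

```python
import numpy as np

def gcc_phat(x, y, n_fft=1024):
    """GCC-PHAT between two frames; the peak lag estimates the delay of y relative to x."""
    X = np.fft.rfft(x, n=n_fft)
    Y = np.fft.rfft(y, n=n_fft)
    cross = Y * np.conj(X)
    cross /= np.abs(cross) + 1e-12          # PHAT weighting: keep phase only
    return np.fft.irfft(cross, n=n_fft)

def phase_difference_features(x, y, n_fft=1024):
    """Sines and cosines of the inter-channel phase difference per frequency bin."""
    phase_diff = np.angle(np.fft.rfft(x, n=n_fft)) - np.angle(np.fft.rfft(y, n=n_fft))
    return np.sin(phase_diff), np.cos(phase_diff)

# Toy two-channel frame: channel y is channel x delayed by 5 samples (circular shift).
rng = np.random.default_rng(0)
x = rng.standard_normal(1024)
y = np.roll(x, 5)
cc = gcc_phat(x, y)
lag = int(np.argmax(cc))
lag = lag if lag < len(cc) // 2 else lag - len(cc)
print("estimated delay of y relative to x (samples):", lag)  # 5
sin_ipd, cos_ipd = phase_difference_features(x, y)
print("IPD feature length per frame pair:", sin_ipd.shape[0])
```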
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Refinement Network for unsupervised on the scene Foreground Segmentation.\n \n \n \n \n\n\n \n Pardàs, M.; and Canet, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 705-709, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RefinementPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287375,\n  author = {M. {Pardàs} and G. Canet},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Refinement Network for unsupervised on the scene Foreground Segmentation},\n  year = {2020},\n  pages = {705-709},\n  abstract = {In this paper we present a network for foreground segmentation based on background subtraction which does not require specific scene training. The network is built as a refinement step on top of classic state of the art background subtraction systems. In this way, the system combines the possibility to define application oriented specifications as background subtraction systems do, and the highly accurate object segmentation abilities of deep learning systems. The refinement system is based on a semantic segmentation network. The network is trained on a common database and is not fine-tuned for the specific scenes, unlike existing solutions for foreground segmentation based on CNNs. Experiments on available databases show top results among unsupervised methods.},\n  keywords = {Training;Databases;Annotations;Semantics;Object segmentation;Signal processing;Task analysis;Background subtraction;semantic segmentation networks;refinement network},\n  doi = {10.23919/Eusipco47968.2020.9287375},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000705.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we present a network for foreground segmentation based on background subtraction which does not require specific scene training. The network is built as a refinement step on top of classic state of the art background subtraction systems. In this way, the system combines the possibility to define application oriented specifications as background subtraction systems do, and the highly accurate object segmentation abilities of deep learning systems. The refinement system is based on a semantic segmentation network. The network is trained on a common database and is not fine-tuned for the specific scenes, unlike existing solutions for foreground segmentation based on CNNs. Experiments on available databases show top results among unsupervised methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hierarchical High Capacity Data Hiding in JPEG Crypto-compressed Images.\n \n \n \n \n\n\n \n Puteaux, P.; Wang, Z.; Zhang, X.; and Puech, W.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 725-729, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"HierarchicalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287376,\n  author = {P. Puteaux and Z. Wang and X. Zhang and W. Puech},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Hierarchical High Capacity Data Hiding in JPEG Crypto-compressed Images},\n  year = {2020},\n  pages = {725-729},\n  abstract = {With the fast development of cloud computing, exchanging JPEG compressed images in a secure way has significantly increased. Data hiding in encrypted images (DHEI) is an effective way to embed additional data directly into the encrypted domain. In recent state-of-the-art methods, almost all DHEI processes focused on uncompressed images. Recently, some schemes using data hiding (DH) in JPEG crypto-compressed images have been designed, but most of them are not fully JPEG format compliant. In this paper, we propose a hierarchical high capacity data hiding (HHCDH) approach for JPEG crypto-compressed images. After encrypting every non-null coefficients, they are processed from low to high frequencies. Sign bits that are specific to them are then substituted by bits of a secret message. During the decoding phase, correlations between neighboring blocks are exploited to hierarchically recover the original sign bit values. According to our experiments, we achieve to obtain a high payload value, while preserving a very good quality of the reconstructed JPEG image.},\n  keywords = {Image coding;Transform coding;Signal processing;Decoding;Cryptography;High frequency;Image reconstruction;Signal processing in the encrypted domain;data hiding;crypto-compression;JPEG compression},\n  doi = {10.23919/Eusipco47968.2020.9287376},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000725.pdf},\n}\n\n
\n
\n\n\n
\n With the fast development of cloud computing, the need to exchange JPEG compressed images in a secure way has significantly increased. Data hiding in encrypted images (DHEI) is an effective way to embed additional data directly into the encrypted domain. Almost all recent state-of-the-art DHEI methods focus on uncompressed images. Recently, some schemes using data hiding (DH) in JPEG crypto-compressed images have been designed, but most of them are not fully JPEG format compliant. In this paper, we propose a hierarchical high capacity data hiding (HHCDH) approach for JPEG crypto-compressed images. After encrypting every non-null coefficient, the coefficients are processed from low to high frequencies. Their sign bits are then substituted with bits of a secret message. During the decoding phase, correlations between neighboring blocks are exploited to hierarchically recover the original sign bit values. According to our experiments, we obtain a high payload while preserving very good quality in the reconstructed JPEG image.\n
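The actual scheme works on encrypted, format-compliant JPEG bitstreams with hierarchical sign recovery; the toy numpy sketch below only illustrates the sign-bit substitution step on an array standing in for quantized DCT coefficients (no encryption, no entropy coding, and the values are made up).

```python
import numpy as np

def embed_in_signs(coeffs, message_bits):
    """Replace the signs of the first len(message_bits) non-zero coefficients:
    bit 0 -> positive sign, bit 1 -> negative sign."""
    out = coeffs.copy()
    nz = np.flatnonzero(out)
    if len(message_bits) > len(nz):
        raise ValueError("message longer than the number of non-zero coefficients")
    for bit, idx in zip(message_bits, nz):
        out[idx] = -abs(out[idx]) if bit else abs(out[idx])
    return out

def extract_from_signs(coeffs, n_bits):
    """Read the embedded bits back from the signs of the non-zero coefficients."""
    nz = np.flatnonzero(coeffs)[:n_bits]
    return [int(coeffs[idx] < 0) for idx in nz]

# Toy "quantized DCT block" flattened in zig-zag order.
block = np.array([-26, 3, 0, -2, 1, 0, 0, 4, 0, -1, 0, 0, 1, 0, 0, 0])
message = [1, 0, 1, 1, 0]
stego = embed_in_signs(block, message)
print(extract_from_signs(stego, len(message)) == message)  # True
```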
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast Source-Room-Receiver Acoustics Modeling.\n \n \n \n \n\n\n \n Luo, Y.; and Kim, W.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 51-55, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"FastPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287377,\n  author = {Y. Luo and W. Kim},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Fast Source-Room-Receiver Acoustics Modeling},\n  year = {2020},\n  pages = {51-55},\n  abstract = {Advances in speaker, room, and device acoustic modeling have given rise to large scale simulations of their spatial-frequency responses suitable for tasks such as rapid hardware prototyping, audio front-end algorithm validation, and back-end data-set augmentation for machine learning. Joint modeling of sources, rooms, and receivers is computationally prohibitive due to the large combinatorial space, coupling between models, and overhead cost of data exchange. To address these issues, we introduce the complex spherical harmonics as a separable set of basis functions for representing each of these models and their first-order interactions. We then present a partitioned frequency-dependent image-source model expanded into the spherical harmonics for efficient impulse response synthesis. Results are validated against real-world measurements.},\n  keywords = {Training;Computational modeling;Harmonic analysis;Acoustic measurements;Data models;Acoustics;Task analysis;Spherical harmonics;image-source;image-receiver;room acoustics;impulse response},\n  doi = {10.23919/Eusipco47968.2020.9287377},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000051.pdf},\n}\n\n
Phase-difference-based 3-D Source Localization Using a Compact Receiver Configuration. Chen, H.; Ballal, T.; and Al-Naffouri, T. Y. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 251-255, Aug 2020.
@InProceedings{9287378,\n  author = {H. Chen and T. Ballal and T. Y. Al-Naffouri},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Phase-difference-based 3-D Source Localization Using a Compact Receiver Configuration},\n  year = {2020},\n  pages = {251-255},\n  abstract = {Source localization has many important applications, especially in tracking and navigation. Trilateration, triangulation, and multilateration are three widely-used techniques for localization depending on the available information. The main drawbacks of these methods are the requirements of a large number of anchors and an elaborately designed layout. To accomplish the localization task with minimal resources while maintaining reasonable accuracy, we propose a 3-D source localization method with a compact infrastructure (prototype realized with 4 anchors located within an area of 2.5×20 cm2) by utilizing only phase-difference information. The proposed method first estimates the direction-of-arrival (DOA) of the target and then finds the candidate 3-D location along on the DOA by minimizing a cost function. This system is compared to the other two similar setups based on simulations and the experimental tests are carried out using acoustic waves. The results show that the proposed approach can achieve 3-D location error of 2.77 cm for a target at 0.5 m without synchronization between the transmitter and the receivers. The relatively small system size and sufficient location accuracy provide possibilities in controller tracking for virtual reality applications.},\n  keywords = {Solid modeling;Direction-of-arrival estimation;Transmitters;Receivers;Virtual reality;Synchronization;Task analysis},\n  doi = {10.23919/Eusipco47968.2020.9287378},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000251.pdf},\n}\n\n
Online Kernel-Based Nonlinear Neyman-Pearson Classification. Can, B.; Kerpicci, M.; and Ozkan, H. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1618-1622, Aug 2020.
@InProceedings{9287379,\n  author = {B. Can and M. Kerpicci and H. Ozkan},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Online Kernel-Based Nonlinear Neyman-Pearson Classification},\n  year = {2020},\n  pages = {1618-1622},\n  abstract = {We propose a novel Neyman-Pearson (NP) classification algorithm, which achieves the maximum detection rate and meanwhile keeps the false alarm rate around a user-specified threshold. The proposed method processes data in an online framework with nonlinear modeling capabilities by transforming the observations into a high dimensional space via the random Fourier features. After this transformation, we use a linear classifier whose parameters are sequentially learned. We emphasize that our algorithm is the first online Neyman-Pearson classifier in the literature, which is suitable for both linearly and nonlinearly separable datasets. In our experiments, we investigate the performance of our algorithm on well-known datasets and observe that the proposed online algorithm successfully learns the nonlinear class separations (by outperforming the linear models) while matching the desired false alarm rate.},\n  keywords = {Signal processing algorithms;Europe;Signal processing;Data models;Classification algorithms;Neyman-Pearson;online learning;nonlinear classification;kernel;random projections;Fourier features;per-ceptron},\n  doi = {10.23919/Eusipco47968.2020.9287379},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001618.pdf},\n}\n\n
A Fast Ray Space Transform for Wave Field Processing using Acoustic Arrays. Borra, F.; Pezzoli, M.; Comanducci, L.; Bernardini, A.; Antonacci, F.; Tubaro, S.; and Sarti, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 186-190, Aug 2020.
@InProceedings{9287380,\n  author = {F. Borra and M. Pezzoli and L. Comanducci and A. Bernardini and F. Antonacci and S. Tubaro and A. Sarti},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Fast Ray Space Transform for Wave Field Processing using Acoustic Arrays},\n  year = {2020},\n  pages = {186-190},\n  abstract = {The importance of soundfield imaging techniques is expected to further increase in the next few years thanks to the ever-increasing availability of low-cost sensors such as MEMS microphones. When it comes to processing a relevant number of sensor signals, however, the computational load of space-time processing algorithms easily grows to unmanageable levels. The Ray Space Transform (RST) was recently introduced as a promising tool for soundfield analysis. Given the collection of signals captured by a uniform linear array of microphones, the RST allows us to collect and map the directional components of the acoustic field onto a domain called {"}ray space{"}, where relevant acoustic objects are represented as linear patterns for advanced acoustic analysis and synthesis applications. So far the computational complexity of the RST linearly increases with the number of microphones. In order to alleviate this problem, in this paper we propose an alternative efficient implementation of the RST based on the Non Uniform Fast Fourier Transform.},\n  keywords = {Array signal processing;Signal processing algorithms;Transforms;Parallel processing;Tools;Acoustic arrays;Sensors;Array Processing;Space-time Analysis;Sound- field Imaging;FFT},\n  doi = {10.23919/Eusipco47968.2020.9287380},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000186.pdf},\n}\n\n
Aircraft Acoustic Signal Modeled as Oscillatory Almost-Cyclostationary Process. Napolitano, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2368-2372, Aug 2020.
@InProceedings{9287381,\n  author = {A. Napolitano},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Aircraft Acoustic Signal Modeled as Oscillatory Almost-Cyclostationary Process},\n  year = {2020},\n  pages = {2368-2372},\n  abstract = {The acoustic almost-cyclostationary signal emitted by a moving aircraft is modeled as an oscillatory almost-cyclostationary process when it is received by a stationary listener. Its autocorrelation function is constituted by the superposition of angle-modulated sinewaves, where the angle modulation is consequence of the time-varying delay due to the relative motion between aircraft and listener. Conditions under which the source almost-cyclostationary signal can be recovered by the received signal by time de-warping are established. Thus, cyclic features of the source signal carrying information on aircraft parameters are estimated by classical cyclic spectral analysis.},\n  keywords = {Correlation;Atmospheric modeling;Modulation;Acoustics;Delays;Aircraft;Spectral analysis;oscillatory almost-cyclostationary process;Doppler;time-warping;aircraft acoustic signal},\n  doi = {10.23919/Eusipco47968.2020.9287381},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002368.pdf},\n}\n\n
Pairwise and Hidden Markov Random Fields in Image Segmentation. Courbot, J.-B.; and Mazet, V. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2458-2462, Aug 2020.
@InProceedings{9287383,\n  author = {J. -B. Courbot and V. Mazet},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Pairwise and Hidden Markov Random Fields in Image Segmentation},\n  year = {2020},\n  pages = {2458-2462},\n  abstract = {The purpose of this paper is to identify the similarities and differences between two image restoration approaches based on Markov field modeling. The first one is the well-known Bayesian approach which models the unknowns with a Markovian prior. In the second approach, as proposed by Pieczynski and Tebbache [1], the pair unknowns–observations as a whole is considered Markovian. The two approaches are compared based on their posterior distribution, synthetic results and real examples, when applied to the segmentation of degraded images.},\n  keywords = {Image segmentation;Hidden Markov models;Europe;Signal processing;Image restoration;Bayes methods;Markov random fields;Bayesian Image Restoration;Pairwise Markov Random Fields;Hidden Markov Random Fields},\n  doi = {10.23919/Eusipco47968.2020.9287383},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002458.pdf},\n}\n\n
Assessing Risks of Biases in Cognitive Decision Support Systems. Lai, K.; Oliveira, H. C. R.; Hou, M.; Yanushkevich, S. N.; and Shmerko, V. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 840-844, Aug 2020.
@InProceedings{9287384,\n  author = {K. Lai and H. C. R. Oliveira and M. Hou and S. N. Yanushkevich and V. Shmerko},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Assessing Risks of Biases in Cognitive Decision Support Systems},\n  year = {2020},\n  pages = {840-844},\n  abstract = {Recognizing, assessing, countering, and mitigating the biases of different nature from heterogeneous sources is a critical problem in designing a cognitive Decision Support System (DSS). An example of such a system is a cognitive biometric-enabled security checkpoint. Biased algorithms affect the decision-making process in an unpredictable way, e.g. face recognition for different demographic groups may severely impact the risk assessment at a checkpoint. This paper addresses a challenging research question on how to manage an ensemble of biases? We provide performance projections of the DSS operational landscape in terms of biases. A probabilistic reasoning technique is used for assessment of the risk of such biases. We also provide a motivational experiment using face biometric component of the checkpoint system which highlights the discovery of an ensemble of biases and the techniques to assess their risks.},\n  keywords = {Decision support systems;Face recognition;Signal processing algorithms;Signal processing;Probabilistic logic;Security;Risk management;Ensemble of biases;risk;trust;identity management;computational intelligence},\n  doi = {10.23919/Eusipco47968.2020.9287384},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000840.pdf},\n}\n\n
CinC-GAN for Effective F0 prediction for Whisper-to-Normal Speech Conversion. Patel, M.; Purohit, M.; Shah, J.; and Patil, H. A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 411-415, Aug 2020.
@InProceedings{9287385,\n  author = {M. Patel and M. Purohit and J. Shah and H. A. Patil},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {CinC-GAN for Effective F0 prediction for Whisper-to-Normal Speech Conversion},\n  year = {2020},\n  pages = {411-415},\n  abstract = {Recently, Generative Adversarial Networks (GAN)based methods have shown remarkable performance for the Voice Conversion and WHiSPer-to-normal SPeeCH (WHSP2SPCH) conversion. One of the key challenges in WHSP2SPCH conversion is the prediction of fundamental frequency (F0). Recently, authors have proposed state-of-the-art method Cycle-Consistent Generative Adversarial Networks (CycleGAN) for WHSP2SPCH conversion. The CycleGAN-based method uses two different models, one for Mel Cepstral Coefficients (MCC) mapping, and another for F0 prediction, where F0 is highly dependent on the pre-trained model of MCC mapping. This leads to additional nonlinear noise in predicted F0. To suppress this noise, we propose Cycle-in-Cycle GAN (i.e., CinC-GAN). It is specially designed to increase the effectiveness in F0 prediction without losing the accuracy of MCC mapping. We evaluated the proposed method on a non-parallel setting and analyzed on speaker-specific, and gender-specific tasks. The objective and subjective tests show that CinC-GAN significantly outperforms the CycleGAN. In addition, we analyze the CycleGAN and CinC-GAN for unseen speakers and the results show the clear superiority of CinC-GAN.},\n  keywords = {Europe;Predictive models;Signal processing;Generative adversarial networks;Frequency conversion;Task analysis;Speech processing;Whisper-to-Normal Speech;Non-parallel;F0 prediction;CycleGAN;CinC-GAN},\n  doi = {10.23919/Eusipco47968.2020.9287385},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000411.pdf},\n}\n\n
U-Net based Multi-level Texture Suppression for Vessel Segmentation in Low Contrast Regions. Upadhyay, K.; Agrawal, M.; and Vashist, P. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1304-1308, Aug 2020.
@InProceedings{9287387,\n  author = {K. Upadhyay and M. Agrawal and P. Vashist},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {U-Net based Multi-level Texture Suppression for Vessel Segmentation in Low Contrast Regions},\n  year = {2020},\n  pages = {1304-1308},\n  abstract = {Segmentation of retinal blood vessels is important for diagnosis of many retinal diseases. Precise segmentation of complete vessel-map is still a challenge in low contrast regions of fundus images. Vessel pixels belonging to these regions, such as, fine vessel-endings and boundaries of vessels, get merged in the neighboring vessel-like texture. This paper proposes a novel retinal vessel segmentation algorithm which handles the background vessel-like texture in a sophisticated manner without harming the vessel pixels. In this work, first we enhance all possible vessel-like features of fundus at different `levels' using 2-D Gabor wavelet and Gaussian matched filtering. At each `level', texture is suppressed using Local Laplacian filter while preserving the vessel edges. The resulting images are combined to produce a maximum response image with enhanced vessels of different thicknesses and suppressed texture. This handcrafted image is used to train the deep U-net model for further suppression of non-vessel pixels. Proposed segmentation method is tested on publicly available DRIVE and STARE databases. The algorithm has produced state-of-the-art results. It has performed outstandingly well in terms of sensitivity measure which is most affected with the correct segmentation of fine vessels and vessel-boundary pixels present in low-contrast regions.},\n  keywords = {biomedical optical imaging;blood vessels;diseases;edge detection;eye;feature extraction;filtering theory;image colour analysis;image enhancement;image filtering;image segmentation;image texture;matched filters;medical image processing;U-net based multilevel texture suppression;low contrast regions;retinal blood vessels;retinal diseases;precise segmentation;complete vessel-map;fundus images;vessel pixels;vessel-endings;neighboring vessel-like texture;retinal vessel segmentation algorithm;background vessel-like texture;vessel edges;maximum response image;enhanced vessels;suppressed texture;nonvessel pixels;segmentation method;correct segmentation;fine vessels;vessel-boundary pixels;low-contrast regions;Image segmentation;Matched filters;Sensitivity;Simulation;Signal processing algorithms;Signal processing;Retinal vessels;segmentation;texture suppression;multiscale;U-net},\n  doi = {10.23919/Eusipco47968.2020.9287387},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001304.pdf},\n}\n\n
A parallel strategy for an evolutionary stochastic algorithm: application to the CP decomposition of nonnegative N-th order tensors. Laura, S.; Prissette, C.; Maire, S.; and Thirion-Moreau, N. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1956-1960, Aug 2020.
@InProceedings{9287389,\n  author = {S. Laura and C. Prissette and S. Maire and N. Thirion-Moreau},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A parallel strategy for an evolutionary stochastic algorithm: application to the CP decomposition of nonnegative N-th order tensors},\n  year = {2020},\n  pages = {1956-1960},\n  abstract = {In this article, we address the problem of the Canonical Polyadic decomposition (a.k.a. CP, Candecomp or Parafac decomposition) of N-th order tensors that can be very large. In our case, this decomposition is performed under nonnegativity constraints. While this problem is often tackled thanks to deterministic approaches, we focus here, on a stochastic approach based on a memetic algorithm. It relies on the evolution of a population and a local search stage. The main drawback of such algorithms can be their relative slowness. It is the reason why we suggest and implement a parallel strategy to increase the acceptance rate of the original algorithm and thus to accelerate its convergence speed. Numerical simulations are performed in order to illustrate the effectiveness of our approach on simulated 3D fluorescence spectroscopy tensors.},\n  keywords = {Spectroscopy;Tensors;Three-dimensional displays;Sociology;Signal processing algorithms;Signal processing;Statistics;Tensor decomposition;stochastic optimization;memetic algorithms;parallel strategy},\n  doi = {10.23919/Eusipco47968.2020.9287389},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001956.pdf},\n}\n\n
Algorithms for Overpredictive Signal Analytics in Federated Learning. Anavangot, V.; and Kumar, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1502-1506, Aug 2020.
@InProceedings{9287390,\n  author = {V. Anavangot and A. Kumar},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Algorithms for Overpredictive Signal Analytics in Federated Learning},\n  year = {2020},\n  pages = {1502-1506},\n  abstract = {Distributed signal processing will play a major role in performing learning and inference tasks in a client-server model proposed in federated learning. Clients (IoT devices) having many signal samples can aid a data-center (server) in learning the global signal model, by pooling these distributed samples. The clients may have privacy concerns, and the pooling of distributed samples will require accounting of communication cost involved. As a result, a processed approximation of these samples may be desirable. This decentralized learning approach is termed as distributed signal analytics in this work. Overpredictive signal approximations may be desired to perform such distributed signal analytics, which are primarily motivated by applications in network demand (capacity) estimation and planning. In this work, we propose algorithms that calculate an overpredictive signal approximation at the client devices using an efficient convex optimization framework. A tradeoff between the number of bits communicated by clients to the server and the signal approximation error is quantified. An analysis of our approximations is presented on an available residential energy consumption dataset.},\n  keywords = {Performance evaluation;Signal processing algorithms;Signal processing;Collaborative work;Approximation error;Approximation algorithms;Servers;signal reconstruction;approximation methods},\n  doi = {10.23919/Eusipco47968.2020.9287390},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001502.pdf},\n}\n\n
Transfer Learning improves MI BCI models classification accuracy in Parkinson's disease patients. Miladinović, A.; Ajčević, M.; Busan, P.; Jarmolowska, J.; Silveri, G.; Mezzarobba, S.; Battaglini, P. P.; and Accardo, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1353-1356, Aug 2020.
@InProceedings{9287391,\n  author = {A. Miladinović and M. Ajčević and P. Busan and J. Jarmolowska and G. Silveri and S. Mezzarobba and P. P. Battaglini and A. Accardo},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Transfer Learning improves MI BCI models classification accuracy in Parkinson's disease patients},\n  year = {2020},\n  pages = {1353-1356},\n  abstract = {Motor-Imagery based BCI (MI-BCI) neurorehabilitation can improve locomotor ability and reduce the deficit symptoms in Parkinson's Disease patients. Advanced Motor-Imagery BCI methods are needed to overcome the accuracy and time-related MI BCI calibration challenges in such patients. In this study, we proposed a Multi-session FBCSP (msFBCSP) based on inter-session transfer learning and we investigated its performance compared to the single-session based FBSCP. The main result of this study is the significantly improved accuracy obtained by proposed msFBCSP compared to single-session FBCSP in PD patients (median 81.3%, range 41.2-100.0% vs median 61.1%, range 25.0-100.0%, respectively; p<; 0.001). In conclusion, this study proposes a transfer learning-based multi-session based FBCSP approach which allowed to significantly improve calibration accuracy in MI BCI performed on PD patients.},\n  keywords = {brain-computer interfaces;calibration;diseases;electroencephalography;learning (artificial intelligence);medical signal processing;neurophysiology;patient rehabilitation;MI BCI models classification accuracy;Parkinson's disease patients;BCI neurorehabilitation;MI-BCI;deficit symptoms;Multisession FBCSP;inter-session transfer learning;single-session FBCSP;PD patients;transfer learning-based multisession;calibration accuracy;advanced motor-imagery BCI methods;Parkinson's disease;Europe;Signal processing;Brain modeling;Brain-computer interfaces;Calibration;Brain-computer interface;transfer learning;Parkinson's disease;Motor-Imagery Classification},\n  doi = {10.23919/Eusipco47968.2020.9287391},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001353.pdf},\n}\n\n
Dictionary Learning Using Rank-One Projection. Cheng, C.; and Dai, W. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2030-2034, Aug 2020.
@InProceedings{9287392,\n  author = {C. Cheng and W. Dai},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Dictionary Learning Using Rank-One Projection},\n  year = {2020},\n  pages = {2030-2034},\n  abstract = {Dictionary learning aims to find a dictionary that can sparsely represent the training data. Methods in the literature typically solve the problem by alternating between two stages: sparse coding and dictionary update. In this paper, we propose a novel dictionary learning algorithm using rank-one projection (ROP). The key contribution is that we cast dictionary learning as an optimization with respect to a single variable which is a set of rank one matrices. The resulting algorithm is hence single-stage. An alternating direction method of multipliers (ADMM) is derived to solve the optimization problem and a lower bound of penalty parameter is computed to guarantee a global convergence despite non-convexity of the optimization formulation. From practical point of view, ROP reduces the number of tuning parameters required in benchmark algorithms. Numerical tests demonstrate that ROP outperforms benchmark algorithms for both synthetic and real data.},\n  keywords = {Dictionaries;Signal processing algorithms;Machine learning;Benchmark testing;Convex functions;Optimization;Tuning;ADMM;dictionary learning;non-convex optimization;rank-one projection;single image super-resolution},\n  doi = {10.23919/Eusipco47968.2020.9287392},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002030.pdf},\n}\n\n
Complex FIR Digital Filter Sharpening With Three-Path Structures. Cain, G. D.; Yardim, A.; and Harris, F. J. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2324-2328, Aug 2020.
@InProceedings{9287393,\n  author = {G. D. Cain and A. Yardim and F. J. Harris},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Complex FIR Digital Filter Sharpening With Three-Path Structures},\n  year = {2020},\n  pages = {2324-2328},\n  abstract = {The Kaiser & Hamming two-path sharpening structure has long been the primary processing arrangement for using a filter’s coefficient set to improve its own performance. Here we study the three-path structure version they proposed, and also introduce a new such structure that makes use of prototype pairing. This expansion, in which we make use of conjugate-reversal of the coefficient set, brings about greatly increased sharpening applicability. Our new structure joins two recently-introduced two-path structures in comparison exercises here. Three of these four offer the option of linear-phase sharpened resultants and comprise what we believe to be the most versatile FIR sharpeners available for effective flattening of two-level gain plateaus in multiband filtering scenarios. All four are compared in complex-valued impulse response example settings, and we arrive at clear ranking of their merits.},\n  keywords = {Finite impulse response filters;Shape;Filtering;Prototypes;Signal processing;Delays;Passband;filter sharpening;complex FIR filters;frequency response;sharpening polynomials},\n  doi = {10.23919/Eusipco47968.2020.9287393},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002324.pdf},\n}\n\n
On the Deterministic Estimaton of Multiscale Permutation Entropy of High-Order Autoregressive-Moving-Average Processes as a Function of ARMA Parameters. Dávalos, A.; Jabloun, M.; Ravier, P.; and Buttelli, O. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2225-2229, Aug 2020.
@InProceedings{9287394,\n  author = {A. Dávalos and M. Jabloun and P. Ravier and O. Buttelli},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {On the Deterministic Estimaton of Multiscale Permutation Entropy of High-Order Autoregressive-Moving-Average Processes as a Function of ARMA Parameters},\n  year = {2020},\n  pages = {2225-2229},\n  abstract = {Multiscale Permutation Entropy (MPE) is one of the most common techniques to assess the ordinal information content within a time series. In the present paper we propose an explicit, deterministic function of the MPE of a general ARMA process, as a function only of its parameters and time scale. We compare our theoretical results with the MPE of corresponding simulated signals, which further support our formulation. We also present an exploration of the effects of the ARMA parameters on the MPE curve, where we found a monotonic decrease of entropy for long-term time scales, and highly non-linear effects on short scales. With these results, we aim to provide a benchmark for the MPE of any real time series modelled as an ARMA process.},\n  keywords = {autoregressive moving average processes;entropy;time series;deterministic estimation;ARMA parameters;multiscale permutation entropy;ordinal information content;time series;explicit function;deterministic function;general ARMA process;time scale;corresponding simulated signals;MPE curve;long-term time scales;high-order autoregressive-moving-average process;Time series analysis;Benchmark testing;Signal processing;Entropy;Real-time systems;Probability distribution;Mathematical model;Multiscale Permutation Entropy;ARMA Process;Coarse-graining},\n  doi = {10.23919/Eusipco47968.2020.9287394},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002225.pdf},\n}\n\n
Residential Energy Consumption Prediction Using Inter-Household Energy Data and Socioeconomic Information. Schirmer, P. A.; Geiger, C.; and Mporas, I. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1595-1599, Aug 2020.
@InProceedings{9287395,\n  author = {P. A. Schirmer and C. Geiger and I. Mporas},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Residential Energy Consumption Prediction Using Inter-Household Energy Data and Socioeconomic Information},\n  year = {2020},\n  pages = {1595-1599},\n  abstract = {Previous studies have shown that residential energy consumption prediction accuracy can be improved when households energy data are fused with residents’ socioeconomic information. In this article we propose an architecture for the prediction of residential energy consumption using past energy consumption from other/neighboring households in combination with socioeconomic information of the corresponding residents. The architecture is based on a Long Short Term Memory model and was evaluated using a large-scale dataset monitoring households of London. The proposed approach significantly improves the accuracy of the energy consumption predictor reducing the mean absolute error up to 25.2% with prediction error rate equal to 5.4%.},\n  keywords = {Energy consumption;Signal processing;Predictive models;Market research;History;Monitoring;Load modeling;Energy Consumption Prediction;Load Forecasting;Socioeconomic Features},\n  doi = {10.23919/Eusipco47968.2020.9287395},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001595.pdf},\n}\n\n
The Smoothed Reassigned Spectrogram for Robust Energy Estimation. Månsson, E. M.; and Sandsten, M. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2210-2214, Aug 2020.
@InProceedings{9287396,\n  author = {E. M. Månsson and M. Sandsten},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {The Smoothed Reassigned Spectrogram for Robust Energy Estimation},\n  year = {2020},\n  pages = {2210-2214},\n  abstract = {The matched window reassigned spectrogram relocates all signal energy of an oscillating transient to the time-and frequency locations, resulting in a sharp peak in the time-frequency plane. However, previous research has shown that the method may result in split energy peaks for close components and in high noise levels, and the peak energy is then erroneously estimated. With use of novel knowledge on the statistics when subjected to noise, we propose a novel method, the smoothed reassigned spectrogram, for obtaining a stable and accurate measure of the signal energy from the peak value, with retained resolution properties. We also suggest a simple set of rules to enhance the reassigned spectrogram and speed up its calculation. Simulations are performed to verify the accuracy and an application example on radar data is shown.},\n  keywords = {Time-frequency analysis;Radar applications;Noise measurement;Kernel;Transient analysis;Spectrogram;Signal resolution;oscillating transient;time-frequency reassignment;reassignment vector statistics;smoothing kernels},\n  doi = {10.23919/Eusipco47968.2020.9287396},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002210.pdf},\n}\n\n
Organ-Based Chronological Age Estimation Based on 3D MRI Scans. Armanious, K.; Abdulatif, S.; Bhaktharaguttu, A. R.; Küstner, T.; Hepp, T.; Gatidis, S.; and Yang, B. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1225-1228, Aug 2020.
@InProceedings{9287398,\n  author = {K. Armanious and S. Abdulatif and A. R. Bhaktharaguttu and T. Küstner and T. Hepp and S. Gatidis and B. Yang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Organ-Based Chronological Age Estimation Based on 3D MRI Scans},\n  year = {2020},\n  pages = {1225-1228},\n  abstract = {Individuals age differently depending on a multitude of different factors such as lifestyle, medical history and genetics. Often, the global chronological age is not indicative of the true ageing process. An organ-based age estimation would yield a more accurate health state assessment. In this work, we propose a new deep learning architecture for organ-based age estimation based on magnetic resonance images (MRI). The proposed network is a 3D convolutional neural network (CNN) with increased depth and width made possible by the hybrid utilization of inception and fire modules. We apply the proposed framework for the tasks of brain and knee age estimation. Quantitative comparisons against concurrent MR-based regression networks and different 2D and 3D data feeding strategies illustrated the superior performance of the proposed work.},\n  keywords = {biomedical MRI;brain;learning (artificial intelligence);medical image processing;neural nets;regression analysis;ageing process;global chronological age;individuals age;organ-based chronological age estimation;concurrent MR-based regression networks;knee age estimation;brain;3D convolutional neural network;organ-based age estimation;Three-dimensional displays;Magnetic resonance imaging;Two dimensional displays;Estimation;Computer architecture;Signal processing;Task analysis;Age estimation;deep learning;magnetic resonance imaging;convolutional neural networks},\n  doi = {10.23919/Eusipco47968.2020.9287398},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001225.pdf},\n}\n\n
GPX-ADMM-Net: ADMM-based Neural Network with Generalized Proximal Operator. Hu, S.-W.; Lin, G.-X.; and Lu, C.-S. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2055-2059, Aug 2020.
@InProceedings{9287399,\n  author = {S. -W. Hu and G. -X. Lin and C. -S. Lu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {GPX-ADMM-Net: ADMM-based Neural Network with Generalized Proximal Operator},\n  year = {2020},\n  pages = {2055-2059},\n  abstract = {In this paper, we propose a highly efficient and well interpretable deep learning solver, called Generalized ProXimal ADMM-Net (GPX-ADMM-Net), for the linear inverse problem, which is conventionally solved with intensive computations.GPX-ADMM-Net is characterized by the generalized proximal operator, convolutional dictionary, and modified loss function. Without loss of interpretability, GPX-ADMM-Net only needs a small number of parameters in a learning model to retain elegant reconstruction quality.Different from traditional optimization methods, all the parameters of GPX-ADMM-Net need no more hand-crafted but determined by learning strategies. Furthermore, unlike other deep learning-based methods, GPX-ADMM-Net is able to adapt to various measurement rates with only one single set of training parameters. Extensive experimental results further demonstrate the advantages of our proposed method.},\n  keywords = {Deep learning;Training;Learning systems;Neural networks;Signal processing algorithms;Optimization methods;Periodic structures;ADMM;Convolution neural network;Deep learning;Linear inverse problem;Proximal operator},\n  doi = {10.23919/Eusipco47968.2020.9287399},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002055.pdf},\n}\n\n
\n
\n\n\n
\n            In this paper, we propose a highly efficient and well-interpretable deep learning solver, called Generalized ProXimal ADMM-Net (GPX-ADMM-Net), for the linear inverse problem, which is conventionally solved with intensive computation. GPX-ADMM-Net is characterized by a generalized proximal operator, a convolutional dictionary, and a modified loss function. Without loss of interpretability, GPX-ADMM-Net needs only a small number of parameters in the learning model to retain high reconstruction quality. Unlike traditional optimization methods, the parameters of GPX-ADMM-Net no longer need to be hand-crafted but are instead determined by learning. Furthermore, unlike other deep learning-based methods, GPX-ADMM-Net is able to adapt to various measurement rates with a single set of training parameters. Extensive experimental results further demonstrate the advantages of the proposed method.\n
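The paper's solver unrolls ADMM iterations with a learned, generalized proximal operator. As a point of reference, the sketch below shows the classical (hand-crafted) ADMM iteration for a sparse linear inverse problem, where the proximal step is plain soft-thresholding; the matrix sizes and parameters are illustrative assumptions.

```python
# Classical ADMM for min_x 0.5*||y - A x||^2 + lam*||x||_1 (the step GPX-ADMM-Net
# generalizes and learns is the soft-thresholding proximal update below).
import numpy as np

def soft_threshold(v, t):
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def admm_lasso(A, y, lam=0.1, rho=1.0, n_iter=200):
    m, n = A.shape
    x = np.zeros(n); z = np.zeros(n); u = np.zeros(n)   # u is the scaled dual variable
    AtA, Aty = A.T @ A, A.T @ y
    Q = np.linalg.inv(AtA + rho * np.eye(n))             # cached for the x-update
    for _ in range(n_iter):
        x = Q @ (Aty + rho * (z - u))                    # quadratic (data-fidelity) step
        z = soft_threshold(x + u, lam / rho)             # proximal step (learned in the paper)
        u = u + x - z                                    # dual ascent
    return z

rng = np.random.default_rng(0)
A = rng.standard_normal((60, 120))
x_true = np.zeros(120); x_true[rng.choice(120, 8, replace=False)] = rng.standard_normal(8)
y = A @ x_true + 0.01 * rng.standard_normal(60)
print(np.linalg.norm(admm_lasso(A, y) - x_true))
```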
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Inexact Proximal Conjugate Subgradient Algorithm for fMRI Data Completion.\n \n \n \n \n\n\n \n Belyaeva, I.; Long, Q.; and Adali, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1025-1029, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"InexactPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287401,\n  author = {I. Belyaeva and Q. Long and T. Adali},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Inexact Proximal Conjugate Subgradient Algorithm for fMRI Data Completion},\n  year = {2020},\n  pages = {1025-1029},\n  abstract = {Tensor representations have proven useful for many problems, including data completion. A promising application for tensor completion is functional magnetic resonance imaging (fMRI) data that has an inherent four-dimensional (4D) structure and is prone to missing voxels and regions due to issues in acquisition. A key component of successful tensor completion is a rank estimation. While widely used as a convex relaxation of the tensor rank, tensor nuclear norm (TNN) imposes strong low-rank constraints on all tensor modes to be simultaneously low-rank and often leads to suboptimal solutions. We propose a novel tensor completion model in tensor train (TT) format with a proximal conjugate subgradient (PCS-TT) method for solving the nonconvex rank minimization problem by using properties of Moreau's decomposition. PCS-TT allows the use of a wide range of robust estimators and can be used for data completion and sparse signal recovery problems. We present experimental results for data completion in fMRI, where PCS-TT demonstrates significant improvements compared with competing methods. In addition, we present results that demonstrate the advantages of considering the 4D structure of the fMRI data. as opposed to using three- and two-dimensional representations that have dominated the work on fMRI analysis.},\n  keywords = {biomedical MRI;concave programming;convex programming;gradient methods;matrix algebra;medical image processing;minimisation;tensors;inexact proximal conjugate subgradient algorithm;fMRI data completion;tensor representations;functional magnetic resonance imaging data;four-dimensional structure;low-rank constraints;tensor modes;novel tensor completion model;tensor train format;PCS-TT;nonconvex rank minimization problem;sparse signal recovery problems;Moreau decomposition;Tensors;Three-dimensional displays;Signal processing algorithms;Functional magnetic resonance imaging;Signal processing;Minimization;Spatiotemporal phenomena;Tensor completion;Tensor train decomposition;fMRI missing data completion},\n  doi = {10.23919/Eusipco47968.2020.9287401},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001025.pdf},\n}\n\n
\n
\n\n\n
\n            Tensor representations have proven useful for many problems, including data completion. A promising application of tensor completion is functional magnetic resonance imaging (fMRI) data, which has an inherent four-dimensional (4D) structure and is prone to missing voxels and regions due to issues in acquisition. A key component of successful tensor completion is rank estimation. While widely used as a convex relaxation of the tensor rank, the tensor nuclear norm (TNN) imposes the strong constraint that all tensor modes be simultaneously low-rank and often leads to suboptimal solutions. We propose a novel tensor completion model in tensor train (TT) format with a proximal conjugate subgradient (PCS-TT) method for solving the nonconvex rank minimization problem by using properties of Moreau's decomposition. PCS-TT allows the use of a wide range of robust estimators and can be used for data completion and sparse signal recovery problems. We present experimental results for data completion in fMRI, where PCS-TT demonstrates significant improvements compared with competing methods. In addition, we present results that demonstrate the advantages of considering the 4D structure of the fMRI data, as opposed to using the three- and two-dimensional representations that have dominated work on fMRI analysis.\n
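For readers unfamiliar with the tensor-train (TT) format that PCS-TT operates in, the sketch below shows a plain TT-SVD decomposition of a dense array and its exact reconstruction. It illustrates the format only; it is not the proposed completion algorithm, and the rank choices are illustrative.

```python
# Plain TT-SVD decomposition and reconstruction of a 4-D array (illustrative only).
import numpy as np

def tt_svd(x, ranks):
    """Decompose an n-dimensional array into TT cores with prescribed internal ranks."""
    dims = x.shape
    cores, r_prev = [], 1
    c = x.reshape(r_prev * dims[0], -1)
    for k in range(len(dims) - 1):
        u, s, vt = np.linalg.svd(c, full_matrices=False)
        r = min(ranks[k], s.size)
        cores.append(u[:, :r].reshape(r_prev, dims[k], r))
        c = (np.diag(s[:r]) @ vt[:r]).reshape(r * dims[k + 1], -1)
        r_prev = r
    cores.append(c.reshape(r_prev, dims[-1], 1))
    return cores

def tt_to_full(cores):
    full = cores[0]
    for core in cores[1:]:
        full = np.tensordot(full, core, axes=1)   # contract rank index with next core
    return full[0, ..., 0]

x = np.random.default_rng(0).standard_normal((6, 7, 8, 9))
cores = tt_svd(x, ranks=[6, 42, 9])   # exact ranks -> exact reconstruction
print(np.allclose(tt_to_full(cores), x))
```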
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SORN-based Cascade Support Vector Machine.\n \n \n \n \n\n\n \n Hülsmeier, N.; Bärthel, M.; Rust, J.; and Paul, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1507-1511, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"SORN-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287402,\n  author = {N. Hülsmeier and M. Bärthel and J. Rust and S. Paul},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {SORN-based Cascade Support Vector Machine},\n  year = {2020},\n  pages = {1507-1511},\n  abstract = {This paper presents a SORN-based cascade support vector machine (SVM). For the first time a SVM is implemented by the usage of SORNs (Set Of Real Numbers). The SORN representation is a dual number representation that uses a fixed set of exact values and open intervals. Arithmetic operations in SORN representation can be realized by lookup tables which allows fast and low-complexity computing. This arithmetic is used for the non-support vector filtering in the early stages of a cascade SVM. Once the training subsets passed the bottom layer of the cascade, the optimization on the remaining support vectors can be done in classic representations like floating point or fixed point.},\n  keywords = {Support vector machines;Training;Filtering;Europe;Signal processing;Hardware;Optimization;support vector machine;SVM;unum;SORN;machine learning},\n  doi = {10.23919/Eusipco47968.2020.9287402},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001507.pdf},\n}\n\n
\n
\n\n\n
\n            This paper presents a SORN-based cascade support vector machine (SVM). For the first time, an SVM is implemented using SORNs (sets of real numbers). The SORN representation is a dual number representation that uses a fixed set of exact values and open intervals. Arithmetic operations in the SORN representation can be realized by lookup tables, which allows fast, low-complexity computing. This arithmetic is used for the non-support-vector filtering in the early stages of a cascade SVM. Once the training subsets have passed the bottom layer of the cascade, the optimization on the remaining support vectors can be performed in classical representations such as floating point or fixed point.\n
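The cascade idea itself can be sketched in ordinary floating point: early stages train SVMs on data subsets and discard non-support vectors, and only the survivors reach the final optimization. The sketch below uses scikit-learn on a synthetic dataset; the paper's SORN/interval arithmetic for the early stages is not reproduced.

```python
# Cascade-SVM filtering sketch in plain floating point (SORN arithmetic not reproduced).
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=2000, n_features=10, random_state=0)

# Stage 1: split the training set and keep only each subset's support vectors.
subsets = np.array_split(np.random.default_rng(0).permutation(len(X)), 4)
survivors = []
for idx in subsets:
    svm = SVC(kernel="linear", C=1.0).fit(X[idx], y[idx])
    survivors.append(idx[svm.support_])          # indices of this subset's support vectors
survivors = np.concatenate(survivors)

# Stage 2 (bottom layer): final optimization on the much smaller surviving set.
final_svm = SVC(kernel="linear", C=1.0).fit(X[survivors], y[survivors])
print(len(survivors), "of", len(X), "samples reach the final stage;",
      "accuracy:", final_svm.score(X, y))
```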
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Capturing and Explaining Trajectory Singularities using Composite Signal Neural Networks.\n \n \n \n \n\n\n \n Dubois, H.; Le Callet, P.; Hornberger, M.; Spiers, H. J.; and Coutrot, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1422-1426, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"CapturingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287403,\n  author = {H. Dubois and P. {Le Callet} and M. Hornberger and H. J. Spiers and A. Coutrot},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Capturing and Explaining Trajectory Singularities using Composite Signal Neural Networks},\n  year = {2020},\n  pages = {1422-1426},\n  abstract = {Spatial trajectories are ubiquitous and complex signals. Their analysis is crucial in many research fields, from urban planning to neuroscience. Several approaches have been proposed to cluster trajectories. They rely on hand-crafted features, which struggle to capture the spatio-temporal complexity of the signal, or on Artificial Neural Networks (ANNs) which can be more efficient but less interpretable. In this paper we present a novel ANN architecture designed to capture the spatio-temporal patterns characteristic of a set of trajectories, while taking into account the demographics of the navigators. Hence, our model extracts markers linked to both behaviour and demographics. We propose a composite signal analyser (CompSNN) combining three simple ANN modules. Each of these modules uses different signal representations of the trajectory while remaining interpretable. Our CompSNN performs significantly better than its modules taken in isolation and allows to visualise which parts of the signal were most useful to discriminate the trajectories.},\n  keywords = {Visualization;Navigation;Urban planning;Time series analysis;Artificial neural networks;Trajectory;Signal representation;graph signal processing;neural network;cnn;gcnn;explainability;trajectory;pattern analysis},\n  doi = {10.23919/Eusipco47968.2020.9287403},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001422.pdf},\n}\n\n
\n
\n\n\n
\n            Spatial trajectories are ubiquitous and complex signals. Their analysis is crucial in many research fields, from urban planning to neuroscience. Several approaches have been proposed to cluster trajectories. They rely either on hand-crafted features, which struggle to capture the spatio-temporal complexity of the signal, or on Artificial Neural Networks (ANNs), which can be more efficient but are less interpretable. In this paper we present a novel ANN architecture designed to capture the spatio-temporal patterns characteristic of a set of trajectories, while taking into account the demographics of the navigators. Hence, our model extracts markers linked to both behaviour and demographics. We propose a composite signal analyser (CompSNN) combining three simple ANN modules. Each of these modules uses a different signal representation of the trajectory while remaining interpretable. Our CompSNN performs significantly better than its modules taken in isolation and makes it possible to visualise which parts of the signal were most useful to discriminate the trajectories.\n
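As an illustration of what "different signal representations of the trajectory" can mean in practice, the sketch below derives two simple per-step signals, speed and turning angle, from a raw 2D trajectory; these are generic hand-crafted representations, not the CompSNN modules themselves.

```python
# Two simple per-step signal representations of a 2-D trajectory (illustrative only).
import numpy as np

def trajectory_signals(xy, dt=1.0):
    """xy: (T, 2) array of positions sampled every dt seconds."""
    v = np.diff(xy, axis=0) / dt                        # step velocity vectors, (T-1, 2)
    speed = np.linalg.norm(v, axis=1)                   # speed signal
    heading = np.arctan2(v[:, 1], v[:, 0])              # heading angle per step
    turning = np.diff(heading)                          # turning-angle signal, (T-2,)
    turning = (turning + np.pi) % (2 * np.pi) - np.pi   # wrap to (-pi, pi]
    return speed, turning

t = np.linspace(0, 4 * np.pi, 200)
xy = np.stack([t, np.sin(t)], axis=1)                   # a wavy toy trajectory
speed, turning = trajectory_signals(xy)
print(speed.shape, turning.shape)                       # (199,) (198,)
```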
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tensor Recovery via Nonconvex Low-Rank Approximation.\n \n \n \n \n\n\n \n Chen, L.; Jiang, X.; Liu, X.; and Zhou, Z.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 710-714, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"TensorPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287404,\n  author = {L. Chen and X. Jiang and X. Liu and Z. Zhou},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Tensor Recovery via Nonconvex Low-Rank Approximation},\n  year = {2020},\n  pages = {710-714},\n  abstract = {The low-rank tensor recovery is a powerful approach to depict the intrinsic structure within high-dimensional data, and has been extensively leveraged in many real-world applications. Conventional techniques of low-rank recovery formulate it as a rank minimization problem, then approximate the rank function with the convex relaxation. In this paper, we propose a new tensor logarithmic norm as the nonconvex rank surrogate. Compared with the convex surrogate of nuclear norm, the proposed logarithmic norm is proved to be a tighter approximation to the tensor average rank, and thus is more sparsity-encouraging to extract the underlying low-rank information. Although minimizing the logarithmic norm leads to a nonconvex optimization problem, we rigorously derive its closed-form solution with the guarantee of local optimality. Experimental results demonstrate the strong convergence behavior of the proposed algorithm. In the real-world application of video recovery, our method outperforms several state-of-the-art methods and shows the remarkable recovery accuracy.},\n  keywords = {Tensors;Signal processing algorithms;Europe;Signal processing;Minimization;Robustness;Optimization;low-rank;tensor recovery;nonconvex approximation;augmented Lagrange multiplier},\n  doi = {10.23919/Eusipco47968.2020.9287404},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000710.pdf},\n}\n\n
\n
\n\n\n
\n            Low-rank tensor recovery is a powerful approach for capturing the intrinsic structure of high-dimensional data, and has been extensively leveraged in many real-world applications. Conventional low-rank recovery techniques formulate it as a rank minimization problem and then approximate the rank function with a convex relaxation. In this paper, we propose a new tensor logarithmic norm as a nonconvex rank surrogate. Compared with the convex nuclear-norm surrogate, the proposed logarithmic norm is proved to be a tighter approximation of the tensor average rank, and is thus more sparsity-encouraging in extracting the underlying low-rank information. Although minimizing the logarithmic norm leads to a nonconvex optimization problem, we rigorously derive its closed-form solution with a guarantee of local optimality. Experimental results demonstrate the strong convergence behavior of the proposed algorithm. In the real-world application of video recovery, our method outperforms several state-of-the-art methods and shows remarkable recovery accuracy.\n
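On matrices, the logarithmic surrogate leads to a shrinkage of singular values with a closed-form solution, which the sketch below implements; the paper applies an analogous step to tensors, and the parameters lam and eps here are illustrative assumptions.

```python
# Nonconvex log-surrogate shrinkage on matrix singular values (illustrative parameters).
import numpy as np

def log_prox_scalar(s, lam, eps):
    """argmin_{x>=0} lam*log(eps + x) + 0.5*(x - s)**2, solved in closed form."""
    disc = (s + eps) ** 2 - 4.0 * lam
    if disc < 0:
        return 0.0
    x = max(0.0, 0.5 * ((s - eps) + np.sqrt(disc)))   # larger stationary point
    f = lambda t: lam * np.log(eps + t) + 0.5 * (t - s) ** 2
    return x if f(x) <= f(0.0) else 0.0               # keep the better of {0, x}

def log_svt(Y, lam=0.5, eps=1e-2):
    U, S, Vt = np.linalg.svd(Y, full_matrices=False)
    S_shrunk = np.array([log_prox_scalar(s, lam, eps) for s in S])
    return (U * S_shrunk) @ Vt

Y = np.random.default_rng(0).standard_normal((8, 6))
L = log_svt(Y)
print(np.linalg.matrix_rank(L, tol=1e-8))   # typically smaller than rank(Y)
```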
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Testing different methodologies for Granger causality estimation: A simulation study.\n \n \n \n \n\n\n \n Antonacci, Y.; Astolfi, L.; and Faes, L.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 940-944, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"TestingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287405,\n  author = {Y. Antonacci and L. Astolfi and L. Faes},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Testing different methodologies for Granger causality estimation: A simulation study},\n  year = {2020},\n  pages = {940-944},\n  abstract = {Granger causality (GC) is a method for determining whether and how two time series exert causal influences one over the other. As it is easy to implement through vector autoregressive (VAR) models and can be generalized to the multivariate case, GC has spread in many different areas of research such as neuroscience and network physiology. In its basic formulation, the computation of GC involves two different regressions, taking respectively into account the whole past history of the investigated multivariate time series (full model) and the past of all time series except the putatively causal time series (restricted model). However, the restricted model cannot be represented through a finite order VAR process and, when few data samples are available or the number of time series is very high, the estimation of GC exhibits a strong reduction in accuracy. To mitigate these problems, improved estimation strategies have been recently implemented, including state space (SS) models and partial conditioning (PC) approaches. In this work, we propose a new method to compute GC which combines SS and PC and tests it together with other four commonly used estimation approaches. In simulated networks of linearly interacting time series, we show the possibility to reconstruct the network structure even in challenging conditions of data samples available.},\n  keywords = {autoregressive processes;causality;time series;vectors;Granger causality estimation;time series exert;vector autoregressive models;multivariate case;neuroscience;network physiology;investigated multivariate time series;putatively causal time series;restricted model;finite order VAR process;data samples;linearly interacting time series;commonly used estimation approaches;state space models;improved estimation strategies;Reactive power;Computational modeling;Time series analysis;Estimation;Signal processing;Physiology;Testing;Granger Causality;Dynamical Networks;Vector Autoregressive Processes;Multivariate Time Series},\n  doi = {10.23919/Eusipco47968.2020.9287405},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000940.pdf},\n}\n\n
\n
\n\n\n
\n            Granger causality (GC) is a method for determining whether and how two time series exert causal influences on each other. As it is easy to implement through vector autoregressive (VAR) models and can be generalized to the multivariate case, GC has spread to many different areas of research such as neuroscience and network physiology. In its basic formulation, the computation of GC involves two different regressions, taking into account, respectively, the whole past history of the investigated multivariate time series (full model) and the past of all time series except the putatively causal one (restricted model). However, the restricted model cannot be represented by a finite-order VAR process and, when few data samples are available or the number of time series is very high, the estimation of GC exhibits a strong reduction in accuracy. To mitigate these problems, improved estimation strategies have recently been implemented, including state space (SS) models and partial conditioning (PC) approaches. In this work, we propose a new method to compute GC which combines SS and PC, and test it together with four other commonly used estimation approaches. In simulated networks of linearly interacting time series, we show that the network structure can be reconstructed even under challenging conditions with few available data samples.\n
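In its basic bivariate form, the GC computation described above amounts to two OLS regressions and a log-ratio of residual variances, as in the sketch below on a toy VAR process; the SS and PC refinements studied in the paper are not reproduced.

```python
# Bivariate Granger causality from full vs. restricted OLS regressions (basic form only).
import numpy as np

def lag_matrix(s, p, T):
    """Columns are s[t-1], ..., s[t-p] for t = p..T-1."""
    return np.column_stack([s[p - k:T - k] for k in range(1, p + 1)])

def granger_causality(x, y, p=2):
    """GC from y to x: ln(var_restricted / var_full)."""
    T = len(x)
    target = x[p:]
    Z_full = np.column_stack([np.ones(T - p), lag_matrix(x, p, T), lag_matrix(y, p, T)])
    Z_restr = np.column_stack([np.ones(T - p), lag_matrix(x, p, T)])
    res_full = target - Z_full @ np.linalg.lstsq(Z_full, target, rcond=None)[0]
    res_restr = target - Z_restr @ np.linalg.lstsq(Z_restr, target, rcond=None)[0]
    return np.log(np.var(res_restr) / np.var(res_full))

# Toy VAR(1) where y drives x but not vice versa.
rng = np.random.default_rng(0)
T = 2000
x = np.zeros(T); y = np.zeros(T)
for t in range(1, T):
    y[t] = 0.8 * y[t - 1] + rng.standard_normal()
    x[t] = 0.5 * x[t - 1] + 0.4 * y[t - 1] + rng.standard_normal()
print("GC y->x:", granger_causality(x, y), " GC x->y:", granger_causality(y, x))
```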
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Model and learning-based computational 3D phase microscopy with intensity diffraction tomography.\n \n \n \n \n\n\n \n Matlock, A.; Xue, Y.; Li, Y.; Cheng, S.; Tahir, W.; and Tian, L.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 760-764, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ModelPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287407,\n  author = {A. Matlock and Y. Xue and Y. Li and S. Cheng and W. Tahir and L. Tian},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Model and learning-based computational 3D phase microscopy with intensity diffraction tomography},\n  year = {2020},\n  pages = {760-764},\n  abstract = {Intensity Diffraction Tomography (IDT) is a new computational microscopy technique providing quantitative, volumetric, large field-of-view (FOV) phase imaging of biological samples. This approach uses computationally efficient inverse scattering models to recover 3D phase volumes of weakly scattering objects from intensity measurements taken under diverse illumination at a single focal plane. IDT is easily implemented in a standard microscope equipped with an LED array source and requires no exogenous contrast agents, making the technology widely accessible for biological research.Here, we discuss model and learning-based approaches for complex 3D object recovery with IDT. We present two model-based computational illumination strategies, multiplexed IDT (mIDT) [1] and annular IDT (aIDT) [2], that achieve high-throughput quantitative 3D object phase recovery at hardware-limited 4Hz and 10Hz volume rates, respectively. We illustrate these techniques on living epithelial buccal cells and Caenorhabditis elegans worms. For strong scattering object recovery with IDT, we present an uncertainty quantification framework for assessing the reliability of deep learning-based phase recovery methods [3]. This framework provides per-pixel evaluation of a neural network predictions confidence level, allowing for efficient and reliable complex object recovery. This uncertainty learning framework is widely applicable for reliable deep learning-based biomedical imaging techniques and shows significant potential for IDT.},\n  keywords = {biological tissues;biomedical optical imaging;cellular biophysics;image reconstruction;image resolution;learning (artificial intelligence);medical image processing;optical microscopy;optical tomography;reliable complex object recovery;uncertainty learning framework;reliable deep learning-based biomedical imaging techniques;learning-based computational 3D phase microscopy;intensity diffraction tomography;computational microscopy technique;field-of-view phase imaging;biological samples;computationally efficient inverse;3D phase volumes;weakly scattering objects;intensity measurements;diverse illumination;single focal plane;LED array source;exogenous contrast agents;biological research;complex 3D object recovery;model-based computational illumination strategies;multiplexed IDT;quantitative 3D object phase recovery;strong scattering object recovery;deep learning-based phase recovery methods;efficient object recovery;frequency 10.0 Hz;frequency 4.0 Hz;Solid modeling;Three-dimensional displays;Uncertainty;Computational modeling;Biological system modeling;Microscopy;Reliability;Tomography;High Volume-Rate Imaging;Physics-Based Learning;Uncertainty Learning;Computational Imaging},\n  doi = {10.23919/Eusipco47968.2020.9287407},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000760.pdf},\n}\n\n
\n
\n\n\n
\n            Intensity Diffraction Tomography (IDT) is a new computational microscopy technique providing quantitative, volumetric, large field-of-view (FOV) phase imaging of biological samples. This approach uses computationally efficient inverse scattering models to recover 3D phase volumes of weakly scattering objects from intensity measurements taken under diverse illumination at a single focal plane. IDT is easily implemented in a standard microscope equipped with an LED array source and requires no exogenous contrast agents, making the technology widely accessible for biological research. Here, we discuss model- and learning-based approaches for complex 3D object recovery with IDT. We present two model-based computational illumination strategies, multiplexed IDT (mIDT) [1] and annular IDT (aIDT) [2], that achieve high-throughput quantitative 3D object phase recovery at hardware-limited 4 Hz and 10 Hz volume rates, respectively. We illustrate these techniques on living epithelial buccal cells and Caenorhabditis elegans worms. For strong scattering object recovery with IDT, we present an uncertainty quantification framework for assessing the reliability of deep learning-based phase recovery methods [3]. This framework provides a per-pixel evaluation of the neural network prediction's confidence level, allowing for efficient and reliable complex object recovery. This uncertainty learning framework is widely applicable to reliable deep learning-based biomedical imaging techniques and shows significant potential for IDT.\n
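One common (and here merely illustrative) way to obtain the kind of per-pixel confidence map mentioned above is Monte-Carlo dropout: keep dropout active at test time and use the spread of repeated predictions as an uncertainty estimate. The sketch below shows the mechanism on a toy network; it is not necessarily the framework used in the paper.

```python
# Per-pixel uncertainty via Monte-Carlo dropout on a toy network (illustrative only).
import torch
import torch.nn as nn

net = nn.Sequential(
    nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(), nn.Dropout2d(0.2),
    nn.Conv2d(16, 1, 3, padding=1),
)

def mc_dropout_predict(model, x, n_samples=32):
    model.train()                      # keep dropout active at inference time
    with torch.no_grad():
        preds = torch.stack([model(x) for _ in range(n_samples)])
    return preds.mean(dim=0), preds.std(dim=0)   # per-pixel estimate and uncertainty

x = torch.randn(1, 1, 64, 64)          # stand-in for an intensity measurement
mean, std = mc_dropout_predict(net, x)
print(mean.shape, std.shape)           # torch.Size([1, 1, 64, 64]) each
```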
\n\n\n
\n\n\n
\n  \n\n \n \n \n \n \n \n  DNN Classification Model-based Speech Enhancement Using Mask Selection Technique.\n \n \n \n \n\n\n \n Lee, B. -K.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 436-440, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DNNPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287410,\n  author = {B. -K. Lee},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {DNN Classification Model-based Speech Enhancement Using Mask Selection Technique},\n  year = {2020},\n  pages = {436-440},\n  abstract = {This paper presents a speech enhancement algorithm using a DNN classification model combined with noise classification-based ensemble. Although various single-channel speech enhancement algorithms based on deep learning have been recently developed, since it is optimized for reducing the mean square error, it can not accurately estimate the actual target values in a regression task, resulting in muffled enhanced speech. Therefore, this paper proposes the DNN classification-based single-channel speech enhancement algorithm to overcome disadvantages of the existing DNN regression-based speech enhancement algorithms. To replace the DNN regression task into the classification task, gain mask templates are predefined using k-means clustering among the gain masks. The input feature vector extracted from the microphone input signal is fed into the DNN’s input and then an optimal gain mask is selected from the gain mask templates. Furthermore, we define the gain mask templates for each noise environment using the DNN-based noise classification to cover various noise environments and use an ensemble structure based on a probability of the noise classification stage.},\n  keywords = {Simulation;Signal processing algorithms;Clustering algorithms;Speech enhancement;Feature extraction;Classification algorithms;Task analysis;speech enhancement;deep learning;classification;ideal ratio mask;log power spectrum;ensemble},\n  doi = {10.23919/Eusipco47968.2020.9287410},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000436.pdf},\n}\n\n
\n
\n\n\n
\n            This paper presents a speech enhancement algorithm using a DNN classification model combined with a noise classification-based ensemble. Although various single-channel speech enhancement algorithms based on deep learning have recently been developed, because they are optimized to reduce the mean square error they cannot accurately estimate the actual target values in a regression task, resulting in muffled enhanced speech. Therefore, this paper proposes a DNN classification-based single-channel speech enhancement algorithm to overcome the disadvantages of existing DNN regression-based speech enhancement algorithms. To recast the DNN regression task as a classification task, gain mask templates are predefined using k-means clustering of the gain masks. The input feature vector extracted from the microphone input signal is fed into the DNN, and an optimal gain mask is then selected from the gain mask templates. Furthermore, we define the gain mask templates for each noise environment using DNN-based noise classification to cover various noise environments, and use an ensemble structure based on the probabilities of the noise classification stage.\n
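The template construction can be sketched directly: cluster training gain masks with k-means to obtain a small codebook, then select the nearest template at test time. In the paper the template index is predicted by a DNN classifier from noisy-speech features; the sketch below uses synthetic masks and oracle selection purely to show the quantization step.

```python
# Gain-mask template codebook via k-means (synthetic masks, oracle selection).
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
n_frames, n_bins, n_templates = 2000, 64, 32

# Stand-in for ideal-ratio masks computed from clean/noisy training pairs.
train_masks = rng.beta(2.0, 2.0, size=(n_frames, n_bins))

kmeans = KMeans(n_clusters=n_templates, n_init=10, random_state=0).fit(train_masks)
templates = kmeans.cluster_centers_              # the predefined gain-mask templates

# At test time a classifier would output a template index; here we just pick the
# template closest to an "oracle" mask to illustrate the quantization step.
test_mask = rng.beta(2.0, 2.0, size=(1, n_bins))
idx = kmeans.predict(test_mask)[0]
print("selected template", idx, "MSE:", np.mean((templates[idx] - test_mask[0]) ** 2))
```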
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tensor Decomposition Based DOA Estimation for Transmit Beamspace MIMO Radar.\n \n \n \n \n\n\n \n Xu, F.; Morency, M. W.; and Vorobyov, S. A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1871-1875, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"TensorPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287411,\n  author = {F. Xu and M. W. Morency and S. A. Vorobyov},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Tensor Decomposition Based DOA Estimation for Transmit Beamspace MIMO Radar},\n  year = {2020},\n  pages = {1871-1875},\n  abstract = {The detection and localization of multiple targets is a fundamental research area for multiple input multiple output (MIMO) radar. In many civilian applications of MIMO technology, for example, automotive radar, high resolution direction of arrival (DOA) estimation is required. In this paper, a novel DOA estimation algorithm based on tensor decomposition is proposed for collocated transmit beamspace MIMO radar. First, we introduce the flipped-conjugate version of the transmit beamspace matrix, which focuses the transmit energy into fixed region. This can increase the signal to noise ratio (SNR) of targets. Then we reshape the received data into a tensor form, the structure of which provides the estimations of the transmit and receive steering matrices. The alternating least squares (ALS) algorithm is applied to find the tensor components. The DOA estimation is conducted in transmitters via the rotational invariance property achieved by beamspace matrix. It is proved that at most M−2 grating lobes exist during the process of DOA estimation, where M is the number of the transmitters. These grating lobes can be eliminated by finite trials of spectrum search. The performance of our proposed DOA estimation method surpasses several conventional algorithms in terms of accuracy and resolution.},\n  keywords = {Direction-of-arrival estimation;Tensors;MIMO radar;Estimation;Signal processing algorithms;Gratings;Signal to noise ratio;Collocated MIMO radar;DOA estimation;Grating lobes;Localization;Tensor decomposition},\n  doi = {10.23919/Eusipco47968.2020.9287411},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001871.pdf},\n}\n\n
\n
\n\n\n
\n            The detection and localization of multiple targets is a fundamental research area for multiple input multiple output (MIMO) radar. Many civilian applications of MIMO technology, for example automotive radar, require high-resolution direction of arrival (DOA) estimation. In this paper, a novel DOA estimation algorithm based on tensor decomposition is proposed for collocated transmit beamspace MIMO radar. First, we introduce the flipped-conjugate version of the transmit beamspace matrix, which focuses the transmit energy into a fixed region. This can increase the signal-to-noise ratio (SNR) of the targets. We then reshape the received data into a tensor form, whose structure provides estimates of the transmit and receive steering matrices. The alternating least squares (ALS) algorithm is applied to find the tensor components. DOA estimation is conducted on the transmit side via the rotational invariance property achieved by the beamspace matrix. It is proved that at most M−2 grating lobes exist during DOA estimation, where M is the number of transmitters. These grating lobes can be eliminated by a finite number of spectrum-search trials. The performance of the proposed DOA estimation method surpasses several conventional algorithms in terms of accuracy and resolution.\n
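The rotational-invariance idea that underlies the final DOA step can be illustrated with the classical element-space (ESPRIT-style) procedure on a plain uniform linear array, as below; the tensor decomposition and transmit-beamspace design of the paper are not reproduced, and the scenario parameters are illustrative.

```python
# Classical rotational-invariance (ESPRIT-style) DOA estimation on a plain ULA.
import numpy as np

rng = np.random.default_rng(0)
M, N = 8, 200                                    # sensors, snapshots
doas = np.deg2rad([-20.0, 15.0])                 # true angles
A = np.exp(1j * np.pi * np.outer(np.arange(M), np.sin(doas)))   # half-wavelength ULA
S = (rng.standard_normal((2, N)) + 1j * rng.standard_normal((2, N))) / np.sqrt(2)
X = A @ S + 0.05 * (rng.standard_normal((M, N)) + 1j * rng.standard_normal((M, N)))

R = X @ X.conj().T / N
eigval, eigvec = np.linalg.eigh(R)
Es = eigvec[:, -2:]                              # signal subspace (two sources)
Psi = np.linalg.lstsq(Es[:-1], Es[1:], rcond=None)[0]   # rotational invariance
phases = np.angle(np.linalg.eigvals(Psi))
print("estimated DOAs (deg):", np.sort(np.rad2deg(np.arcsin(phases / np.pi))))
```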
\n\n\n
\n\n\n
\n  \n\n \n \n \n \n \n \n  A Geometric Interpretation of Trilateration for RSS-based Localization.\n \n \n \n \n\n\n \n Le, H. M.; Rossi, J. -P.; and Slock, D.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1797-1801, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287413,\n  author = {H. M. Le and J. -P. Rossi and D. Slock},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Geometric Interpretation of Trilateration for RSS-based Localization},\n  year = {2020},\n  pages = {1797-1801},\n  abstract = {Trilateration is a popular approach in localization. Many related geometric approaches have been proposed for 2D scenarios. In general, each approach has a standard case in which the main solution is applied, and many specific cases. Each specific case has a particular solution, which makes the algorithm more complex. This paper introduces a novel geometric approach that covers all the cases considered by previous algorithms. It turns out though that this approach is a special case of an existing approach, for which we hence provide a geometric interpretation. Numerical results illustrate the method in RSS-based localization while estimating simultaneously the path loss component.},\n  keywords = {Solid modeling;Three-dimensional displays;Two dimensional displays;Signal processing algorithms;Estimation;Signal processing;Standards;trilateration;algorithm;localization;RSS-based;path loss estimation},\n  doi = {10.23919/Eusipco47968.2020.9287413},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001797.pdf},\n}\n\n
\n
\n\n\n
\n            Trilateration is a popular approach to localization. Many related geometric approaches have been proposed for 2D scenarios. In general, each approach has a standard case in which the main solution applies, plus many specific cases, each with its own particular solution, which makes the algorithm more complex. This paper introduces a novel geometric approach that covers all the cases considered by previous algorithms. It turns out, though, that this approach is a special case of an existing approach, for which we hence provide a geometric interpretation. Numerical results illustrate the method in RSS-based localization while simultaneously estimating the path loss component.\n
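For context, a generic linearized least-squares trilateration from range estimates (for example, ranges back-calculated from RSS with a known path-loss model) looks as follows; this is the standard baseline, not the specific geometric construction introduced in the paper.

```python
# Linearized least-squares trilateration from noisy range estimates (generic baseline).
import numpy as np

def trilaterate(anchors, d):
    """anchors: (n, 2) positions; d: (n,) range estimates.  Returns the LS position."""
    x0, y0 = anchors[0]
    A = 2.0 * (anchors[1:] - anchors[0])
    b = (d[0] ** 2 - d[1:] ** 2
         + np.sum(anchors[1:] ** 2, axis=1) - (x0 ** 2 + y0 ** 2))
    return np.linalg.lstsq(A, b, rcond=None)[0]

anchors = np.array([[0.0, 0.0], [10.0, 0.0], [0.0, 10.0], [10.0, 10.0]])
target = np.array([3.0, 7.0])
rng = np.random.default_rng(0)
d = np.linalg.norm(anchors - target, axis=1) + 0.05 * rng.standard_normal(4)
print(trilaterate(anchors, d))   # close to [3, 7]
```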
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic EM algorithm for fast analysis of single waveform multi-spectral Lidar data.\n \n \n \n \n\n\n \n Legros, Q.; McLaughlin, S.; Altmann, Y.; and Meignen, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2413-2417, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"StochasticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287414,\n  author = {Q. Legros and S. McLaughlin and Y. Altmann and S. Meignen},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Stochastic EM algorithm for fast analysis of single waveform multi-spectral Lidar data},\n  year = {2020},\n  pages = {2413-2417},\n  abstract = {This paper addresses the problem of estimating spectral and range profiles from single-photon Lidar waveforms associated with single surfaces in presence of an unknown background. A single Lidar waveform per pixel is considered, whereby a single detector is used to acquire information simultaneously at multiple wavelengths. A novel Bayesian approach is developed to perform the estimation of model parameters in a reduced computational time. This is achieved by transforming an EM-based algorithm recently proposed into a stochastic EM algorithm, which is computationally more attractive. The reconstruction performance and computational complexity of our approach are assessed through a series of experiments using synthetic data under different observation scenarios. The obtained results demonstrate a significant speed-up compared to the state-of-the-art method, without significant degradation of the estimation quality.},\n  keywords = {Degradation;Laser radar;Computational modeling;Estimation;Signal processing algorithms;Stochastic processes;Photonics;Multispectral imaging;3D imaging;Single-photon Lidar;Bayesian estimation},\n  doi = {10.23919/Eusipco47968.2020.9287414},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002413.pdf},\n}\n\n
\n
\n\n\n
\n            This paper addresses the problem of estimating spectral and range profiles from single-photon Lidar waveforms associated with single surfaces in the presence of an unknown background. A single Lidar waveform per pixel is considered, whereby a single detector is used to acquire information simultaneously at multiple wavelengths. A novel Bayesian approach is developed to perform the estimation of model parameters in a reduced computational time. This is achieved by transforming a recently proposed EM-based algorithm into a stochastic EM algorithm, which is computationally more attractive. The reconstruction performance and computational complexity of our approach are assessed through a series of experiments using synthetic data under different observation scenarios. The obtained results demonstrate a significant speed-up compared to the state-of-the-art method, without significant degradation of the estimation quality.\n
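The stochastic-EM trick itself, sampling the latent variables in the E-step instead of carrying full posteriors, can be shown on a toy two-component Gaussian mixture as below; the paper applies the same idea to a much richer multispectral Lidar observation model, which is not reproduced here.

```python
# Stochastic EM on a toy two-component Gaussian mixture (illustrative of the SEM step only).
import numpy as np

rng = np.random.default_rng(0)
x = np.concatenate([rng.normal(-2, 1, 400), rng.normal(3, 1, 600)])   # toy data

w, mu, sigma = 0.5, np.array([-1.0, 1.0]), np.array([1.0, 1.0])
for _ in range(100):
    # Stochastic E-step: draw each label from its posterior probability.
    p1 = w * np.exp(-0.5 * ((x - mu[1]) / sigma[1]) ** 2) / sigma[1]
    p0 = (1 - w) * np.exp(-0.5 * ((x - mu[0]) / sigma[0]) ** 2) / sigma[0]
    z = rng.random(x.size) < p1 / (p0 + p1)
    # M-step: closed-form updates from the sampled (hard) assignments.
    w = z.mean()
    for k, mask in enumerate([~z, z]):
        mu[k] = x[mask].mean()
        sigma[k] = x[mask].std() + 1e-6
print(w, mu, sigma)   # roughly 0.6, [-2, 3], [1, 1]
```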
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient Estimation of Kronecker Product of Linear Structured Scatter Matrices under t-distribution.\n \n \n \n \n\n\n \n Mériaux, B.; Ren, C.; Breloy, A.; El Korso, M. N.; and Forster, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2418-2422, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"EfficientPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287415,\n  author = {B. Mériaux and C. Ren and A. Breloy and M. N. {El Korso} and P. Forster},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Efficient Estimation of Kronecker Product of Linear Structured Scatter Matrices under t-distribution},\n  year = {2020},\n  pages = {2418-2422},\n  abstract = {This paper addresses structured scatter matrix estimation within the non convex set of Kronecker product structure. The latter model usually involves two matrices, which can be themselves linearly constrained, and arises in many applications, such as MIMO communication, MEG/EEG data analysis. Taking this prior knowledge into account generally improves estimation accuracy. In the framework of robust estimation, the t-distribution is particularly suited to model heavy-tailed data. In this context, we introduce an estimator of the scatter matrix, having a Kronecker product structure and potential linear structured factors. In addition, we show that the proposed method yields a consistent and efficient estimate.},\n  keywords = {data analysis;electroencephalography;estimation theory;magnetoencephalography;matrix algebra;medical signal processing;MIMO communication;linear structured scatter matrices;scatter matrix estimation;nonconvex set;Kronecker product structure;potential linear structured factors;t-distribution;MIMO communication;MEG-EEG data analysis;Analytical models;Data analysis;Estimation;Europe;Signal processing;Data models;MIMO communication;Structured scatter matrix;Kronecker product;t-distribution;M-estimators},\n  doi = {10.23919/Eusipco47968.2020.9287415},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002418.pdf},\n}\n\n
\n
\n\n\n
\n            This paper addresses structured scatter matrix estimation within the nonconvex set of Kronecker product structures. The latter model usually involves two matrices, which can themselves be linearly constrained, and arises in many applications such as MIMO communication and MEG/EEG data analysis. Taking this prior knowledge into account generally improves estimation accuracy. In the framework of robust estimation, the t-distribution is particularly suited to modeling heavy-tailed data. In this context, we introduce an estimator of the scatter matrix having a Kronecker product structure and potentially linearly structured factors. In addition, we show that the proposed method yields a consistent and efficient estimate.\n
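As background, the classical Gaussian "flip-flop" iteration for a Kronecker-structured covariance is sketched below on matrix-valued samples; the paper's estimator additionally handles t-distributed (heavy-tailed) data and linear structure on the factors, neither of which is reproduced, and the factor sizes are illustrative.

```python
# Gaussian flip-flop estimation of a Kronecker-structured covariance (background only).
import numpy as np

rng = np.random.default_rng(0)
p, q, n = 4, 3, 2000
A_true = np.diag([1.0, 2.0, 3.0, 4.0])
B_true = np.array([[1.0, 0.5, 0.0], [0.5, 1.0, 0.5], [0.0, 0.5, 1.0]])
La, Lb = np.linalg.cholesky(A_true), np.linalg.cholesky(B_true)
X = np.array([La @ rng.standard_normal((p, q)) @ Lb.T for _ in range(n)])

A, B = np.eye(p), np.eye(q)
for _ in range(20):
    Binv = np.linalg.inv(B)
    A = sum(Xi @ Binv @ Xi.T for Xi in X) / (n * q)
    Ainv = np.linalg.inv(A)
    B = sum(Xi.T @ Ainv @ Xi for Xi in X) / (n * p)

scale = q / np.trace(B)          # fix the scale ambiguity of the Kronecker factorization
B, A = B * scale, A / scale
print(np.round(A, 2), np.round(B, 2), sep="\n")
```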
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Deep-Unfolded Reference-Based RPCA Network For Video Foreground-Background Separation.\n \n \n \n \n\n\n \n Van Luong, H.; Joukovsky, B.; Eldar, Y. C.; and Deligiannis, N.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1432-1436, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287416,\n  author = {H. {Van Luong} and B. Joukovsky and Y. C. Eldar and N. Deligiannis},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Deep-Unfolded Reference-Based RPCA Network For Video Foreground-Background Separation},\n  year = {2020},\n  pages = {1432-1436},\n  abstract = {Deep unfolded neural networks are designed by unrolling the iterations of optimization algorithms. They can be shown to achieve faster convergence and higher accuracy than their optimization counterparts. This paper proposes a new deep-unfolding-based network design for the problem of Robust Principal Component Analysis (RPCA) with application to video foreground-background separation. Unlike existing designs, our approach focuses on modeling the temporal correlation between the sparse representations of consecutive video frames. To this end, we perform the unfolding of an iterative algorithm for solving reweighted ℓ1-ℓ1 minimization; this unfolding leads to a different proximal operator (a.k.a. different activation function) adaptively learned per neuron. Experimentation using the moving MNIST dataset shows that the proposed network outperforms a recently proposed state-of-the-art RPCA network in the task of video foreground-background separation.},\n  keywords = {Correlation;Neurons;Signal processing algorithms;Signal processing;Minimization;Task analysis;Optimization;Deep unfolding;deep learning;robust PCA;video analysis;foreground-background separation},\n  doi = {10.23919/Eusipco47968.2020.9287416},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001432.pdf},\n}\n\n
\n
\n\n\n
\n Deep unfolded neural networks are designed by unrolling the iterations of optimization algorithms. They can be shown to achieve faster convergence and higher accuracy than their optimization counterparts. This paper proposes a new deep-unfolding-based network design for the problem of Robust Principal Component Analysis (RPCA) with application to video foreground-background separation. Unlike existing designs, our approach focuses on modeling the temporal correlation between the sparse representations of consecutive video frames. To this end, we perform the unfolding of an iterative algorithm for solving reweighted ℓ1-ℓ1 minimization; this unfolding leads to a different proximal operator (a.k.a. different activation function) adaptively learned per neuron. Experimentation using the moving MNIST dataset shows that the proposed network outperforms a recently proposed state-of-the-art RPCA network in the task of video foreground-background separation.\n
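For reference, the optimization algorithm that such networks unroll can be sketched as the classical inexact augmented-Lagrangian solver for robust PCA below; the learned per-neuron proximal operators and the reweighted ℓ1-ℓ1 formulation of the paper are not reproduced.

```python
# Classical RPCA (principal component pursuit) via an inexact augmented-Lagrangian loop.
import numpy as np

def soft(v, t):
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def svt(Y, t):
    U, s, Vt = np.linalg.svd(Y, full_matrices=False)
    return (U * np.maximum(s - t, 0.0)) @ Vt

def rpca(M, lam=None, mu=1.0, rho=1.5, n_iter=100):
    lam = lam or 1.0 / np.sqrt(max(M.shape))
    L = np.zeros_like(M); S = np.zeros_like(M); Y = np.zeros_like(M)
    for _ in range(n_iter):
        L = svt(M - S + Y / mu, 1.0 / mu)        # low-rank (background) update
        S = soft(M - L + Y / mu, lam / mu)       # sparse (foreground) update
        Y = Y + mu * (M - L - S)                 # dual update
        mu = min(mu * rho, 1e6)
    return L, S

rng = np.random.default_rng(0)
L0 = rng.standard_normal((60, 2)) @ rng.standard_normal((2, 40))
S0 = np.zeros((60, 40))
S0.flat[rng.choice(60 * 40, 60, replace=False)] = 10.0 * rng.standard_normal(60)
L, S = rpca(L0 + S0)
print(np.linalg.norm(L - L0) / np.linalg.norm(L0))   # relative error of the low-rank part
```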
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Modeling the relationship between acoustic stimulus and EEG with a dilated convolutional neural network.\n \n \n \n \n\n\n \n Accou, B.; Jalilpour Monesi, M.; Montoya, J.; Van hamme, H.; and Francart, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1175-1179, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ModelingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287417,\n  author = {B. Accou and M. {Jalilpour Monesi} and J. Montoya and H. {Van hamme} and T. Francart},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Modeling the relationship between acoustic stimulus and EEG with a dilated convolutional neural network},\n  year = {2020},\n  pages = {1175-1179},\n  abstract = {Current tests to measure whether a person can understand speech require behavioral responses from the person, which is in practice not always possible (e.g. young children). Therefore there is a need for objective measures of speech intelligibility. Recently, it has been shown that speech intelligibility can be measured by letting a person listen to natural speech, recording the electroencephalogram (EEG) and decoding the speech envelope from the EEG signal. Linear decoders are used, which is sub-optimal, as the human brain is a complex non-linear system and cannot easily be modeled by a linear decoder. We therefore propose an approach based on deep learning which can model complex non-linear relationships. Our approach is based on dilated convolutions as used in WaveNet to maximize the receptive field with regard to the number of tunable parameters. Comparison with a model based on a state of the art linear decoder and a convolutional baseline model shows that our proposed model significantly improves on both models (from 62.3% to 90.6% (p<; 0.001) and from 78.8% to 90.6% (p<; 0.001) respectively). Best results are achieved with a receptive field size between 250-500ms, which is longer than the optimal integration window for a linear decoder.},\n  keywords = {convolutional neural nets;electroencephalography;learning (artificial intelligence);medical signal processing;speech intelligibility;speech intelligibility;natural speech;EEG signal;nonlinear system;nonlinear relationships;dilated convolutions;art linear decoder;convolutional baseline model;acoustic stimulus;dilated convolutional neural network;behavioral responses;young children;Convolution;Current measurement;Natural languages;Europe;Brain modeling;Electroencephalography;Decoding;match/mismatch;EEG decoding;speech;auditory system;envelope},\n  doi = {10.23919/Eusipco47968.2020.9287417},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001175.pdf},\n}\n\n
\n
\n\n\n
\n            Current tests to measure whether a person can understand speech require behavioral responses from the person, which is not always possible in practice (e.g., with young children). There is therefore a need for objective measures of speech intelligibility. Recently, it has been shown that speech intelligibility can be measured by letting a person listen to natural speech, recording the electroencephalogram (EEG) and decoding the speech envelope from the EEG signal. Linear decoders are typically used, which is sub-optimal, as the human brain is a complex non-linear system and cannot easily be modeled by a linear decoder. We therefore propose an approach based on deep learning which can model complex non-linear relationships. Our approach is based on dilated convolutions, as used in WaveNet, to maximize the receptive field with regard to the number of tunable parameters. Comparison with a model based on a state-of-the-art linear decoder and with a convolutional baseline model shows that our proposed model significantly improves on both (from 62.3% to 90.6% (p < 0.001) and from 78.8% to 90.6% (p < 0.001), respectively). Best results are achieved with a receptive field size between 250 and 500 ms, which is longer than the optimal integration window of a linear decoder.\n
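The key architectural ingredient, a stack of dilated 1D convolutions whose receptive field grows geometrically with depth, can be sketched as follows; channel counts, dilation factors and input length are illustrative assumptions, not the paper's configuration.

```python
# Dilated 1-D convolution stack and its receptive field (illustrative sizes).
import torch
import torch.nn as nn

kernel, dilations = 3, [1, 2, 4, 8, 16]
layers, in_ch = [], 64                             # e.g. 64 EEG channels
for d in dilations:
    layers += [nn.Conv1d(in_ch, 64, kernel, dilation=d, padding=d), nn.ReLU()]
    in_ch = 64
net = nn.Sequential(*layers)

receptive_field = 1 + (kernel - 1) * sum(dilations)      # in samples
print("receptive field:", receptive_field, "samples")    # 63 samples
print(net(torch.randn(1, 64, 640)).shape)                # time length preserved by padding
```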
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Cognitive Fatigue Detection from EEG Signals using Topological Signal Processing.\n \n \n \n \n\n\n \n Das, A. K.; Kumar, K.; Gavas, R. D.; Jaiswal, D.; Chatterjee, D.; Ramakrishnan, R. K.; Chandra, M. G.; and Pal, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1313-1317, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"CognitivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287418,\n  author = {A. K. Das and K. Kumar and R. D. Gavas and D. Jaiswal and D. Chatterjee and R. K. Ramakrishnan and M. G. Chandra and A. Pal},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Cognitive Fatigue Detection from EEG Signals using Topological Signal Processing},\n  year = {2020},\n  pages = {1313-1317},\n  abstract = {Topological signal processing has attracted substantial interest lately owing to its attribute of multi-scale tracking of simplicial complexes. This distinctive aspect is exploited to study the topological evolution of time series/signals. Specifically, EEG signals analysis is considered here for the challenging task of cognitive fatigue detection. This work utilizes the topological attributes like Betti numbers, and persistent homology of dimension 0 and 1 extracted from EEG signals to study the cognitive state of an individual. Using the CogBeacon dataset, a comparison of the topological features with the conventional time and frequency domain features is presented. Random forest classifier is used to classify the fatigue state. Results show that the performance of topological features is at par with the conventional features even when significantly less number of topological features are used. Also, enhancement in classification accuracy is observed by appropriately combining both conventional and topological features which outperforms the state-of-the-art method for fatigue detection. Additionally, recursive feature elimination is applied on combined features to reduce redundancy by selecting a subset consisting of prominent features. Analysis indicates that all topological features derived from EEG signals contribute to the best performing subset, which also increases the overall accuracy.},\n  keywords = {cognition;electroencephalography;feature extraction;medical signal processing;signal classification;support vector machines;time series;cognitive fatigue detection;topological signal processing;topological evolution;EEG signals analysis;topological attributes;topological features;conventional features;Systematics;Redundancy;Fatigue;Feature extraction;Electroencephalography;Task analysis;Signal analysis;Topological Signal Processing;Electroencephalogram;Cognitive Fatigue;Persistent Homology;Classification;Recursive Feature Elimination},\n  doi = {10.23919/Eusipco47968.2020.9287418},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001313.pdf},\n}\n\n
\n
\n\n\n
\n            Topological signal processing has attracted substantial interest lately owing to its ability to track simplicial complexes across scales. This distinctive aspect is exploited here to study the topological evolution of time series/signals. Specifically, EEG signal analysis is considered for the challenging task of cognitive fatigue detection. This work utilizes topological attributes such as Betti numbers and persistent homology of dimensions 0 and 1, extracted from EEG signals, to study the cognitive state of an individual. Using the CogBeacon dataset, a comparison of the topological features with conventional time- and frequency-domain features is presented. A random forest classifier is used to classify the fatigue state. Results show that the performance of topological features is on par with conventional features even when a significantly smaller number of topological features is used. Moreover, an improvement in classification accuracy is observed when conventional and topological features are appropriately combined, which outperforms the state-of-the-art method for fatigue detection. Additionally, recursive feature elimination is applied to the combined features to reduce redundancy by selecting a subset of prominent features. The analysis indicates that all topological features derived from EEG signals contribute to the best performing subset, which also increases the overall accuracy.\n
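A crude flavour of the topological features involved can be given by counting connected components (Betti-0) of the sublevel sets of a 1D signal across thresholds, as below; full persistent homology (birth/death pairs, dimension-1 features) requires a TDA library and is not reproduced here.

```python
# Betti-0 counts of the sublevel sets of a 1-D signal across thresholds (crude illustration).
import numpy as np

def betti0_sublevel(signal, thresholds):
    counts = []
    for t in thresholds:
        mask = signal <= t
        # Number of maximal runs of True values = number of connected components.
        counts.append(int(mask[0]) + int(np.sum(mask[1:] & ~mask[:-1])))
    return np.array(counts)

rng = np.random.default_rng(0)
t = np.linspace(0, 2, 512)
eeg_like = np.sin(2 * np.pi * 10 * t) + 0.3 * rng.standard_normal(t.size)
thresholds = np.linspace(eeg_like.min(), eeg_like.max(), 20)
print(betti0_sublevel(eeg_like, thresholds))
```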
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Study of Deep-Learning-based Prediction Methods for Lossless Coding.\n \n \n \n \n\n\n \n Schiopu, I.; Huang, H.; and Munteanu, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 521-525, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287420,\n  author = {I. Schiopu and H. Huang and A. Munteanu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Study of Deep-Learning-based Prediction Methods for Lossless Coding},\n  year = {2020},\n  pages = {521-525},\n  abstract = {In recent years, the research community started to explore new strategies for encoding image and video content based on innovative coding solutions developed using machine learning (ML) tools. An emerging research strategy proposes novel hybrid coding solutions as an alternative to traditional coding paradigms by replacing specific coding modules with efficient deep-learning (DL) based techniques. The paper presents a study on DL-based intra-prediction methods for lossless compression applications. For image coding, the paper studies our recently proposed pixel-wise prediction methods designed based on the residual learning concept, integrated into conventional lossless image coding frameworks. Moreover, a novel neural network design is proposed based on a new structure of layers. For video coding, the paper studies our recently proposed block-wise prediction methods designed based on recent breakthroughs in the ML domain, and integrated in the lossless HEVC standard. Experimental results show that the proposed lossless image codec achieves an improved performance with 1.6% compared to state- of-the-art DL-based methods. The study reveals that the hybrid coding solutions which incorporate DL-based prediction methods systematically and substantially improve the coding performance over traditional lossless coding paradigms.},\n  keywords = {Image coding;Systematics;Prediction methods;Tools;Signal processing;Encoding;Standards;Lossless compression;deep-learning;intra-prediction;hybrid coding solutions},\n  doi = {10.23919/Eusipco47968.2020.9287420},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000521.pdf},\n}\n\n
\n
\n\n\n
\n            In recent years, the research community has started to explore new strategies for encoding image and video content based on innovative coding solutions developed using machine learning (ML) tools. An emerging research strategy proposes novel hybrid coding solutions as an alternative to traditional coding paradigms by replacing specific coding modules with efficient deep-learning (DL) based techniques. This paper presents a study of DL-based intra-prediction methods for lossless compression applications. For image coding, the paper studies our recently proposed pixel-wise prediction methods, designed based on the residual learning concept and integrated into conventional lossless image coding frameworks. Moreover, a novel neural network design is proposed based on a new structure of layers. For video coding, the paper studies our recently proposed block-wise prediction methods, designed based on recent breakthroughs in the ML domain and integrated into the lossless HEVC standard. Experimental results show that the proposed lossless image codec achieves a performance improvement of 1.6% compared to state-of-the-art DL-based methods. The study reveals that hybrid coding solutions which incorporate DL-based prediction methods systematically and substantially improve coding performance over traditional lossless coding paradigms.\n
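As a point of comparison for the learned pixel-wise predictors, the sketch below implements a classical hand-crafted causal predictor for lossless image coding, the median edge detector of JPEG-LS/LOCO-I; it is a baseline of the kind such methods aim to outperform, not the paper's network.

```python
# Median edge detector (MED) pixel-wise predictor, the classical JPEG-LS/LOCO-I baseline.
import numpy as np

def med_predict(img):
    """Predict each pixel from its left (a), top (b) and top-left (c) causal neighbours."""
    img = img.astype(np.int32)
    pred = np.zeros_like(img)
    h, w = img.shape
    for i in range(h):
        for j in range(w):
            a = img[i, j - 1] if j > 0 else 0
            b = img[i - 1, j] if i > 0 else 0
            c = img[i - 1, j - 1] if (i > 0 and j > 0) else 0
            if c >= max(a, b):
                pred[i, j] = min(a, b)
            elif c <= min(a, b):
                pred[i, j] = max(a, b)
            else:
                pred[i, j] = a + b - c
    return pred

img = np.tile(np.arange(16, dtype=np.uint8), (16, 1)) * 8     # smooth toy image
residual = img.astype(np.int32) - med_predict(img)
print("residual energy:", int(np.sum(residual ** 2)))          # far below the image energy
```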
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Gravitational Search Algorithm for IIR Filter-Based Audio Equalization.\n \n \n \n \n\n\n \n Pepe, G.; Gabrielli, L.; Squartini, S.; Cattani, L.; and Tripodi, C.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 496-500, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"GravitationalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287421,\n  author = {G. Pepe and L. Gabrielli and S. Squartini and L. Cattani and C. Tripodi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Gravitational Search Algorithm for IIR Filter-Based Audio Equalization},\n  year = {2020},\n  pages = {496-500},\n  abstract = {In this paper we present an evolutionary algorithm for the design of stable IIR filters for binaural audio equalization. The filters are arranged as a cascade of second-order sections (SOS’s) and the gravitational search algorithm (GSA) is used. This process seeks for optimal coefficients based on a fitness function, possibly leading to unstable filters. To avoid this, we propose two alternative methods. Experiments have been performed taking an in-car listening environment as the use case, characterized by multiple loudspeakers, thus, multiple impulse responses (IR). This technique has been compared with a previous heuristic method, achieving superior results.},\n  keywords = {Loudspeakers;Finite impulse response filters;Signal processing algorithms;IIR filters;Evolutionary computation;Filtering algorithms;Reflection;Gravitational search algorithm;Audio equalization;IIR filters;Automotive},\n  doi = {10.23919/Eusipco47968.2020.9287421},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000496.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we present an evolutionary algorithm for the design of stable IIR filters for binaural audio equalization. The filters are arranged as a cascade of second-order sections (SOS’s) and the gravitational search algorithm (GSA) is used. This process searches for optimal coefficients based on a fitness function, possibly leading to unstable filters. To avoid this, we propose two alternative methods. Experiments have been performed taking an in-car listening environment as the use case, characterized by multiple loudspeakers and, thus, multiple impulse responses (IRs). This technique has been compared with a previous heuristic method, achieving superior results.\n
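As an illustration of the kind of search loop the abstract describes, the sketch below (a toy under stated assumptions, not the authors' implementation) evaluates a candidate cascade of second-order sections against a target equalization curve and rejects unstable candidates by checking pole radii; a gravitational-search or any other evolutionary optimizer would call such a fitness function on every candidate.

```python
# Minimal sketch (not the paper's code): fitness of a candidate SOS cascade
# for audio equalization, with a stability check on each biquad's poles.
import numpy as np
from scipy.signal import sosfreqz, tf2zpk

def sos_is_stable(sos):
    """True if every second-order section has its poles inside the unit circle."""
    for b0, b1, b2, a0, a1, a2 in sos:
        _, poles, _ = tf2zpk([b0, b1, b2], [a0, a1, a2])
        if np.any(np.abs(poles) >= 1.0):
            return False
    return True

def fitness(sos, freqs, target_db, fs=48000.0):
    """Mean-squared error (in dB) between the cascade response and a target curve."""
    if not sos_is_stable(sos):
        return np.inf                              # penalize unstable candidates
    _, h = sosfreqz(sos, worN=freqs, fs=fs)
    mag_db = 20.0 * np.log10(np.maximum(np.abs(h), 1e-12))
    return float(np.mean((mag_db - target_db) ** 2))

# toy usage: flat 0 dB target, one candidate made of two biquads
freqs = np.linspace(20.0, 20000.0, 256)
target = np.zeros_like(freqs)
candidate = np.array([[1.0, 0.2, 0.1, 1.0, -0.5, 0.2],
                      [1.0, -0.3, 0.05, 1.0, 0.4, 0.1]])
print(fitness(candidate, freqs, target))
```

Returning an infinite fitness for unstable candidates is only one way of keeping the search inside the stable region; the paper proposes two alternative mechanisms for the same purpose.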
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unsupervised Clustering on Signed Graphs with Unknown Number of Clusters.\n \n \n \n \n\n\n \n Dittrich, T.; and Matz, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1060-1064, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287424,\n  author = {T. Dittrich and G. Matz},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Unsupervised Clustering on Signed Graphs with Unknown Number of Clusters},\n  year = {2020},\n  pages = {1060-1064},\n  abstract = {We consider the problem of unsupervised clustering on signed graphs, i.e., graphs with positive and negative edge weights. Motivated by signed cut minimization, we propose an optimization problem that minimizes the total variation of the cluster labels subject to constraints on the cluster size, augmented with a regularization that prevents clusters consisting of isolated nodes. We estimate the unknown number of clusters by tracking the change of total variation with successively increasing putative cluster numbers. Simulation results indicate that our method yields excellent results for moderately unbalanced graphs.},\n  keywords = {TV;Upper bound;Simulation;Signal processing algorithms;Clustering algorithms;Signal processing;Partitioning algorithms},\n  doi = {10.23919/Eusipco47968.2020.9287424},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001060.pdf},\n}\n\n
\n
\n\n\n
\n We consider the problem of unsupervised clustering on signed graphs, i.e., graphs with positive and negative edge weights. Motivated by signed cut minimization, we propose an optimization problem that minimizes the total variation of the cluster labels subject to constraints on the cluster size, augmented with a regularization that prevents clusters consisting of isolated nodes. We estimate the unknown number of clusters by tracking the change of total variation with successively increasing putative cluster numbers. Simulation results indicate that our method yields excellent results for moderately unbalanced graphs.\n
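For intuition on the objective mentioned above, the following toy sketch uses a common signed-cut-style cost (an assumption; it is not necessarily the paper's exact total-variation formulation): positive edges penalize split clusters and negative edges penalize merged ones, and tracking how this cost changes as the putative number of clusters grows mirrors the model-selection idea in the abstract.

```python
# Minimal sketch (hedged): a signed total-variation-like score for a labeling
# of a signed graph.  Lower is better for a clustering that keeps positively
# linked nodes together and negatively linked nodes apart.
import numpy as np

def signed_cost(edges, weights, labels):
    """edges: (E,2) int array, weights: (E,) signed floats, labels: (N,) ints."""
    i, j = edges[:, 0], edges[:, 1]
    same = (labels[i] == labels[j]).astype(float)
    pos = weights > 0
    cost = np.sum(np.abs(weights[pos]) * (1.0 - same[pos]))   # positive edge cut
    cost += np.sum(np.abs(weights[~pos]) * same[~pos])        # negative edge kept
    return cost

edges = np.array([[0, 1], [1, 2], [0, 2]])
weights = np.array([+1.0, -0.5, +0.8])
print(signed_cost(edges, weights, np.array([0, 0, 1])))
```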
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Complex-valued Neural Networks for Radar.\n \n \n \n \n\n\n \n Ouabi, O. -.; Pribić, R.; and Olaru, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1442-1446, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287425,\n  author = {O. -L. Ouabi and R. Pribić and S. Olaru},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Stochastic Complex-valued Neural Networks for Radar},\n  year = {2020},\n  pages = {1442-1446},\n  abstract = {Neural networks (NNs) prove to be performant in learning nonlinear models, but their mechanisms are yet to be fully understood. Since signal models in radar are inherently nonlinear with respect to unknown range, Doppler or angles, and moreover, radar processing is intrinsically stochastic, stochastic NNs which tie the numerical capability of NNs with the probabilistic inferences can enhance model-based radar processing. Indeed, radar data are complex-valued while most algorithms based on NNs are real-valued and furthermore, lack of uncertainty assessment. To address these issues, we elaborate, in the present paper, a stochastic complex-valued NNs framework for radar. We show that these networks can achieve parameter estimation with refined learned models from radar measurements and provide an indicator of the uncertainty on the estimation. We also build a stopping criterion based on the detection principles, so that the NNs training stops when there is noise only in data. Finally, the performances of the networks are illustrated in simulation.},\n  keywords = {Uncertainty;Parameter estimation;Stochastic processes;Artificial neural networks;Radar signal processing;Doppler radar;Numerical models;models;neural networks;radar;raw data},\n  doi = {10.23919/Eusipco47968.2020.9287425},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001442.pdf},\n}\n\n
\n
\n\n\n
\n Neural networks (NNs) have proven effective at learning nonlinear models, but their mechanisms are yet to be fully understood. Since signal models in radar are inherently nonlinear with respect to unknown range, Doppler or angles, and moreover, radar processing is intrinsically stochastic, stochastic NNs which tie the numerical capability of NNs with probabilistic inference can enhance model-based radar processing. Indeed, radar data are complex-valued while most algorithms based on NNs are real-valued and, furthermore, lack an uncertainty assessment. To address these issues, we elaborate, in the present paper, a stochastic complex-valued NN framework for radar. We show that these networks can achieve parameter estimation with refined learned models from radar measurements and provide an indicator of the uncertainty of the estimation. We also build a stopping criterion based on detection principles, so that the NN training stops when there is only noise in the data. Finally, the performance of the networks is illustrated in simulation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Review of different robust x-vector extractors for speaker verification.\n \n \n \n \n\n\n \n Rouvier, M.; Dufour, R.; and Bousquet, P. -.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1-5, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287426,\n  author = {M. Rouvier and R. Dufour and P. -M. Bousquet},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Review of different robust x-vector extractors for speaker verification},\n  year = {2020},\n  pages = {1-5},\n  abstract = {Recently, the x-vector framework, extracted with deep neural network architectures, became the state-of-the-art method for speaker verification. Although another level of performance has been overcome with this approach, fine-tuning and optimizing the hyper-parameters of a deep neural network to obtain a robust x-vector extractor is cost- and time-consuming. Several approaches have been proposed to train robust x-vector extractors. In this paper, we propose to review and analyse the impact of the most significant x-vector related approaches, including variations in terms of data augmentation, number of epochs, size of mini-batch, acoustic features and frames per iteration. By applying these approaches to the default recipe provided in the Kaldi toolkit, we observed a significant relative gain of more than 50% in terms of EER on Speaker in the Wild and Voxceleb1-E datasets.},\n  keywords = {Neural networks;Europe;Signal processing;Feature extraction;Acoustics;Task analysis;Standards;x-vector;deep neural network;speaker verification},\n  doi = {10.23919/Eusipco47968.2020.9287426},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000366.pdf},\n}\n\n
\n
\n\n\n
\n Recently, the x-vector framework, extracted with deep neural network architectures, became the state-of-the-art method for speaker verification. Although this approach has brought performance to another level, fine-tuning and optimizing the hyper-parameters of a deep neural network to obtain a robust x-vector extractor is cost- and time-consuming. Several approaches have been proposed to train robust x-vector extractors. In this paper, we propose to review and analyse the impact of the most significant x-vector related approaches, including variations in terms of data augmentation, number of epochs, size of mini-batch, acoustic features and frames per iteration. By applying these approaches to the default recipe provided in the Kaldi toolkit, we observed a significant relative gain of more than 50% in terms of EER on the Speaker in the Wild and Voxceleb1-E datasets.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Energy Harvesting via Analog-to-Digital Conversion.\n \n \n \n \n\n\n \n Jain, N.; Shlezinger, N.; Eldar, Y. C.; Gupta, A.; and Bohara, V. A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2299-2303, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287427,\n  author = {N. Jain and N. Shlezinger and Y. C. Eldar and A. Gupta and V. A. Bohara},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Energy Harvesting via Analog-to-Digital Conversion},\n  year = {2020},\n  pages = {2299-2303},\n  abstract = {Analog-to-digital converters (ADCs) allow physical signals to be processed using digital hardware. A common ADC architecture is based on sample-and-hold (S/H) circuits, where the acquired signal is observed in repeated intervals of fixed duration, referred to as the sampling time, and is not utilized for the entire duration. In this paper, we extend the structure of S/H ADCs, allowing them to harvest energy from the observed signal by modifying the circuitry during hold time of the sampling process of an ADC. This harvested energy can be used to supplement the ADC itself, paving the way to the possibility of zero power and power saving ADCs. We analyze the tradeoff between the ability to accurately recover the sampled signal and the energy harvesting which arises from the proposed ADC architecture, and provide guidelines to setting the sampling rate in light of accuracy and energy constraints. Our numerical evaluations indicate that energy harvesting ADCs operating with up to 16 bits per sample can acquire analog signals such that they can be recovered with minimal errors without requiring power from the external source.},\n  keywords = {Energy resolution;Europe;Hardware;Energy harvesting;Analog-digital conversion;Signal resolution;Guidelines;Energy harvesting;analog-to-digital conversion},\n  doi = {10.23919/Eusipco47968.2020.9287427},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002299.pdf},\n}\n\n
\n
\n\n\n
\n Analog-to-digital converters (ADCs) allow physical signals to be processed using digital hardware. A common ADC architecture is based on sample-and-hold (S/H) circuits, where the acquired signal is observed in repeated intervals of fixed duration, referred to as the sampling time, and is not utilized for the entire duration. In this paper, we extend the structure of S/H ADCs, allowing them to harvest energy from the observed signal by modifying the circuitry during the hold time of the sampling process of an ADC. This harvested energy can be used to supplement the ADC itself, paving the way to the possibility of zero-power and power-saving ADCs. We analyze the tradeoff between the ability to accurately recover the sampled signal and the energy harvesting which arises from the proposed ADC architecture, and provide guidelines for setting the sampling rate in light of accuracy and energy constraints. Our numerical evaluations indicate that energy harvesting ADCs operating with up to 16 bits per sample can acquire analog signals such that they can be recovered with minimal errors without requiring power from an external source.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Signal processing in Network Physiology: quantifying network dynamics of organ interactions.\n \n \n \n \n\n\n \n Ivanov, P. C.; Wang, J. W. J. L.; and Zhang, X.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 945-949, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287428,\n  author = {P. C. Ivanov and J. W. J. L. Wang and X. Zhang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Signal processing in Network Physiology: quantifying network dynamics of organ interactions},\n  year = {2020},\n  pages = {945-949},\n  abstract = {A fundamental problem in new field of Network Physiology is how organ systems in the human body dynamically interact to coordinate and synchronize their functions, and integrate as a network to generate distinct states and behaviours at the organism level. Physiological systems exhibit complex dynamics, operate at different time scales, and are regulated by multi-component mechanisms, which poses challenges to studying physiologic coupling and network interactions. We present a method based on the concept of time delay stability to probe transient physiologic network interactions in a group of healthy subjects during sleep. We investigate the multi-layer network structure and dynamics of interactions among (i) physiologically relevant brain rhythms within and across cortical locations, (ii) brain rhythms and key peripheral organ systems, and (iii) organ systems with each other. We demonstrate that each physiologic state (sleep stage) is characterized by a specific network structure and link strength distribution, and that the entire physiological network undergoes hierarchical reorganization across layers with transition from one stage to another. Our findings are consistent across subjects, and indicate a robust association of network structure and dynamics with physiologic state and function. The presented approach provides a new framework to explore physiologic states through networks of organ interactions.},\n  keywords = {Signal processing;Physiology;Stability analysis;Organisms;Synchronization;Transient analysis;Probes;Network Physiology;time series analysis;time delay stability;coupling;dynamic networks;brain rhythms;sleep},\n  doi = {10.23919/Eusipco47968.2020.9287428},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000945.pdf},\n}\n\n
\n
\n\n\n
\n A fundamental problem in the new field of Network Physiology is how organ systems in the human body dynamically interact to coordinate and synchronize their functions, and integrate as a network to generate distinct states and behaviours at the organism level. Physiological systems exhibit complex dynamics, operate at different time scales, and are regulated by multi-component mechanisms, which poses challenges to studying physiologic coupling and network interactions. We present a method based on the concept of time delay stability to probe transient physiologic network interactions in a group of healthy subjects during sleep. We investigate the multi-layer network structure and dynamics of interactions among (i) physiologically relevant brain rhythms within and across cortical locations, (ii) brain rhythms and key peripheral organ systems, and (iii) organ systems with each other. We demonstrate that each physiologic state (sleep stage) is characterized by a specific network structure and link strength distribution, and that the entire physiological network undergoes hierarchical reorganization across layers with the transition from one stage to another. Our findings are consistent across subjects, and indicate a robust association of network structure and dynamics with physiologic state and function. The presented approach provides a new framework to explore physiologic states through networks of organ interactions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Privacy-Preserving Distributed Graph Filtering.\n \n \n \n \n\n\n \n Li, Q.; Coutino, M.; Leus, G.; and Christensen, M. G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2155-2159, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287429,\n  author = {Q. Li and M. Coutino and G. Leus and M. G. Christensen},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Privacy-Preserving Distributed Graph Filtering},\n  year = {2020},\n  pages = {2155-2159},\n  abstract = {With an increasingly interconnected and digitized world, distributed signal processing and graph signal processing have been proposed to process its big amount of data. However, privacy has become one of the biggest challenges holding back the widespread adoption of these tools for processing sensitive data. As a step towards a solution, we demonstrate the privacy-preserving capabilities of variants of the so-called distributed graph filters. Such implementations allow each node to compute a desired linear transformation of the networked data while protecting its own private data. In particular, the proposed approach eliminates the risk of possible privacy abuse by ensuring that the private data is only available to its owner. Moreover, it preserves the distributed implementation and keeps the same communication and computational cost as its non-secure counterparts. Furthermore, we show that this computational model is secure under both passive and eavesdropping adversary models. Finally, its performance is demonstrated by numerical tests and it is shown to be a valid and competitive privacy-preserving alternative to traditional distributed optimization techniques.},\n  keywords = {Data privacy;Computational modeling;Distributed databases;Signal processing;Numerical models;Computational efficiency;Optimization;distributed computation;distributed graph filters;encryption;graph signal processing;privacy-preserving},\n  doi = {10.23919/Eusipco47968.2020.9287429},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002155.pdf},\n}\n\n
\n
\n\n\n
\n In an increasingly interconnected and digitized world, distributed signal processing and graph signal processing have been proposed to handle the large amounts of data being generated. However, privacy has become one of the biggest challenges holding back the widespread adoption of these tools for processing sensitive data. As a step towards a solution, we demonstrate the privacy-preserving capabilities of variants of the so-called distributed graph filters. Such implementations allow each node to compute a desired linear transformation of the networked data while protecting its own private data. In particular, the proposed approach eliminates the risk of possible privacy abuse by ensuring that the private data is only available to its owner. Moreover, it preserves the distributed implementation and keeps the same communication and computational cost as its non-secure counterparts. Furthermore, we show that this computational model is secure under both passive and eavesdropping adversary models. Finally, its performance is demonstrated by numerical tests and it is shown to be a valid and competitive privacy-preserving alternative to traditional distributed optimization techniques.\n
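For context, the sketch below shows the non-secure distributed FIR graph filter that such privacy-preserving variants build on (a generic illustration, not the paper's protocol): each filter tap corresponds to one round of exchanging values with direct neighbors, i.e. one application of the graph shift operator.

```python
# Minimal sketch of a (non-secure) distributed FIR graph filter:
# y = sum_k h[k] * S^k x, computed with K rounds of neighbor exchanges.
import numpy as np

def graph_filter(S, x, h):
    """Apply an FIR graph filter with shift operator S, signal x, taps h."""
    y = h[0] * x
    z = x.copy()
    for hk in h[1:]:
        z = S @ z              # one communication round with direct neighbors
        y = y + hk * z
    return y

# toy 4-node path graph, adjacency matrix used as the shift operator
S = np.array([[0, 1, 0, 0],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [0, 0, 1, 0]], dtype=float)
x = np.array([1.0, -2.0, 0.5, 3.0])       # graph signal, one value per node
h = [0.5, 0.3, 0.2]                        # filter taps (two exchange rounds)
print(graph_filter(S, x, h))
```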
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Proximal Gradient Algorithm in the Presence of Adjoint Mismatch.\n \n \n \n \n\n\n \n Savanier, M.; Chouzenoux, E.; Pesquet, J. -.; Riddell, C.; and Trousset, Y.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2140-2144, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287430,\n  author = {M. Savanier and E. Chouzenoux and J. -C. Pesquet and C. Riddell and Y. Trousset},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Proximal Gradient Algorithm in the Presence of Adjoint Mismatch},\n  year = {2020},\n  pages = {2140-2144},\n  abstract = {The proximal gradient algorithm is a popular iterative algorithm to deal with penalized least-squares minimization problems. Its simplicity and versatility allow one to embed nonsmooth penalties efficiently. In the context of inverse problems arising in signal and image processing, a major concern lies in the computational burden when implementing minimization algorithms. For instance, in tomographic image reconstruction, a bottleneck is the cost for applying the forward linear operator and its adjoint [1], [2]. Consequently, it often happens that these operators are approximated numerically, so that the adjoint property is no longer fulfilled. In this paper, we focus on the proximal gradient algorithm stability properties when such an adjoint mismatch arises. By making use of tools from convex analysis and fixed point theory, we establish conditions under which the algorithm can still converge to a fixed point. We provide bounds on the error between this point and the solution to the minimization problem. We illustrate the applicability of our theoretical results through numerical examples in the context of computed tomography.},\n  keywords = {Signal processing algorithms;Tools;Approximation algorithms;Minimization;Image reconstruction;X-ray imaging;Convergence;Proximal gradient algorithm;adjoint mismatch;convergence analysis;fixed point methods;image reconstruction;computed tomography},\n  doi = {10.23919/Eusipco47968.2020.9287430},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002140.pdf},\n}\n\n
\n
\n\n\n
\n The proximal gradient algorithm is a popular iterative algorithm to deal with penalized least-squares minimization problems. Its simplicity and versatility allow one to embed nonsmooth penalties efficiently. In the context of inverse problems arising in signal and image processing, a major concern lies in the computational burden when implementing minimization algorithms. For instance, in tomographic image reconstruction, a bottleneck is the cost for applying the forward linear operator and its adjoint [1], [2]. Consequently, it often happens that these operators are approximated numerically, so that the adjoint property is no longer fulfilled. In this paper, we focus on the proximal gradient algorithm stability properties when such an adjoint mismatch arises. By making use of tools from convex analysis and fixed point theory, we establish conditions under which the algorithm can still converge to a fixed point. We provide bounds on the error between this point and the solution to the minimization problem. We illustrate the applicability of our theoretical results through numerical examples in the context of computed tomography.\n
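A minimal numerical illustration of the setting (not the paper's analysis or error bounds): a proximal gradient iteration for an l1-regularized least-squares problem in which the exact adjoint A^T is replaced by a perturbed operator B, so the fixed point drifts away from the true minimizer.

```python
# Minimal sketch: proximal gradient for min_x 0.5*||Ax - y||^2 + lam*||x||_1,
# where the exact adjoint A.T is replaced by a mismatched operator B, as can
# happen when forward and back projectors are implemented separately.
import numpy as np

def soft_threshold(v, t):
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def prox_grad(A, B, y, lam, step, n_iter=200):
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        grad = B @ (A @ x - y)                 # B plays the role of A.T
        x = soft_threshold(x - step * grad, step * lam)
    return x

rng = np.random.default_rng(0)
A = rng.standard_normal((60, 40))
B_exact = A.T
B_mismatch = A.T + 0.01 * rng.standard_normal(A.T.shape)   # perturbed adjoint
y = A @ (rng.standard_normal(40) * (rng.random(40) < 0.2)) # sparse ground truth
step = 1.0 / np.linalg.norm(A, 2) ** 2
x_exact = prox_grad(A, B_exact, y, lam=0.1, step=step)
x_mismatch = prox_grad(A, B_mismatch, y, lam=0.1, step=step)
print(np.linalg.norm(x_exact - x_mismatch))    # gap induced by the mismatch
```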
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Online Switch-Based Hybrid Beamforming for Massive MIMO Systems.\n \n \n \n \n\n\n \n Nosrati, H.; Aboutanios, E.; and Smith, D.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1757-1761, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287431,\n  author = {H. Nosrati and E. Aboutanios and D. Smith},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Online Switch-Based Hybrid Beamforming for Massive MIMO Systems},\n  year = {2020},\n  pages = {1757-1761},\n  abstract = {Switch-based hybrid beamforming is a low-cost solution for implementing the analog segment of a hybrid beam-forming network. Although an analog beamformer comprising a network of switches allows low hardware complexity, designing such a network is computationally expensive. In this paper, we consider a single user massive multiple-input multiple-output (MIMO) system and propose a low computational complexity method for designing a switch-based hybrid precoder that maximizes the mutual information. We propose a method wherein the analog beamformer is approximated after solving a convex (concave) problem and employing low-rank matrix decomposition. Then, considering a sequence of channel realizations we frame the intermediate convex problem as an online convex optimization (OCO) and give the conditions under which the online version approaches the solution of the primary convex problem after some iterations by learning from previous steps. We finally study the performance through numerical results and demonstrate that the proposed online method offers a low complexity solution that tracks the spectral efficiency delivered by fully digital beamformer, and converges to the solution provided by direct maximization of the intermediate problem.},\n  keywords = {Hybrid beamforming;Precoding;Massive MIMO;Online Convex Optimization},\n  doi = {10.23919/Eusipco47968.2020.9287431},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001757.pdf},\n}\n\n
\n
\n\n\n
\n Switch-based hybrid beamforming is a low-cost solution for implementing the analog segment of a hybrid beamforming network. Although an analog beamformer comprising a network of switches allows low hardware complexity, designing such a network is computationally expensive. In this paper, we consider a single-user massive multiple-input multiple-output (MIMO) system and propose a low computational complexity method for designing a switch-based hybrid precoder that maximizes the mutual information. We propose a method wherein the analog beamformer is approximated after solving a convex (concave) problem and employing low-rank matrix decomposition. Then, considering a sequence of channel realizations, we frame the intermediate convex problem as an online convex optimization (OCO) problem and give the conditions under which the online version approaches the solution of the primary convex problem after some iterations by learning from previous steps. We finally study the performance through numerical results and demonstrate that the proposed online method offers a low complexity solution that tracks the spectral efficiency delivered by a fully digital beamformer, and converges to the solution provided by direct maximization of the intermediate problem.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Effective Contextual Language Modeling Framework for Speech Summarization with Augmented Features.\n \n \n \n \n\n\n \n Weng, S. -.; Lo, T. -.; and Chen, B.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 316-320, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287432,\n  author = {S. -Y. Weng and T. -H. Lo and B. Chen},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {An Effective Contextual Language Modeling Framework for Speech Summarization with Augmented Features},\n  year = {2020},\n  pages = {316-320},\n  abstract = {Tremendous amounts of multimedia associated with speech information are driving an urgent need to develop efficient and effective automatic summarization methods. To this end, we have seen rapid progress in applying supervised deep neural network-based methods to extractive speech summarization. More recently, the Bidirectional Encoder Representations from Transformers (BERT) model was proposed and has achieved record-breaking success on many natural language processing (NLP) tasks such as question answering and language understanding. In view of this, we in this paper contextualize and enhance the state-of-the-art BERT-based model for speech summarization, while its contributions are at least three-fold. First, we explore the incorporation of confidence scores into sentence representations to see if such an attempt could help alleviate the negative effects caused by imperfect automatic speech recognition (ASR). Secondly, we also augment the sentence embeddings obtained from BERT with extra structural and linguistic features, such as sentence position and inverse document frequency (IDF) statistics. Finally, we validate the effectiveness of our proposed method on a benchmark dataset, in comparison to several classic and celebrated speech summarization methods.},\n  keywords = {Bit error rate;Speech enhancement;Signal processing;Feature extraction;Natural language processing;Task analysis;Context modeling;Extractive speech summarization;BERT;speech recognition;confidence score},\n  doi = {10.23919/Eusipco47968.2020.9287432},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000316.pdf},\n}\n\n
\n
\n\n\n
\n Tremendous amounts of multimedia associated with speech information are driving an urgent need to develop efficient and effective automatic summarization methods. To this end, we have seen rapid progress in applying supervised deep neural network-based methods to extractive speech summarization. More recently, the Bidirectional Encoder Representations from Transformers (BERT) model was proposed and has achieved record-breaking success on many natural language processing (NLP) tasks such as question answering and language understanding. In view of this, in this paper we contextualize and enhance the state-of-the-art BERT-based model for speech summarization; the contributions of this work are at least three-fold. First, we explore the incorporation of confidence scores into sentence representations to see if such an attempt could help alleviate the negative effects caused by imperfect automatic speech recognition (ASR). Secondly, we also augment the sentence embeddings obtained from BERT with extra structural and linguistic features, such as sentence position and inverse document frequency (IDF) statistics. Finally, we validate the effectiveness of our proposed method on a benchmark dataset, in comparison to several classic and celebrated speech summarization methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust Drone Detection for Acoustic Monitoring Applications.\n \n \n \n \n\n\n \n Ohlenbusch, M.; Ahrens, A.; Rollwage, C.; and Bitzer, J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 6-10, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287433,\n  author = {M. Ohlenbusch and A. Ahrens and C. Rollwage and J. Bitzer},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust Drone Detection for Acoustic Monitoring Applications},\n  year = {2020},\n  pages = {6-10},\n  abstract = {Commercially available light-weight unmanned aerial vehicles (UAVs) present a challenge for public safety, e.g. espionage, transporting dangerous goods or devices. Therefore, countermeasures are necessary. Usually, detection of UAVs is a first step. Along many other modalities, acoustic detection seems promising. Recent publications show interesting results by using machine and deep learning methods. The acoustic detection of UAVs appears to be particularly difficult in adverse situations, such as in heavy wind noise or in the presence of construction noise. In this contribution, the typical feature set is extended to increase separation of background noise and the UAV signature noise. The decision algorithm utilized is support vector machine (SVM) classification. The classification is based on an extended training dataset labeled to support binary classification. The proposed method is evaluated in comparison to previously published algorithms, on the basis of a dataset recorded from different acoustic environments, including unknown UAV types. The results show an improvement over existing methods, especially in terms of false-positive detection rate. For a first step into real-time embedded systems a recursive feature elimination method is applied to reduce the model dimensionality. The results indicate only a slight decreases in detection performance.},\n  keywords = {Support vector machines;Training;Training data;Acoustics;Safety;Classification algorithms;Drones;Drone detection;UAV;public safety;binary classification;acoustic event detection;feature selection},\n  doi = {10.23919/Eusipco47968.2020.9287433},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000006.pdf},\n}\n\n
\n
\n\n\n
\n Commercially available light-weight unmanned aerial vehicles (UAVs) present a challenge for public safety, e.g. espionage or transporting dangerous goods or devices. Therefore, countermeasures are necessary, and detection of UAVs is usually a first step. Among many other modalities, acoustic detection seems promising. Recent publications show interesting results using machine and deep learning methods. The acoustic detection of UAVs appears to be particularly difficult in adverse situations, such as in heavy wind noise or in the presence of construction noise. In this contribution, the typical feature set is extended to increase the separation of background noise and the UAV signature noise. The decision algorithm utilized is support vector machine (SVM) classification. The classification is based on an extended training dataset labeled to support binary classification. The proposed method is evaluated in comparison to previously published algorithms, on the basis of a dataset recorded in different acoustic environments, including unknown UAV types. The results show an improvement over existing methods, especially in terms of false-positive detection rate. As a first step towards real-time embedded systems, a recursive feature elimination method is applied to reduce the model dimensionality. The results indicate only a slight decrease in detection performance.\n
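To make the classification pipeline concrete, here is a hedged sketch with synthetic placeholder features (the paper's acoustic feature set and recordings are not reproduced): an SVM classifier for the binary drone-vs-background decision, followed by recursive feature elimination to shrink the model for embedded deployment.

```python
# Minimal sketch (synthetic data, not the paper's dataset or exact features):
# SVM-based binary detection plus recursive feature elimination (RFE).
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.feature_selection import RFE
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline

rng = np.random.default_rng(0)
X = rng.standard_normal((500, 40))             # placeholder acoustic features
y = (X[:, :5].sum(axis=1) + 0.5 * rng.standard_normal(500) > 0).astype(int)

# full-dimensional classifier
clf = make_pipeline(StandardScaler(), LinearSVC(C=1.0, dual=False))
clf.fit(X, y)

# keep only the 10 most useful features (reduced model dimensionality)
selector = RFE(LinearSVC(C=1.0, dual=False), n_features_to_select=10, step=2)
selector.fit(StandardScaler().fit_transform(X), y)
print("selected feature indices:", np.flatnonzero(selector.support_))
```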
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Local Activation Time Estimation in Atrial Electrograms Using Cross-Correlation over Higher-Order Neighbors.\n \n \n \n \n\n\n \n Kölling, B.; Abdi, B.; de Groot , N. M. S.; and Hendriks, R. C.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 905-909, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287434,\n  author = {B. Kölling and B. Abdi and N. M. S. {de Groot} and R. C. Hendriks},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Local Activation Time Estimation in Atrial Electrograms Using Cross-Correlation over Higher-Order Neighbors},\n  year = {2020},\n  pages = {905-909},\n  abstract = {Atrial electrograms are often used to gain under-standing on the development of atrial fibrillation (AF). Using such electrograms, cardiologists can reconstruct how the de-polarization wave-front propagates across the atrium. Knowing the exact moment at which the depolarization wavefront in the tissue reaches each electrode is an important aspect of such reconstruction. A common way to determine the LAT is based on the steepest deflection (SD) of the individual electrograms. However, the SD annotates each electrogram individually and is expected to be more prone to errors compared to approaches that would employ the data from the surrounding electrodes to estimate the LAT. As electrograms from neighboring electrodes tend to have rather similar morphology up to a delay, we propose in this paper to use the cross-correlation to find the pair-wise relative delays between electrograms. Instead of only using the direct neighbors we consider the array as a graph and involve higher order neighbors as well. Using a least-squares method, the absolute LATs can then be estimated from the calculated pair-wise relative delays. Simulated and clinically recorded elec-trograms are used to evaluate the proposed approach. From the simulated data it follows that the proposed approach outperforms the SD approach.},\n  keywords = {electrocardiography;medical signal processing;local activation time estimation;atrial electrograms;cross-correlation;higher-order neighbors;atrial fibrillation;de-polarization wave-front propagates;LAT;pair-wise relative delays;SD approach;steepest deflection;least-squares method;Electrodes;Electric potential;Estimation;Morphology;Europe;Signal processing;Delays;local activation time;electrogram;cross-correlation},\n  doi = {10.23919/Eusipco47968.2020.9287434},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000905.pdf},\n}\n\n
\n
\n\n\n
\n Atrial electrograms are often used to gain understanding of the development of atrial fibrillation (AF). Using such electrograms, cardiologists can reconstruct how the depolarization wavefront propagates across the atrium. Knowing the exact moment at which the depolarization wavefront in the tissue reaches each electrode is an important aspect of such reconstruction. A common way to determine this local activation time (LAT) is based on the steepest deflection (SD) of the individual electrograms. However, the SD annotates each electrogram individually and is expected to be more prone to errors compared to approaches that would employ the data from the surrounding electrodes to estimate the LAT. As electrograms from neighboring electrodes tend to have rather similar morphology up to a delay, we propose in this paper to use the cross-correlation to find the pair-wise relative delays between electrograms. Instead of only using the direct neighbors, we consider the array as a graph and involve higher order neighbors as well. Using a least-squares method, the absolute LATs can then be estimated from the calculated pair-wise relative delays. Simulated and clinically recorded electrograms are used to evaluate the proposed approach. From the simulated data it follows that the proposed approach outperforms the SD approach.\n
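The core estimation steps described above can be illustrated with a short sketch on toy signals (not the clinical or simulated electrograms of the paper): pairwise delays are taken from cross-correlation peaks over direct and higher-order neighbor pairs, and the absolute activation times follow from a least-squares fit with one electrode fixed as reference.

```python
# Minimal sketch: pairwise delays via cross-correlation, then absolute LATs
# via least squares over the electrode graph (electrode 0 as reference).
import numpy as np

def pairwise_delay(a, b):
    """Delay (in samples) of b relative to a, from the cross-correlation peak."""
    xc = np.correlate(b, a, mode="full")
    return int(np.argmax(xc)) - (len(a) - 1)

rng = np.random.default_rng(0)
template = np.exp(-0.5 * ((np.arange(200) - 100) / 5.0) ** 2)   # spike shape
true_lat = np.array([0, 3, 7, 12])                               # in samples
signals = [np.roll(template, t) + 0.05 * rng.standard_normal(200)
           for t in true_lat]

# direct and higher-order neighbor pairs on a 1-D electrode array
pairs = [(0, 1), (1, 2), (2, 3), (0, 2), (1, 3)]
d = np.array([pairwise_delay(signals[i], signals[j]) for i, j in pairs])

# least squares: d_ij ~ lat_j - lat_i, with lat_0 fixed to 0 as reference
A = np.zeros((len(pairs), 4))
for row, (i, j) in enumerate(pairs):
    A[row, i], A[row, j] = -1.0, 1.0
lat_rest, *_ = np.linalg.lstsq(A[:, 1:], d, rcond=None)
print("estimated LATs:", np.concatenate(([0.0], lat_rest)))
```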
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Deep Learning Methods for Image Decomposition of Cervical Cells.\n \n \n \n \n\n\n \n Mahyari, T. L.; and Dansereau, R. M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1110-1114, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287435,\n  author = {T. L. Mahyari and R. M. Dansereau},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Deep Learning Methods for Image Decomposition of Cervical Cells},\n  year = {2020},\n  pages = {1110-1114},\n  abstract = {One way to solve under-determined image decomposition is to use statistical information about the type of data to be decomposed. This information can be obtained by a deep learning where convolutional neural networks (CNN) are a subset recently used widely in image processing. In this paper, we have designed a two-stage CNN that takes cytology images of overlapped cervical cells and attempts to separate the cell images. In the first stage, we designed a CNN to segment overlapping cells. In the second stage, we designed a CNN that uses this segmentation and the original image to separate the regions. We implemented a CNN similar to U-Net for image segmentation and implemented a new network for the image separation. To train and test the proposed networks, we simulated 50000 cervical cell cytology images by overlaying individual images of real cervical cells using the Beer-Lambert law. Of these 50000 images, we used 49000 images for training and evaluated the method with 1000 test images. Results on these synthetic images give more than 97% segmentation accuracy and gives decomposition SSIM scores of more than 0.99 and PSNR score of more than 30 dB. Despite these positive results, the permutation problem that commonly effects signal separation occasionally occurred resulting in some cell structure mis-separation (for example, one cell given two nucleoli and the other given none). In addition, when the segmentation was poor from the first stage, the resulting separation was poor.},\n  keywords = {biomedical optical imaging;cellular biophysics;convolutional neural nets;image segmentation;learning (artificial intelligence);medical image processing;under-determined image decomposition;statistical information;convolutional neural networks;image processing;two-stage CNN;overlapped cervical cells;cell images;segment overlapping cells;image segmentation;image separation;decomposition SSIM scores;cell structure mis-separation;deep learning methods;cervical cell cytology images;Beer-Lambert law;Deep learning;Training;Image segmentation;Source separation;Europe;Image decomposition;Convolutional neural networks;Machine learning;deep learning;image segmentation;image separation;translucent overlapped images},\n  doi = {10.23919/Eusipco47968.2020.9287435},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001110.pdf},\n}\n\n
\n
\n\n\n
\n One way to solve under-determined image decomposition is to use statistical information about the type of data to be decomposed. This information can be obtained through deep learning, where convolutional neural networks (CNNs) have recently been used widely in image processing. In this paper, we have designed a two-stage CNN that takes cytology images of overlapped cervical cells and attempts to separate the cell images. In the first stage, we designed a CNN to segment overlapping cells. In the second stage, we designed a CNN that uses this segmentation and the original image to separate the regions. We implemented a CNN similar to U-Net for image segmentation and implemented a new network for the image separation. To train and test the proposed networks, we simulated 50000 cervical cell cytology images by overlaying individual images of real cervical cells using the Beer-Lambert law. Of these 50000 images, we used 49000 images for training and evaluated the method with 1000 test images. Results on these synthetic images give more than 97% segmentation accuracy, decomposition SSIM scores of more than 0.99, and PSNR scores of more than 30 dB. Despite these positive results, the permutation problem that commonly affects signal separation occasionally occurred, resulting in some cell structure mis-separation (for example, one cell given two nucleoli and the other given none). In addition, when the segmentation was poor in the first stage, the resulting separation was poor.\n
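The Beer-Lambert overlay used to synthesize the training images can be sketched as follows (synthetic disks stand in for the real cell images, which is an assumption here): each cell contributes an optical density, densities add, and the transmitted intensities therefore multiply.

```python
# Minimal sketch: overlaying two translucent "cells" with the Beer-Lambert law.
import numpy as np

def disk_density(shape, center, radius, density):
    """Constant optical density on a disk, zero elsewhere."""
    yy, xx = np.mgrid[:shape[0], :shape[1]]
    mask = (yy - center[0]) ** 2 + (xx - center[1]) ** 2 <= radius ** 2
    return density * mask.astype(float)

shape = (128, 128)
od_a = disk_density(shape, (60, 50), 30, 0.6)    # optical density of cell A
od_b = disk_density(shape, (70, 80), 25, 0.9)    # optical density of cell B

i0 = 1.0                                         # incident light intensity
mix = i0 * np.exp(-(od_a + od_b))                # Beer-Lambert superposition
# equivalently np.exp(-od_a) * np.exp(-od_b): transmittances multiply
print(mix.min(), mix.max())
```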
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Foreground-Background Ambient Sound Scene Separation.\n \n \n \n \n\n\n \n Olvera, M.; Vincent, E.; Serizel, R.; and Gasso, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 281-285, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287436,\n  author = {M. Olvera and E. Vincent and R. Serizel and G. Gasso},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Foreground-Background Ambient Sound Scene Separation},\n  year = {2020},\n  pages = {281-285},\n  abstract = {Ambient sound scenes typically comprise multiple short events occurring on top of a somewhat stationary background. We consider the task of separating these events from the background, which we call foreground-background ambient sound scene separation. We propose a deep learning-based separation framework with a suitable feature normalization scheme and an optional auxiliary network capturing the background statistics, and we investigate its ability to handle the great variety of sound classes encountered in ambient sound scenes, which have often not been seen in training. To do so, we create single-channel foreground-background mixtures using isolated sounds from the DESED and Audioset datasets, and we conduct extensive experiments with mixtures of seen or unseen sound classes at various signal-to-noise ratios. Our experimental findings demonstrate the generalization ability of the proposed approach.},\n  keywords = {Training;Adaptation models;Protocols;Signal processing;Separation processes;Task analysis;Signal to noise ratio;Audio source separation;ambient sound scenes;generalization ability;deep learning},\n  doi = {10.23919/Eusipco47968.2020.9287436},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000281.pdf},\n}\n\n
\n
\n\n\n
\n Ambient sound scenes typically comprise multiple short events occurring on top of a somewhat stationary background. We consider the task of separating these events from the background, which we call foreground-background ambient sound scene separation. We propose a deep learning-based separation framework with a suitable feature normalization scheme and an optional auxiliary network capturing the background statistics, and we investigate its ability to handle the great variety of sound classes encountered in ambient sound scenes, which have often not been seen in training. To do so, we create single-channel foreground-background mixtures using isolated sounds from the DESED and Audioset datasets, and we conduct extensive experiments with mixtures of seen or unseen sound classes at various signal-to-noise ratios. Our experimental findings demonstrate the generalization ability of the proposed approach.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n DASP Implementation of Continuous-Time, Finite-Impulse-Response Systems.\n \n \n \n \n\n\n \n Tarczynski, A.; and Darawsheh, H. Y.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2244-2248, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287437,\n  author = {A. Tarczynski and H. Y. Darawsheh},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {DASP Implementation of Continuous-Time, Finite-Impulse-Response Systems},\n  year = {2020},\n  pages = {2244-2248},\n  abstract = {Digital Alias-free Signal Processing (DASP) uses random sampling to mitigate aliasing. This paper investigates the use of DASP for realization of continuous-time, linear, time-invariant systems with finite-duration impulse response. We propose a random sampling scheme and suitable processing algorithm to produce an estimator of the target output. The estimator is unbiased, and its variance is guaranteed to converge to zero at least at O(T) rate, where T is the average distance between consecutive sampling instants. If the input signal and system impulse response are piecewise continuous and satisfy some benign conditions, the convergence rate is at least O(T^2). But if they are continuous everywhere, the rate increases to O(T^3).},\n  keywords = {Signal processing algorithms;Europe;Signal processing;Sampling methods;System implementation;Convergence;Testing;alias-free sampling;alias-free digital signal processing;system realization;random sampling;fast convergence},\n  doi = {10.23919/Eusipco47968.2020.9287437},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002244.pdf},\n}\n\n
\n
\n\n\n
\n Digital Alias-free Signal Processing (DASP) uses random sampling to mitigate aliasing. This paper investigates the use of DASP for realization of continuous-time, linear, time-invariant systems with finite-duration impulse response. We propose a random sampling scheme and suitable processing algorithm to produce an estimator of the target output. The estimator is unbiased, and its variance is guaranteed to converge to zero at least at O(T) rate, where T is the average distance between consecutive sampling instants. If the input signal and system impulse response are piecewise continuous and satisfy some benign conditions, the convergence rate is at least O(T^2). But if they are continuous everywhere, the rate increases to O(T^3).\n
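As a generic illustration of estimating a continuous-time FIR output from randomly placed samples (this is a plain Monte-Carlo average, not the specific sampling scheme or estimator analyzed in the paper), the sketch below approximates y(t) = ∫ x(t - τ) h(τ) dτ over the impulse-response support and compares it with a dense numerical reference.

```python
# Minimal sketch (generic randomized-sampling illustration): unbiased estimate
# of a continuous-time convolution output from uniformly random sample instants.
import numpy as np

x = lambda t: np.sin(2 * np.pi * 3.0 * t)         # input signal
h = lambda tau: np.exp(-5.0 * tau)                # impulse response on [0, L]
L, t0 = 1.0, 2.0                                   # support length, output time

rng = np.random.default_rng(0)
N = 2000                                           # number of random samples
tau = rng.uniform(0.0, L, N)                       # random sampling instants
estimate = (L / N) * np.sum(x(t0 - tau) * h(tau))  # Monte-Carlo average

tau_dense = np.linspace(0.0, L, 20001)
reference = L * np.mean(x(t0 - tau_dense) * h(tau_dense))   # dense reference
print(estimate, reference)
```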
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Two-Dimensional Polynomial Predictors.\n \n \n \n \n\n\n \n Astola, J.; Neuvo, Y.; and Rusu, C.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2254-2258, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287438,\n  author = {J. Astola and Y. Neuvo and C. Rusu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {On Two-Dimensional Polynomial Predictors},\n  year = {2020},\n  pages = {2254-2258},\n  abstract = {Many signals in nature and engineering systems can be locally modeled as relatively low degree polynomials, thus one-dimensional polynomial predictive filters are useful especially in time-critical systems. The goal of this paper is to introduce the two-dimensional polynomial predictive FIR filters and present few of their properties. First we discuss previous main results in one-dimensional polynomial predictive filters. Then we show how to find the coefficients and the system functions of the minimum area polynomial predictor, and we present the recursive form for the system function of a minimum area polynomial predictor. Finally, we approach the general form of 2D polynomial predictors.},\n  keywords = {Finite impulse response filters;Two dimensional displays;Wind farms;Signal processing;Time measurement;Time factors;Wind forecasting},\n  doi = {10.23919/Eusipco47968.2020.9287438},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002254.pdf},\n}\n\n
\n
\n\n\n
\n Many signals in nature and engineering systems can be locally modeled as relatively low degree polynomials, thus one-dimensional polynomial predictive filters are useful, especially in time-critical systems. The goal of this paper is to introduce the two-dimensional polynomial predictive FIR filters and present a few of their properties. First we discuss the previous main results on one-dimensional polynomial predictive filters. Then we show how to find the coefficients and the system functions of the minimum area polynomial predictor, and we present the recursive form for the system function of a minimum area polynomial predictor. Finally, we approach the general form of 2D polynomial predictors.\n
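For background on the one-dimensional case referred to above, the sketch below constructs an N-tap FIR predictor that reproduces any polynomial of degree at most d exactly from its N previous samples; this is the standard 1-D construction solved in the minimum-norm sense, not the paper's two-dimensional minimum-area design.

```python
# Minimal sketch: a 1-D polynomial-predictive FIR filter.  The constraints
# sum_k h_k (-k)^m = delta_{m,0}, m = 0..d, make the predictor exact for all
# polynomials of degree <= d; with N > d+1 taps the system is underdetermined
# and lstsq returns the minimum-norm coefficient vector.
import numpy as np

def polynomial_predictor(N, d):
    k = np.arange(1, N + 1)                           # taps act on x[n-1..n-N]
    M = np.vstack([(-k) ** m for m in range(d + 1)])  # (d+1, N) constraint rows
    rhs = np.zeros(d + 1)
    rhs[0] = 1.0
    h, *_ = np.linalg.lstsq(M, rhs, rcond=None)
    return h

h = polynomial_predictor(N=6, d=2)
n = np.arange(50)
x = 0.3 * n ** 2 - 2.0 * n + 5.0                      # degree-2 test signal
pred = np.array([h @ x[t - 1: t - 7: -1] for t in range(10, 50)])
print(np.max(np.abs(pred - x[10:50])))                # ~0 up to round-off
```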
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast 2D Phase Retrieval using Bandlimited Masks.\n \n \n \n \n\n\n \n Cordor, C.; Williams, B.; Hristova, Y.; and Viswanathan, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 980-984, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287439,\n  author = {C. Cordor and B. Williams and Y. Hristova and A. Viswanathan},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Fast 2D Phase Retrieval using Bandlimited Masks},\n  year = {2020},\n  pages = {980-984},\n  abstract = {We propose a new phase retrieval algorithm for recovering 2D discrete signals from the squared magnitudes of their short-time Fourier transform measurements. The algorithm works by efficiently inverting (in FFT-time) a Fourier-based, physics-driven, and highly structured linear system to obtain relative phase information. The missing phases are subsequently recovered through the use of an eigenvector-based angular synchronization procedure. In addition to providing a deterministic measurement mask construction, the efficiency and robustness of the proposed method are demonstrated through numerical experiments.},\n  keywords = {eigenvalues and eigenfunctions;fast Fourier transforms;iterative methods;synchronisation;squared magnitudes;short-time Fourier transform measurements;algorithm works;FFT-time;Fourier-based algorithm;linear system;relative phase information;missing phases;deterministic measurement mask construction;fast 2D phase retrieval;bandlimited masks;phase retrieval algorithm;2D discrete signals;eigenvector-based angular synchronization procedure;Linear systems;Phase measurement;Two dimensional displays;Signal processing algorithms;Signal processing;Robustness;Synchronization;phaseless imaging;phase retrieval;bandlimited masks;ptychography;angular synchronization},\n  doi = {10.23919/Eusipco47968.2020.9287439},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000980.pdf},\n}\n\n
\n
\n\n\n
\n We propose a new phase retrieval algorithm for recovering 2D discrete signals from the squared magnitudes of their short-time Fourier transform measurements. The algorithm works by efficiently inverting (in FFT-time) a Fourier-based, physics-driven, and highly structured linear system to obtain relative phase information. The missing phases are subsequently recovered through the use of an eigenvector-based angular synchronization procedure. In addition to providing a deterministic measurement mask construction, the efficiency and robustness of the proposed method are demonstrated through numerical experiments.\n
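The eigenvector-based angular synchronization step mentioned in the abstract can be illustrated in isolation (a generic toy, not the paper's full STFT phase-retrieval pipeline): noisy relative phases are arranged in a Hermitian matrix whose leading eigenvector recovers the individual phases up to a global offset.

```python
# Minimal sketch: eigenvector-based angular synchronization from noisy
# relative-phase measurements.
import numpy as np

rng = np.random.default_rng(0)
n = 8
theta = rng.uniform(0, 2 * np.pi, n)              # unknown phases

# noisy measurements H[i, j] ~ exp(1j * (theta_i - theta_j))
H = np.exp(1j * (theta[:, None] - theta[None, :]))
H += 0.1 * (rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n)))
H = (H + H.conj().T) / 2                          # enforce Hermitian symmetry

vals, vecs = np.linalg.eigh(H)
est = np.angle(vecs[:, -1])                       # phases from leading eigenvector

# compare after removing the unobservable global phase offset
offset = np.angle(np.exp(1j * (theta - est)).mean())
err = np.angle(np.exp(1j * (theta - est - offset)))
print(np.max(np.abs(err)))
```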
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exploring State Transition Uncertainty in Variational Reinforcement Learning.\n \n \n \n \n\n\n \n Chien, J. -.; Liao, W. -.; and El Naqa, I.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1527-1531, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287440,\n  author = {J. -T. Chien and W. -L. Liao and I. {El Naqa}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Exploring State Transition Uncertainty in Variational Reinforcement Learning},\n  year = {2020},\n  pages = {1527-1531},\n  abstract = {Model-free agent in reinforcement learning (RL) generally performs well but inefficient in training process with sparse data. A practical solution is to incorporate a model-based module in model-free agent. State transition can be learned to make desirable prediction of next state based on current state and action at each time step. This paper presents a new learning representation for variational RL by introducing the so-called transition uncertainty critic based on the variational encoder-decoder network where the uncertainty of structured state transition is encoded in a model-based agent. In particular, an action-gating mechanism is carried out to learn and decode the trajectory of actions and state transitions in latent variable space. The transition uncertainty maximizing exploration (TUME) is performed according to the entropy search by using the intrinsic reward based on the uncertainty measure corresponding to different states and actions. A dedicate latent variable model with a penalty using the bias of state-action value is developed. Experiments on Cart Pole and dialogue system show that the proposed TUME considerably performs better than the other exploration methods for reinforcement learning.},\n  keywords = {Training;Uncertainty;Reinforcement learning;Signal processing;Entropy;Trajectory;Task analysis;machine learning;reward optimization},\n  doi = {10.23919/Eusipco47968.2020.9287440},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001527.pdf},\n}\n\n
\n
\n\n\n
\n A model-free agent in reinforcement learning (RL) generally performs well but is inefficient to train with sparse data. A practical solution is to incorporate a model-based module into the model-free agent. A state transition model can be learned to make a desirable prediction of the next state based on the current state and action at each time step. This paper presents a new learning representation for variational RL by introducing the so-called transition uncertainty critic based on the variational encoder-decoder network, where the uncertainty of structured state transitions is encoded in a model-based agent. In particular, an action-gating mechanism is carried out to learn and decode the trajectory of actions and state transitions in the latent variable space. The transition uncertainty maximizing exploration (TUME) is performed according to the entropy search by using the intrinsic reward based on the uncertainty measure corresponding to different states and actions. A dedicated latent variable model with a penalty using the bias of the state-action value is developed. Experiments on Cart Pole and a dialogue system show that the proposed TUME performs considerably better than other exploration methods for reinforcement learning.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Learning with Non-Smooth Objective Functions.\n \n \n \n \n\n\n \n Gratton, C.; Venkategowda, N. K. D.; Arablouei, R.; and Werner, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2180-2184, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287441,\n  author = {C. Gratton and N. K. D. Venkategowda and R. Arablouei and S. Werner},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed Learning with Non-Smooth Objective Functions},\n  year = {2020},\n  pages = {2180-2184},\n  abstract = {We develop a new distributed algorithm to solve a learning problem with non-smooth objective functions when data are distributed over a multi-agent network. We employ a zeroth-order method to minimize the associated augmented Lagrangian in the primal domain using the alternating direction method of multipliers (ADMM) to develop the proposed algorithm, named distributed zeroth-order based ADMM (D-ZOA). Unlike most existing algorithms for non-smooth optimization, which rely on calculating subgradients or proximal operators, D-ZOA only requires function values to approximate gradients of the objective function. Convergence of D-ZOA to the centralized solution is confirmed via theoretical analysis and simulation results.},\n  keywords = {Simulation;Signal processing algorithms;Linear programming;Approximation algorithms;Convex functions;Optimization;Convergence},\n  doi = {10.23919/Eusipco47968.2020.9287441},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002180.pdf},\n}\n\n
\n
\n\n\n
\n We develop a new distributed algorithm to solve a learning problem with non-smooth objective functions when data are distributed over a multi-agent network. We employ a zeroth-order method to minimize the associated augmented Lagrangian in the primal domain using the alternating direction method of multipliers (ADMM) to develop the proposed algorithm, named distributed zeroth-order based ADMM (D-ZOA). Unlike most existing algorithms for non-smooth optimization, which rely on calculating subgradients or proximal operators, D-ZOA only requires function values to approximate gradients of the objective function. Convergence of D-ZOA to the centralized solution is confirmed via theoretical analysis and simulation results.\n
\n\n\n
\n\n\n
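The zeroth-order idea behind D-ZOA in the entry above, approximating gradients from function values only, can be sketched with a standard two-point estimator applied to a non-smooth lasso-type objective. The smoothing parameter, number of random directions and plain (non-distributed) descent loop are demo assumptions, not the paper's ADMM-based algorithm.

import numpy as np

def zeroth_order_grad(f, x, mu=1e-4, n_dirs=20, rng=None):
    # Two-point zeroth-order gradient estimate: only function values are used.
    rng = rng or np.random.default_rng()
    g = np.zeros_like(x)
    for _ in range(n_dirs):
        u = rng.standard_normal(x.shape)
        g += (f(x + mu * u) - f(x)) / mu * u          # random directional difference
    return g / n_dirs

# Non-smooth example objective: least squares plus an l1 penalty
A = np.random.default_rng(1).standard_normal((30, 10))
b = A @ np.ones(10)
f = lambda x: 0.5 * np.sum((A @ x - b) ** 2) + 0.1 * np.sum(np.abs(x))

x = np.zeros(10)
for _ in range(200):
    x -= 1e-3 * zeroth_order_grad(f, x)               # plain descent for the demo
print("objective value:", float(f(x)))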
\n \n\n \n \n \n \n \n \n Bayesian Fusion of Multiview Human Crowd Detections for Autonomous UAV Fleet Safety.\n \n \n \n \n\n\n \n Kakaletsis, E.; Mademlis, I.; Nikolaidis, N.; and Pitas, I.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2473-2477, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"BayesianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287442,\n  author = {E. Kakaletsis and I. Mademlis and N. Nikolaidis and I. Pitas},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Bayesian Fusion of Multiview Human Crowd Detections for Autonomous UAV Fleet Safety},\n  year = {2020},\n  pages = {2473-2477},\n  abstract = {In this paper, a Bayesian method for fusing multiple visual human crowd detections (in the form of heatmaps) under an autonomous UAV fleet deployment setting is proposed, aiming at enhanced vision-assisted human crowd avoidance in line with common UAV safety regulations. 2D crowd heatmaps are derived using deep neural human crowd detectors on multiple UAV camera streams covering the same large-scale area over time (e.g., when each drone tracks a different target). Then, these heatmaps are back-projected onto the 3D terrain of the navigation environment. The projected crowd heatmaps are fused by exploiting a Bayesian filtering approach that favors newer crowd observations over older ones. Thus, during flight, an area is marked as crowded (therefore, a no-fly zone) if all, or most, UAV-mounted visual detectors have recently and confidently indicated crowd existence on it. Empirical evaluation on synthetic multiview video sequences depicting human crowds in outdoor environments verifies the efficiency of the proposed method against the no-fusion case.},\n  keywords = {Heating systems;Visualization;Three-dimensional displays;Video sequences;Detectors;Bayes methods;Safety;Multiview;Crowd Detection;Bayesian fusion;Linear Opinion Pool;Drone Safety},\n  doi = {10.23919/Eusipco47968.2020.9287442},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002473.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, a Bayesian method for fusing multiple visual human crowd detections (in the form of heatmaps) under an autonomous UAV fleet deployment setting is proposed, aiming at enhanced vision-assisted human crowd avoidance in line with common UAV safety regulations. 2D crowd heatmaps are derived using deep neural human crowd detectors on multiple UAV camera streams covering the same large-scale area over time (e.g., when each drone tracks a different target). Then, these heatmaps are back-projected onto the 3D terrain of the navigation environment. The projected crowd heatmaps are fused by exploiting a Bayesian filtering approach that favors newer crowd observations over older ones. Thus, during flight, an area is marked as crowded (therefore, a no-fly zone) if all, or most, UAV-mounted visual detectors have recently and confidently indicated crowd existence on it. Empirical evaluation on synthetic multiview video sequences depicting human crowds in outdoor environments verifies the efficiency of the proposed method against the no-fusion case.\n
\n\n\n
\n\n\n
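A minimal way to picture the fusion step in the entry above, where newer crowd observations are favoured over older ones, is a recursive per-cell update with a forgetting factor. The grid size, forgetting factor, log-odds form and no-fly threshold below are assumptions for the demo and need not match the paper's exact Bayesian filter or Linear Opinion Pool formulation.

import numpy as np

def fuse(fused, observation, lam):
    # Log-odds update with exponential forgetting of the running estimate,
    # so recent heatmaps dominate older ones.
    prior = lam * np.log(fused / (1 - fused))
    obs = np.log(observation / (1 - observation))
    logodds = prior + obs
    return 1.0 / (1.0 + np.exp(-logodds))

rng = np.random.default_rng(0)
H, W, lam = 32, 32, 0.8
fused = np.full((H, W), 0.5)                          # prior crowd probability per terrain cell

for t in range(10):
    obs = np.clip(rng.random((H, W)), 0.01, 0.99)     # stand-in per-UAV crowd heatmap
    fused = fuse(fused, obs, lam)

no_fly = fused > 0.7                                  # mark confidently crowded cells
print("no-fly cells:", int(no_fly.sum()))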
\n \n\n \n \n \n \n \n \n A Methodology for the Estimation of Propagation Speed of Longitudinal Waves in Tone Wood.\n \n \n \n \n\n\n \n Villa, L.; Pezzoli, M.; Antonacci, F.; and Sarti, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 66-70, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287443,\n  author = {L. Villa and M. Pezzoli and F. Antonacci and A. Sarti},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Methodology for the Estimation of Propagation Speed of Longitudinal Waves in Tone Wood},\n  year = {2020},\n  pages = {66-70},\n  abstract = {In this paper we propose a methodology for the estimation of the longitudinal wave velocity in tone wood. Differently from techniques adopted in the field of luthiery, the proposed estimation method does not require neither specific user skill nor expensive instrumentation. The introduced method exploits the impulse response of the wood block, acquired by means of accelerometers. The measured signals are processed in order to compute an estimate of the longitudinal wave velocity of the tone wood in a rake receiver fashion. We tested the technique both on synthetic data and measurements of actual tone wood blocks, showing the effectiveness of the proposed solution with respect to state-of-the-art methods.},\n  keywords = {Accelerometers;Estimation;Time measurement;Reflection;Velocity measurement;Multipath channels;Material properties;Material properties estimation;velocity measurement;rake receivers;tone wood;matched field processing},\n  doi = {10.23919/Eusipco47968.2020.9287443},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000066.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we propose a methodology for the estimation of the longitudinal wave velocity in tone wood. Unlike techniques adopted in the field of luthiery, the proposed estimation method requires neither specific user skills nor expensive instrumentation. The introduced method exploits the impulse response of the wood block, acquired by means of accelerometers. The measured signals are processed in order to compute an estimate of the longitudinal wave velocity of the tone wood in a rake receiver fashion. We tested the technique on both synthetic data and measurements of actual tone wood blocks, showing the effectiveness of the proposed solution with respect to state-of-the-art methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Computational Approach to Track Beats in Improvisational Music Performance.\n \n \n \n \n\n\n \n Xie, X.; Houghtaling, J.; Foubert, K.; and van Waterschoot , T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 166-170, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ComputationalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287444,\n  author = {X. Xie and J. Houghtaling and K. Foubert and T. {van Waterschoot}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Computational Approach to Track Beats in Improvisational Music Performance},\n  year = {2020},\n  pages = {166-170},\n  abstract = {Beat tracking, or identifying the temporal locations of beats in a musical recording, has a variety of applications that range from music information retrieval to machine listening. Algorithms designed to monitor the tempo of a musical recording have thus far been optimized for music with relatively stable rhythms, repetitive structures, and consistent melodies; these algorithms typically struggle to follow the free-form nature of improvisational music. Here, we present a multi-agent improvisation beat tracker (MAIBT) that addresses the challenges posed by improvisations and compare its performance with other state-of-the-art methods on a unique data set collected during improvisational music therapy sessions. This algorithm is designed for MIDI files and proceeds in four stages: (1) preprocessing to remove notes that are timid and overlapping, (2) clustering of the remaining notes and subsequent ranking of the clusters, (3) agent initialization and performance-based selection, and (4) artificial beat insertion and deletion to fill remaining beat gaps and create a comprehensive beat sequence. This particular method performs better than other generic beat-tracking approaches for music that lacks regularity; it is thus well suited to applications where unpredictability and inaccuracy are predominant, such as in music therapy improvisation.},\n  keywords = {Protocols;Statistical analysis;Medical treatment;Signal processing algorithms;Signal processing;Rhythm;Optimization;Beat Tracking;Improvisation;Multi-Agent;MIDI Data;Music Therapy},\n  doi = {10.23919/Eusipco47968.2020.9287444},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000166.pdf},\n}\n\n
\n
\n\n\n
\n Beat tracking, or identifying the temporal locations of beats in a musical recording, has a variety of applications that range from music information retrieval to machine listening. Algorithms designed to monitor the tempo of a musical recording have thus far been optimized for music with relatively stable rhythms, repetitive structures, and consistent melodies; these algorithms typically struggle to follow the free-form nature of improvisational music. Here, we present a multi-agent improvisation beat tracker (MAIBT) that addresses the challenges posed by improvisations and compare its performance with other state-of-the-art methods on a unique data set collected during improvisational music therapy sessions. This algorithm is designed for MIDI files and proceeds in four stages: (1) preprocessing to remove notes that are timid and overlapping, (2) clustering of the remaining notes and subsequent ranking of the clusters, (3) agent initialization and performance-based selection, and (4) artificial beat insertion and deletion to fill remaining beat gaps and create a comprehensive beat sequence. This particular method performs better than other generic beat-tracking approaches for music that lacks regularity; it is thus well suited to applications where unpredictability and inaccuracy are predominant, such as in music therapy improvisation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptation in Online Social Learning.\n \n \n \n \n\n\n \n Bordignon, V.; Matta, V.; and Sayed, A. H.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2170-2174, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287445,\n  author = {V. Bordignon and V. Matta and A. H. Sayed},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptation in Online Social Learning},\n  year = {2020},\n  pages = {2170-2174},\n  abstract = {This work studies social learning under non-stationary conditions. Although designed for online inference, traditional social learning algorithms perform poorly under drifting conditions. To mitigate this drawback, we propose the Adaptive Social Learning (ASL) strategy. This strategy lever-ages an adaptive Bayesian update, where the adaptation degree can be modulated by tuning a suitable step-size parameter. The learning performance of the ASL algorithm is examined by means of a steady-state analysis. It is shown that, under the regime of small step-sizes: i) consistent learning is possible; ii) and an accurate prediction of the performance can be furnished in terms of a Gaussian approximation.},\n  keywords = {Signal processing algorithms;Approximation algorithms;Prediction algorithms;Inference algorithms;Steady-state;Random variables;Tuning;Social learning;Bayesian update;adaptive learning;diffusion strategy},\n  doi = {10.23919/Eusipco47968.2020.9287445},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002170.pdf},\n}\n\n
\n
\n\n\n
\n This work studies social learning under non-stationary conditions. Although designed for online inference, traditional social learning algorithms perform poorly under drifting conditions. To mitigate this drawback, we propose the Adaptive Social Learning (ASL) strategy. This strategy leverages an adaptive Bayesian update, where the adaptation degree can be modulated by tuning a suitable step-size parameter. The learning performance of the ASL algorithm is examined by means of a steady-state analysis. It is shown that, in the regime of small step-sizes: i) consistent learning is possible; and ii) an accurate prediction of the performance can be furnished in terms of a Gaussian approximation.\n
\n\n\n
\n\n\n
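The "adaptive Bayesian update ... modulated by tuning a suitable step-size parameter" mentioned in the entry above can be read, in sketch form, as tempering the prior belief and the likelihood before renormalising: a small step-size yields slow but stable learning, while a larger one adapts faster to drift. The two-hypothesis Gaussian model, the value of delta and the exact exponents below are illustrative assumptions, not the ASL recursion from the paper.

import numpy as np

def adaptive_update(belief, likelihoods, delta):
    # Step-size-modulated Bayesian update: temper prior and likelihood, renormalise.
    unnorm = belief ** (1.0 - delta) * likelihoods ** delta
    return unnorm / unnorm.sum()

rng = np.random.default_rng(0)
means = np.array([0.0, 1.0])                          # two candidate hypotheses
belief = np.array([0.5, 0.5])
delta = 0.1

for t in range(300):
    true_mean = 0.0 if t < 150 else 1.0               # the environment drifts halfway through
    x = true_mean + rng.standard_normal()
    lik = np.exp(-0.5 * (x - means) ** 2)             # Gaussian likelihood of each hypothesis
    belief = adaptive_update(belief, lik, delta)

print("final belief over hypotheses:", belief)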
\n \n\n \n \n \n \n \n \n Emotional Response Analysis Using Electrodermal Activity, Electrocardiogram and Eye Tracking Signals in Drivers With Various Car Setups.\n \n \n \n \n\n\n \n Zontone, P.; Affanni, A.; Bernardini, R.; Del Linz, L.; Piras, A.; and Rinaldo, R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1160-1164, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"EmotionalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287446,\n  author = {P. Zontone and A. Affanni and R. Bernardini and L. {Del Linz} and A. Piras and R. Rinaldo},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Emotional Response Analysis Using Electrodermal Activity, Electrocardiogram and Eye Tracking Signals in Drivers With Various Car Setups},\n  year = {2020},\n  pages = {1160-1164},\n  abstract = {In the automotive industry, it is important to evaluate different car setups in order to match a professional driver's preference or to match the most acceptable setup for most drivers. Therefore, it is of great significance to devise objective and automatic procedures to assess a driver's response to different car settings. In this work, we analyze different physiological signals in order to evaluate how a particular car setup can be more or less stressful than others. In detail, we record an endosomatic Electrodermal Activity (EDA) signal, called Skin Potential Response (SPR), the Electrocardiogram (ECG) signal, and eye tracking coordinates. We eliminate motion artifacts by processing two SPR signals, one from each hand of the driver. Tests are carried out in a company that designs driving simulators, where the tested individuals had to drive along a straight highway with several lane changes. Three different car setups have been tested (neutral, understeering, and oversteering). We apply a statistical test to the data extracted from the cleaned SPR signal, and we then compare the results with the ones obtained using a Machine Learning algorithm. We show that we are able to discriminate the drivers' response to each setup, and, in particular, that the base car setup generates the least intense emotional response when compared to the understeering and the oversteering car setups.},\n  keywords = {automobiles;driver information systems;electrocardiography;emotion recognition;feature extraction;human computer interaction;human factors;learning (artificial intelligence);medical signal processing;psychology;skin;emotional response analysis;intense emotional response;base car setup;cleaned SPR signal;SPR signals;eye tracking coordinates;electrocardiogram signal;skin potential response;endosomatic electrodermal activity signal;physiological signals;car settings;automatic procedures;objective procedures;acceptable setup;professional driver;eye tracking signals;oversteering car setups;Machine learning algorithms;Gaze tracking;Skin;Physiology;Emotional responses;Automobiles;Vehicles;Skin Potential Response;Electrocardiogram;Eye Tracking;Supervised Machine Learning Algorithm;Stress Detection},\n  doi = {10.23919/Eusipco47968.2020.9287446},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001160.pdf},\n}\n\n
\n
\n\n\n
\n In the automotive industry, it is important to evaluate different car setups in order to match a professional driver's preference or to match the most acceptable setup for most drivers. Therefore, it is of great significance to devise objective and automatic procedures to assess a driver's response to different car settings. In this work, we analyze different physiological signals in order to evaluate how a particular car setup can be more or less stressful than others. In detail, we record an endosomatic Electrodermal Activity (EDA) signal, called Skin Potential Response (SPR), the Electrocardiogram (ECG) signal, and eye tracking coordinates. We eliminate motion artifacts by processing two SPR signals, one from each hand of the driver. Tests are carried out in a company that designs driving simulators, where the tested individuals had to drive along a straight highway with several lane changes. Three different car setups have been tested (neutral, understeering, and oversteering). We apply a statistical test to the data extracted from the cleaned SPR signal, and we then compare the results with the ones obtained using a Machine Learning algorithm. We show that we are able to discriminate the drivers' response to each setup, and, in particular, that the base car setup generates the least intense emotional response when compared to the understeering and the oversteering car setups.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Autoregulatory Efficiency Assessment in Kidneys Using Deep Learning.\n \n \n \n \n\n\n \n Alphonse, S.; Polichnowski, A. J.; Griffin, K. A.; Bidani, A. K.; and Williamson, G. A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1165-1169, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AutoregulatoryPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287447,\n  author = {S. Alphonse and A. J. Polichnowski and K. A. Griffin and A. K. Bidani and G. A. Williamson},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Autoregulatory Efficiency Assessment in Kidneys Using Deep Learning},\n  year = {2020},\n  pages = {1165-1169},\n  abstract = {A convolutional deep neural network is employed to assess renal autoregulation using time series of arterial blood pressure and blood flow rate measurements in conscious rats. The network is trained using representative data samples from rats with intact autoregulation and rats whose autoregulation is impaired by the calcium channel blocker amlodipine. Network performance is evaluated using test data of the types used for training, but also with data from other models for autoregulatory impairment, including different calcium channel blockers and also renal mass reduction. The network is shown to provide effective classification for impairments from calcium channel blockers. However, the assessment of autoregulation when impaired by renal mass reduction was not as clear, evidencing a different signature in the hemodynamic data for that impairment model. When calcium channel blockers were given to those animals, however, the classification again was effective.},\n  keywords = {blood flow measurement;blood pressure measurement;blood vessels;convolutional neural nets;drugs;haemodynamics;kidney;learning (artificial intelligence);autoregulatory efficiency assessment;kidneys;deep learning;convolutional deep neural network;renal autoregulation;time series;arterial blood pressure measurements;blood flow rate measurements;conscious rats;representative data samples;intact autoregulation;calcium channel blocker amlodipine;autoregulatory impairment;calcium channel blockers;renal mass reduction;hemodynamic data;impairment model;Training;Calcium;Time series analysis;Rats;Data models;Time measurement;Pressure measurement;machine learning;neural networks;biomedical signal processing;physiology;nephrology},\n  doi = {10.23919/Eusipco47968.2020.9287447},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001165.pdf},\n}\n\n
\n
\n\n\n
\n A convolutional deep neural network is employed to assess renal autoregulation using time series of arterial blood pressure and blood flow rate measurements in conscious rats. The network is trained using representative data samples from rats with intact autoregulation and rats whose autoregulation is impaired by the calcium channel blocker amlodipine. Network performance is evaluated using test data of the types used for training, but also with data from other models for autoregulatory impairment, including different calcium channel blockers and also renal mass reduction. The network is shown to provide effective classification for impairments from calcium channel blockers. However, the assessment of autoregulation when impaired by renal mass reduction was not as clear, evidencing a different signature in the hemodynamic data for that impairment model. When calcium channel blockers were given to those animals, however, the classification again was effective.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Taco-VC: A Single Speaker Tacotron based Voice Conversion with Limited Data.\n \n \n \n \n\n\n \n Levy-Leshem, R.; and Giryes, R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 391-395, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Taco-VC:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287448,\n  author = {R. Levy-Leshem and R. Giryes},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Taco-VC: A Single Speaker Tacotron based Voice Conversion with Limited Data},\n  year = {2020},\n  pages = {391-395},\n  abstract = {This paper introduces Taco-VC, a novel architecture for voice conversion based on Tacotron synthesizer, which is a sequence-to-sequence with attention model. The training of multi-speaker voice conversion systems requires a large number of resources, both in training and corpus size. Taco-VC is implemented using a single speaker Tacotron synthesizer based on Phonetic PosteriorGrams (PPGs) and a single speaker WaveNet vocoder conditioned on mel spectrograms. To enhance the converted speech quality, and to overcome over-smoothing, the outputs of Tacotron are passed through a novel speech-enhancement network, which is composed of a combination of the phoneme recognition and Tacotron networks. Our system is trained just with a single speaker corpus and adapts to new speakers using only a few minutes of training data. Using mid-size public datasets, our method outperforms the baseline in the VCC 2018 SPOKE non-parallel voice conversion task and achieves competitive results compared to multi-speaker networks trained on large private datasets.},\n  keywords = {Training;Synthesizers;Vocoders;Noise reduction;Speech recognition;Speech enhancement;Task analysis;Voice Conversion;Speech Recognition;Speech Synthesis;Adaptation},\n  doi = {10.23919/Eusipco47968.2020.9287448},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000391.pdf},\n}\n\n
\n
\n\n\n
\n This paper introduces Taco-VC, a novel architecture for voice conversion based on the Tacotron synthesizer, which is a sequence-to-sequence model with attention. The training of multi-speaker voice conversion systems requires a large number of resources, both in training and corpus size. Taco-VC is implemented using a single speaker Tacotron synthesizer based on Phonetic PosteriorGrams (PPGs) and a single speaker WaveNet vocoder conditioned on mel spectrograms. To enhance the converted speech quality, and to overcome over-smoothing, the outputs of Tacotron are passed through a novel speech-enhancement network, which is composed of a combination of the phoneme recognition and Tacotron networks. Our system is trained with just a single speaker corpus and adapts to new speakers using only a few minutes of training data. Using mid-size public datasets, our method outperforms the baseline in the VCC 2018 SPOKE non-parallel voice conversion task and achieves competitive results compared to multi-speaker networks trained on large private datasets.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Toward the Wave Digital Real-Time Emulation of Audio Circuits with Multiple Nonlinearities.\n \n \n \n \n\n\n \n Proverbio, A.; Bernardini, A.; and Sarti, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 151-155, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"TowardPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287449,\n  author = {A. Proverbio and A. Bernardini and A. Sarti},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Toward the Wave Digital Real-Time Emulation of Audio Circuits with Multiple Nonlinearities},\n  year = {2020},\n  pages = {151-155},\n  abstract = {Over the past two decades, Wave Digital Filters have been extensively used in the fields of sound synthesis through physical modeling and Virtual Analog modeling to emulate audio circuits in an efficient and modular fashion. However, as far as the implementation of circuits with multiple nonlinearities is concerned, much research effort is still needed in order to develop systematic strategies for solving the corresponding multivariate systems of implicit equations with low computational requirements. In this regard, this paper discusses the computational cost of the Scattering Iterative Method (SIM), a recently proposed iterative fixed-point algorithm that works in the Wave Digital domain and it is capable of handling circuits with J one-port nonlinearities using J separate local solvers. In the light of the computational cost analysis, we also propose a refinement of SIM relying on the so called Dynamic Scattering Matrix Recomputation (DSR) procedure. The DSR procedure significantly improves the performance of the algorithm, paving the way toward Virtual Analog applications in which SIM-based audio plugins emulating nonlinear circuits run in real-time.},\n  keywords = {Heuristic algorithms;Scattering;Signal processing algorithms;Real-time systems;Computational efficiency;Mathematical model;Digital filters;Digital Audio Signal Processing;Nonlinear Audio Circuits;Virtual Analog Modeling;Wave Digital Filters},\n  doi = {10.23919/Eusipco47968.2020.9287449},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000151.pdf},\n}\n\n
\n
\n\n\n
\n Over the past two decades, Wave Digital Filters have been extensively used in the fields of sound synthesis through physical modeling and Virtual Analog modeling to emulate audio circuits in an efficient and modular fashion. However, as far as the implementation of circuits with multiple nonlinearities is concerned, much research effort is still needed in order to develop systematic strategies for solving the corresponding multivariate systems of implicit equations with low computational requirements. In this regard, this paper discusses the computational cost of the Scattering Iterative Method (SIM), a recently proposed iterative fixed-point algorithm that works in the Wave Digital domain and is capable of handling circuits with J one-port nonlinearities using J separate local solvers. In the light of the computational cost analysis, we also propose a refinement of SIM relying on the so-called Dynamic Scattering Matrix Recomputation (DSR) procedure. The DSR procedure significantly improves the performance of the algorithm, paving the way toward Virtual Analog applications in which SIM-based audio plugins emulating nonlinear circuits run in real-time.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Rate Region of the K-user MIMO Interference Channel with Imperfect Transmitters.\n \n \n \n \n\n\n \n Soleymani, M.; Santamaria, I.; Maham, B.; and Schreier, P. J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1638-1642, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RatePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287450,\n  author = {M. Soleymani and I. Santamaria and B. Maham and P. J. Schreier},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Rate Region of the K-user MIMO Interference Channel with Imperfect Transmitters},\n  year = {2020},\n  pages = {1638-1642},\n  abstract = {This paper studies the rate region of a multiple-input, multiple-output (MIMO) system with imperfect transmitters when interference is treated as noise at the receiver side. We consider a K-user MIMO interference channel (IC) in which the transmitters suffer from an additive hardware distortion (HWD) modeled as spatially uncorrelated Gaussian noise with covariance matrix proportional to the transmit covariance matrix. We employ the difference of convex programming (DCP) technique to solve the rate-region optimization problem and obtain its stationary points. Our proposed HWD-aware algorithm outperforms the HWD-unaware design that disregards HWD. Our results show that the performance of the K-user MIMO IC is highly affected by HWD, especially in high signal-to-noise-ratio scenarios.},\n  keywords = {MIMO communication;Optimization;Additives;Covariance matrices;Performance evaluation;Hardware;Wireless communication;Achievable rate region;additive hardware distortions;difference of convex programming;interference channel;MIMO systems},\n  doi = {10.23919/Eusipco47968.2020.9287450},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001638.pdf},\n}\n\n
\n
\n\n\n
\n This paper studies the rate region of a multiple-input, multiple-output (MIMO) system with imperfect transmitters when interference is treated as noise at the receiver side. We consider a K-user MIMO interference channel (IC) in which the transmitters suffer from an additive hardware distortion (HWD) modeled as spatially uncorrelated Gaussian noise with covariance matrix proportional to the transmit covariance matrix. We employ the difference of convex programming (DCP) technique to solve the rate-region optimization problem and obtain its stationary points. Our proposed HWD-aware algorithm outperforms the HWD-unaware design that disregards HWD. Our results show that the performance of the K-user MIMO IC is highly affected by HWD, especially in high signal-to-noise-ratio scenarios.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n PRNU-leaks: facts and remedies.\n \n \n \n \n\n\n \n Pérez-González, F.; and Fernández-Menduiña, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 720-724, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"PRNU-leaks:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287451,\n  author = {F. Pérez-González and S. Fernández-Menduiña},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {PRNU-leaks: facts and remedies},\n  year = {2020},\n  pages = {720-724},\n  abstract = {We address the problem of information leakage from estimates of the PhotoResponse Non-Uniformity (PRNU) fingerprints of a sensor. This leakage may compromise privacy in forensic scenarios, as it may reveal information from the images used in the PRNU estimation. We propose a new way to compute the information-theoretic leakage that is based on embedding synthetic PRNUs, and presesent affordable approximations and bounds. We also propose a new compact measure for the performance in membership inference tests. Finally, we analyze two potential countermeasures against leakage: binarization, which was already used in PRNU-storage contexts, and equalization, which is novel and offers better performance. Theoretical results are validated with experiments carried out on a real-world image dataset.},\n  keywords = {Degradation;Privacy;Forensics;Europe;Estimation;Fingerprint recognition;Signal processing;Fingerprint;PRNU;Leakage;Information theory;Membership inference},\n  doi = {10.23919/Eusipco47968.2020.9287451},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000720.pdf},\n}\n\n
\n
\n\n\n
\n We address the problem of information leakage from estimates of the PhotoResponse Non-Uniformity (PRNU) fingerprints of a sensor. This leakage may compromise privacy in forensic scenarios, as it may reveal information from the images used in the PRNU estimation. We propose a new way to compute the information-theoretic leakage that is based on embedding synthetic PRNUs, and present affordable approximations and bounds. We also propose a new compact measure for the performance in membership inference tests. Finally, we analyze two potential countermeasures against leakage: binarization, which was already used in PRNU-storage contexts, and equalization, which is novel and offers better performance. Theoretical results are validated with experiments carried out on a real-world image dataset.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Noninvasive Assessment of Spatio-Temporal Recurrence in Atrial Fibrillation.\n \n \n \n \n\n\n \n Bonizzi, P.; Zeemering, S.; van Rosmalen , F.; Schotten, U.; and Karel, J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 900-904, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"NoninvasivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287452,\n  author = {P. Bonizzi and S. Zeemering and F. {van Rosmalen} and U. Schotten and J. Karel},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Noninvasive Assessment of Spatio-Temporal Recurrence in Atrial Fibrillation},\n  year = {2020},\n  pages = {900-904},\n  abstract = {Propagation of Atrial Activity during atrial fibrillation (AF) is a complex phenomenon characterized by a certain degree of recurrence (periodic repetition). In this study, we investigated the possibility to detect recurrence noninvasively from body surface potential map recordings in patients affected by persistent AF, and localize this recurrence both in time and space. Results showed that clusters of recurrence can be identified from body surface recordings in these patients. Moreover, the number of clusters detected and their location on the top-right of the back of the torso were significantly associated with AF recurrence 4 to 6 weeks after electrical cardioversion. This suggests that noninvasive quantification of recurrence in persistent AF patients is possible, and may contribute to improve patient stratification.},\n  keywords = {diseases;electrocardiography;medical signal processing;surface potential;noninvasive assessment;spatio-temporal recurrence;atrial fibrillation;atrial activity;periodic repetition;body surface potential map recordings;body surface recordings;AF recurrence;noninvasive quantification;persistent AF patients;time 4.0 week to 6.0 week;Torso;Atrial fibrillation;Europe;Signal processing;Complexity theory;Substrates;Atrial fibrillation;Noninvasive;Recurrence analysis;AF substrate complexity},\n  doi = {10.23919/Eusipco47968.2020.9287452},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000900.pdf},\n}\n\n
\n
\n\n\n
\n Propagation of Atrial Activity during atrial fibrillation (AF) is a complex phenomenon characterized by a certain degree of recurrence (periodic repetition). In this study, we investigated the possibility to detect recurrence noninvasively from body surface potential map recordings in patients affected by persistent AF, and localize this recurrence both in time and space. Results showed that clusters of recurrence can be identified from body surface recordings in these patients. Moreover, the number of clusters detected and their location on the top-right of the back of the torso were significantly associated with AF recurrence 4 to 6 weeks after electrical cardioversion. This suggests that noninvasive quantification of recurrence in persistent AF patients is possible, and may contribute to improve patient stratification.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast VP9-to-AV1 Transcoding based on Block Partitioning Inheritance.\n \n \n \n \n\n\n \n Borges, A.; Palomino, D.; Zatt, B.; Porto, M.; and Correa, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 555-559, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"FastPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287453,\n  author = {A. Borges and D. Palomino and B. Zatt and M. Porto and G. Correa},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Fast VP9-to-AV1 Transcoding based on Block Partitioning Inheritance},\n  year = {2020},\n  pages = {555-559},\n  abstract = {This paper proposes a fast VP9-to-AV1 video transcoding algorithm based on block partitioning inheritance. The proposed algorithm relies on the reuse of VP9 block partitioning during the AV1 re-encoding process. This way, the exhaustive search for the best block size option is avoided to save encoding time. The reuse of VP9 block partitioning is proposed based on a statiscal analysis that shows the relation of block parititioning sizes between VP9 and AV1. The analysis demontrates that there is a high probability of the AV1 encoding process to choose block sizes of the same size as in the VP9 encoding. Experimental results show that the proposed algorithm is able to accelerate the VP9-to-AV1 transcoding process by 28% on average at the cost of only 4% increase in the BD-Rate when compared with the complete decoding and reencoding process.},\n  keywords = {Signal processing algorithms;Transcoding;Companies;Streaming media;Partitioning algorithms;Decoding;Acceleration;AV1;VP9;transcoding;video coding},\n  doi = {10.23919/Eusipco47968.2020.9287453},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000555.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a fast VP9-to-AV1 video transcoding algorithm based on block partitioning inheritance. The proposed algorithm relies on the reuse of VP9 block partitioning during the AV1 re-encoding process. This way, the exhaustive search for the best block size option is avoided to save encoding time. The reuse of VP9 block partitioning is proposed based on a statistical analysis that shows the relation of block partitioning sizes between VP9 and AV1. The analysis demonstrates that there is a high probability that the AV1 encoding process chooses block sizes of the same size as in the VP9 encoding. Experimental results show that the proposed algorithm is able to accelerate the VP9-to-AV1 transcoding process by 28% on average at the cost of only a 4% increase in the BD-Rate when compared with the complete decoding and re-encoding process.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hardware Architecture for Ultra-Wideband Channel Impulse Response Measurements Using Compressed Sensing.\n \n \n \n \n\n\n \n Wagner, C. W.; Semper, S.; Römer, F.; Schönfeld, A.; and Del Galdo, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1663-1667, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"HardwarePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287454,\n  author = {C. W. Wagner and S. Semper and F. Römer and A. Schönfeld and G. {Del Galdo}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Hardware Architecture for Ultra-Wideband Channel Impulse Response Measurements Using Compressed Sensing},\n  year = {2020},\n  pages = {1663-1667},\n  abstract = {We propose a compact hardware architecture for measuring sparse channel impulse responses (IR) by extending the M-Sequence ultra-wideband (UWB) measurement principle with the concept of compressed sensing. A channel is excited with a periodic M-sequence and its response signal is observed using a Random Demodulator (RD), which observes pseudo-random linear combinations of the response signal at a rate significantly lower than the measurement bandwidth. The excitation signal and the RD mixing signal are generated from compactly implementable Linear Feedback Shift registers (LFSR) and operated from a common clock. A linear model is derived that allows retrieving an IR from a set of observations using Sparse-Signal-Recovery (SSR). A Matrix-free model implementation is possible due to the choice of synchronous LFSRs as signal generators, resulting in low computational complexity. For validation, real measurement data of a time-variant channel containing multipath components is processed by simulation models of our proposed architecture and the classic M-Sequence method. We show successful IR recovery using our architecture and SSR, outperforming the classic method significantly in terms of IR measurement rate. Compared to the classic method, the proposed architecture allows faster measurements of sparse time-varying channels, resulting in higher Doppler tolerance without increasing hardware or data stream complexity.},\n  keywords = {Computational modeling;Computer architecture;Hardware;Data models;Doppler effect;Integrated circuit modeling;Ultra wideband technology;Hardware Architecture;Ultra-Wideband;Impulse Response Measurement;Random Demodulator;Compressed Sensing},\n  doi = {10.23919/Eusipco47968.2020.9287454},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001663.pdf},\n}\n\n
\n
\n\n\n
\n We propose a compact hardware architecture for measuring sparse channel impulse responses (IR) by extending the M-Sequence ultra-wideband (UWB) measurement principle with the concept of compressed sensing. A channel is excited with a periodic M-sequence and its response signal is observed using a Random Demodulator (RD), which observes pseudo-random linear combinations of the response signal at a rate significantly lower than the measurement bandwidth. The excitation signal and the RD mixing signal are generated from compactly implementable Linear Feedback Shift registers (LFSR) and operated from a common clock. A linear model is derived that allows retrieving an IR from a set of observations using Sparse-Signal-Recovery (SSR). A Matrix-free model implementation is possible due to the choice of synchronous LFSRs as signal generators, resulting in low computational complexity. For validation, real measurement data of a time-variant channel containing multipath components is processed by simulation models of our proposed architecture and the classic M-Sequence method. We show successful IR recovery using our architecture and SSR, outperforming the classic method significantly in terms of IR measurement rate. Compared to the classic method, the proposed architecture allows faster measurements of sparse time-varying channels, resulting in higher Doppler tolerance without increasing hardware or data stream complexity.\n
\n\n\n
\n\n\n
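To make the Random Demodulator observation model in the entry above concrete, the toy sketch below multiplies a sparse impulse response by a ±1 chipping sequence and integrates over blocks, producing measurements at a rate well below the Nyquist rate; the equivalent sensing matrix can then be handed to any sparse-recovery solver. The sizes, the direct observation of the impulse response (ignoring the M-sequence excitation) and the dense matrix construction are simplifying assumptions, not the paper's LFSR-based hardware.

import numpy as np

rng = np.random.default_rng(0)
N, R = 256, 32                                        # Nyquist-rate samples, low-rate measurements
p = rng.choice([-1.0, 1.0], size=N)                   # pseudo-random mixing (chipping) sequence

# Sparse impulse response with a few multipath taps
h = np.zeros(N)
h[[10, 47, 130]] = [1.0, 0.6, -0.4]

# Mix with p, then integrate-and-dump over blocks of N // R samples
y = (p * h).reshape(R, N // R).sum(axis=1)

# Equivalent linear model y = Phi @ h for a sparse-recovery solver
Phi = np.kron(np.eye(R), np.ones(N // R)) * p         # R x N sensing matrix
assert np.allclose(Phi @ h, y)
print("rate reduction factor:", N // R)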
\n \n\n \n \n \n \n \n \n Near-field source localization of quasi-stationary signals with increased degrees of freedom.\n \n \n \n \n\n\n \n Pan, J.; Sun, M.; Wang, Y.; and Zhang, X.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1777-1781, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Near-fieldPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287455,\n  author = {J. Pan and M. Sun and Y. Wang and X. Zhang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Near-field source localization of quasi-stationary signals with increased degrees of freedom},\n  year = {2020},\n  pages = {1777-1781},\n  abstract = {Near-field source localization has attracted much interest in many applications of radar, sonar, speech and seismology. In near-field, both the direction of arrival (DOA) and range are required for the localization. In order to reduce computational burden, a lot of methods perform 1-D estimation procedures based on symmetric array configurations, but with degrees of freedom about the half of the number of sensors. In this paper, we consider the near-field source localization problem with increased degrees of freedom, by using the property of quasi-stationary signals and Khatri-Rao product. In order to reduce the computational burden, we adopt the dimension-reduced technique by splitting the steering vector in terms of DOA and range. The DOAs are estimated through a one-dimensional search and the ranges are obtained with the estimated DOAs without searching or pairing. Simulations are provided to show the effectiveness of the proposed method.},\n  keywords = {Direction-of-arrival estimation;Sonar applications;Estimation;Signal processing;Sensors;Multiple signal classification;Sensor arrays;Near-field;source localization;quasi-stationary signal},\n  doi = {10.23919/Eusipco47968.2020.9287455},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001777.pdf},\n}\n\n
\n
\n\n\n
\n Near-field source localization has attracted much interest in many applications of radar, sonar, speech and seismology. In the near-field, both the direction of arrival (DOA) and the range are required for localization. To reduce the computational burden, many methods perform 1-D estimation procedures based on symmetric array configurations, but with degrees of freedom of only about half the number of sensors. In this paper, we consider the near-field source localization problem with increased degrees of freedom, by using the property of quasi-stationary signals and the Khatri-Rao product. To further reduce the computational burden, we adopt a dimension-reduction technique by splitting the steering vector in terms of DOA and range. The DOAs are estimated through a one-dimensional search and the ranges are obtained with the estimated DOAs without searching or pairing. Simulations are provided to show the effectiveness of the proposed method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hyperspectral Unmixing with Rare Endmembers via Minimax Nonnegative Matrix Factorization.\n \n \n \n \n\n\n \n Marrinan, T.; and Gillis, N.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1015-1019, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"HyperspectralPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287456,\n  author = {T. Marrinan and N. Gillis},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Hyperspectral Unmixing with Rare Endmembers via Minimax Nonnegative Matrix Factorization},\n  year = {2020},\n  pages = {1015-1019},\n  abstract = {Hyperspectral images are used for ground-cover classification because many materials can be identified by their spectral signature, even in images with low spatial resolution. Pixels in such an image are often modeled as a convex combination of vectors, called endmembers, that correspond to the reflectance of a material to different wavelengths of light. This is the so-called linear mixing model. Since reflectance is inherently nonnegative, the task of unmixing hyperspectral pixels can be posed as a low-rank nonnegative matrix factorization (NMF) problem, where the data matrix is decomposed into the product of the estimated endmembers and their abundances in the scene. The standard NMF problem then minimizes the residual of the decomposition. Thus, using NMF works well when materials are present in similar amounts, but if some materials are under-represented, they may be missed with this formulation. Alternatively, we propose a novel hyperspectral unmixing model using a collection of NMF subproblems solved for patches of the original image. The endmembers are estimated jointly, such that the the maximum residual across all patches is minimized. In this paper we estimate the solution to the patch-based minimax NMF model, and show that it can estimate rare endmembers with superior accuracy.},\n  keywords = {Reflectivity;Signal processing algorithms;Matrix decomposition;Task analysis;Spatial resolution;Standards;Hyperspectral imaging;nonnegative matrix factorization;hyperspectral unmixing;minimax;approximate subgradient;low-rank},\n  doi = {10.23919/Eusipco47968.2020.9287456},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001015.pdf},\n}\n\n
\n
\n\n\n
\n Hyperspectral images are used for ground-cover classification because many materials can be identified by their spectral signature, even in images with low spatial resolution. Pixels in such an image are often modeled as a convex combination of vectors, called endmembers, that correspond to the reflectance of a material to different wavelengths of light. This is the so-called linear mixing model. Since reflectance is inherently nonnegative, the task of unmixing hyperspectral pixels can be posed as a low-rank nonnegative matrix factorization (NMF) problem, where the data matrix is decomposed into the product of the estimated endmembers and their abundances in the scene. The standard NMF problem then minimizes the residual of the decomposition. Thus, using NMF works well when materials are present in similar amounts, but if some materials are under-represented, they may be missed with this formulation. Alternatively, we propose a novel hyperspectral unmixing model using a collection of NMF subproblems solved for patches of the original image. The endmembers are estimated jointly, such that the maximum residual across all patches is minimized. In this paper we estimate the solution to the patch-based minimax NMF model, and show that it can estimate rare endmembers with superior accuracy.\n
\n\n\n
\n\n\n
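One crude way to picture the "minimise the maximum patch residual" idea from the entry above: keep a single shared endmember matrix W and per-patch abundance matrices, and at each iteration apply a standard multiplicative NMF update driven by whichever patch currently fits worst. The data sizes, update rule and iteration count below are illustrative assumptions and do not reproduce the paper's approximate-subgradient method.

import numpy as np

rng = np.random.default_rng(0)
bands, r, eps = 20, 3, 1e-9
W_true = rng.random((bands, r))
patches = [W_true @ rng.dirichlet(np.ones(r), size=50).T for _ in range(4)]

W = rng.random((bands, r))                            # shared endmember estimate
H = [rng.random((r, p.shape[1])) for p in patches]    # per-patch abundances

for _ in range(500):
    residuals = [np.linalg.norm(p - W @ h) for p, h in zip(patches, H)]
    k = int(np.argmax(residuals))                     # worst-fitting patch drives the update
    X = patches[k]
    H[k] *= (W.T @ X) / (W.T @ W @ H[k] + eps)        # standard multiplicative NMF steps
    W *= (X @ H[k].T) / (W @ H[k] @ H[k].T + eps)

print("max patch residual:",
      float(max(np.linalg.norm(p - W @ h) for p, h in zip(patches, H))))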
\n \n\n \n \n \n \n \n \n Generic Compression of Off-The-Air Radio Frequency Signals with Grouped-Bin FFT Quantisation.\n \n \n \n \n\n\n \n Muir, D.; Crockett, L. H.; and Stewart, R. W.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1767-1771, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"GenericPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287457,\n  author = {D. Muir and L. H. Crockett and R. W. Stewart},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Generic Compression of Off-The-Air Radio Frequency Signals with Grouped-Bin FFT Quantisation},\n  year = {2020},\n  pages = {1767-1771},\n  abstract = {This paper studies the capabilities of a proposed lossy, grouped-bin FFT quantisation compression method for targeting Off-The-Air (OTA) Radio Frequency (RF) signals. The bins within a 512-point Fast Fourier Transform (FFT) are split into groups of adjacent bins, and these groups are each quantised separately. Additional compression can be achieved by setting groups which are not deemed to contain significant information to zero, based on a pre-defined minimum magnitude threshold. In this paper, we propose two alternative methods for quantising the remaining groups. The first of these, Grouped-bin FFT Threshold Quantisation (GFTQ), involves allocating quantisation wordlengths based on several pre-defined magnitude thresholds. The second, Grouped-bin FFT Error Quantisation (GFEQ), involves incrementing the quantisation wordlength for each group until the calculated quantisation error falls below a minimum error threshold. Both algorithms were tested for a variety of signal types, including Digital Private Mobile Radio 446 MHz (dPMR446), which was considered as a case study. While GFTQ allowed for higher Compression Ratios (CR), the compression process resulted in added quantisation noise. The GFEQ algorithm achieved lower CRs, but also lower noise levels across all test signals.},\n  keywords = {Quantization (signal);OFDM;Frequency-domain analysis;RF signals;Redundancy;Signal processing algorithms;Compression algorithms;radio;compression;generic;RF},\n  doi = {10.23919/Eusipco47968.2020.9287457},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001767.pdf},\n}\n\n
\n
\n\n\n
\n This paper studies the capabilities of a proposed lossy, grouped-bin FFT quantisation compression method for targeting Off-The-Air (OTA) Radio Frequency (RF) signals. The bins within a 512-point Fast Fourier Transform (FFT) are split into groups of adjacent bins, and these groups are each quantised separately. Additional compression can be achieved by setting groups which are not deemed to contain significant information to zero, based on a pre-defined minimum magnitude threshold. In this paper, we propose two alternative methods for quantising the remaining groups. The first of these, Grouped-bin FFT Threshold Quantisation (GFTQ), involves allocating quantisation wordlengths based on several pre-defined magnitude thresholds. The second, Grouped-bin FFT Error Quantisation (GFEQ), involves incrementing the quantisation wordlength for each group until the calculated quantisation error falls below a minimum error threshold. Both algorithms were tested for a variety of signal types, including Digital Private Mobile Radio 446 MHz (dPMR446), which was considered as a case study. While GFTQ allowed for higher Compression Ratios (CR), the compression process resulted in added quantisation noise. The GFEQ algorithm achieved lower CRs, but also lower noise levels across all test signals.\n
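A toy sketch of the grouping and thresholding idea is given below; the group size, magnitude threshold and wordlength are illustrative guesses, and a single fixed wordlength is used instead of the adaptive GFTQ/GFEQ allocation rules described in the paper.

```python
import numpy as np

def grouped_bin_quantise(x, group_size=8, mag_threshold=1e-3, wordlength=8):
    """Toy grouped-bin FFT quantiser (all parameters are illustrative guesses).

    The 512-point FFT is split into groups of adjacent bins; groups whose peak magnitude
    falls below the threshold are zeroed, and the remaining groups are uniformly quantised
    with one fixed wordlength (the paper instead adapts the wordlength per group, either
    from magnitude thresholds or from a target quantisation error).
    """
    X = np.fft.fft(x, n=512)
    groups = X.reshape(-1, group_size)
    keep = np.abs(groups).max(axis=1) >= mag_threshold
    groups[~keep] = 0.0
    scale = np.abs(groups).max()
    scale = scale if scale > 0 else 1.0
    levels = 2 ** (wordlength - 1)
    quantised = np.round(groups / scale * levels) / levels * scale
    return quantised.reshape(-1), keep
```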
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the Use of Dictionary Learning in Time Series Imputation.\n \n \n \n \n\n\n \n Zheng, X.; Dumitrescu, B.; Liu, J.; and Giurcăneanu, C. D.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2016-2020, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287458,\n  author = {X. Zheng and B. Dumitrescu and J. Liu and C. D. Giurcăneanu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {On the Use of Dictionary Learning in Time Series Imputation},\n  year = {2020},\n  pages = {2016-2020},\n  abstract = {In this work, we show how dictionary learning (DL) can be employed in the imputation of univariate and multivariate time series. In the multivariate case, we propose to use a structured dictionary. The size of the dictionary and the sparsity level are selected by information theoretic criteria. We also evaluate the effect of removing the trend/seasonality before applying DL. We conduct an extensive experimental study on real-life data. The positions of the missing data are simulated by applying two strategies: (i) sampling without replacement, which leads to isolated occurrences of the missing data, and (ii) sampling via Polya urn model that is likely to produce long sequences of missing data. In all scenarios, the novel DL-based methods compare favorably with the state-of-the-art.},\n  keywords = {time series;dictionary learning;time series imputation;univariate time series;multivariate time series;multivariate case;structured dictionary;sparsity level;information theoretic criteria;real-life data;DL-based methods;Dictionaries;Time series analysis;Europe;Machine learning;Signal processing;Data models;Time series;missing data;dictionary learning;Polya urn;information theoretic criteria},\n  doi = {10.23919/Eusipco47968.2020.9287458},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002016.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we show how dictionary learning (DL) can be employed in the imputation of univariate and multivariate time series. In the multivariate case, we propose to use a structured dictionary. The size of the dictionary and the sparsity level are selected by information theoretic criteria. We also evaluate the effect of removing the trend/seasonality before applying DL. We conduct an extensive experimental study on real-life data. The positions of the missing data are simulated by applying two strategies: (i) sampling without replacement, which leads to isolated occurrences of the missing data, and (ii) sampling via Polya urn model that is likely to produce long sequences of missing data. In all scenarios, the novel DL-based methods compare favorably with the state-of-the-art.\n
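The following sketch illustrates the masked sparse-coding step that makes dictionary-based imputation possible, assuming a dictionary learned beforehand (here with scikit-learn for simplicity) rather than the structured dictionary and information-theoretic model selection used in the paper.

```python
import numpy as np
from sklearn.decomposition import DictionaryLearning
from sklearn.linear_model import orthogonal_mp

def impute_window(window, observed, dictionary, sparsity=3):
    """Fill the missing samples of one time-series window (illustrative only).

    `window` is a 1-D array with arbitrary values at missing positions, `observed` is a
    boolean mask of the same length, and `dictionary` is an (n_atoms, window_length)
    array learned beforehand on complete windows. The sparse code is fitted on the
    observed samples only and then used to reconstruct the gaps.
    """
    D_obs = dictionary[:, observed]                         # atoms restricted to observed samples
    code = orthogonal_mp(D_obs.T, window[observed], n_nonzero_coefs=sparsity)
    estimate = dictionary.T @ code                          # full-window reconstruction
    filled = window.copy()
    filled[~observed] = estimate[~observed]
    return filled

# hypothetical usage: learn the dictionary on fully observed windows first, e.g.
# dictionary = DictionaryLearning(n_components=32).fit(complete_windows).components_
```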
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Optimization Framework for Regularized Linearly Coupled Matrix-Tensor Factorization.\n \n \n \n \n\n\n \n Schenker, C.; Cohen, J. E.; and Acar, E.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 985-989, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287459,\n  author = {C. Schenker and J. E. Cohen and E. Acar},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {An Optimization Framework for Regularized Linearly Coupled Matrix-Tensor Factorization},\n  year = {2020},\n  pages = {985-989},\n  abstract = {An effective way of jointly analyzing data from multiple sources, in other words, data fusion, is to formulate the problem as a coupled matrix and tensor factorization (CMTF) problem. However, one major challenge in data fusion is that due to eclectic characteristics of data stemming from different sources, various constraints and different types of coupling between data sets should be incorporated. In this paper, we propose a flexible and efficient algorithmic framework building onto Alternating Optimization (AO) and Alternating Direction Method of Multipliers (ADMM) for coupled matrix and tensor factorizations incorporating a variety of constraints and coupling with linear transformations. Numerical experiments demonstrate that the proposed approach is accurate, computationally efficient with comparable or better performance than available CMTF methods while being also more flexible.},\n  keywords = {Couplings;Tensors;Signal processing algorithms;Data integration;Europe;Signal processing;Optimization;tensor factorizations;coupled tensor factorizations;linear couplings;AO-ADMM},\n  doi = {10.23919/Eusipco47968.2020.9287459},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000985.pdf},\n}\n\n
\n
\n\n\n
\n An effective way of jointly analyzing data from multiple sources, in other words, data fusion, is to formulate the problem as a coupled matrix and tensor factorization (CMTF) problem. However, one major challenge in data fusion is that, due to the eclectic characteristics of data stemming from different sources, various constraints and different types of coupling between data sets should be incorporated. In this paper, we propose a flexible and efficient algorithmic framework building on Alternating Optimization (AO) and the Alternating Direction Method of Multipliers (ADMM) for coupled matrix and tensor factorizations incorporating a variety of constraints and coupling with linear transformations. Numerical experiments demonstrate that the proposed approach is accurate and computationally efficient, with comparable or better performance than available CMTF methods, while also being more flexible.\n
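As a point of reference, the block below states a generic coupled matrix–tensor objective in which a tensor and a matrix share one factor; it is only an illustrative special case, not necessarily the exact regularized, linearly coupled formulation optimized with AO-ADMM in the paper.

```latex
\min_{\mathbf{A},\mathbf{B},\mathbf{C},\mathbf{D}}\;
\tfrac{1}{2}\bigl\|\mathcal{X} - [\![\mathbf{A},\mathbf{B},\mathbf{C}]\!]\bigr\|_F^2
\;+\;\tfrac{1}{2}\bigl\|\mathbf{Y} - \mathbf{A}\mathbf{D}^{\top}\bigr\|_F^2
\;+\; r_A(\mathbf{A}) + r_B(\mathbf{B}) + r_C(\mathbf{C}) + r_D(\mathbf{D})
```

Here [[A, B, C]] denotes the CP model of the tensor X, the matrix Y is coupled to it through the shared factor A, and the r terms collect the constraints and regularizers handled by the ADMM inner iterations.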
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Interpolating and translation-invariant approximations of parametric dictionaries.\n \n \n \n \n\n\n \n Champagnat, F.; and Herzet, C.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2011-2015, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"InterpolatingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287460,\n  author = {F. Champagnat and C. Herzet},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Interpolating and translation-invariant approximations of parametric dictionaries},\n  year = {2020},\n  pages = {2011-2015},\n  abstract = {In this paper, we address the problem of approximating the atoms of a parametric dictionary ${\\mathcal{A}} = \\left\\{ {a\\left(\\theta\\right):\\theta \\in \\Theta } \\right\\}$, commonly encountered in the context of sparse representations in {"}continuous{"} dictionaries. We focus on the case of translation-invariant dictionaries, where the inner product between a(θ) and a(θ′) only depends on the difference θ – θ′. We investigate the following general question: is there some low-rank approximation of ${\\mathcal{A}}$ which interpolates a subset of atoms $\\left\\{ {a\\left({{\\theta _j}}\\right)} \\right\\}_{j = 1}^J$ while preserving the translation-invariant nature of the original dictionary? In this paper, we derive necessary and sufficient conditions characterizing the existence of such an {"}interpolating{"} and {"}translation-invariant{"} low-rank approximation. Moreover, we provide closed-form expressions of such a dictionary when it exists. We illustrate the applicability of our results in the case of a two-dimensional isotropic Gaussian dictionary. We show that, in this particular setup, the proposed approximation framework outperforms standard Taylor approximation.},\n  keywords = {Interpolation;Sufficient conditions;Dictionaries;Closed-form solutions;Signal processing;Matrix decomposition;Standards;Sparse representations;continuous dictionaries;translation invariance;interpolating approximations},\n  doi = {10.23919/Eusipco47968.2020.9287460},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002011.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we address the problem of approximating the atoms of a parametric dictionary ${\\mathcal{A}} = \\left\\{ {a\\left(\\theta\\right):\\theta \\in \\Theta } \\right\\}$, commonly encountered in the context of sparse representations in \"continuous\" dictionaries. We focus on the case of translation-invariant dictionaries, where the inner product between a(θ) and a(θ′) only depends on the difference θ – θ′. We investigate the following general question: is there some low-rank approximation of ${\\mathcal{A}}$ which interpolates a subset of atoms $\\left\\{ {a\\left({{\\theta _j}}\\right)} \\right\\}_{j = 1}^J$ while preserving the translation-invariant nature of the original dictionary? In this paper, we derive necessary and sufficient conditions characterizing the existence of such an \"interpolating\" and \"translation-invariant\" low-rank approximation. Moreover, we provide closed-form expressions of such a dictionary when it exists. We illustrate the applicability of our results in the case of a two-dimensional isotropic Gaussian dictionary. We show that, in this particular setup, the proposed approximation framework outperforms standard Taylor approximation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Statistical Approach based Optimization for the Application of Chaotic Sequences to Radar.\n \n \n \n \n\n\n \n Jemaa, Z. B.; Marcos, S.; and Belghith, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1921-1925, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"StatisticalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287461,\n  author = {Z. B. Jemaa and S. Marcos and S. Belghith},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Statistical Approach based Optimization for the Application of Chaotic Sequences to Radar},\n  year = {2020},\n  pages = {1921-1925},\n  abstract = {In this paper, we consider chaotic sequences as an alternative to other sequences in the literature for the design of radar waveforms in multiple-input multiple-output (MIMO) radar. For this aim we here adopt a statistical approach; by considering the codes defining the transmitted waveform as realizations of a random variable we show that a suitable distribution of the random variable can give good codes. As an example we show how the chaotic skew tent map allows to generate deterministic codes having the desired statistical properties and thus makes it possible to obtain a good ambiguity function. These results are confirmed by simulations and compared to those using Gold codes and optimized codes.},\n  keywords = {MIMO radar;chaos-based sequences;ambiguity function;skew tent map;invariant probability density},\n  doi = {10.23919/Eusipco47968.2020.9287461},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001921.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we consider chaotic sequences as an alternative to other sequences in the literature for the design of radar waveforms in multiple-input multiple-output (MIMO) radar. To this end, we adopt a statistical approach: by considering the codes defining the transmitted waveform as realizations of a random variable, we show that a suitable distribution of the random variable can yield good codes. As an example, we show how the chaotic skew tent map allows the generation of deterministic codes with the desired statistical properties and thus makes it possible to obtain a good ambiguity function. These results are confirmed by simulations and compared to those obtained using Gold codes and optimized codes.\n
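For concreteness, a minimal generator for the skew tent map is sketched below; the initial condition and skew parameter are arbitrary illustrative values, and the statistical optimization of the map and the mapping to MIMO radar codes are not reproduced.

```python
import numpy as np

def skew_tent_sequence(x0=0.3, a=0.6, length=256):
    """Iterate the skew tent map on [0, 1]: x -> x/a for x < a, (1 - x)/(1 - a) otherwise.

    The initial condition x0 and skew parameter a are illustrative; the paper chooses them
    so that the invariant density of the resulting codes has the statistics that lead to a
    good ambiguity function.
    """
    x = np.empty(length)
    x[0] = x0
    for k in range(1, length):
        x[k] = x[k - 1] / a if x[k - 1] < a else (1.0 - x[k - 1]) / (1.0 - a)
    return 2.0 * x - 1.0        # shift to [-1, 1] before using the samples as code values
```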
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Simultaneous Spline Quantile Regression Under Shape Constraints.\n \n \n \n \n\n\n \n Kitahara, D.; Leng, K.; Tezuka, Y.; and Hirabayashi, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2423-2427, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"SimultaneousPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287462,\n  author = {D. Kitahara and K. Leng and Y. Tezuka and A. Hirabayashi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Simultaneous Spline Quantile Regression Under Shape Constraints},\n  year = {2020},\n  pages = {2423-2427},\n  abstract = {As data analysis methods, hypothesis testing and regression analysis are famous. However, the hypothesis testing can only detect significant differences between two groups divided by some characteristic or some empirical threshold, and the regression analysis can only construct one averaged model whose information is limited. Quantile regression is a robust and flexible analysis method, and can construct multilevel models, e.g., the median and the first and third quartiles. To make the most of the quantile regression, existing papers employed spline regression models as generalizations of polynomial regression models, but the regression of each level was individually executed. In this paper, we propose simultaneous spline quantile regression which considers the similarity between the adjacent quantiles. Further, the proposed method enforces the non-crossing and one shape (non-decreasing/non-increasing/convex/concave) constraints. Experiments demonstrate that the proposed method recovers harmonious quantiles.},\n  keywords = {Analytical models;Smoothing methods;Shape;Signal processing;Regression analysis;Splines (mathematics);Testing;Quantile regression;spline function;simultaneous regression;shape constraint;convex optimization},\n  doi = {10.23919/Eusipco47968.2020.9287462},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002423.pdf},\n}\n\n
\n
\n\n\n
\n Hypothesis testing and regression analysis are among the most widely used data analysis methods. However, hypothesis testing can only detect significant differences between two groups divided by some characteristic or some empirical threshold, and regression analysis can only construct a single averaged model, which conveys limited information. Quantile regression is a robust and flexible analysis method that can construct multilevel models, e.g., for the median and the first and third quartiles. To make the most of quantile regression, existing papers employed spline regression models as generalizations of polynomial regression models, but the regression at each level was executed individually. In this paper, we propose simultaneous spline quantile regression, which takes into account the similarity between adjacent quantiles. Further, the proposed method enforces non-crossing constraints and one shape constraint (non-decreasing/non-increasing/convex/concave). Experiments demonstrate that the proposed method recovers harmonious quantiles.\n
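The basic building block of any quantile regression is the pinball loss, sketched below; the spline basis, the simultaneous estimation of several quantile levels, and the non-crossing/shape constraints of the proposed method are not shown.

```python
import numpy as np

def pinball_loss(y, y_hat, tau):
    """Quantile (pinball) loss: under-estimates are weighted by tau, over-estimates by 1 - tau."""
    residual = y - y_hat
    return float(np.mean(np.maximum(tau * residual, (tau - 1.0) * residual)))

# e.g. tau = 0.5 gives half the mean absolute error (median regression),
# while tau = 0.25 and tau = 0.75 target the first and third quartiles.
```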
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n GAN-Based Rain Noise Removal from Single-Image Considering Rain Composite Models.\n \n \n \n \n\n\n \n Matsui, T.; and Ikehara, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 665-669, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"GAN-BasedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287463,\n  author = {T. Matsui and M. Ikehara},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {GAN-Based Rain Noise Removal from Single-Image Considering Rain Composite Models},\n  year = {2020},\n  pages = {665-669},\n  abstract = {Under severe weather conditions, outdoor images or videos captured by cameras can be affected by heavy rain and fog. In this paper, we address a single-image rain removal problem (de-raining). As compared to video-based methods, single-image based methods are challenging because of the lack of temporal information. Although many existing methods have tackled these challenges, they suffer from overfitting, over-smoothing, and unnatural hue change. To solve these problems, we propose a GAN-based de-raining method. The optimal generator is determined by experimental comparisons. To train the generator, we learn the mapping between rainy and residual images from the training dataset. Besides, we synthesize a variety of rainy images to train our network. In particular, we focus on not only the orientations and scales of rain streaks but also the rainy image composite models. Our method also achieves better performance on both synthetic and real-world images than state-of-the-art methods in terms of quantitative and visual performances.},\n  keywords = {Training;Visualization;Rain;Generators;Videos;Testing;Sports;Generative adversarial network;single-image de-raining;deep learning;image restoration;residual learning},\n  doi = {10.23919/Eusipco47968.2020.9287463},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000665.pdf},\n}\n\n
\n
\n\n\n
\n Under severe weather conditions, outdoor images or videos captured by cameras can be affected by heavy rain and fog. In this paper, we address the single-image rain removal problem (de-raining). Compared to video-based methods, single-image methods are challenging because of the lack of temporal information. Although many existing methods have tackled these challenges, they suffer from overfitting, over-smoothing, and unnatural hue changes. To solve these problems, we propose a GAN-based de-raining method. The optimal generator is determined by experimental comparisons. To train the generator, we learn the mapping between rainy and residual images from the training dataset. In addition, we synthesize a variety of rainy images to train our network. In particular, we focus not only on the orientations and scales of rain streaks but also on the rainy image composite models. Our method achieves better performance than state-of-the-art methods on both synthetic and real-world images, in terms of both quantitative metrics and visual quality.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semi-supervised Multichannel Speech Separation Based on a Phone- and Speaker-Aware Deep Generative Model of Speech Spectrograms.\n \n \n \n \n\n\n \n Du, Y.; Sekiguchi, K.; Bando, Y.; Arie Nugraha, A.; Fontaine, M.; Yoshii, K.; and Kawahara, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 870-874, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Semi-supervisedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287464,\n  author = {Y. Du and K. Sekiguchi and Y. Bando and A. {Arie Nugraha} and M. Fontaine and K. Yoshii and T. Kawahara},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Semi-supervised Multichannel Speech Separation Based on a Phone- and Speaker-Aware Deep Generative Model of Speech Spectrograms},\n  year = {2020},\n  pages = {870-874},\n  abstract = {This paper describes a semi-supervised multichannel speech separation method that uses clean speech signals with frame-wise phonetic labels and sample-level speaker labels for pre-training. A standard approach to statistical source separation is to formulate a probabilistic model of multichannel mixture spectrograms that combines source models representing the time-frequency characteristics of sources with spatial models representing the covariance structure between channels. For speech separation and enhancement, deep generative models with latent variables have successfully been used as source models. The parameters of such a speech model can be trained beforehand from clean speech signals with a variational autoencoder (VAE) or its conditional variant (CVAE) that takes speaker labels as auxiliary inputs. Because human speech is characterized by both phonetic features and speaker identities, we propose a probabilistic model that combines a phone- and speaker-aware deep speech model with a full-rank spatial model. Our speech model is trained with a CVAE taking both phone and speaker labels as conditions. Given speech mixtures, the spatial covariance matrices, latent variables of sources, and phone and speaker labels of sources are jointly estimated. Comparative experimental results showed that the performance of speech separation can be improved by explicitly considering phonetic features and/or speaker identities.},\n  keywords = {Time-frequency analysis;Source separation;Phonetics;Speech enhancement;Probabilistic logic;Spectrogram;Standards;multichannel source separation;speech separation;variational autoencoder},\n  doi = {10.23919/Eusipco47968.2020.9287464},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000870.pdf},\n}\n\n
\n
\n\n\n
\n This paper describes a semi-supervised multichannel speech separation method that uses clean speech signals with frame-wise phonetic labels and sample-level speaker labels for pre-training. A standard approach to statistical source separation is to formulate a probabilistic model of multichannel mixture spectrograms that combines source models representing the time-frequency characteristics of sources with spatial models representing the covariance structure between channels. For speech separation and enhancement, deep generative models with latent variables have successfully been used as source models. The parameters of such a speech model can be trained beforehand from clean speech signals with a variational autoencoder (VAE) or its conditional variant (CVAE) that takes speaker labels as auxiliary inputs. Because human speech is characterized by both phonetic features and speaker identities, we propose a probabilistic model that combines a phone- and speaker-aware deep speech model with a full-rank spatial model. Our speech model is trained with a CVAE taking both phone and speaker labels as conditions. Given speech mixtures, the spatial covariance matrices, latent variables of sources, and phone and speaker labels of sources are jointly estimated. Comparative experimental results showed that the performance of speech separation can be improved by explicitly considering phonetic features and/or speaker identities.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Blind Bandwidth Extension of Speech based on LPCNet.\n \n \n \n \n\n\n \n Schmidt, K.; and Edler, B.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 426-430, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"BlindPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287465,\n  author = {K. Schmidt and B. Edler},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Blind Bandwidth Extension of Speech based on LPCNet},\n  year = {2020},\n  pages = {426-430},\n  abstract = {A blind bandwidth extension is presented which improves the perceived quality of 4 kHz speech by artificially extending the speech’s frequency range to 8 kHz. Based on the source-filter model of the human speech production, the speech signal is decomposed into spectral envelope and excitation signal and each of them is extrapolated separately. With this decomposition, good perceptual quality can be achieved while keeping the computational complexity low. The focus of this work is in the generation of an excitation signal with and autoregressive model that calculates a distribution for each audio sample conditioned on previous samples. This is achieved with a deep neural network following the architecture of LPCNet [1].A listening test shows that it significantly improves the perceived quality of bandlimited speech. The system has an algorithmic delay of 30 ms and can be applied in state-of-the-art speech and audio codecs.},\n  keywords = {Speech coding;Computational modeling;Signal processing algorithms;Bandwidth;Delays;Computational complexity;Signal resolution;bandwidth extension;artificial bandwidth expansion;speech enhancement;audio super resolution;speech super resolution},\n  doi = {10.23919/Eusipco47968.2020.9287465},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000426.pdf},\n}\n\n
\n
\n\n\n
\n A blind bandwidth extension is presented which improves the perceived quality of 4 kHz speech by artificially extending the speech’s frequency range to 8 kHz. Based on the source-filter model of human speech production, the speech signal is decomposed into a spectral envelope and an excitation signal, and each of them is extrapolated separately. With this decomposition, good perceptual quality can be achieved while keeping the computational complexity low. The focus of this work is on the generation of an excitation signal with an autoregressive model that calculates a distribution for each audio sample conditioned on previous samples. This is achieved with a deep neural network following the architecture of LPCNet [1]. A listening test shows that it significantly improves the perceived quality of bandlimited speech. The system has an algorithmic delay of 30 ms and can be applied in state-of-the-art speech and audio codecs.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Towards Domain Independence in CNN-based Acoustic Localization using Deep Cross Correlations.\n \n \n \n \n\n\n \n Vera-Diaz, J. M.; Pizarro, D.; and Macias-Guarasa, J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 226-230, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"TowardsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287466,\n  author = {J. M. Vera-Diaz and D. Pizarro and J. Macias-Guarasa},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Towards Domain Independence in CNN-based Acoustic Localization using Deep Cross Correlations},\n  year = {2020},\n  pages = {226-230},\n  abstract = {Time delay estimation is essential in Acoustic Source Localization (ASL) systems. One of the most used techniques for this purpose is the Generalized Cross Correlation (GCC) between a pair of signals and its use in Steered Response Power (SRP) techniques, which estimate the acoustic power at a specific location. Nowadays, Deep Learning strategies may outperform these methods. However, they are generally dependent on the geometric and sensor configuration conditions that are available during the training phases, thus having limited generalization capabilities when facing new environments if no re-training nor adaptation is applied. In this work, we propose a method based on an encoder-decoder CNN architecture capable of outperforming the well known SRP-PHAT algorithm, and also other Deep Learning strategies when working in mismatched training-testing conditions without requiring a model re-training. Our proposal aims to estimate a smoothed version of the correlation signals, that is then used to generate a refined acoustic power map, which leads to better performance on the ASL task. Our experimental evaluation uses three publicly available realistic datasets and provides a comparison with the SRP-PHAT algorithm and other recent proposals based on Deep Learning.},\n  keywords = {Deep learning;Training;Correlation;Acoustics;Proposals;Task analysis;Microphones;Acoustic Source Localization;Generalized Cross Correlation;Steered Response Power;Convolutional Neural Networks;Deep Learning},\n  doi = {10.23919/Eusipco47968.2020.9287466},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000226.pdf},\n}\n\n
\n
\n\n\n
\n Time delay estimation is essential in Acoustic Source Localization (ASL) systems. One of the most widely used techniques for this purpose is the Generalized Cross Correlation (GCC) between a pair of signals, together with its use in Steered Response Power (SRP) techniques, which estimate the acoustic power at a specific location. Nowadays, Deep Learning strategies may outperform these methods. However, they are generally dependent on the geometric and sensor configuration conditions available during the training phase, and thus have limited generalization capabilities when facing new environments if no re-training or adaptation is applied. In this work, we propose a method based on an encoder-decoder CNN architecture capable of outperforming the well-known SRP-PHAT algorithm, as well as other Deep Learning strategies, when working in mismatched training-testing conditions without requiring model re-training. Our proposal aims to estimate a smoothed version of the correlation signals, which is then used to generate a refined acoustic power map, leading to better performance on the ASL task. Our experimental evaluation uses three publicly available realistic datasets and provides a comparison with the SRP-PHAT algorithm and other recent proposals based on Deep Learning.\n
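For reference, a standard GCC-PHAT implementation is sketched below, since both SRP-PHAT and the correlation signals refined by the proposed network start from this quantity; the FFT length and sampling rate are illustrative.

```python
import numpy as np

def gcc_phat(sig, ref, fs=16000, max_tau=None):
    """Generalized cross-correlation with phase transform between two microphone signals.

    Returns the estimated delay of `sig` relative to `ref` (in seconds) together with the
    correlation curve itself, which is the quantity the proposed network learns to smooth.
    """
    n = 2 * max(len(sig), len(ref))
    SIG = np.fft.rfft(sig, n=n)
    REF = np.fft.rfft(ref, n=n)
    R = SIG * np.conj(REF)
    R /= np.abs(R) + 1e-12                    # PHAT weighting: keep only the phase
    cc = np.fft.irfft(R, n=n)
    max_shift = n // 2 if max_tau is None else min(int(fs * max_tau), n // 2)
    cc = np.concatenate((cc[-max_shift:], cc[:max_shift + 1]))
    delay = (np.argmax(np.abs(cc)) - max_shift) / fs
    return delay, cc
```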
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Polarization-based online interference mitigation in radio interferometry.\n \n \n \n \n\n\n \n Yatawatta, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1961-1965, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Polarization-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287467,\n  author = {S. Yatawatta},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Polarization-based online interference mitigation in radio interferometry},\n  year = {2020},\n  pages = {1961-1965},\n  abstract = {Mitigation of radio frequency interference (RFI) is essential to deliver science-ready radio interferometric data to astronomers. In this paper, using dual polarized radio interferometers, we propose to use the polarization information of post-correlation interference signals to detect and mitigate them. We use the directional statistics of the polarized signals as the detection criteria and formulate a distributed, wideband spectrum sensing problem. Using consensus optimization, we solve this in an online manner, working with mini-batches of data. We present extensive results based on simulations to demonstrate the feasibility of our method.},\n  keywords = {Signal processing algorithms;Radio interferometry;Interference;Signal processing;Sensors;Wideband;Radiofrequency interference;Radio astronomy;spectrum sensing;RFI;directional statistics},\n  doi = {10.23919/Eusipco47968.2020.9287467},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001961.pdf},\n}\n\n
\n
\n\n\n
\n Mitigation of radio frequency interference (RFI) is essential to deliver science-ready radio interferometric data to astronomers. In this paper, using dual polarized radio interferometers, we propose to use the polarization information of post-correlation interference signals to detect and mitigate them. We use the directional statistics of the polarized signals as the detection criteria and formulate a distributed, wideband spectrum sensing problem. Using consensus optimization, we solve this in an online manner, working with mini-batches of data. We present extensive results based on simulations to demonstrate the feasibility of our method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n RTF Based LCMV Beamformer with Multiple Reference Microphones.\n \n \n \n \n\n\n \n Schreibman, A.; Barnov, A.; Gendelman, A.; and Tzirkel, E.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 181-185, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RTFPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287468,\n  author = {A. Schreibman and A. Barnov and A. Gendelman and E. Tzirkel},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {RTF Based LCMV Beamformer with Multiple Reference Microphones},\n  year = {2020},\n  pages = {181-185},\n  abstract = {Microphone arrays are widely used for speech enhancement applications. We consider the enhancement of multiple desired speakers in a noisy environment while utilizing the Relative Transfer Function (RTF) approach for the beamformer realization. When beamforming operation relies on an estimated RTF, it produces a signal with reduced noise compared to the signal received by some reference microphone associated with the RTF, while maintaining its speech component undistorted. The reference microphone is usually chosen as the microphone with the highest Signal-to-Noise Ratio (SNR). For large arrays, in a multiple desired speaker environment, there is no single reference microphone which maximizes SNR for all desired speakers. The commonly used RTF technique may result in compromised performance for one or more desired speakers. Here, we propose an alternative scheme that considers multiple reference microphones. Each reference microphone is associated with a single desired speaker such that it maximizes its input SNR, in comparison to other array microphones. We show that by this technique, the beamformer maintains the desired noise reduction without compromising SNR for any of the desired speakers. We present analytical analysis for a 2 microphone array in free-field propagation for far-field sources. Based on this analysis some design criteria are derived and finally evaluated both in synthetic and recorded room environments.},\n  keywords = {Array signal processing;Noise reduction;Transfer functions;Speech enhancement;Microphone arrays;Noise measurement;Signal to noise ratio},\n  doi = {10.23919/Eusipco47968.2020.9287468},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000181.pdf},\n}\n\n
\n
\n\n\n
\n Microphone arrays are widely used for speech enhancement applications. We consider the enhancement of multiple desired speakers in a noisy environment while utilizing the Relative Transfer Function (RTF) approach for the beamformer realization. When the beamforming operation relies on an estimated RTF, it produces a signal with reduced noise compared to the signal received by some reference microphone associated with the RTF, while maintaining its speech component undistorted. The reference microphone is usually chosen as the microphone with the highest Signal-to-Noise Ratio (SNR). For large arrays in a multiple desired speaker environment, there is no single reference microphone which maximizes the SNR for all desired speakers. The commonly used RTF technique may therefore result in compromised performance for one or more desired speakers. Here, we propose an alternative scheme that considers multiple reference microphones. Each reference microphone is associated with a single desired speaker such that it maximizes its input SNR in comparison to the other array microphones. We show that with this technique, the beamformer maintains the desired noise reduction without compromising the SNR for any of the desired speakers. We present an analytical analysis for a two-microphone array in free-field propagation with far-field sources. Based on this analysis, some design criteria are derived and finally evaluated in both synthetic and recorded room environments.\n
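A minimal sketch of the reference-microphone selection rule described above, assuming hypothetical per-microphone speech and noise power estimates; the RTF estimation and the LCMV beamformer itself are not shown.

```python
import numpy as np

def pick_reference_mics(speech_power, noise_power):
    """Pick, for every desired speaker, the microphone with the highest input SNR.

    `speech_power` and `noise_power` are hypothetical (n_speakers, n_mics) arrays of
    per-microphone speech and noise power estimates; the returned index is then used as
    the reference microphone of that speaker's RTF in the LCMV beamformer.
    """
    snr_db = 10.0 * np.log10(speech_power / noise_power)
    return np.argmax(snr_db, axis=1)          # one reference microphone per speaker
```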
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust Source Separation with Differential Microphone Arrays and Independent Low-Rank Matrix Analysis.\n \n \n \n \n\n\n \n Li, D.; Huang, G.; Lei, Y.; Chen, J.; and Benesty, J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 291-295, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287469,\n  author = {D. Li and G. Huang and Y. Lei and J. Chen and J. Benesty},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust Source Separation with Differential Microphone Arrays and Independent Low-Rank Matrix Analysis},\n  year = {2020},\n  pages = {291-295},\n  abstract = {Acoustic source separation has been an active and important area of research in the field of acoustic signal processing. This paper deals with this problem using small and compact differential microphone arrays (DMAs) so that the resulting technology can be used in a broad range of small devices in voice communication and human-machine interfaces. A straightforward way to achieve source separation with DMAs is through differential beamforming. Although it has frequency-invariant beampatterns and high directivity in comparison with other existing beamforming methods with the same number of sensors, differential beamforming with small DMAs has limited spatial gain, generally leading to insufficient separation performance. To circumvent this limitation, we propose in this work a method to combine differential beamforming with an independent vector analysis (IVA) based algorithm. Specifically, differential beamformers are designed and applied to separate sound sources from different directions. Then, differential beamformers’ outputs are used as inputs for the independent low-rank matrix analysis (ILRMA) algorithm, a widely used IVA method for blind source separation. The advantage of this proposed method consists of at least three aspects: 1) improving the source separation performance, 2) helping deal with the permutation problem, and 3) helping improve the convergence of ILRMA.},\n  keywords = {Array signal processing;Signal processing algorithms;Acoustic arrays;Microphone arrays;Acoustics;Sensors;Man-machine systems;Differential microphone arrays;beamforming;source separation;independent low-rank matrix analysis},\n  doi = {10.23919/Eusipco47968.2020.9287469},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000291.pdf},\n}\n\n
\n
\n\n\n
\n Acoustic source separation has been an active and important area of research in the field of acoustic signal processing. This paper deals with this problem using small and compact differential microphone arrays (DMAs) so that the resulting technology can be used in a broad range of small devices in voice communication and human-machine interfaces. A straightforward way to achieve source separation with DMAs is through differential beamforming. Although it has frequency-invariant beampatterns and high directivity in comparison with other existing beamforming methods with the same number of sensors, differential beamforming with small DMAs has limited spatial gain, generally leading to insufficient separation performance. To circumvent this limitation, we propose in this work a method to combine differential beamforming with an independent vector analysis (IVA) based algorithm. Specifically, differential beamformers are designed and applied to separate sound sources from different directions. Then, differential beamformers’ outputs are used as inputs for the independent low-rank matrix analysis (ILRMA) algorithm, a widely used IVA method for blind source separation. The advantage of this proposed method consists of at least three aspects: 1) improving the source separation performance, 2) helping deal with the permutation problem, and 3) helping improve the convergence of ILRMA.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Image storage in DNA using Vector Quantization.\n \n \n \n \n\n\n \n Dimopoulou, M.; and Antonini, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 516-520, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ImagePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287470,\n  author = {M. Dimopoulou and M. Antonini},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Image storage in DNA using Vector Quantization},\n  year = {2020},\n  pages = {516-520},\n  abstract = {Rapid technological advances and the increasing use of social media has caused a tremendous increase in the generation of digital data, a fact that imposes nowadays a great challenge for the field of digital data storage due to the shortterm reliability of conventional storage devices. Hard disks, flash, tape or even optical storage have a durability of 5 to 20 years while running data centers also require huge amounts of energy. An alternative to hard drives is the use of DNA, which is life's information-storage material, as a means of digital data storage. Recent works have proven that storing digital data into DNA is not only feasible but also very promising as the DNA's biological properties allow the storage of a great amount of information into an extraordinary small volume for centuries or even longer with no loss of information. In this work we present an extended end-to-end storage workflow specifically designed for the efficient storage of images onto synthetic DNA. This workflow uses a new encoding algorithm which serves the needs of image compression while also being robust to the biological errors which may corrupt the encoding.},\n  keywords = {Image coding;Social networking (online);Vector quantization;DNA;Memory;Image storage;Biological information theory},\n  doi = {10.23919/Eusipco47968.2020.9287470},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000516.pdf},\n}\n\n
\n
\n\n\n
\n Rapid technological advances and the increasing use of social media have caused a tremendous increase in the generation of digital data, a fact that nowadays poses a great challenge for the field of digital data storage due to the short-term reliability of conventional storage devices. Hard disks, flash, tape or even optical storage have a durability of 5 to 20 years, while running data centers also requires huge amounts of energy. An alternative to hard drives is the use of DNA, which is life's information-storage material, as a means of digital data storage. Recent works have proven that storing digital data into DNA is not only feasible but also very promising, as DNA's biological properties allow the storage of a great amount of information in an extraordinarily small volume for centuries or even longer with no loss of information. In this work we present an extended end-to-end storage workflow specifically designed for the efficient storage of images onto synthetic DNA. This workflow uses a new encoding algorithm which serves the needs of image compression while also being robust to the biological errors which may corrupt the encoding.\n
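A toy sketch of the vector-quantization-to-DNA idea: image blocks are quantized with a k-means codebook and each codeword index is written as a quaternary word over {A, C, G, T}. The codebook size and block handling are illustrative, and the biological coding constraints addressed by the paper's encoder are ignored here.

```python
import numpy as np
from sklearn.cluster import KMeans

NUCLEOTIDES = np.array(list("ACGT"))

def vq_to_dna(blocks, codebook_size=64):
    """Vector-quantise image blocks and write each codeword index as a quaternary DNA word.

    `blocks` is an (n_blocks, block_dim) array of flattened image patches. The codebook
    size is an illustrative choice, and constraints that avoid biologically error-prone
    nucleotide patterns are not enforced in this sketch.
    """
    km = KMeans(n_clusters=codebook_size, n_init=4).fit(blocks)
    digits = int(np.ceil(np.log(codebook_size) / np.log(4)))      # quaternary digits per index
    words = []
    for idx in km.labels_:
        symbols = [(idx // 4 ** d) % 4 for d in reversed(range(digits))]
        words.append("".join(NUCLEOTIDES[symbols]))
    return words, km.cluster_centers_
```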
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n In Silico Cardiac Model to Evaluate Myocardial Ischemia effect on Hemodynamic Parameters.\n \n \n \n \n\n\n \n Mazumder, O.; Roy, D.; and Sinha, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1105-1109, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"InPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287472,\n  author = {O. Mazumder and D. Roy and A. Sinha},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {In Silico Cardiac Model to Evaluate Myocardial Ischemia effect on Hemodynamic Parameters},\n  year = {2020},\n  pages = {1105-1109},\n  abstract = {Myocardial ischemia is one of the leading cause of sudden cardiac death, often due to narrowing of Coronary artery resulting in poor oxygen supply (ischemic effect) in cardiac muscles. This disease has a high variable manifestation due to difference in location and extent of damaged area, thus hampering the understanding of disease progression and stratification. In this paper, we propose a multimodal simulation of cardiac ischemia to understand the disease progression with change in ischemic size and myocardial electrical propagation and to observe changes in hemodynamic parameters with change in disease severity. The in-silico cardiac model binds electrophysiological characteristics with hemodynamic parameters, thus giving a broader understanding of the effect of disease progression on various parameters like ejection fraction, contractility, blood pressure, etc, to assess ischemic manifestation leading to heart failure. Three different cases of ischemia have been simulated to study the effect of disease and its progression. The developed in silico cardiac model can be used to simulate and study the holistic effect of any such cardiac conduction disorder along with its effect and manifestation over hemodynamic parameters.},\n  keywords = {bioelectric phenomena;blood;blood vessels;cardiovascular system;cellular biophysics;diseases;haemodynamics;muscle;hemodynamic parameters;sudden cardiac death;coronary artery;oxygen supply;ischemic effect;cardiac muscles;high variable manifestation;disease progression;cardiac ischemia;ischemic size;myocardial electrical propagation;disease severity;in-silico cardiac model binds electrophysiological characteristics;ischemic manifestation;holistic effect;cardiac conduction disorder;myocardial ischemia effect;Heart;Myocardium;Electrocardiography;Predictive models;Data models;Hemodynamics;Diseases;Electrophysiology;Hemodynamics;Myocardial Ischemia;Compliance;Transmembrane Potential},\n  doi = {10.23919/Eusipco47968.2020.9287472},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001105.pdf},\n}\n\n
\n
\n\n\n
\n Myocardial ischemia is one of the leading causes of sudden cardiac death, often due to narrowing of the coronary artery resulting in poor oxygen supply (ischemic effect) in the cardiac muscles. This disease has a highly variable manifestation due to differences in the location and extent of the damaged area, thus hampering the understanding of disease progression and stratification. In this paper, we propose a multimodal simulation of cardiac ischemia to understand the disease progression with changes in ischemic size and myocardial electrical propagation, and to observe changes in hemodynamic parameters with changes in disease severity. The in-silico cardiac model binds electrophysiological characteristics with hemodynamic parameters, thus giving a broader understanding of the effect of disease progression on various parameters such as ejection fraction, contractility, and blood pressure, to assess ischemic manifestation leading to heart failure. Three different cases of ischemia have been simulated to study the effect of the disease and its progression. The developed in silico cardiac model can be used to simulate and study the holistic effect of any such cardiac conduction disorder along with its effect and manifestation on hemodynamic parameters.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Convex optimization-based Privacy-Preserving Distributed Least Squares via Subspace Perturbation.\n \n \n \n \n\n\n \n Li, Q.; Heusdens, R.; and Christensen, M. G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2110-2114, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ConvexPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287473,\n  author = {Q. Li and R. Heusdens and M. G. Christensen},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Convex optimization-based Privacy-Preserving Distributed Least Squares via Subspace Perturbation},\n  year = {2020},\n  pages = {2110-2114},\n  abstract = {Over the past decades, privacy-preservation has received considerable attention, not only as a consequence of regulations such as the General Data Protection Regulation in the EU, but also from the fact that people are more concerned about data abuse as the world is becoming increasingly digitized. In this paper we propose a convex optimization-based subspace perturbation approach to solve privacy-preserving distributed least squares problems. Based on the primal-dual method of multipliers, the introduced dual variables will only converge in a subspace determined by the graph topology and do not converge in its orthogonal complement. We, therefore, propose to exploit this property for privacy-preservation by using the nonconverging part of the dual variables to perturb the private data, thereby protecting it from being revealed. Moreover, we prove that the proposed approach is secure under both eavesdropping and passive adversaries. Computer simulations are conducted to demonstrate the benefits of the proposed approach through its convergence properties and accuracy.},\n  keywords = {Perturbation methods;Signal processing;Regulation;Topology;Optimization;Eavesdropping;Convergence;Distributed least squares;subspace;privacy;noise perturbation;convex optimization},\n  doi = {10.23919/Eusipco47968.2020.9287473},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002110.pdf},\n}\n\n
\n
\n\n\n
\n Over the past decades, privacy-preservation has received considerable attention, not only as a consequence of regulations such as the General Data Protection Regulation in the EU, but also from the fact that people are more concerned about data abuse as the world is becoming increasingly digitized. In this paper we propose a convex optimization-based subspace perturbation approach to solve privacy-preserving distributed least squares problems. Based on the primal-dual method of multipliers, the introduced dual variables will only converge in a subspace determined by the graph topology and do not converge in its orthogonal complement. We, therefore, propose to exploit this property for privacy-preservation by using the nonconverging part of the dual variables to perturb the private data, thereby protecting it from being revealed. Moreover, we prove that the proposed approach is secure under both eavesdropping and passive adversaries. Computer simulations are conducted to demonstrate the benefits of the proposed approach through its convergence properties and accuracy.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Anomaly Detection for Symbolic Time Series Representations of Reduced Dimensionality.\n \n \n \n \n\n\n \n Bountrogiannis, K.; Tzagkarakis, G.; and Tsakalides, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2398-2402, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AnomalyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287474,\n  author = {K. Bountrogiannis and G. Tzagkarakis and P. Tsakalides},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Anomaly Detection for Symbolic Time Series Representations of Reduced Dimensionality},\n  year = {2020},\n  pages = {2398-2402},\n  abstract = {The systematic collection of data has become an intrinsic process of all aspects in modern life. From industrial to healthcare machines and wearable sensors, an unprecedented amount of data is becoming available for mining and information retrieval. In particular, anomaly detection plays a key role in a wide range of applications, and has been studied extensively. However, many anomaly detection methods are unsuitable in practical scenarios, where streaming data of large volume arrive in nearly real-time at devices with limited resources. Dimensionality reduction has been excessively used to enable efficient processing for numerous high-level tasks. In this paper, we propose a computationally efficient, yet highly accurate, framework for anomaly detection of streaming data in lower-dimensional spaces, utilizing a modification of the symbolic aggregate approximation for dimensionality reduction and a statistical hypothesis testing based on the Kullback-Leibler divergence.},\n  keywords = {Dimensionality reduction;Systematics;Time series analysis;Task analysis;Anomaly detection;Wearable sensors;Testing;Online anomaly detection;kernel density estimator;symbolic representations;mode-bounding Lloyd-Max quantizer},\n  doi = {10.23919/Eusipco47968.2020.9287474},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002398.pdf},\n}\n\n
\n
\n\n\n
\n The systematic collection of data has become an intrinsic process of all aspects of modern life. From industrial and healthcare machines to wearable sensors, an unprecedented amount of data is becoming available for mining and information retrieval. In particular, anomaly detection plays a key role in a wide range of applications, and has been studied extensively. However, many anomaly detection methods are unsuitable in practical scenarios, where streaming data of large volume arrive in nearly real-time at devices with limited resources. Dimensionality reduction has been used extensively to enable efficient processing for numerous high-level tasks. In this paper, we propose a computationally efficient, yet highly accurate, framework for anomaly detection of streaming data in lower-dimensional spaces, utilizing a modification of the symbolic aggregate approximation for dimensionality reduction and a statistical hypothesis test based on the Kullback-Leibler divergence.\n
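A minimal sketch of the symbolization-plus-divergence idea, using a plain quantile-based quantizer as a stand-in for the paper's modified symbolic aggregate approximation and its kernel-density-based breakpoints.

```python
import numpy as np

def kl_anomaly_score(reference, window, n_symbols=8):
    """Symbolize two windows with quantile breakpoints taken from the reference window,
    then score the new window by the KL divergence between the two symbol histograms.
    Laplace smoothing keeps the divergence finite when a symbol is unused."""
    edges = np.quantile(reference, np.linspace(0.0, 1.0, n_symbols + 1)[1:-1])
    p = np.bincount(np.digitize(reference, edges), minlength=n_symbols) + 1.0
    q = np.bincount(np.digitize(window, edges), minlength=n_symbols) + 1.0
    p, q = p / p.sum(), q / q.sum()
    return float(np.sum(p * np.log(p / q)))
```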
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Chaotic signals representation and spectral characterization using linear discrete-time filters.\n \n \n \n \n\n\n \n da Costa , R. A.; and Eisencraft, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2235-2238, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ChaoticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287475,\n  author = {R. A. {da Costa} and M. Eisencraft},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Chaotic signals representation and spectral characterization using linear discrete-time filters},\n  year = {2020},\n  pages = {2235-2238},\n  abstract = {We present a discrete-time linear recursive filter representation for a piecewise-linear map that generates chaotic signals. It can be used to easily deduce analytical formulas for power spectral density of chaotic signals, providing useful results for chaos-based communication systems and signal processing. Numerical simulations are used to validate the theoretical results.},\n  keywords = {Maximum likelihood detection;Chaotic communication;Nonlinear filters;Signal processing;Numerical simulation;Filtering theory;Signal representation;chaotic signals;nonlinear systems;power spectral density;recursive filters},\n  doi = {10.23919/Eusipco47968.2020.9287475},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002235.pdf},\n}\n\n
@InProceedings{9287476,\n  author = {D. Pascual and S. Tanner and M. Vänskä and R. Wattenhofer},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Deep Learning Decoder for Long-Range Communication Systems},\n  year = {2020},\n  pages = {1668-1672},\n  abstract = {Long-range communication systems require receivers that can detect and decode messages in spite of strong distorting effects. However, classical decoders often fail at coping with complex effects such as interference or multipath propagation. Deep learning has shown strong generalization and adaptation capabilities and is a promising approach for improving decoding systems. In this work, we study the specific case of aircraft communication and build a purely deep learning-based receiver. It detects incoming messages, finds the exact starting point and then decodes their message bits. We demonstrate the performance of our system and show that it can decode 45% more messages than a classical baseline decoder. Our approach is general and can be adapted to many communication protocols.},\n  keywords = {Deep learning;Protocols;Training data;Receivers;Decoding;Air traffic control;Aircraft;Message detection;aircraft communication;deep learning},\n  doi = {10.23919/Eusipco47968.2020.9287476},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001668.pdf},\n}\n\n
@InProceedings{9287478,\n  author = {A. Bosca and A. Guérin and L. Perotin and S. Kitić},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Dilated U-net based approach for multichannel speech enhancement from First-Order Ambisonics recordings},\n  year = {2020},\n  pages = {216-220},\n  abstract = {We present a CNN architecture for speech enhancement from multichannel first-order Ambisonics mixtures. The data-dependent spatial filters, deduced from a mask-based approach, are used to help an automatic speech recognition engine to face adverse conditions of reverberation and competitive speakers. The mask predictions are provided by a neural network, fed with rough estimations of speech and noise amplitude spectra, under the assumption of known directions of arrival. This study evaluates the replacing of the recurrent LSTM network previously investigated by a convolutive U-net under more stressing conditions with an additional second competitive speaker. We show that, due to more accurate short-term masks prediction, the U-net architecture brings some improvements in terms of word error rate. Moreover, results indicate that the use of dilated convolutive layers is beneficial in difficult situations with two interfering speakers, and/or where the target and interferences are close to each other in terms of the angular distance. Moreover, these results come with a two-fold reduction in the number of parameters.},\n  keywords = {Convolution;Neural networks;Speech enhancement;Spatial filters;Reverberation;Noise measurement;Power harmonic filters;multichannel speech separation;first-order Ambisonics;U-net;dilated convolution},\n  doi = {10.23919/Eusipco47968.2020.9287478},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000216.pdf},\n}\n\n
@InProceedings{9287479,\n  author = {F. Shahriari-Mehr and J. Parsa and M. Babaie-Zadeh and C. Jutten},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {New Dictionary Learning Methods for Two-Dimensional Signals},\n  year = {2020},\n  pages = {2021-2025},\n  abstract = {By growing the size of signals in one-dimensional dictionary learning for sparse representation, memory consumption and complex computations restrict the learning procedure. In applications of sparse representation and dictionary learning in two-dimensional signals (e.g. in image processing), if one opts to convert two-dimensional signals to one-dimensional ones, and use the existing one-dimensional dictionary learning and sparse representation techniques, too huge signals and dictionaries will be encountered. Two-dimensional dictionary learning has been proposed to avoid this problem. In this paper, we propose two algorithms for two-dimensional dictionary learning. According to our simulations, the proposed algorithms have noticeable improvement in both convergence rate and computational load in comparison to one-dimensional methods.},\n  keywords = {Training;Dictionaries;Two dimensional displays;Signal processing algorithms;Machine learning;Signal processing;Convergence;Two-dimensional dictionary learning;sparse representation;2D Signals;convex approximation},\n  doi = {10.23919/Eusipco47968.2020.9287479},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002021.pdf},\n}\n\n
@InProceedings{9287480,\n  author = {J. Bondu and É. Grivel and A. Giremus and P. Legrand and V. Corretja and M. Pommier},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Sea Target Classification Based on An A Priori Motion Model},\n  year = {2020},\n  pages = {2478-2482},\n  abstract = {Target classification can be of real interest for sea surveillance in both civil and military contexts. To address this issue, we present two approaches based on the Singer model. The latter has the advantage of covering a wide range of motions depending on the values of its parameters. Given noisy observations, the first method aims at estimating the motion model parameters by taking advantage of the properties of the correlation function of the estimated acceleration. It is based on a genetic algorithm. The second approach is on-line and consists in deriving a joint tracking and classification (JTC) method. Based on various simulations, we study their respective relevance in different operational settings. The proposed JTC corresponds to the best compromise in terms of performance and number of samples required.},\n  keywords = {Target tracking;Surveillance;Signal processing algorithms;Europe;Signal processing;Noise measurement;Genetic algorithms;Sea target classification;Singer model;Joint tracking and classification;Genetic algorithm;Correlation function},\n  doi = {10.23919/Eusipco47968.2020.9287480},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002478.pdf},\n}\n
@InProceedings{9287481,\n  author = {Y. Liu and P. M. Djurić},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Gaussian Process State-Space Models with Time-Varying Parameters and Inducing Points},\n  year = {2020},\n  pages = {1462-1466},\n  abstract = {We propose time-varying Gaussian process state- space models (TVGPSSM) whose hyper-parameters vary with time. The models have the ability to estimate time-varying functions and thereby increase flexibility to extract information from observed data. The proposed inference approach makes use of time-varying inducing points to adapt to changes of the function, and it exploits hierarchical importance sampling. The experimental results show that the approach has better performance than that of the standard Gaussian process.},\n  keywords = {Adaptation models;Monte Carlo methods;Gaussian processes;State-space methods;Trajectory;Time-varying systems;Standards;System identification;Gaussian processes;state- space model;hierarchical importance sampling},\n  doi = {10.23919/Eusipco47968.2020.9287481},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001462.pdf},\n}\n\n
@InProceedings{9287482,\n  author = {A. Giri and L. Kumar and T. Gandhi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust EEG Source Localization Using Subspace Principal Vector Projection Technique},\n  year = {2020},\n  pages = {1075-1079},\n  abstract = {ElectroEncephaloGram (EEG) signals based Brain Source Localization (BSL) has been an active area of research. The performance of BSL algorithms is severely degraded in the presence of background interferences. Pre-Whitening (PW) based approach to deal with such interference assumes temporal stationarity of the data which does not hold good for EEG based processing. Null Projection (NP) based approach relaxes the temporal stationarity. However, the strict spatial stationarity of the number of interfering sources is maintained between control state and activity state measurement. In practical scenarios where an interference source that exists only in the control state, and does not appear in activity state, NP based approach removes a higher dimension space from the activity data leading to its poor performance. The proposed Subspace Principal Vector Projection (SPVP) based approach utilizes subspace correlation based common interference statistics and thus relaxing the strict spatial stationarity condition. In particular, SPVP based MUltiple SIgnal Classification (MUSIC) and Linearly Constrained Minimum Variance (LCMV) algorithms are presented for BSL. Simulation and experiment with real EEG data from Physionet dataset involving motor imagery task illustrate the effectiveness of the proposed algorithms in robust BSL with interference suppression.},\n  keywords = {electroencephalography;interference suppression;medical signal processing;neurophysiology;signal classification;statistical analysis;source localization;electroencephalogram signals;BSL algorithms;background interferences;EEG based processing;interfering sources;activity state measurement;interference source;NP based approach;subspace correlation;interference statistics;strict spatial stationarity condition;robust BSL;linearly constrained minimum variance algorithms;SPVP based multiple signal classification;subspace principal vector projection based approach;pre-whitening based approach;null projection based approach;Interference suppression;Correlation;Signal processing algorithms;Electroencephalography;Data models;Classification algorithms;Task analysis;EEG;Source localization;MUSIC;LCMV;Interference suppression;Subspace correlation;Principal vector},\n  doi = {10.23919/Eusipco47968.2020.9287482},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001075.pdf},\n}\n\n
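MUSIC and LCMV are the standard building blocks behind the SPVP variants in the entry above. As a generic reminder only (not the subspace principal-vector projection of the paper, and for a uniform linear array rather than an EEG lead field), this sketch forms a narrowband MUSIC pseudospectrum from a sample covariance; the array geometry, source count, and noise level are illustrative assumptions.

import numpy as np

def steering_vector(theta_deg, n_sensors=8, spacing=0.5):
    """Narrowband steering vector of a uniform linear array (spacing in wavelengths)."""
    k = np.arange(n_sensors)
    return np.exp(-2j * np.pi * spacing * k * np.sin(np.deg2rad(theta_deg)))

def music_spectrum(X, n_sources, grid_deg):
    """MUSIC pseudospectrum from a snapshot matrix X (sensors x snapshots)."""
    R = X @ X.conj().T / X.shape[1]                       # sample covariance
    _, eigvec = np.linalg.eigh(R)                         # eigenvalues in ascending order
    En = eigvec[:, :-n_sources]                           # noise subspace
    spec = []
    for theta in grid_deg:
        a = steering_vector(theta, X.shape[0])
        spec.append(1.0 / np.real(a.conj() @ En @ En.conj().T @ a))
    return np.asarray(spec)

# toy usage: two uncorrelated sources at -20 and 30 degrees in white noise
rng = np.random.default_rng(1)
angles, n_snap = (-20.0, 30.0), 400
A = np.column_stack([steering_vector(a) for a in angles])
S = rng.standard_normal((2, n_snap)) + 1j * rng.standard_normal((2, n_snap))
N = 0.1 * (rng.standard_normal((8, n_snap)) + 1j * rng.standard_normal((8, n_snap)))
X = A @ S + N
grid = np.linspace(-90.0, 90.0, 361)
spec = music_spectrum(X, n_sources=2, grid_deg=grid)
print("strongest pseudospectrum peak (deg):", grid[int(np.argmax(spec))])
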
@InProceedings{9287484,\n  author = {C. Fu and C. Liu and C. T. Ishi and H. Ishiguro},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {An End-to-end Multitask Learning Model to Improve Speech Emotion Recognition},\n  year = {2020},\n  pages = {1-5},\n  abstract = {In this paper, we propose an attention-based CNN-BLSTM model with the end-to-end (E2E) learning method. We first extract Mel-spectrogram from wav file instead of using handcrafted features. Then we adopt two types of attention mechanisms to let the model focuses on salient periods of speech emotions over the temporal dimension. Considering that there are many individual differences among people in expressing emotions, we incorporate speaker recognition as an auxiliary task. Moreover, since the training data set has a small sample size, we include data from another language as data augmentation. We evaluated the proposed method on SAVEE dataset by training it with single task, multitask, and cross-language. The evaluation shows that our proposed model achieves 73.62% for weighted accuracy and 71.11% for un-weighted accuracy in the task of speech emotion recognition, which outperforms the baseline with 11.13 points.},\n  keywords = {Training;Emotion recognition;Training data;Speech recognition;Feature extraction;Task analysis;Speech processing;speech emotion recognition;multitask learning;speaker recognition},\n  doi = {10.23919/Eusipco47968.2020.9287484},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000351.pdf},\n}\n\n
@InProceedings{9287485,\n  author = {B. Tahir and S. Schwarz and M. Rupp},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Collision Resilient V2X Communication via Grant-Free NOMA},\n  year = {2020},\n  pages = {1732-1736},\n  abstract = {In vehicle-to-everything (V2X) communication, vehicles will perform the resources selection on their own, once there is no central coordination of the transmissions by the base station. If the vehicles choose to contest the same time-frequency resources, then a collision of the transmitted messages will occur. This can highly impact the reliability and latency of the vehicular link, which are crucial for safety-related applications. In this paper, we propose applying the framework of grant-free non-orthogonal multiple access (NOMA) for a collision resilient V2X communication. Grant-free NOMA allows for uncoordinated transmissions and can resolve collisions by means of specially designed transmit signatures, combined with an advanced receiver that utilizes the structure of those signatures. Our initial results suggest that such a framework has the potential to substantially enhance the system robustness to collisions, in terms of both reliability and latency.},\n  keywords = {NOMA;Time-frequency analysis;Europe;Receivers;Signal processing;Robustness;Vehicle-to-everything},\n  doi = {10.23919/Eusipco47968.2020.9287485},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001732.pdf},\n}\n\n
@InProceedings{9287486,\n  author = {S. Itani and D. Thanou},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Graph Signal Processing Framework for the Classification of Temporal Brain Data},\n  year = {2020},\n  pages = {1180-1184},\n  abstract = {Graph Signal Processing (GSP) addresses the analysis of data living on an irregular domain which can be modeled with a graph. This capability is of great interest for the study of brain connectomes. In this case, data lying on the nodes of the graph are considered as signals (e.g., fMRI time-series) that have a strong dependency on the graph topology (e.g., brain structural connectivity). In this paper, we adopt GSP tools to build features related to the frequency content of the signals. To make these features highly discriminative, we apply an extension of the Fukunaga-Koontz transform. We then use these new features to train a decision tree for the prediction of autism spectrum disorder. Interestingly, our framework outperforms state-of-the-art methods on the publicly available ABIDE dataset.},\n  keywords = {biomedical MRI;brain;decision trees;graph theory;image classification;medical image processing;neurophysiology;transforms;fMRI time-series;graph topology;brain structural connectivity;GSP tools;graph signal processing framework;temporal brain data;irregular domain;Fukunaga-Koontz transform;autism spectrum disorder;Autism;Signal processing algorithms;Signal processing;Tools;Prediction algorithms;Topology;Classification algorithms;Graph signal processing;machine learning;explainability;decision trees;functional MRI;autism spectrum disorder},\n  doi = {10.23919/Eusipco47968.2020.9287486},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001180.pdf},\n}\n\n
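The pipeline above builds frequency-content features from graph signals before a Fukunaga-Koontz step and a decision tree. Below is a minimal, generic illustration of just the first stage: graph Fourier coefficients from the combinatorial Laplacian, summarized as low- and high-frequency energies. The graph, the signals, and the band split are illustrative assumptions, and the Fukunaga-Koontz extension and classifier are not shown.

import numpy as np

def graph_fourier_features(W, signals, n_low=3):
    """Project each signal onto Laplacian eigenvectors and summarize low/high-frequency energy.

    W       : symmetric adjacency matrix (nodes x nodes)
    signals : array of shape (n_samples, nodes), one graph signal per row
    """
    L = np.diag(W.sum(axis=1)) - W                   # combinatorial graph Laplacian
    _, U = np.linalg.eigh(L)                         # eigenvectors ordered by graph frequency
    coeffs = signals @ U                             # graph Fourier transform of each signal
    low = np.sum(coeffs[:, :n_low] ** 2, axis=1)     # energy in the smoothest modes
    high = np.sum(coeffs[:, n_low:] ** 2, axis=1)    # energy in the remaining modes
    return np.column_stack([low, high])

# toy usage on a 5-node ring graph
W = np.zeros((5, 5))
for i in range(5):
    W[i, (i + 1) % 5] = W[(i + 1) % 5, i] = 1.0
signals = np.random.default_rng(2).standard_normal((10, 5))
print(graph_fourier_features(W, signals).shape)      # (10, 2) feature matrix for a downstream classifier
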
@InProceedings{9287487,\n  author = {M. Maass and C. Droigk and F. Katzberg and P. Koch and A. Mertins},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Recovery Algorithm based on the Kaczmarz Algorithm and ADMM Splitting with Application to Convex Optimization in Magnetic Particle Imaging},\n  year = {2020},\n  pages = {2135-2139},\n  abstract = {This work introduces a strategy for the extension of the standard Kaczmarz algorithm, which is popular for solving very large inverse problems, to priors other than the commonly used Tikhonov regularization. The proposed reformulation of the algorithm allows us to include more sophisticated priors while inheriting the row-wise operation structure of the classical Kaczmarz algorithm. The new method is developed with help of the alternating direction method of multipliers. The results show that also with suboptimal alternating direction method of multiplier steps, the proposed algorithm is able to solve convex optimization problems with very high accuracy. Especially, on the relative young preclinical medical imaging modality of magnetic particle imaging, the algorithm demonstrates high convergence rates. When the underlying matrix nearly shows mutually orthogonal rows, which is observed in the field of magnetic particle imaging, very high convergence rates can be expected.},\n  keywords = {Magnetic particles;Inverse problems;Signal processing algorithms;Signal processing;Convex functions;Standards;Convergence;Kaczmarz method;alternating direction method of multipliers;convex optimization;inverse problems;medical imaging},\n  doi = {10.23919/Eusipco47968.2020.9287487},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002135.pdf},\n}\n\n
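For reference, the classical row-wise Kaczmarz sweep that the entry above extends. The ADMM-coupled variant with general priors is not reproduced; this is just the textbook cyclic iteration for a consistent linear system A x = b, with illustrative problem sizes.

import numpy as np

def kaczmarz(A, b, n_sweeps=50, x0=None):
    """Classical Kaczmarz: cyclically project the iterate onto each row's hyperplane a_i^T x = b_i."""
    m, n = A.shape
    x = np.zeros(n) if x0 is None else x0.astype(float).copy()
    row_norms = np.sum(A * A, axis=1)
    for _ in range(n_sweeps):
        for i in range(m):
            if row_norms[i] > 0:
                x += (b[i] - A[i] @ x) / row_norms[i] * A[i]
    return x

# toy usage: overdetermined consistent system
rng = np.random.default_rng(3)
A = rng.standard_normal((200, 50))
x_true = rng.standard_normal(50)
x_hat = kaczmarz(A, A @ x_true)
print("relative error:", np.linalg.norm(x_hat - x_true) / np.linalg.norm(x_true))
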
@InProceedings{9287488,\n  author = {Y. Higuchi and N. Tawara and A. Ogawa and T. Iwata and T. Kobayashi and T. Ogawa},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Noise-robust Attention Learning for End-to-End Speech Recognition},\n  year = {2020},\n  pages = {311-315},\n  abstract = {We propose a method for improving the noise robustness of an end-to-end automatic speech recognition (ASR) model using attention weights. Several studies have adopted a combination of recurrent neural networks and attention mechanisms to achieve direct speech-to-text translation. In the real-world environment, however, noisy conditions make it difficult for the attention mechanisms to estimate the accurate alignment between the input speech frames and output characters, leading to the degradation of the recognition performance of the end-to-end model. In this work, we propose noise-robust attention learning (NRAL) which explicitly tells the attention mechanism where to {"}listen at{"} in a sequence of noisy speech features. Specifically, we train the attention weights estimated from a noisy speech to approximate the weights estimated from a clean speech. The experimental results based on the CHiME-4 task indicate that the proposed NRAL approach effectively improves the noise robustness of the end-to-end ASR model.},\n  keywords = {Training;Speech recognition;Signal processing;Noise robustness;Noise measurement;Task analysis;Speech processing;Attention mechanism;noise robustness;speech recognition;deep neural networks},\n  doi = {10.23919/Eusipco47968.2020.9287488},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000311.pdf},\n}\n\n
@InProceedings{9287489,\n  author = {M. Atashi and P. Malekzadeh and M. Salimibeni and Z. Hajiakhondi-Meybodi and K. N. Plataniotis and A. Mohammadi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Orientation-Matched Multiple Modeling for RSSI-based Indoor Localization via BLE Sensors},\n  year = {2020},\n  pages = {1702-1706},\n  abstract = {Internet of Things (IoT) has penetrated different aspects of our modern life where smart sensors enabled with Bluetooth Low Energy (BLE) are deployed increasingly within our surrounding indoor environments. BLE-based localization is, typically, performed based on Received Signal Strength Indicator (RSSI), which suffers from different drawbacks due to its significant fluctuations. In this paper, we focus on a multiplemodel estimation framework for analyzing and addressing effects of orientation of a BLE-enabled device on indoor localization accuracy. The fusion unit of the proposed method would merge orientation estimated by RSSI values and heading estimated by Inertial Measurement Unit (IMU) sensors to gain higher accuracy in orientation classification. In contrary to existing RSSIbased solutions that use a single path-loss model, the proposed framework consists of eight orientation-matched path loss models coupled with a multi-sensor and data-driven classification model that estimates the orientation of a hand-held device with high accuracy of 99%. By estimating the orientation, we could mitigate the effect of orientation on the RSSI values and consequently improve RSSI-based distance estimates. In particular, the proposed data-driven and multiple-model framework is constructed based on over 10 million RSSI values and IMU sensor data collected via an implemented LBS platform.},\n  keywords = {Performance evaluation;Measurement units;Fluctuations;Estimation;Sensor fusion;Received signal strength indicator;Internet of Things;IMU;Pathloss model;Classification},\n  doi = {10.23919/Eusipco47968.2020.9287489},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001702.pdf},\n}\n\n
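The framework above maintains several orientation-matched path-loss models. As background only, here is a single textbook log-distance path-loss model and the corresponding RSSI-to-distance inversion; the reference power, path-loss exponent, and measurement below are illustrative assumptions, not values from the paper.

import numpy as np

def rssi_from_distance(d, rssi_at_1m=-59.0, path_loss_exponent=2.0):
    """Log-distance path-loss model: RSSI(d) = RSSI(1 m) - 10 n log10(d)."""
    return rssi_at_1m - 10.0 * path_loss_exponent * np.log10(d)

def distance_from_rssi(rssi, rssi_at_1m=-59.0, path_loss_exponent=2.0):
    """Invert the same model to estimate distance (in metres) from a measured RSSI."""
    return 10.0 ** ((rssi_at_1m - rssi) / (10.0 * path_loss_exponent))

# toy usage: a noisy RSSI reading taken at 3 m
measured = rssi_from_distance(3.0) + np.random.default_rng(4).normal(scale=2.0)
print("estimated distance (m):", round(distance_from_rssi(measured), 2))
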
@InProceedings{9287490,\n  author = {G. Feng and J. {Gerald Quirk} and C. Heiselman and P. M. Djurić},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Estimation of Consecutively Missed Samples in Fetal Heart Rate Recordings},\n  year = {2020},\n  pages = {1080-1084},\n  abstract = {During labor, fetal heart rate (FHR) is monitored externally using Doppler ultrasound. This is done continuously, but for various reasons (e.g., fetal or maternal movements) the system does not record any samples for varying periods of time. In many settings, it would be quite beneficial to estimate the missing samples. In this paper, we propose a (deep) Gaussian process-based approach for estimation of consecutively missing samples in FHR recordings. The method relies on similarities in the state space and on exploiting the concept of attractor manifolds. The proposed approach was tested on a short segment of real FHR recordings. The experimental results indicate that the proposed approach is able to provide more reliable results in comparison to several interpolation methods that are commonly applied for processing of FHR signals.},\n  keywords = {biomedical ultrasonics;cardiology;Gaussian processes;interpolation;medical signal processing;obstetrics;patient monitoring;maternal movements;Doppler ultrasound;fetal heart rate recordings;consecutively missed samples;FHR signals;FHR recordings;missing samples;Gaussian process-based approach;Manifolds;Ultrasonic imaging;Fetal heart rate;Estimation;Signal processing;Reliability;Monitoring;Fetal heart rate;deep Gaussian processes;attractor;state space;consecutively missed samples},\n  doi = {10.23919/Eusipco47968.2020.9287490},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001080.pdf},\n}\n\n
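Gaussian-process regression is the generic engine behind the (deep) GP gap-filling described above; the attractor/state-space embedding of the paper is not reproduced here. Below is only a plain single-layer GP posterior mean with an RBF kernel over time, where the kernel length-scale, noise level, and the synthetic heart-rate-like trace are illustrative assumptions.

import numpy as np

def rbf_kernel(t1, t2, length_scale=5.0, variance=1.0):
    d = t1[:, None] - t2[None, :]
    return variance * np.exp(-0.5 * (d / length_scale) ** 2)

def gp_fill(t_obs, y_obs, t_missing, noise=0.1):
    """Posterior mean of a GP (after removing the sample mean) at the missing time stamps."""
    mu = y_obs.mean()
    K = rbf_kernel(t_obs, t_obs) + noise**2 * np.eye(len(t_obs))
    Ks = rbf_kernel(t_missing, t_obs)
    return mu + Ks @ np.linalg.solve(K, y_obs - mu)

# toy usage: a smooth heart-rate-like trace with a 10-sample gap
t = np.arange(100.0)
y = 140 + 5 * np.sin(2 * np.pi * t / 40)          # synthetic baseline around 140 bpm
gap = np.arange(45, 55)
keep = np.setdiff1d(t.astype(int), gap)
y_hat = gp_fill(t[keep], y[keep], t[gap])
print("max reconstruction error in the gap (bpm):", np.max(np.abs(y_hat - y[gap])).round(3))
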
@InProceedings{9287491,\n  author = {Z. Yang and X. Wu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Forward-Backward Hankel Matrix Fitting for Spectral Super-Resolution},\n  year = {2020},\n  pages = {1886-1890},\n  abstract = {Hankel-based approaches form an important class of methods for line spectral estimation within the recent spectral super-resolution framework. However, they suffer from the fundamental limitation that their estimated signal poles do not lie on the unit circle in general, causing difficulties of physical interpretation and performance loss. In this paper, we present a modified Hankel approach called forward-backward Hankel matrix fitting (FB-Hankel) that can be implemented by simply modifying the existing algorithms. We show analytically that the new approach has great potential to restrict the estimated poles on the unit circle. Numerical results are provided that corroborate our analysis and demonstrate the advantage of FB-Hankel in improving the estimation accuracy. 1},\n  keywords = {Fitting;Estimation;Signal processing algorithms;Europe;Numerical simulation;Matrix decomposition;Signal resolution;Forward-backward Hankel matrix fitting;line spectral estimation;spectral super-resolution;Vandermonde decomposition;Kronecker’s theorem},\n  doi = {10.23919/Eusipco47968.2020.9287491},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001886.pdf},\n}\n\n
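The forward-backward idea referenced above augments the data Hankel matrix with its conjugated, row-and-column-reversed counterpart, which for sums of undamped sinusoids shares the same column space and therefore the same low rank. Below is a minimal sketch of that generic augmentation only; the fitting/optimization stage of FB-Hankel is not shown, and the signal and matrix sizes are illustrative assumptions.

import numpy as np
from scipy.linalg import hankel

def fb_hankel(x, n_rows):
    """Stack the forward Hankel matrix of x with its backward (exchanged, conjugated) version."""
    n_cols = len(x) - n_rows + 1
    H = hankel(x[:n_rows], x[n_rows - 1:])           # forward data matrix (n_rows x n_cols)
    J_r = np.eye(n_rows)[::-1]                       # row exchange matrix
    J_c = np.eye(n_cols)[::-1]                       # column exchange matrix
    H_b = J_r @ np.conj(H) @ J_c                     # backward block used in forward-backward averaging
    return np.vstack([H, H_b])

# toy usage: two noiseless complex sinusoids keep the stacked matrix at rank 2
n = np.arange(64)
x = np.exp(2j * np.pi * 0.1 * n) + 0.7 * np.exp(2j * np.pi * 0.23 * n)
Hfb = fb_hankel(x, n_rows=20)
print(Hfb.shape, "numerical rank:", np.linalg.matrix_rank(Hfb, tol=1e-8))
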
@InProceedings{9287492,\n  author = {H. Zuo and P. N. Samarasinghe and T. D. Abhayapala},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Intensity Based Soundfield Reproduction over Multiple Sweet Spots Using an Irregular Loudspeaker Array},\n  year = {2020},\n  pages = {486-490},\n  abstract = {Intensity based soundfield reproduction methods are shown to provide impressive human perception of sound localization. However, most of the previous works in this domain either focus on a single sweet spot for the listener, or are constrained to a regular loudspeaker geometry, which is difficult to implement in real-world applications. This paper addresses both of the above challenges. We propose an intensity matching technique to optimally reproduce sound intensity at multiple sweet spots using an irregular loudspeaker array. The performance of the proposed method is evaluated by comparing it with the pressure and velocity matching method through numerical simulations and perceptual experiments. The results show that the proposed method has an improved performance.},\n  keywords = {Loudspeakers;Geometry;Array signal processing;Europe;Numerical simulation;Harmonic analysis;Sound intensity;soundfield reproduction;multiple sweet spots;irregular loudspeaker array},\n  doi = {10.23919/Eusipco47968.2020.9287492},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000486.pdf},\n}\n\n
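Pressure (and velocity) matching, the baseline that the intensity-matching method above is compared against, reduces at a single frequency to a regularized least-squares solve for loudspeaker weights given acoustic transfer functions. The sketch below uses random placeholder transfer functions purely for illustration; a real system would use measured or modeled ones, and the intensity-matching formulation of the paper is not reproduced.

import numpy as np

def pressure_matching_weights(G, p_desired, reg=1e-3):
    """Regularized least squares: minimize ||G w - p_desired||^2 + reg * ||w||^2."""
    n_speakers = G.shape[1]
    A = G.conj().T @ G + reg * np.eye(n_speakers)
    return np.linalg.solve(A, G.conj().T @ p_desired)

# toy usage: 16 control points, 8 loudspeakers, one frequency bin
rng = np.random.default_rng(6)
G = rng.standard_normal((16, 8)) + 1j * rng.standard_normal((16, 8))   # placeholder transfer matrix
p_des = rng.standard_normal(16) + 1j * rng.standard_normal(16)         # desired pressures at control points
w = pressure_matching_weights(G, p_des)
print("residual pressure error:", np.linalg.norm(G @ w - p_des).round(3))
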
@InProceedings{9287493,\n  author = {A. Mazarguil and L. Oudre and N. Vayatis},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Localized Interpolation for Graph Signals},\n  year = {2020},\n  pages = {2160-2164},\n  abstract = {Graph structure is a powerful framework for the characterization of interaction between entities. In the context of graph signal processing, interpolation methods are based on the smoothness assumption that assumes that neighboring nodes present similar values. However, in several contexts, the strong relationship between two connected nodes may express different behaviour. In this paper, we propose a graph signal interpolation algorithm that uses a graph localization penalization on the reconstruction weights. These weights are learned from the data, thus allowing the use of signal anti-correlation on connected nodes in order to perform a more robust interpolation. The results displayed in the paper show that our approach is relevant when dealing with real data, for both smooth and non-smooth signals.},\n  keywords = {Interpolation;Correlation;Signal processing algorithms;Europe;Signal processing;Graph Signal Processing;interpolation;missing data;imputation},\n  doi = {10.23919/Eusipco47968.2020.9287493},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002160.pdf},\n}\n\n
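The paper above contrasts its learned, localized reconstruction weights with the usual smoothness assumption. For orientation, here is only that standard smoothness-based baseline: fill unknown node values by minimizing the Laplacian quadratic form x^T L x with the known values fixed (harmonic interpolation). The graph and data are illustrative assumptions; the authors' penalized-weight method is not reproduced.

import numpy as np

def smooth_interpolate(W, values, known_mask):
    """Harmonic interpolation: solve L_uu x_u = -L_uk x_k for the unknown entries."""
    L = np.diag(W.sum(axis=1)) - W
    u = ~known_mask
    x = values.astype(float).copy()
    x[u] = np.linalg.solve(L[np.ix_(u, u)], -L[np.ix_(u, known_mask)] @ values[known_mask])
    return x

# toy usage: a 6-node path graph with the two middle nodes missing
W = np.zeros((6, 6))
for i in range(5):
    W[i, i + 1] = W[i + 1, i] = 1.0
values = np.array([0.0, 1.0, np.nan, np.nan, 4.0, 5.0])
known = ~np.isnan(values)
print(smooth_interpolate(W, np.nan_to_num(values), known))   # missing entries filled by graph smoothness
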
@InProceedings{9287494,\n  author = {P. M. R. {de Oliveira} and V. Zarzoso and C. A. R. Fernandes},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Coupled Tensor Model of Atrial Fibrillation ECG},\n  year = {2020},\n  pages = {915-919},\n  abstract = {Atrial fibrillation (AF) is the most frequent cardiac arrhythmia diagnosed in clinical practice, identified by an uncoordinated and irregular atrial depolarization. However, its electrophysiological mechanisms are still not clearly understood, increasing the intensive clinical research into this challenging cardiac condition in the past few years. The noninvasive extraction of the atrial activity (AA) from multi-lead electrocardiogram (ECG) recordings by signal processing techniques has helped in better understanding this complex arrhythmia. In particular, tensor decomposition techniques have proven to be powerful tools in this task, overcoming the limitations of matrix factorization methods. Exploring the spatial as well as the temporal diversity of ECG recordings, this contribution puts forward a novel noninvasive AA extraction method that models consecutive AF ECG segments as a coupled block-term tensor decomposition, assuming that they share the same spatial signatures. Experiments on synthetic and real data, the latter acquired from persistent AF patients, validate the proposed coupled tensor approach, which provides satisfactory performance with reduced computational cost.},\n  keywords = {diseases;electrocardiography;matrix decomposition;medical signal detection;medical signal processing;tensors;tensor model;atrial fibrillation ECG;frequent cardiac arrhythmia;uncoordinated depolarization;irregular atrial depolarization;electrophysiological mechanisms;cardiac condition;noninvasive extraction;atrial activity;multilead electrocardiogram recordings;tensor decomposition techniques;matrix factorization methods;ECG recordings;coupled block-term tensor decomposition;persistent AF patients;coupled tensor approach;noninvasive AA extraction method;Tensors;Source separation;Computational modeling;Atrial fibrillation;Electrocardiography;Tools;Task analysis;Atrial Fibrillation;Blind Source Separation;Block Term Decomposition;Coupled Tensor Model;Electrocardiogram},\n  doi = {10.23919/Eusipco47968.2020.9287494},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000915.pdf},\n}\n\n
@InProceedings{9287495,\n  author = {J. Akhtar},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Training of Neural Network Target Detectors Mentored by SO-CFAR},\n  year = {2020},\n  pages = {1522-1526},\n  abstract = {A desirable objective in radar detection theory is the ability to detect and recognize targets in intricate scenarios such as in the presence of clutter or multiple closely spaced targets. Herein we propose the use of artificial neural networks for radar target detection where Smallest Of (SO)-CFAR detector is used as the basis for neural network training. The SO-CFAR detector has exceptional good detectional capabilities, however, suffers from a very high false alarm rate and has therefore only been given limited attention in the literature. We show that by appropriately training a neural network on SO-CFAR detections it is possible to significantly lower the false alarm rate with only marginal decrease in probability of detection.},\n  keywords = {Training;Target recognition;Two dimensional displays;Radar detection;Detectors;Artificial neural networks;Clutter;Constant false alarm rate (CFAR);radar;detection;Swerling targets;neural network},\n  doi = {10.23919/Eusipco47968.2020.9287495},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001522.pdf},\n}\n\n
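The network above is trained on decisions produced by a Smallest-Of CFAR detector. For context, below is only the plain SO-CFAR sliding-window rule itself; the window sizes and scaling factor are illustrative assumptions, and the neural mentoring stage is not shown.

import numpy as np

def so_cfar(power, n_train=16, n_guard=2, alpha=8.0):
    """Smallest-Of CFAR: threshold each cell by the smaller of the leading/lagging training-window means."""
    n = len(power)
    detections = np.zeros(n, dtype=bool)
    for i in range(n_train + n_guard, n - n_train - n_guard):
        lead = power[i - n_guard - n_train:i - n_guard].mean()
        lag = power[i + n_guard + 1:i + n_guard + 1 + n_train].mean()
        detections[i] = power[i] > alpha * min(lead, lag)
    return detections

# toy usage: an exponential noise floor with two strong point targets
rng = np.random.default_rng(7)
p = rng.exponential(size=512)
p[[100, 300]] += 40.0
print("detected cells:", np.flatnonzero(so_cfar(p)))
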
@InProceedings{9287496,\n  author = {N. Fayyazifar},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {An Accurate CNN Architecture For Atrial Fibrillation Detection Using Neural Architecture Search},\n  year = {2020},\n  pages = {1135-1139},\n  abstract = {The accurate and timely diagnosis of Atrial Fibrillation (AF), a common condition presenting as an abnormal heartbeat that often results in serious disease, would assist in reducing morbidity. In this study, we make use of Electrocardiogram (ECG) data in order to create an automatic method for detecting AF. For this purpose, we employed a neural architecture search (NAS) algorithm. The efficiency of NAS algorithms on image classification tasks has been well established, however, studies on using NAS methods for ECG classification are very limited. Our experiments show that our automatically designed neural model performs very well and arguably outperforms currently available deep learning models. This model achieved the accuracy and F1-score of 84.15%± 0.6 and 82.45± 0.2 on the publicly available subset of PhysioNet challenge 2017 dataset, respectively.},\n  keywords = {Deep learning;Time series analysis;Atrial fibrillation;Signal processing algorithms;Electrocardiography;Classification algorithms;Task analysis;atrial fibrillation;neural architecture search;CNN architecture;deep learning},\n  doi = {10.23919/Eusipco47968.2020.9287496},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001135.pdf},\n}\n\n
\n
\n\n\n
\n The accurate and timely diagnosis of Atrial Fibrillation (AF), a common condition presenting as an abnormal heartbeat that often results in serious disease, would assist in reducing morbidity. In this study, we make use of Electrocardiogram (ECG) data in order to create an automatic method for detecting AF. For this purpose, we employed a neural architecture search (NAS) algorithm. The efficiency of NAS algorithms on image classification tasks has been well established; however, studies on using NAS methods for ECG classification are very limited. Our experiments show that our automatically designed neural model performs very well and arguably outperforms currently available deep learning models. This model achieved an accuracy of 84.15% ± 0.6 and an F1-score of 82.45 ± 0.2 on the publicly available subset of the PhysioNet Challenge 2017 dataset.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fully Automatic Blind Color Deconvolution of Histological Images Using Super Gaussians.\n \n \n \n \n\n\n \n Pérez-Bueno, F.; Vega, M.; Naranjo, V.; Molina, R.; and Katsaggelos, A. K.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1254-1258, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"FullyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287497,\n  author = {F. Pérez-Bueno and M. Vega and V. Naranjo and R. Molina and A. K. Katsaggelos},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Fully Automatic Blind Color Deconvolution of Histological Images Using Super Gaussians},\n  year = {2020},\n  pages = {1254-1258},\n  abstract = {In digital pathology blind color deconvolution techniques separate multi-stained images into single stained bands. These band images are then used for image analysis and classification purposes. This paper proposes the use of Super Gaussian priors for each stain band together with the similarity to a given reference matrix for the color vectors. Variational inference and an evidence lower bound are then utilized to automatically estimate the latent variables and model parameters. The proposed methodology is tested on real images and compared to classical and state-of-the-art methods for histopathological blind image color deconvolution. Its use as a preprocessing step in prostate cancer classification is also analysed.},\n  keywords = {biological tissues;biomedical optical imaging;cancer;deconvolution;image classification;image colour analysis;image segmentation;medical image processing;color vectors;histopathological blind image color deconvolution;histological images;Super gaussians;single stained bands;band images;image analysis;image classification;digital pathology blind color deconvolution techniques;superGaussian priors;Deconvolution;Image color analysis;Image edge detection;Signal processing;Prostate cancer;Image reconstruction;Image classification;Blind color deconvolution;histopathological images;variational Bayes;Super Gaussian},\n  doi = {10.23919/Eusipco47968.2020.9287497},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001254.pdf},\n}\n\n
\n
\n\n\n
\n In digital pathology, blind color deconvolution techniques separate multi-stained images into single-stain bands. These band images are then used for image analysis and classification purposes. This paper proposes the use of Super Gaussian priors for each stain band, together with the similarity to a given reference matrix for the color vectors. Variational inference and an evidence lower bound are then utilized to automatically estimate the latent variables and model parameters. The proposed methodology is tested on real images and compared to classical and state-of-the-art methods for histopathological blind image color deconvolution. Its use as a preprocessing step in prostate cancer classification is also analysed.\n
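For orientation, the classical non-blind colour deconvolution that the blind method generalizes can be written in a few lines: optical densities follow the Beer-Lambert law and are projected onto a fixed reference stain matrix (the well-known Ruifrok-Johnston H&E vectors are used below as an assumed reference). The paper's contribution is to refine this matrix and the stain bands blindly with Super Gaussian priors and variational inference, which is not shown here.

```python
import numpy as np

# Reference stain colour vectors in optical-density space (Ruifrok & Johnston).
# Each column is the RGB absorbance signature of one stain (H and E).
M = np.array([[0.650, 0.072],
              [0.704, 0.990],
              [0.286, 0.105]])

def color_deconvolve(rgb, ref=M):
    """Separate an RGB histology image into per-stain concentration maps."""
    rgb = np.clip(rgb.astype(float), 1, 255)            # avoid log(0)
    od = -np.log10(rgb / 255.0)                          # Beer-Lambert law
    h, w, _ = od.shape
    conc = np.linalg.pinv(ref) @ od.reshape(-1, 3).T     # least-squares unmixing
    return conc.T.reshape(h, w, ref.shape[1])

# Toy usage on a random 32x32 RGB patch.
patch = (np.random.default_rng(0).random((32, 32, 3)) * 255).astype(np.uint8)
print(color_deconvolve(patch).shape)   # (32, 32, 2): haematoxylin and eosin maps
```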
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generating EEG features from Acoustic features.\n \n \n \n \n\n\n \n Krishna, G.; Tran, C.; Carnahan, M.; Han, Y.; and Tewfik, A. H.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1100-1104, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"GeneratingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287498,\n  author = {G. Krishna and C. Tran and M. Carnahan and Y. Han and A. H. Tewfik},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Generating EEG features from Acoustic features},\n  year = {2020},\n  pages = {1100-1104},\n  abstract = {In this paper we demonstrate predicting electroencephalography (EEG) features from acoustic features using recurrent neural network (RNN) based regression model and generative adversarial network (GAN). We predict various types of EEG features from acoustic features. We compare our results with the previously studied problem on speech synthesis using EEG and our results demonstrate that EEG features can be generated from acoustic features with lower root mean square error (RMSE), normalized RMSE values compared to generating acoustic features from EEG features (ie: speech synthesis using EEG) when tested using the same data sets.},\n  keywords = {Recurrent neural networks;Signal processing;Generative adversarial networks;Electroencephalography;Acoustics;Speech synthesis;Root mean square;electroencephalography (EEG);deep learning},\n  doi = {10.23919/Eusipco47968.2020.9287498},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001100.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we demonstrate predicting electroencephalography (EEG) features from acoustic features using a recurrent neural network (RNN) based regression model and a generative adversarial network (GAN). We predict various types of EEG features from acoustic features. We compare our results with the previously studied problem of speech synthesis using EEG, and our results demonstrate that EEG features can be generated from acoustic features with lower root mean square error (RMSE) and normalized RMSE values than when generating acoustic features from EEG features (i.e., speech synthesis using EEG), when tested on the same data sets.\n
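A minimal sketch of the RNN regression branch described above, assuming PyTorch; the feature dimensionalities (13 acoustic and 30 EEG features) are placeholders and the GAN variant is omitted.

```python
import torch
import torch.nn as nn

class Acoustic2EEG(nn.Module):
    """GRU regression model mapping acoustic feature frames to EEG features."""
    def __init__(self, acoustic_dim=13, eeg_dim=30, hidden=128):
        super().__init__()
        self.rnn = nn.GRU(acoustic_dim, hidden, num_layers=2, batch_first=True)
        self.out = nn.Linear(hidden, eeg_dim)

    def forward(self, x):                  # x: (batch, frames, acoustic_dim)
        h, _ = self.rnn(x)
        return self.out(h)                 # (batch, frames, eeg_dim)

model = Acoustic2EEG()
mfcc = torch.randn(4, 200, 13)             # dummy acoustic feature sequences
pred_eeg = model(mfcc)
loss = nn.MSELoss()(pred_eeg, torch.randn(4, 200, 30))   # RMSE = sqrt(MSE)
loss.backward()
```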
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Robust Period Estimation of Automated Cutting Systems by Improved Autocorrelation Linear Regression Techniques.\n \n \n \n\n\n \n McAtear, A.; Gielen, R.; and Madhu, N.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1901-1905, Aug 2020. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287499,\n  author = {A. McAtear and R. Gielen and N. Madhu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust Period Estimation of Automated Cutting Systems by Improved Autocorrelation Linear Regression Techniques},\n  year = {2020},\n  pages = {1901-1905},\n  abstract = {Condition monitoring is an important asset in the industry to improve the safety and efficiency of the production chain. However, in heavy machinery – such as edge trimmers in steel mills – it is often impractical and unsafe to install intrusive sensors to get the data needed for condition monitoring. Non-intrusive monitoring techniques based, e.g., on acoustic data captured by microphones placed in the vicinity of the assembly being monitored are attractive options. Our application deals with the acoustic monitoring of rotational blades cutting steel strips at high speeds. Knowing the correct period of the cutting process is important for quality evaluation purposes. We propose two novel robust methods to estimate the periodicity based on the audio captured by a microphone near the blades. One is an improved autocorrelation function and the other is based on linear regression, both using incorporating an novel test for the correctness of the estimated period. We compare our methods against the standard autocorrelation-based periodicity measurement techniques on real data recordings. The proposed method estimates the correct period about 87% of the time, compared to an accuracy of only 51% using standard periodicity measurement approaches.},\n  keywords = {Linear regression;Signal processing algorithms;Estimation;Steel;Monitoring;Standards;Microphones;Period estimation;linear regression;autocor-relation;condition monitoring;non-intrusive system parameter estimation},\n  doi = {10.23919/Eusipco47968.2020.9287499},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n Condition monitoring is an important asset in industry to improve the safety and efficiency of the production chain. However, in heavy machinery – such as edge trimmers in steel mills – it is often impractical and unsafe to install intrusive sensors to obtain the data needed for condition monitoring. Non-intrusive monitoring techniques based, e.g., on acoustic data captured by microphones placed in the vicinity of the assembly being monitored are attractive options. Our application deals with the acoustic monitoring of rotational blades cutting steel strips at high speeds. Knowing the correct period of the cutting process is important for quality evaluation purposes. We propose two novel robust methods to estimate the periodicity based on the audio captured by a microphone near the blades. One is an improved autocorrelation function and the other is based on linear regression, both incorporating a novel test for the correctness of the estimated period. We compare our methods against standard autocorrelation-based periodicity measurement techniques on real data recordings. The proposed approach estimates the correct period about 87% of the time, compared to an accuracy of only 51% using standard periodicity measurement approaches.\n
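For context, the standard autocorrelation-based period estimator that the paper improves upon can be sketched as follows; the improved ACF, the regression-based estimator, and the correctness test are not reproduced, and the search range is a hypothetical parameter.

```python
import numpy as np

def estimate_period(x, fs, min_period=0.05, max_period=2.0):
    """Estimate the dominant period of a signal from its autocorrelation peak."""
    x = x - x.mean()
    acf = np.correlate(x, x, mode='full')[len(x) - 1:]   # non-negative lags
    acf /= acf[0]                                        # normalise to zero lag
    lo = int(min_period * fs)
    hi = min(int(max_period * fs), len(acf) - 1)
    lag = lo + np.argmax(acf[lo:hi])                     # strongest repetition
    return lag / fs, acf[lag]

# Toy usage: a periodic impact train (period 0.4 s) buried in noise.
fs = 8000
t = np.arange(0, 4, 1 / fs)
x = (np.sin(2 * np.pi * t / 0.4) > 0.999).astype(float) + 0.1 * np.random.randn(len(t))
period, peak = estimate_period(x, fs)
print(f"estimated period: {period:.3f} s (peak value {peak:.2f})")
```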
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ambisonic Coding with Spatial Image Correction.\n \n \n \n \n\n\n \n MAHÉ, P.; RAGOT, S.; MARCHAND, S.; and DANIEL, J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 471-475, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AmbisonicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287500,\n  author = {P. MAHÉ and S. RAGOT and S. MARCHAND and J. DANIEL},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Ambisonic Coding with Spatial Image Correction},\n  year = {2020},\n  pages = {471-475},\n  abstract = {We present a new method to enhance multi-mono coding of ambisonic audio signals. In multi-mono coding, each component is represented independently by a mono core codec, this may introduce strong spatial artifacts. The proposed method is based on the correction of spatial images derived from the sound-field power map of original and coded ambisonic signals. The correction metadata is transmitted as side information to restore the spatial image by post-processing. The performance of the proposed method is compared against naive multi-mono coding (with no side information) at the same overall bitrate. Experimental results are provided for the case of First-Order Ambisonic (FOA) signals and two mono core codecs: EVS and Opus. The proposed method is shown to provide on average some audio quality improvement for both core codecs. ANOVA results are provided as a complementary analysis.},\n  keywords = {Codecs;Image coding;Bit rate;Signal processing;Encoding;MONOS devices;Analysis of variance;ambisonics;audio coding;spatial audio},\n  doi = {10.23919/Eusipco47968.2020.9287500},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000471.pdf},\n}\n\n
\n
\n\n\n
\n We present a new method to enhance multi-mono coding of ambisonic audio signals. In multi-mono coding, each component is represented independently by a mono core codec, which may introduce strong spatial artifacts. The proposed method is based on the correction of spatial images derived from the sound-field power map of the original and coded ambisonic signals. The correction metadata is transmitted as side information to restore the spatial image by post-processing. The performance of the proposed method is compared against naive multi-mono coding (with no side information) at the same overall bitrate. Experimental results are provided for the case of First-Order Ambisonic (FOA) signals and two mono core codecs: EVS and Opus. The proposed method is shown to provide on average some audio quality improvement for both core codecs. ANOVA results are provided as a complementary analysis.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Weak Speech Supervision: A case study of Dysarthria Severity Classification.\n \n \n \n \n\n\n \n Purohit, M.; Parmar, M.; Patel, M.; Malaviya, H.; and Patii, H. A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 101-105, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"WeakPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287502,\n  author = {M. Purohit and M. Parmar and M. Patel and H. Malaviya and H. A. Patii},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Weak Speech Supervision: A case study of Dysarthria Severity Classification},\n  year = {2020},\n  pages = {101-105},\n  abstract = {Machine Learning methodologies are making a remarkable contribution, and yielding state-of-the-art results in different speech domains. With this exceptionally significant achievement, a large amount of labeled data is the largest bottleneck in the deployment of these speech systems. To generate massive data, hand-labeling training data is an intensively laborious task. This is problematic for clinical applications where obtaining such data labeled by speech pathologists is expensive and time-consuming. To overcome these problems, we introduce a new paradigm called Weak Speech Supervision (WSS), a first-of-its-kind system that helps users to train state-of-the-art classification models without hand-labeling training data. Users can write labeling functions (i.e., weak rules) to generate weak data from the unlabeled training set. In this paper, we provide the efficiency of this methodology via showing the case study of the severity-based binary classification of dysarthric speech. In WSS, we train a classifier on trusted data (labeled with 100% accuracy) via utilizing the weak data (labeled using weak supervision) to make our classifier model more efficient. Analysis of the proposed methodology is performed on Universal Access (UA) corpus. We got on an average 35.68% and 43.83% relative improvement in terms of accuracy and F1-score w.r.t. baselines, respectively.},\n  keywords = {Training;Training data;Machine learning;Signal processing;Data models;Task analysis;Speech processing;Dysarthria;Severity-based Classification;Data Scarcity;Weak Supervision;CNN},\n  doi = {10.23919/Eusipco47968.2020.9287502},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000101.pdf},\n}\n\n
\n
\n\n\n
\n Machine learning methodologies are making remarkable contributions and yielding state-of-the-art results in different speech domains. Despite these achievements, the need for large amounts of labeled data remains the largest bottleneck in the deployment of such speech systems, as hand-labeling training data at scale is an intensively laborious task. This is problematic for clinical applications where obtaining data labeled by speech pathologists is expensive and time-consuming. To overcome these problems, we introduce a new paradigm called Weak Speech Supervision (WSS), a first-of-its-kind system that helps users to train state-of-the-art classification models without hand-labeling training data. Users can write labeling functions (i.e., weak rules) to generate weak data from the unlabeled training set. In this paper, we demonstrate the effectiveness of this methodology through a case study on the severity-based binary classification of dysarthric speech. In WSS, we train a classifier on trusted data (labeled with 100% accuracy) while utilizing the weak data (labeled using weak supervision) to make the classifier more effective. Analysis of the proposed methodology is performed on the Universal Access (UA) corpus. We obtain average relative improvements of 35.68% and 43.83% over the baselines in terms of accuracy and F1-score, respectively.\n
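A toy illustration of the labeling-function idea, with hypothetical acoustic features, thresholds, and an abstain value of -1; the paper's actual rules and the downstream CNN trained on trusted plus weak data are not shown.

```python
import numpy as np

# Hypothetical weak rules for "severe" (1) vs "mild" (0) dysarthria;
# each returns a label or -1 (abstain).  Feature names and thresholds are placeholders.
def lf_slow_speaking_rate(feat):          # words per minute
    return 1 if feat["wpm"] < 80 else -1

def lf_low_intelligibility(feat):
    return 1 if feat["intelligibility"] < 0.5 else 0

def lf_long_pauses(feat):
    return 1 if feat["pause_ratio"] > 0.4 else -1

LABELING_FUNCTIONS = [lf_slow_speaking_rate, lf_low_intelligibility, lf_long_pauses]

def weak_label(feat):
    """Majority vote over non-abstaining labeling functions (None if all abstain)."""
    votes = [v for v in (lf(feat) for lf in LABELING_FUNCTIONS) if v != -1]
    if not votes:
        return None
    return int(np.round(np.mean(votes)))

print(weak_label({"wpm": 70, "intelligibility": 0.4, "pause_ratio": 0.5}))  # -> 1
```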
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptation of cluster analysis methods to optimize a biomechanical motion model of humans in a nursing bed.\n \n \n \n \n\n\n \n Demmer, J.; Kitzig, A.; Stockmanns, G.; Naroska, E.; Viga, R.; and Grabmaier, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1323-1327, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287503,\n  author = {J. Demmer and A. Kitzig and G. Stockmanns and E. Naroska and R. Viga and A. Grabmaier},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptation of cluster analysis methods to optimize a biomechanical motion model of humans in a nursing bed},\n  year = {2020},\n  pages = {1323-1327},\n  abstract = {The paper considers the optimization of a Hidden-Markov Model (HMM) based method for the generation of averaged motion sequences. To create averaged motion sequences, motion sequences of different test persons were originally recorded with a motion capture system (MoCap system) and then averaged using an HMM approach. The resulting averaged data sets, however, partly showed serious motion artifacts and uncoordinated intermediate movements, especially in the extremities. The aim of this work was to combine only movements with similar courses in the extremities by a suitable cluster analysis. For each test person, model body descriptions of 21 body elements are available, each of which is represented in three-dimensional time series. For optimization, the MoCap data are first compared using time warp edit distance (TWED) and clustered using an agglomerative hierarchical procedure. Finally, the data of the resulting clusters are used to generate new averaged motion sequences using the HMM approach. The resulting averaged data can be used, for example, in a simulation in a multilevel biomechanical model.},\n  keywords = {Biomechanics;Adaptation models;Biological system modeling;Hidden Markov models;Data models;Optimization;Extremities;Clusteranalysis;model driven development;biomechanical motion model;averaging motion pattern;TWED},\n  doi = {10.23919/Eusipco47968.2020.9287503},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001323.pdf},\n}\n\n
\n
\n\n\n
\n The paper considers the optimization of a Hidden Markov Model (HMM)-based method for the generation of averaged motion sequences. To create averaged motion sequences, motion sequences of different test persons were originally recorded with a motion capture system (MoCap system) and then averaged using an HMM approach. The resulting averaged data sets, however, partly showed serious motion artifacts and uncoordinated intermediate movements, especially in the extremities. The aim of this work was to combine only movements with similar courses in the extremities by a suitable cluster analysis. For each test person, model body descriptions of 21 body elements are available, each of which is represented as a three-dimensional time series. For optimization, the MoCap data are first compared using the time warp edit distance (TWED) and clustered using an agglomerative hierarchical procedure. Finally, the data of the resulting clusters are used to generate new averaged motion sequences using the HMM approach. The resulting averaged data can be used, for example, in a simulation in a multilevel biomechanical model.\n
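A compact sketch of the clustering step, assuming SciPy; a plain Euclidean distance between flattened trajectories stands in for the time warp edit distance (TWED) used in the paper, and the HMM averaging stage is not shown.

```python
import numpy as np
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import squareform

def cluster_motion_sequences(sequences, n_clusters=3):
    """Agglomerative clustering of equal-length 3-D motion trajectories."""
    seqs = np.stack([s.reshape(-1) for s in sequences])               # (n, T*3)
    dists = np.linalg.norm(seqs[:, None, :] - seqs[None, :, :], axis=-1)
    z = linkage(squareform(dists, checks=False), method='average')    # agglomerative tree
    return fcluster(z, t=n_clusters, criterion='maxclust')            # cluster labels

# Toy usage: ten 3-D trajectories of 50 samples each, three well-separated groups.
rng = np.random.default_rng(1)
trajectories = [rng.standard_normal((50, 3)) + shift
                for shift in (0, 0, 0, 5, 5, 5, 5, 10, 10, 10)]
print(cluster_motion_sequences(trajectories))
```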
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Point Cloud Visualization Methods: a Study on Subjective Preferences.\n \n \n \n \n\n\n \n Dumic, E.; Battisti, F.; Carli, M.; and da Silva Cruz , L. A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 595-599, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"PointPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287504,\n  author = {E. Dumic and F. Battisti and M. Carli and L. A. {da Silva Cruz}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Point Cloud Visualization Methods: a Study on Subjective Preferences},\n  year = {2020},\n  pages = {595-599},\n  abstract = {The availability of 3D range scanners and RGB-D cameras is pushing the spreading of point cloud-based applications. One of the main issues of this technology, in applications where the end user is a human observer, is the presentation of the data. Three-dimensional visual information represented as point clouds can be displayed in several ways, e.g. as sets of points with varying point size or as a surface rendered using one of several available methods, such as Poisson surface interpolation. Furthermore, to increase the feeling of presence, or immersiveness, novel hardware can be used such as 3D displays and head mounted devices. However, even if 3D-able visualization devices are available, common users are more accustomed to observing visual information displayed on a 2D screen and it is not clear which combination of presentation method and device are preferred by the users. In this contribution we assess the user preference of visualization of point clouds in terms of different rendering devices and methods. A set of subjective experiments is performed, involving point clouds presented as points or rendered surfaces displayed in 2D and 3D displays. The results obtained were analysed to measure user preferences.},\n  keywords = {Geometry;Visualization;Three-dimensional displays;Two dimensional displays;Laboratories;Gaussian distribution;Rendering (computer graphics);Point Clouds;Subjective Quality;Rendering;3D visual representation;Immersiveness},\n  doi = {10.23919/Eusipco47968.2020.9287504},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000595.pdf},\n}\n\n
\n
\n\n\n
\n The availability of 3D range scanners and RGB-D cameras is driving the spread of point-cloud-based applications. One of the main issues of this technology, in applications where the end user is a human observer, is the presentation of the data. Three-dimensional visual information represented as point clouds can be displayed in several ways, e.g., as sets of points with varying point size or as a surface rendered using one of several available methods, such as Poisson surface interpolation. Furthermore, to increase the feeling of presence, or immersiveness, novel hardware can be used, such as 3D displays and head-mounted devices. However, even if 3D-capable visualization devices are available, common users are more accustomed to observing visual information displayed on a 2D screen, and it is not clear which combination of presentation method and device is preferred by users. In this contribution we assess user preferences for the visualization of point clouds in terms of different rendering devices and methods. A set of subjective experiments is performed, involving point clouds presented as points or rendered surfaces displayed on 2D and 3D displays. The results obtained were analysed to measure user preferences.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n ConFuse: Convolutional Transform Learning Fusion Framework For Multi-Channel Data Analysis.\n \n \n \n \n\n\n \n Gupta, P.; Maggu, J.; Majumdar, A.; Chouzenoux, E.; and Chierchia, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1986-1990, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ConFuse:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287506,\n  author = {P. Gupta and J. Maggu and A. Majumdar and E. Chouzenoux and G. Chierchia},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {ConFuse: Convolutional Transform Learning Fusion Framework For Multi-Channel Data Analysis},\n  year = {2020},\n  pages = {1986-1990},\n  abstract = {This work addresses the problem of analyzing multi-channel time series data by proposing an unsupervised fusion framework based on convolutional transform learning. Each channel is processed by a separate 1D convolutional transform; the output of all the channels are fused by a fully connected layer of transform learning. The training procedure takes advantage of the proximal interpretation of activation functions. We apply the developed framework to multi-channel financial data for stock forecasting and trading. We compare our proposed formulation with benchmark deep time series analysis networks. The results show that our method yields considerably better results than those compared against.},\n  keywords = {Training;Deep learning;Convolution;Time series analysis;Europe;Transforms;Forecasting;CNN;transform learning;information fusion;stock forecasting and trading;finance data processing},\n  doi = {10.23919/Eusipco47968.2020.9287506},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001986.pdf},\n}\n\n
\n
\n\n\n
\n This work addresses the problem of analyzing multi-channel time series data by proposing an unsupervised fusion framework based on convolutional transform learning. Each channel is processed by a separate 1D convolutional transform; the outputs of all the channels are fused by a fully connected layer of transform learning. The training procedure takes advantage of the proximal interpretation of activation functions. We apply the developed framework to multi-channel financial data for stock forecasting and trading. We compare our proposed formulation with benchmark deep time series analysis networks. The results show that our method yields considerably better results than those compared against.\n
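A conventional supervised PyTorch sketch of the ConFuse layout (per-channel 1-D convolutions followed by a fully connected fusion layer); the paper instead learns these transforms unsupervised with proximal updates, so this is only an architectural illustration with placeholder sizes.

```python
import torch
import torch.nn as nn

class ChannelFusion(nn.Module):
    """Per-channel 1-D convolutions fused by a fully connected layer."""
    def __init__(self, n_channels=5, seq_len=64, n_filters=8):
        super().__init__()
        self.branches = nn.ModuleList([
            nn.Sequential(nn.Conv1d(1, n_filters, kernel_size=5, padding=2), nn.ReLU())
            for _ in range(n_channels)])
        self.fuse = nn.Linear(n_channels * n_filters * seq_len, 1)   # regression head

    def forward(self, x):                        # x: (batch, n_channels, seq_len)
        feats = [branch(x[:, c:c + 1, :]) for c, branch in enumerate(self.branches)]
        return self.fuse(torch.cat(feats, dim=1).flatten(1))

model = ChannelFusion()
pred = model(torch.randn(2, 5, 64))              # e.g. a next-day price estimate
print(pred.shape)                                # torch.Size([2, 1])
```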
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Particle Filtering Under General Regime Switching.\n \n \n \n \n\n\n \n El-Laham, Y.; Yang, L.; Djurić, P. M.; and Bugallo, M. F.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2378-2382, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ParticlePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287507,\n  author = {Y. El-Laham and L. Yang and P. M. Djurić and M. F. Bugallo},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Particle Filtering Under General Regime Switching},\n  year = {2020},\n  pages = {2378-2382},\n  abstract = {In this paper, we consider a new framework for particle filtering under model uncertainty that operates beyond the scope of Markovian switching systems. Specifically, we develop a novel particle filtering algorithm that applies to general regime switching systems, where the model index is augmented as an unknown time-varying parameter in the system. The proposed approach does not require the use of multiple filters and can maintain a diverse set of particles for each considered model through appropriate choice of the particle filtering proposal distribution. The flexibility of the proposed approach allows for long-term dependencies between the models, which enables its use to a wider variety of real-world applications. We validate the method on a synthetic data experiment and show that it outperforms state-of-the-art multiple model particle filtering approaches that require the use of multiple filters.},\n  keywords = {Uncertainty;Filtering;Switching systems;Heuristic algorithms;Switches;Filtering algorithms;Time-varying systems},\n  doi = {10.23919/Eusipco47968.2020.9287507},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002378.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we consider a new framework for particle filtering under model uncertainty that operates beyond the scope of Markovian switching systems. Specifically, we develop a novel particle filtering algorithm that applies to general regime switching systems, where the model index is augmented as an unknown time-varying parameter in the system. The proposed approach does not require the use of multiple filters and can maintain a diverse set of particles for each considered model through appropriate choice of the particle filtering proposal distribution. The flexibility of the proposed approach allows for long-term dependencies between the models, which enables its use in a wider variety of real-world applications. We validate the method on a synthetic data experiment and show that it outperforms state-of-the-art multiple model particle filtering approaches that require the use of multiple filters.\n
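A minimal bootstrap particle filter in which each particle carries a latent model index, as a sketch of the general idea of augmenting the state with the regime; the two regimes, the switching probability, and all noise levels are hypothetical and do not correspond to the paper's experiments or proposal design.

```python
import numpy as np

rng = np.random.default_rng(0)

def regime_switching_pf(y, n_particles=500, p_switch=0.05):
    """Bootstrap particle filter whose particles carry a latent model index.

    Two hypothetical regimes share a random-walk state but differ in the
    observation model: regime 0 observes x, regime 1 observes 2*x.
    """
    x = rng.standard_normal(n_particles)                  # state particles
    m = rng.integers(0, 2, n_particles)                   # regime-index particles
    estimates = []
    for yt in y:
        # propagate: state random walk; regime may switch with probability p_switch
        x = x + 0.5 * rng.standard_normal(n_particles)
        flip = rng.random(n_particles) < p_switch
        m = np.where(flip, 1 - m, m)
        # weight by the regime-dependent likelihood p(y_t | x_t, m_t)
        pred = np.where(m == 0, x, 2.0 * x)
        w = np.exp(-0.5 * (yt - pred) ** 2)
        w /= w.sum()
        estimates.append((np.sum(w * x), np.sum(w * m)))  # state estimate, P(regime 1)
        # multinomial resampling
        idx = rng.choice(n_particles, n_particles, p=w)
        x, m = x[idx], m[idx]
    return np.array(estimates)

# Toy observations generated under regime 0 (y_t = x_t + noise).
truth = np.cumsum(0.3 * rng.standard_normal(100))
y = truth + 0.5 * rng.standard_normal(100)
print(regime_switching_pf(y)[-1])    # final state estimate and posterior P(regime 1)
```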
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Faster independent low-rank matrix analysis with pairwise updates of demixing vectors.\n \n \n \n \n\n\n \n Nakashima, T.; Scheibler, R.; Wakabayashi, Y.; and Ono, N.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 301-305, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"FasterPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287508,\n  author = {T. Nakashima and R. Scheibler and Y. Wakabayashi and N. Ono},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Faster independent low-rank matrix analysis with pairwise updates of demixing vectors},\n  year = {2020},\n  pages = {301-305},\n  abstract = {In this paper, we present an algorithm for independent low-rank matrix analysis (ILRMA) of three or more sources that is faster than that for conventional ILRMA. In conventional ILRMA, demixing vectors are updated one by one by the iterative projection (IP) method. The update rules of IP are derived from a system of quadratic equations obtained by differentiating the objective function of ILRMA with respect to demixing vectors. This system of quadratic equations is called hybrid exact-approximate joint diagonalization (HEAD) and no closed-form solution is known yet for three or more sources. Recently, a method that can update two demixing vectors simultaneously has been proposed for independent vector analysis. The method is derived by reducing HEAD for two sources to a generalized eigenvalue problem and solving the problem. Furthermore, the pairwise updates have recently been extended to the case of three or more sources. However, the efficacy of the pairwise updates for ILRMA has not yet been investigated. Therefore, in this work, we apply the pairwise updates of demixing vectors to ILRMA. By replacing the update rules of demixing vectors with the proposed pairwise updates, we accelerate the convergence of ILRMA. The experimental results show that the proposed method yields faster convergence and better performance than conventional ILRMA.},\n  keywords = {Signal processing algorithms;Signal processing;Linear programming;IP networks;Acceleration;Iterative methods;Convergence;Blind source separation;independent vector analysis;non-negative matrix factorization;independent low-rank matrix analysis},\n  doi = {10.23919/Eusipco47968.2020.9287508},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000301.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we present an algorithm for independent low-rank matrix analysis (ILRMA) of three or more sources that is faster than conventional ILRMA. In conventional ILRMA, demixing vectors are updated one by one by the iterative projection (IP) method. The update rules of IP are derived from a system of quadratic equations obtained by differentiating the objective function of ILRMA with respect to the demixing vectors. This system of quadratic equations is called hybrid exact-approximate joint diagonalization (HEAD), and no closed-form solution is known yet for three or more sources. Recently, a method that can update two demixing vectors simultaneously has been proposed for independent vector analysis. The method is derived by reducing HEAD for two sources to a generalized eigenvalue problem and solving that problem. Furthermore, the pairwise updates have recently been extended to the case of three or more sources. However, the efficacy of the pairwise updates for ILRMA has not yet been investigated. Therefore, in this work, we apply the pairwise updates of demixing vectors to ILRMA. By replacing the update rules of the demixing vectors with the proposed pairwise updates, we accelerate the convergence of ILRMA. The experimental results show that the proposed method yields faster convergence and better performance than conventional ILRMA.\n
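The core of the pairwise update is the reduction of the two-source HEAD system to a generalized eigenvalue problem; the snippet below only illustrates that algebraic step with random positive-definite stand-ins for the weighted covariance matrices, not the full ILRMA iteration or its normalization steps.

```python
import numpy as np
from scipy.linalg import eigh

# Random positive-definite stand-ins for the two weighted covariance matrices
# that appear in the two-source sub-problem (4 channels here).
rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4)); A = A @ A.T + 4 * np.eye(4)
B = rng.standard_normal((4, 4)); B = B @ B.T + 4 * np.eye(4)

# Solve the generalized eigenvalue problem A v = lambda B v; the extreme
# eigenvectors play the role of the pair of demixing vectors in the update.
eigvals, eigvecs = eigh(A, B)
w1, w2 = eigvecs[:, 0], eigvecs[:, -1]
print(eigvals)
```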
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unbiased FIR Filtering under Bernoulli-Distributed Binary Randomly Delayed and Missing Data.\n \n \n \n \n\n\n \n Uribe-Murcia, K.; Andrade-Lucio, J. A.; Shmaliy, Y. S.; and Xu, Y.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2408-2412, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"UnbiasedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287509,\n  author = {K. Uribe-Murcia and J. A. Andrade-Lucio and Y. S. Shmaliy and Y. Xu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Unbiased FIR Filtering under Bernoulli-Distributed Binary Randomly Delayed and Missing Data},\n  year = {2020},\n  pages = {2408-2412},\n  abstract = {This paper develops an unbiased finite impulse response (UFIR) filtering algorithm for networked systems where uncertain delays and packet dropouts can happen due to measurement failures and unreliable communication. The binary Bernoulli distribution with known delay probability is used to model the randomly arrived measures. A novel representation of the stochastic model is presented for FIR-type filter structures. To avoid packet dropouts and improve the estimation accuracy when a message arrives with no data, a predictive algorithm is used. An advantage of the UFIR filtering approach is demonstrated by comparing the mean square errors with the Kalman and H∞ filters under the same conditions. Experimental verifications are provided based on GPS vehicle tracking.},\n  keywords = {Finite impulse response filters;Filtering;Measurement uncertainty;Signal processing algorithms;Filtering algorithms;Prediction algorithms;Delays;delayed data;missing data;unbiased FIR filter},\n  doi = {10.23919/Eusipco47968.2020.9287509},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002408.pdf},\n}\n\n
\n
\n\n\n
\n This paper develops an unbiased finite impulse response (UFIR) filtering algorithm for networked systems where uncertain delays and packet dropouts can happen due to measurement failures and unreliable communication. The binary Bernoulli distribution with known delay probability is used to model the randomly arriving measurements. A novel representation of the stochastic model is presented for FIR-type filter structures. To cope with packet dropouts and improve the estimation accuracy when a message arrives with no data, a predictive algorithm is used. An advantage of the UFIR filtering approach is demonstrated by comparing the mean square errors with the Kalman and H∞ filters under the same conditions. Experimental verifications are provided based on GPS vehicle tracking.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Deep Transform Learning for Multi-Sensor Fusion.\n \n \n \n \n\n\n \n Sahu, S.; Kumar, K.; Majumdar, A.; and Chandra, M. G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1996-2000, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DeepPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287510,\n  author = {S. Sahu and K. Kumar and A. Majumdar and M. G. Chandra},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Deep Transform Learning for Multi-Sensor Fusion},\n  year = {2020},\n  pages = {1996-2000},\n  abstract = {This paper presents a Deep Transform Learning based framework for multi-sensor fusion. Deep representations are learnt for each of the sensors by stacking one transform after another. Subsequently, a common transform is utilized to fuse the deep representations of all sensors to estimate the output. Restricting to a regression use case, a joint optimization formulation is presented for learning the sensor-specific deep transforms, their coefficients, the common transform, its coefficient and the regression weights together. The requisite solution steps and the derivation of closed form updates for the transforms and associated coefficients are given. The performance of the proposed method is evaluated using two real-life datasets and comparisons with the state-of-the-art dictionary and transform learning techniques for regression are presented. Results show that the deep network has superior performance compared to other methods as it is able to learn the data representation more effectively than the other shallow variants. In addition to the multi-sensor case, estimation results with single sensors alone are also provided to demonstrate the importance of multi-sensor fusion.},\n  keywords = {Dictionaries;Stacking;Transforms;Sensor phenomena and characterization;Sensor fusion;Task analysis;Optimization;Multi-sensor Fusion;Transform Learning;Dictionary Learning;Deep Learning},\n  doi = {10.23919/Eusipco47968.2020.9287510},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001996.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a Deep Transform Learning based framework for multi-sensor fusion. Deep representations are learnt for each of the sensors by stacking one transform after another. Subsequently, a common transform is utilized to fuse the deep representations of all sensors to estimate the output. Restricting to a regression use case, a joint optimization formulation is presented for learning the sensor-specific deep transforms, their coefficients, the common transform, its coefficient and the regression weights together. The requisite solution steps and the derivation of closed form updates for the transforms and associated coefficients are given. The performance of the proposed method is evaluated using two real-life datasets and comparisons with the state-of-the-art dictionary and transform learning techniques for regression are presented. Results show that the deep network has superior performance compared to other methods as it is able to learn the data representation more effectively than the other shallow variants. In addition to the multi-sensor case, estimation results with single sensors alone are also provided to demonstrate the importance of multi-sensor fusion.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Globally Optimizing Owing to Tensor Decomposition.\n \n \n \n \n\n\n \n Marmin, A.; Castella, M.; and Pesquet, J. -.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 990-994, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"GloballyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287511,\n  author = {A. Marmin and M. Castella and J. -C. Pesquet},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Globally Optimizing Owing to Tensor Decomposition},\n  year = {2020},\n  pages = {990-994},\n  abstract = {While global optimization is a challenging topic in the nonconvex setting, a recent approach for optimizing polynomials reformulates the problem as an equivalent problem on measures, which is called a moment problem. It is then relaxed into a convex semidefinite programming problem whose solution gives the first moments of a measure supporting the optimal points. However, extracting the global solutions to the polynomial problem from those moments is still difficult, especially if the latter are poorly estimated. In this paper, we address the issue of extracting optimal points and interpret it as a tensor decomposition problem. By leveraging tools developed for noisy tensor decomposition, we propose a method to find the global solutions to a polynomial optimization problem from a noisy estimation of the solution of its corresponding moment problem. Finally, the interest of tensor decomposition methods for global polynomial optimization is shown through a detailed case study.},\n  keywords = {Tensors;Estimation;Signal processing algorithms;Tools;Noise measurement;Optimization;Standards},\n  doi = {10.23919/Eusipco47968.2020.9287511},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000990.pdf},\n}\n\n
\n
\n\n\n
\n While global optimization is a challenging topic in the nonconvex setting, a recent approach for optimizing polynomials reformulates the problem as an equivalent problem on measures, which is called a moment problem. It is then relaxed into a convex semidefinite programming problem whose solution gives the first moments of a measure supporting the optimal points. However, extracting the global solutions to the polynomial problem from those moments is still difficult, especially if the latter are poorly estimated. In this paper, we address the issue of extracting optimal points and interpret it as a tensor decomposition problem. By leveraging tools developed for noisy tensor decomposition, we propose a method to find the global solutions to a polynomial optimization problem from a noisy estimation of the solution of its corresponding moment problem. Finally, the interest of tensor decomposition methods for global polynomial optimization is shown through a detailed case study.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semi-Supervised Enhancement and Suppression of Self-Produced Speech Using Correspondence between Air- and Body-Conducted Signals.\n \n \n \n \n\n\n \n Takada, M.; Saki, S.; Tobing, P. L.; and Toda, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 456-460, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Semi-SupervisedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287512,\n  author = {M. Takada and S. Saki and P. L. Tobing and T. Toda},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Semi-Supervised Enhancement and Suppression of Self-Produced Speech Using Correspondence between Air- and Body-Conducted Signals},\n  year = {2020},\n  pages = {456-460},\n  abstract = {We propose a semi-supervised method for enhancing and suppressing self-produced speech recorded with wearable air- and body-conductive microphones. Body-conducted signals are robust against external noise and predominantly contain self-produced speech. As a result, these signals provide informative acoustical clues when estimating a linear filter to separate a mixed signal into self-produced speech and background noise. In a previous study, we proposed a blind source separation method for handling air- and body-conducted signals as a multi-channel signal. While our previously proposed method demonstrated the superior performance that can be achieved by using air- and body-conducted signals in comparison to using only air-conducted signals, the enhanced and suppressed air-conducted signals tended to be contaminated with the acoustical characteristics of the body-conducted signals due to the nonlinear relationship between these signals. To address this issue, in this paper, we introduce a new source model which takes into consideration the correspondence between these signals and incorporates them within a semi-supervised framework. Our experimental results reveal that this new method alleviates the negative effects of using the acoustical characteristics of the body-conducted signals, outperforming our previously proposed method, as well as conventional methods, under a semi-supervised condition.},\n  keywords = {Performance evaluation;Atmospheric modeling;Wearable computers;Nonlinear filters;Speech enhancement;Signal processing;Microphones;Self-produced speech;Semi-supervised speech enhancement and suppression;Air- and body-conducted signals},\n  doi = {10.23919/Eusipco47968.2020.9287512},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000456.pdf},\n}\n\n
\n
\n\n\n
\n We propose a semi-supervised method for enhancing and suppressing self-produced speech recorded with wearable air- and body-conductive microphones. Body-conducted signals are robust against external noise and predominantly contain self-produced speech. As a result, these signals provide informative acoustical clues when estimating a linear filter to separate a mixed signal into self-produced speech and background noise. In a previous study, we proposed a blind source separation method for handling air- and body-conducted signals as a multi-channel signal. While our previously proposed method demonstrated the superior performance that can be achieved by using air- and body-conducted signals in comparison to using only air-conducted signals, the enhanced and suppressed air-conducted signals tended to be contaminated with the acoustical characteristics of the body-conducted signals due to the nonlinear relationship between these signals. To address this issue, in this paper, we introduce a new source model which takes into consideration the correspondence between these signals and incorporates them within a semi-supervised framework. Our experimental results reveal that this new method alleviates the negative effects of using the acoustical characteristics of the body-conducted signals, outperforming our previously proposed method, as well as conventional methods, under a semi-supervised condition.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Image Super-Resolution via Generative Adversarial Network Using an Orthogonal Projection.\n \n \n \n \n\n\n \n Yamamoto, H.; Kitahara, D.; and Hirabayashi, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 660-664, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ImagePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287515,\n  author = {H. Yamamoto and D. Kitahara and A. Hirabayashi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Image Super-Resolution via Generative Adversarial Network Using an Orthogonal Projection},\n  year = {2020},\n  pages = {660-664},\n  abstract = {In this paper, we propose a simple but powerful idea to improve super-resolution (SR) methods based on convolutional neural networks (CNNs). We consider a linear manifold, which is the set of all SR images whose downsampling results are the same as the input image, and apply the orthogonal projection onto this linear manifold in the output layers of the CNNs. The proposed method can guarantee the consistency between the SR image and the input image and reduce the mean squared error. The proposed method is especially effective for SR methods based on generative adversarial networks (GANs), composed of one generator and one discriminator, since the generator can learn high-frequency components while maintaining low-frequency ones. Experiments show the effectiveness of the proposed technique for a GAN-based SR method. Finally we introduce an idea of extension to noisy images.},\n  keywords = {Manifolds;Europe;Generative adversarial networks;Generators;Noise measurement;Convolutional neural networks;Signal resolution;Single image super-resolution;generative adversarial network;orthogonal projection;constrained learning},\n  doi = {10.23919/Eusipco47968.2020.9287515},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000660.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a simple but powerful idea to improve super-resolution (SR) methods based on convolutional neural networks (CNNs). We consider a linear manifold, which is the set of all SR images whose downsampling results are the same as the input image, and apply the orthogonal projection onto this linear manifold in the output layers of the CNNs. The proposed method can guarantee the consistency between the SR image and the input image and reduce the mean squared error. The proposed method is especially effective for SR methods based on generative adversarial networks (GANs), composed of one generator and one discriminator, since the generator can learn high-frequency components while maintaining low-frequency ones. Experiments show the effectiveness of the proposed technique for a GAN-based SR method. Finally, we outline an idea for extending the method to noisy images.\n
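Assuming the downsampling operator is s x s average pooling, the orthogonal projection onto the consistency set has the simple closed form sketched below: add back the block-replicated low-resolution residual. The paper derives the projection for its actual downsampling operator, so this is only an illustrative special case.

```python
import numpy as np

def project_to_consistent(sr, lr, s):
    """Orthogonally project an SR image onto the set {x : downsample(x) = lr},
    where downsampling is assumed to be s x s average pooling."""
    h, w = sr.shape
    # s x s average pooling of the current SR estimate
    pooled = sr.reshape(h // s, s, w // s, s).mean(axis=(1, 3))
    residual = lr - pooled
    # replicate each residual value over its s x s block and add it back
    return sr + np.kron(residual, np.ones((s, s)))

# Toy check: after projection, downsampling the result reproduces lr exactly.
rng = np.random.default_rng(0)
sr, lr, s = rng.random((8, 8)), rng.random((4, 4)), 2
proj = project_to_consistent(sr, lr, s)
print(np.allclose(proj.reshape(4, 2, 4, 2).mean(axis=(1, 3)), lr))   # True
```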
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Consistency-aware and Inconsistency-aware Graph-based Multi-view Clustering.\n \n \n \n \n\n\n \n Horie, M.; and Kasai, H.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1472-1476, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Consistency-awarePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287516,\n  author = {M. Horie and H. Kasai},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Consistency-aware and Inconsistency-aware Graph-based Multi-view Clustering},\n  year = {2020},\n  pages = {1472-1476},\n  abstract = {Multi-view data analysis has gained increasing popularity because multi-view data are frequently encountered in machine learning applications. A simple but promising approach for clustering of multi-view data is multi-view clustering (MVC), which has been developed extensively to classify given subjects into some clustered groups by learning latent common features that are shared across multi-view data. Among existing approaches, graph-based multi-view clustering (GMVC) achieves state-of-the-art performance by leveraging a shared graph matrix called the unified matrix. However, existing methods including GMVC do not explicitly address inconsistent parts of input graph matrices. Consequently, they are adversely affected by unacceptable clustering performance. To this end, this paper proposes a new GMVC method that incorporates consistent and inconsistent parts lying across multiple views. This proposal is designated as CI-GMVC. Numerical evaluations of real-world datasets demonstrate the effectiveness of the proposed CI-GMVC.},\n  keywords = {Data analysis;Clustering methods;Europe;Machine learning;Signal processing;Proposals},\n  doi = {10.23919/Eusipco47968.2020.9287516},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001472.pdf},\n}\n\n
\n
\n\n\n
\n Multi-view data analysis has gained increasing popularity because multi-view data are frequently encountered in machine learning applications. A simple but promising approach for clustering multi-view data is multi-view clustering (MVC), which has been developed extensively to classify given subjects into clustered groups by learning latent common features that are shared across multi-view data. Among existing approaches, graph-based multi-view clustering (GMVC) achieves state-of-the-art performance by leveraging a shared graph matrix called the unified matrix. However, existing methods, including GMVC, do not explicitly address the inconsistent parts of input graph matrices; consequently, their clustering performance can be adversely affected. To this end, this paper proposes a new GMVC method that incorporates the consistent and inconsistent parts lying across multiple views. This proposal is designated as CI-GMVC. Numerical evaluations on real-world datasets demonstrate the effectiveness of the proposed CI-GMVC.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Loss Functions for CNN-based Biometric Vein Recognition.\n \n \n \n \n\n\n \n Salih Kuzu, R.; Maiorana, E.; and Campisi, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 750-754, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"LossPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287517,\n  author = {R. {Salih Kuzu} and E. Maiorana and P. Campisi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Loss Functions for CNN-based Biometric Vein Recognition},\n  year = {2020},\n  pages = {750-754},\n  abstract = {The recent progress in deep learning has led to a rapid change in the way biometric data can be handled, offering new opportunities for further research on physical, behavioral, and cognitive biometric recognition. In particular, conventional modalities for preprocessing, extracting features, and comparing templates derived from biometric traits have been swiftly altered, replacing the search for hand-crafted features with the ever-increasing use of generalized deep learning models and transfer learning, able to guarantee notably-high recognition performance. This study investigates the capabilities of deep learning approaches in performing vein pattern verification. Specifically, recent advances in the design of convolutional neural networks, introduced to increase the inter-class variability and decrease the intra-class variability of the generated representations, are here taken into account to speculate on the effects on recognition performance of the selection for the most suitable loss function. Experimental tests conducted on finger vein, palm vein, and hand dorsum vein patterns testify the effectiveness of the proposed frameworks, able to exceed current state-of-the-art performance on five different publicly available vein datasets.},\n  keywords = {Deep learning;Image recognition;Biometrics (access control);Veins;Europe;Signal processing;Feature extraction;Vein Biometrics;Finger Vein;Palm Vein;Hand Dorsum Vein;Convolutional Neural Networks;Loss function},\n  doi = {10.23919/Eusipco47968.2020.9287517},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000750.pdf},\n}\n\n
A Deep Learning Model for Automatic Sleep Scoring using Multimodality Time Series. Yan, R.; Li, F.; Zhou, D.; Ristaniemi, T.; and Cong, F. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1090-1094, Aug 2020.
@InProceedings{9287518,\n  author = {R. Yan and F. Li and D. Zhou and T. Ristaniemi and F. Cong},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Deep Learning Model for Automatic Sleep Scoring using Multimodality Time Series},\n  year = {2020},\n  pages = {1090-1094},\n  abstract = {Sleep scoring is a fundamental but time-consuming process in any sleep laboratory. Automatic sleep scoring is crucial and urgent to help address the increasing unmet need for sleep research. Therefore, this paper aims to develop an end-to-end deep learning architecture using raw polysomnographic recordings to automate sleep scoring. The proposed model adopts two-dimensional convolutional neural networks (2D-CNN) to automatically learn features from multi-modality signals, together with a {"}squeeze and excitation{"} block for recalibrating channel-wise feature responses. The learnt representations are finally fed to a softmax classifier to generate predictions for each sleep stage. The model performance is evaluated on two public sleep datasets (SHHS and Sleep-EDF) with different available channels. The results have shown that our model achieves an overall accuracy of 85.2% on the SHHS dataset and an accuracy of 85% on the Sleep-EDF dataset. We have also demonstrated that the proposed architecture not only is able to handle various numbers of input channels and several signal modalities from different datasets but also exhibits short runtimes and low computational cost.},\n  keywords = {Deep learning;Performance evaluation;Runtime;Computational modeling;Time series analysis;Computer architecture;Computational efficiency;polysomnography;automatic sleep scoring;multi-modality analysis;deep learning;transfer learning},\n  doi = {10.23919/Eusipco47968.2020.9287518},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001090.pdf},\n}\n\n
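The sleep-scoring entry above mentions a "squeeze and excitation" block for recalibrating channel-wise feature responses. Below is a minimal, generic PyTorch sketch of such a block; the channel count and reduction ratio are illustrative assumptions, not the paper's configuration.

import torch
import torch.nn as nn

class SqueezeExcite(nn.Module):
    """Channel recalibration: squeeze (global pooling) -> excite (two FC layers) -> rescale."""
    def __init__(self, channels: int, reduction: int = 8):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):                    # x: (batch, channels, height, width)
        w = x.mean(dim=(2, 3))               # squeeze: global average pool per channel
        w = self.fc(w)                       # excite: channel-wise gating weights in (0, 1)
        return x * w[:, :, None, None]       # recalibrate the feature maps

feats = torch.randn(4, 64, 30, 100)          # e.g. 2D-CNN features from a 30-s epoch (illustrative)
print(SqueezeExcite(64)(feats).shape)        # torch.Size([4, 64, 30, 100])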
A Wide Multimodal Dense U-Net for Fast Magnetic Resonance Imaging. Falvo, A.; Comminiello, D.; Scardapane, S.; Scarpiniti, M.; and Uncini, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1274-1278, Aug 2020.
@InProceedings{9287519,\n  author = {A. Falvo and D. Comminiello and S. Scardapane and M. Scarpiniti and A. Uncini},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Wide Multimodal Dense U-Net for Fast Magnetic Resonance Imaging},\n  year = {2020},\n  pages = {1274-1278},\n  abstract = {In this paper, a deep learning method for accelerating magnetic resonance imaging (MRI) is presented, which is able to reconstruct undersampled MR images obtained by reducing the k-space data in the direction of the phase encoding. In particular, we focus on the reconstruction of MR images related to patients affected by multiple sclerosis (MS) and we propose a new multimodal deep learning architecture that is able to exploit the joint information deriving from the combination of different types of MR images and to accelerate the MRI, while providing high quality of the reconstructed image. Experimental results show the performance improvement of the proposed method with respect to existing models in reconstructing images with an MRI acceleration of 4 times.},\n  keywords = {biomedical MRI;image reconstruction;learning (artificial intelligence);medical image processing;wide multimodal dense u-net;fast magnetic resonance imaging;deep learning method;k-space data;phase encoding;multiple sclerosis;multimodal deep learning architecture;reconstructed image;MRI acceleration;sundersampled MR images;Deep learning;Multiple sclerosis;Magnetic resonance imaging;Signal processing;Acceleration;Lesions;Image reconstruction;Fast MRI;MR Image Reconstruction;Deep Neural Network;Multimodal Dense U-Net;Multiple Sclerosis},\n  doi = {10.23919/Eusipco47968.2020.9287519},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001274.pdf},\n}\n\n
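The entry above reconstructs MR images from k-space undersampled along the phase-encoding direction. The numpy sketch below only sets up that forward problem (random phase-encode line selection with a fully sampled centre, plus the aliased zero-filled baseline); the mask density and image size are assumptions, and the paper's multimodal dense U-Net is not reproduced here.

import numpy as np

def undersample_phase_encoding(img, accel=4, n_center=16, seed=0):
    """Keep roughly 1/accel of the phase-encoding (row) lines of k-space, always retaining the centre."""
    rng = np.random.default_rng(seed)
    k = np.fft.fftshift(np.fft.fft2(img))                      # full k-space
    rows = np.zeros(img.shape[0], dtype=bool)
    rows[rng.choice(img.shape[0], img.shape[0] // accel, replace=False)] = True
    c = img.shape[0] // 2
    rows[c - n_center // 2: c + n_center // 2] = True          # fully sampled low frequencies
    k_under = k * rows[:, None]                                 # zero out the skipped lines
    zero_filled = np.abs(np.fft.ifft2(np.fft.ifftshift(k_under)))
    return k_under, zero_filled                                 # network input / aliased baseline

phantom = np.zeros((128, 128)); phantom[40:90, 30:100] = 1.0    # toy "anatomy"
_, zf = undersample_phase_encoding(phantom)
print(zf.shape)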
Multi-model Deep Learning Ensemble for ECG Heartbeat Arrhythmia Classification. Essa, E.; and Xie, X. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1085-1089, Aug 2020.
@InProceedings{9287520,\n  author = {E. Essa and X. Xie},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-model Deep Learning Ensemble for ECG Heartbeat Arrhythmia Classification},\n  year = {2020},\n  pages = {1085-1089},\n  abstract = {Managing and treating cardiovascular diseases can be substantially improved by automatic detection and classification of the heart arrhythmia. In this paper, we introduced a novel deep learning system for classifying the electrocardiogram (ECG) signals. The heartbeats are classified into different arrhythmia types using two proposed deep learning models. The first model is integrating the convolutional neural network (CNN) and long short-term memory (LSTM) network to extract useful features within the ECG signal. The second model combines several classical features with LSTM in order to effectively recognize abnormal classes. These deep learning models are trained using a bagging model then aggregated by a fusion classifier to form a robust unified model. The proposed system is evaluated on the MIT-BIH arrhythmia database and produces an overall accuracy of 95.81%, which significantly outperforms the state-of-the-art.},\n  keywords = {Deep learning;Training;Heart beat;Electrocardiography;Signal processing;Feature extraction;Bagging;CNN;LSTM;Bagging;Deep Learning Ensemble;ECG;Arrhythmia},\n  doi = {10.23919/Eusipco47968.2020.9287520},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001085.pdf},\n}\n\n
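The ECG entry above combines a CNN with an LSTM for per-beat feature learning. The PyTorch sketch below shows the generic shape of such a hybrid, with illustrative layer sizes only; the paper's bagging and fusion stages are not included.

import torch
import torch.nn as nn

class CnnLstmBeatClassifier(nn.Module):
    """1-D CNN front end followed by an LSTM, ending in logits over arrhythmia classes."""
    def __init__(self, n_classes: int = 5):
        super().__init__()
        self.cnn = nn.Sequential(
            nn.Conv1d(1, 32, kernel_size=7, padding=3), nn.ReLU(),
            nn.MaxPool1d(2),
            nn.Conv1d(32, 64, kernel_size=5, padding=2), nn.ReLU(),
            nn.MaxPool1d(2),
        )
        self.lstm = nn.LSTM(input_size=64, hidden_size=64, batch_first=True)
        self.head = nn.Linear(64, n_classes)

    def forward(self, x):                    # x: (batch, 1, samples) single-lead beat segment
        h = self.cnn(x)                      # (batch, 64, samples/4)
        h = h.transpose(1, 2)                # LSTM expects (batch, time, features)
        _, (hn, _) = self.lstm(h)
        return self.head(hn[-1])             # class logits

beats = torch.randn(8, 1, 256)               # eight 256-sample heartbeat windows (illustrative)
print(CnnLstmBeatClassifier()(beats).shape)  # torch.Size([8, 5])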
Gaussian Process Latent Variable Models Applied to Study Maritime Traffic Patterns from VIIRS Data. Grasso, R. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1447-1451, Aug 2020.
@InProceedings{9287521,\n  author = {R. Grasso},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Gaussian Process Latent Variable Models Applied to Study Maritime Traffic Patterns from VIIRS Data},\n  year = {2020},\n  pages = {1447-1451},\n  abstract = {Gaussian process latent variable models are used as a data dimensionality reduction technique and applied to analyze long spatio-temporal series of ship traffic patterns measured from data acquired by the Visible Infrared Imaging Radiometer Suite nighttime sensor on board the NOAA-Suomi National Polar-Orbiting Partnership spacecraft. The results show that these techniques are able to model traffic pattern with a number of variables much lower than the number of cells of the time-spatial grid supporting the input data. The use of a Bayesian formulation allows the introduction of spatio-temporal prior constraints that clearly improve the visualization of the time series in the reduced dimensionality space with respect to the classical principal component analysis.},\n  keywords = {Analytical models;Satellite broadcasting;Time series analysis;Gaussian processes;Traffic control;Data models;Marine vehicles;Machine Learning;Satellite imaging;Gaussian Process Latent Variable Models;time series analysis},\n  doi = {10.23919/Eusipco47968.2020.9287521},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001447.pdf},\n}\n\n
Denoising ECG Signals Using Unbiased FIR Smoother and Harmonic State-Space Model. Lastre-Domínguez, C.; Ibarra-Manzano, O.; Andrade-Lucio, J. A.; and Shmaliy, Y. S. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1279-1283, Aug 2020.
@InProceedings{9287522,\n  author = {C. Lastre-Domínguez and O. Ibarra-Manzano and J. A. Andrade-Lucio and Y. S. Shmaliy},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Denoising ECG Signals Using Unbiased FIR Smoother and Harmonic State-Space Model},\n  year = {2020},\n  pages = {1279-1283},\n  abstract = {The electrocardiogram (ECG) signals provide information for making decisions about different kinds of heart diseases. During decades various approaches have been developed to denoise ECG data and extract useful features, although further increase in the accuracy is required. In this paper, we view the ECG signal as a quasi periodic process and employ the unbiased finite impulse response (UFIR) smoother on optimal horizons. It is shown that the UFIR smoother applied to a harmonic ECG model performs better than that recently developed for polynomial ECG models. Extensive investigation are provided for diverse ECG data. The results are compared in terms of the mean square error and signal-to-noise ratio.},\n  keywords = {Finite impulse response filters;Noise reduction;Electrocardiography;Harmonic analysis;Feature extraction;Power harmonic filters;Signal to noise ratio;ECG signals;denoising;harmonic model;polynomial model;unbiased FIR smoothing},\n  doi = {10.23919/Eusipco47968.2020.9287522},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001279.pdf},\n}\n\n
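The entry above applies an unbiased FIR (UFIR) smoother with a harmonic state-space model to ECG denoising. In batch form a UFIR estimate reduces to an unbiased least-squares fit of the model over a finite horizon; the numpy sketch below conveys that flavour with a sliding-window harmonic fit. The horizon length, fundamental frequency, and number of harmonics are illustrative assumptions, not the paper's optimal values.

import numpy as np

def harmonic_smooth(y, fs, f0, n_harm=3, horizon=51):
    """Denoise y with an unbiased LS fit of a truncated harmonic model on a sliding horizon."""
    half = horizon // 2
    t = np.arange(-half, half + 1) / fs
    # Regressors: DC plus cos/sin of the first n_harm harmonics of f0.
    H = np.column_stack([np.ones_like(t)] +
                        [f(2 * np.pi * k * f0 * t) for k in range(1, n_harm + 1)
                         for f in (np.cos, np.sin)])
    P = H @ np.linalg.pinv(H)                  # LS projection; its centre row smooths the mid sample
    out = np.copy(y).astype(float)
    for n in range(half, len(y) - half):
        out[n] = P[half] @ y[n - half:n + half + 1]
    return out

fs, f0 = 360.0, 1.2                             # sampling rate and rough heart rate (Hz), illustrative
t = np.arange(0, 5, 1 / fs)
clean = np.cos(2 * np.pi * f0 * t) + 0.3 * np.cos(2 * np.pi * 2 * f0 * t)
noisy = clean + 0.2 * np.random.randn(t.size)
print(np.std(harmonic_smooth(noisy, fs, f0) - clean) < np.std(noisy - clean))  # typically True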
Multi-dimensional model order estimation using LineAr Regression of Global Eigenvalues (LaRGE) with applications to EEG and MEG recordings. Korobkov, A. A.; Diugurova, M. K.; Haueisen, J.; and Haardt, M. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1005-1009, Aug 2020.
@InProceedings{9287523,\n  author = {A. A. Korobkov and M. K. Diugurova and J. Haueisen and M. Haardt},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-dimensional model order estimation using LineAr Regression of Global Eigenvalues (LaRGE) with applications to EEG and MEG recordings},\n  year = {2020},\n  pages = {1005-1009},\n  abstract = {The efficient estimation of an approximate model order is very important for real applications with multi-dimensional data if the observed low rank data is corrupted by additive noise. In this paper, we present a novel robust method for model order estimation of multi-dimensional data based on the LineAr Regression of Global Eigenvalues (LaRGE). The LaRGE method uses the multi-linear singular values obtained from the HOSVD of the measurement tensor to construct global eigenvalues. In contrast to the Modified Exponential Test (EFT) that also exploits the approximate exponential profile of the noise eigenvalues, LaRGE does not require the calculation of the probability of false alarm. Therefore, it is well suited for the analysis of biomedical data. The excellent performance of the LaRGE method is illustrated via simulations and results obtained from EEG as well as MEG recordings.},\n  keywords = {eigenvalues and eigenfunctions;electroencephalography;magnetoencephalography;medical signal processing;probability;singular value decomposition;tensors;multidimensional model order estimation;linear regression;global eigenvalues;multidimensional data;multilinear singular values;approximate exponential profile;noise eigenvalues;biomedical data;MEG recordings;EEG recordings;Computational modeling;Linear regression;Estimation;Brain modeling;Eigenvalues and eigenfunctions;Data models;Electroencephalography;tensor;the rank of the tensor;global eigenvalue;eigenvalue},\n  doi = {10.23919/Eusipco47968.2020.9287523},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001005.pdf},\n}\n\n
Exploration of Mode Decomposition for Concurrent Cardiopulmonary Monitoring using Dual Radar. Ray, A.; Khasnobish, A.; Rani, S.; Chowdhury, A.; and Chakravarty, T. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1140-1144, Aug 2020.
@InProceedings{9287524,\n  author = {A. Ray and A. Khasnobish and S. Rani and A. Chowdhury and T. Chakravarty},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Exploration of Mode Decomposition for Concurrent Cardiopulmonary Monitoring using Dual Radar},\n  year = {2020},\n  pages = {1140-1144},\n  abstract = {Cardiopulmonary monitoring involves surveilling the important physiological parameters of an individual like the breathing rate (BR) and the heart rate (HR). This paper uses a simple, off-the-shelf dual multifrequency Continuous Wave (CW) radar setup to monitor the BR and HR of a static individual. The source separation problem of extracting the HR signal in presence of a higher amplitude BR signal poses a huge challenge and has been effectively solved by using an optimal channel selection process and the Variational Mode Decomposition (VMD) algorithm in this paper. Frequency extraction from the nonstationary signal modes produced by VMD has been performed by using the Fourier-Bessel transform to extract precise frequency information. Results show that the proposed system is accurate and outperforms other existing mode decomposition methods like Empirical Mode Decomposition (EMD) and Ensemble Empirical Mode Decomposition (EEMD) with a mean absolute error of 5.1±5.4 with respect to the number of heartbeats per minute and an accuracy of 95.87%(±4.9) with respect to the number of breaths per minute.},\n  keywords = {Heart rate;Empirical mode decomposition;Source separation;Radar;Data mining;Biomedical monitoring;Monitoring;Continuous Wave (CW) radar;breathing rate;heart rate;vital signs;Variational Mode Decomposition;Fourier-Bessel transform},\n  doi = {10.23919/Eusipco47968.2020.9287524},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001140.pdf},\n}\n\n
Micro-Doppler Signal Representation for Drone Classification by Deep Learning. Gérard, J.; Tomasik, J.; Morisseau, C.; Rimmel, A.; and Vieillard, G. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1561-1565, Aug 2020.
@InProceedings{9287525,\n  author = {J. Gérard and J. Tomasik and C. Morisseau and A. Rimmel and G. Vieillard},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Micro-Doppler Signal Representation for Drone Classification by Deep Learning},\n  year = {2020},\n  pages = {1561-1565},\n  abstract = {There are numerous formats which represent the micro-Doppler signature. Our goal is to determine which one is the most adapted to classify small UAV (Unmanned Aerial Vehicules) with Deep Learning. To achieve this goal, we compare drone classification results with the different micro-Doppler signatures for a given neural network. This comparison has been performed on data obtained during a radar measurement campaign. We evaluate the classification performance in function of different use conditions we identified with a given neural network. According to the experiments conducted, the recommended format is a spectrum issued from long observations as its classification results are better for most criteria.},\n  keywords = {Deep learning;Training;Radar measurements;Neural networks;Signal to noise ratio;Drones;Testing},\n  doi = {10.23919/Eusipco47968.2020.9287525},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001561.pdf},\n}\n\n
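The entry above compares micro-Doppler signature formats for a CNN classifier and favours spectra from long observations. Purely as an illustrative setup (a toy simulated return, not the paper's radar data or its recommended format parameters), a long-window time-frequency image can be formed as follows.

import numpy as np
from scipy.signal import spectrogram

# Toy micro-Doppler return: a body Doppler line plus a sinusoidal blade-flash modulation.
fs = 10_000.0
t = np.arange(0, 1.0, 1 / fs)
x = np.exp(1j * 2 * np.pi * (50 * t + 30 * np.sin(2 * np.pi * 8 * t)))  # illustrative only

# Long observation window -> fine Doppler resolution in the resulting image.
f, tt, S = spectrogram(x, fs=fs, nperseg=1024, noverlap=768, return_onesided=False)
img = 10 * np.log10(np.fft.fftshift(np.abs(S), axes=0) + 1e-12)          # dB image a CNN could ingest
print(img.shape)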
Blind Separation of Convolutive Speech Mixtures Based on Local Sparsity and K-means. Huang, Y.; Chu, P.; and Liao, B. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 271-275, Aug 2020.
@InProceedings{9287526,\n  author = {Y. Huang and P. Chu and B. Liao},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Blind Separation of Convolutive Speech Mixtures Based on Local Sparsity and K-means},\n  year = {2020},\n  pages = {271-275},\n  abstract = {In this paper, an accurate and efficient blind source separation method based on local sparsity and K-means (LSK-BSS) is proposed. Specifically, the proposed LSK-BSS approach exploits the local sparsity of speech sources in the transformed domain to obtain closed-form solution for per-frequency mixing system estimation. On this basis, through designing superior initial points of clustering, the well-established K-means algorithm is employed to achieve accurate permutation alignment. Simulations with real reverberant speech sources show that the LSK-BSS approach yields competitive efficiency, robustness and effectiveness, in comparison with the state-of-the-arts methods.},\n  keywords = {Simulation;Frequency-domain analysis;Signal processing algorithms;Clustering algorithms;Signal processing;Robustness;Speech processing;Blind source separation;convolutive speech mixture;K-means;permutation ambiguity},\n  doi = {10.23919/Eusipco47968.2020.9287526},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000271.pdf},\n}\n\n
Block-Term Tensor Decomposition: Model Selection and Computation. Rontogiannis, A. A.; Kofidis, E.; and Giampouras, P. V. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1976-1980, Aug 2020.
@InProceedings{9287527,\n  author = {A. A. Rontogiannis and E. Kofidis and P. V. Giampouras},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Block-Term Tensor Decomposition: Model Selection and Computation},\n  year = {2020},\n  pages = {1976-1980},\n  abstract = {The so-called block-term decomposition (BTD) tensor model has been recently receiving increasing attention due to its enhanced ability of representing systems and signals that are composed of blocks of rank higher than one, a scenario encountered in numerous diverse applications. Its uniqueness and approximation have thus been thoroughly studied. Nevertheless, the problem of estimating the BTD model structure, namely the number of block terms and their individual ranks, has only recently started to attract significant attention, as it is more challenging compared to more classical tensor models such as canonical polyadic decomposition (CPD) and Tucker decomposition (TD). This paper reports our recent results on this topic, which are based on an appropriate extension to the BTD model of our earlier rank-revealing work on low-rank matrix approximation. The idea is to impose column sparsity jointly on the factors and successively estimate the ranks as the numbers of factor columns of non-negligible magnitude, with the aid of alternating iteratively reweighted least squares (IRLS). Simulation results are reported that demonstrate the effectiveness of our method in accurately estimating both the ranks and the factors of the least squares BTD approximation.},\n  keywords = {Tensors;Upper bound;Simulation;Europe;Signal processing;Minimization;Matrix decomposition;Alternating least squares (ALS);block coordinate descent (BCD);block successive upper bound minimization (BSUM);block-term tensor decomposition (BTD);iterative reweighted least squares (IRLS);rank;tensor},\n  doi = {10.23919/Eusipco47968.2020.9287527},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001976.pdf},\n}\n\n
Shadow Detection and Removal Using GAN. Nagae, T.; Abiko, R.; Yamaguchi, T.; and Ikehara, M. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 630-634, Aug 2020.
@InProceedings{9287528,\n  author = {T. Nagae and R. Abiko and T. Yamaguchi and M. Ikehara},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Shadow Detection and Removal Using GAN},\n  year = {2020},\n  pages = {630-634},\n  abstract = {To remove shadowed region in a single image, it is important to obtain high accuracy in both two processes, shadow detection and removal. In order to improve the results, recent methods perform these two processes simultaneously and use GAN for the training. However, since these methods do not try to maintain the luminance of non-shadowed regions, the output images tend to be faded. In this paper, to overcome fading problem, we proposed a new GAN structure based on shadow model. Since our GAN-based method focus on the variation of the illuminance, the illuminances of the shadowed regions, whose amount of change are large, are effectively estimated. In addition, non-shadowed regions remain slightly faded due to our new GAN structure and training method. Owing to our novel GAN structure and training method, our method outperforms state-of-the-art methods in PSNR and SSIM.},\n  keywords = {Training;Fading channels;Degradation;Visualization;Europe;Numerical models;Gallium nitride;shadow detection;shadow removal;GAN;illuminance},\n  doi = {10.23919/Eusipco47968.2020.9287528},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000630.pdf},\n}\n\n
Multidimensional Unlimited Sampling: A Geometrical Perspective. Bouis, V.; Krahmer, F.; and Bhandari, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2314-2318, Aug 2020.
@InProceedings{9287529,\n  author = {V. Bouis and F. Krahmer and A. Bhandari},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Multidimensional Unlimited Sampling: A Geometrical Perspective},\n  year = {2020},\n  pages = {2314-2318},\n  abstract = {The recently introduced unlimited sampling theorem proves that a one-dimensional bandlimited function can be perfectly recovered from a constant factor oversampling of its modulo samples. The advantage of this approach is that arbitrary high-dynamic-range signals can be recovered without sensor saturation or clipping. In this paper, we prove a multidimensional version of the unlimited sampling theorem that works with arbitrary sampling lattices. We also present a geometrical perspective on the emerging class of modulo sampling problem that is based on the topology of quotient spaces.},\n  keywords = {Lattices;Europe;Signal processing;Topology;Multidimensional signal processing;lattice theory;Shannon sampling theory;quotient spaces},\n  doi = {10.23919/Eusipco47968.2020.9287529},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002314.pdf},\n}\n\n
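The entry above generalises the unlimited sampling theorem to arbitrary lattices. The one-dimensional principle it builds on can be sketched in a few lines: under sufficient oversampling, first differences of the modulo samples, folded back into [-λ, λ), equal the differences of the original signal, which can then be anti-differenced. The test signal, the value of λ, and the assumption that the first sample lies inside [-λ, λ) are illustrative.

import numpy as np

lam = 1.0                                                  # modulo threshold λ
centered_mod = lambda t: (t + lam) % (2 * lam) - lam       # fold values into [-λ, λ)

# Dense samples of a smooth, high-dynamic-range signal (amplitude >> λ).
t = np.linspace(0, 1, 2000)
x = 6.0 * np.sin(2 * np.pi * 3 * t)
y = centered_mod(x)                                        # what a self-reset / modulo ADC records

# If sampling is dense enough that |x[k] - x[k-1]| <= λ, first differences survive the fold:
dx = centered_mod(np.diff(y))                              # equals np.diff(x) under that condition
x_hat = np.concatenate(([y[0]], y[0] + np.cumsum(dx)))     # anti-difference; assumes |x[0]| < λ

print(np.max(np.abs(x_hat - x)) < 1e-9)                    # True for this toy example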
Fast Multichannel Correlated Tensor Factorization for Blind Source Separation. Yoshii, K.; Sekiguchi, K.; Bando, Y.; Fontaine, M.; and Nugraha, A. A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 306-310, Aug 2020.
@InProceedings{9287530,\n  author = {K. Yoshii and K. Sekiguchi and Y. Bando and M. Fontaine and A. A. Nugraha},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Fast Multichannel Correlated Tensor Factorization for Blind Source Separation},\n  year = {2020},\n  pages = {306-310},\n  abstract = {This paper describes an ultimate covariance-aware multichannel extension of nonnegative matrix factorization (NMF) for blind source separation (BSS). A typical approach to BSS is to integrate a low-rank source model with a full-rank spatial model as multichannel NMF (MNMF) based on full-rank spatial covariance matrices (CMs) or its efficient version named FastMNMF based on jointly-diagonalizable spatial CMs do. The NMF-based phase-unaware source model, however, can deal with only the positive cooccurrence relations between time-frequency bins. To overcome this limitation, we propose an efficient multichannel extension of correlated tensor factorization (CTF) named FastMCTF based on jointly-diagonalizable temporal, frequency, and spatial CMs. Integration of the jointly-diagonalizable full-rank source model proposed by FastCTF with the jointly-diagonalizable full-rank spatial model proposed by FastMNMF enables us to completely consider the positive and negative covariance relations between frequency bins, time frames, and channels. We derive a convergence-guaranteed parameter estimation algorithm based on the multiplicative update and iterative projection and experimentally show the potential of the proposed method.},\n  keywords = {Time-frequency analysis;Tensors;Wiener filters;Signal processing algorithms;Blind source separation;Covariance matrices;Spectrogram},\n  doi = {10.23919/Eusipco47968.2020.9287530},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000306.pdf},\n}\n\n
A Comparative Study of Supervised Learning Algorithms for Symmetric Positive Definite Features. Mian, A.; Raninen, E.; and Ollila, E. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 950-954, Aug 2020.
@InProceedings{9287531,\n  author = {A. Mian and E. Raninen and E. Ollila},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Comparative Study of Supervised Learning Algorithms for Symmetric Positive Definite Features},\n  year = {2020},\n  pages = {950-954},\n  abstract = {In recent years, the use of Riemannian geometry has reportedly shown an increased performance for machine learning problems whose features lie in the symmetric positive definite (SPD) manifold. The present paper aims at reviewing several approaches based on this paradigm and provide a reproducible comparison of their output on a classic learning task of pedestrian detection. Notably, the robustness of these approaches to corrupted data will be assessed.},\n  keywords = {Manifolds;Symmetric matrices;Supervised learning;Signal processing algorithms;Signal processing;Robustness;Task analysis;Supervised learning;Riemannian geometry;Covariance matrix;Pedestrian detection},\n  doi = {10.23919/Eusipco47968.2020.9287531},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000950.pdf},\n}\n\n
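The entry above compares learning algorithms on symmetric positive definite (SPD) features such as region covariance descriptors. One standard baseline of that kind, shown here as a hypothetical illustration rather than one of the paper's exact pipelines, maps each SPD matrix to a Euclidean vector with the matrix logarithm (log-Euclidean framework) and trains an ordinary linear classifier on the result.

import numpy as np
from scipy.linalg import logm
from sklearn.linear_model import LogisticRegression

def spd_to_vec(C):
    # Log-Euclidean mapping: matrix logarithm, then the upper-triangular part as a vector.
    L = np.real(logm(C))
    return L[np.triu_indices_from(L)]

rng = np.random.default_rng(0)
def random_cov(scale, d=8, n=200):
    X = scale * rng.standard_normal((n, d))
    return np.cov(X, rowvar=False) + 1e-6 * np.eye(d)

# Toy two-class problem on covariance descriptors (stand-ins for image region covariances).
covs = [random_cov(1.0) for _ in range(50)] + [random_cov(1.6) for _ in range(50)]
y = np.array([0] * 50 + [1] * 50)
X = np.array([spd_to_vec(C) for C in covs])
print(LogisticRegression(max_iter=1000).fit(X, y).score(X, y))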
A GAN-Based Image Transformation Scheme for Privacy-Preserving Deep Neural Networks. Sirichotedumrong, W.; and Kiya, H. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 745-749, Aug 2020.
@InProceedings{9287532,\n  author = {W. Sirichotedumrong and H. Kiya},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A GAN-Based Image Transformation Scheme for Privacy-Preserving Deep Neural Networks},\n  year = {2020},\n  pages = {745-749},\n  abstract = {We propose a novel image transformation scheme using generative adversarial networks (GANs) for privacy-preserving deep neural networks (DNNs). The proposed scheme enables us not only to apply images without visual information to DNNs, but also to enhance robustness against ciphertext-only attacks (COAs) including DNN-based attacks. In this paper, the proposed transformation scheme is demonstrated to be able to protect visual information on plain images, and the visually-protected images are directly applied to DNNs for privacy-preserving image classification. Since the proposed scheme utilizes GANs, there is no need to manage encryption keys. In an image classification experiment, we evaluate the effectiveness of the proposed scheme in terms of classification accuracy and robustness against COAs.},\n  keywords = {Visualization;Neural networks;Europe;Signal processing;Generative adversarial networks;Robustness;Image classification;Deep neural network;generative adversarial network;privacy-preserving;visual protection},\n  doi = {10.23919/Eusipco47968.2020.9287532},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000745.pdf},\n}\n\n
Unsupervised Domain Adaptation for Acoustic Scene Classification Using Band-Wise Statistics Matching. Mezza, A. I.; Habets, E. A. P.; Müller, M.; and Sarti, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 11-15, Aug 2020.
@InProceedings{9287533,\n  author = {A. I. Mezza and E. A. P. Habets and M. Müller and A. Sarti},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Unsupervised Domain Adaptation for Acoustic Scene Classification Using Band-Wise Statistics Matching},\n  year = {2020},\n  pages = {11-15},\n  abstract = {The performance of machine learning algorithms is known to be negatively affected by possible mismatches between training (source) and test (target) data distributions. In fact, this problem emerges whenever an acoustic scene classification system which has been trained on data recorded by a given device is applied to samples acquired under different acoustic conditions or captured by mismatched recording devices. To address this issue, we propose an unsupervised domain adaptation method that consists of aligning the first- and second-order sample statistics of each frequency band of target-domain acoustic scenes to the ones of the source-domain training dataset. This approach is devised to adapt audio samples from unseen devices before they are fed to a pre-trained classifier, thus avoiding any further learning phase. Using the DCASE 2018 Task 1-B development dataset, we show that the proposed method outperforms the state-of-the-art unsupervised methods found in the literature in terms of both source- and target-domain classification accuracy.},\n  keywords = {Training;Performance evaluation;Image analysis;Machine learning algorithms;Signal processing;Acoustics;Task analysis;Unsupervised domain adaptation;mismatched recording devices;acoustic scene classification},\n  doi = {10.23919/Eusipco47968.2020.9287533},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000011.pdf},\n}\n\n
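The entry above adapts unseen-device recordings by aligning the first- and second-order statistics of each frequency band to those of the source training set. A minimal numpy sketch of that idea follows; the array layout (clips x bands x frames), the use of log-mel spectrograms, and whether the statistics are pooled over the whole set or computed per clip are assumptions, not details taken from the paper.

import numpy as np

def bandwise_match(target_specs, src_mean, src_std, eps=1e-8):
    """Align per-band mean/std of target spectrograms (n_clips, n_bands, n_frames) to source stats."""
    t_mean = target_specs.mean(axis=(0, 2), keepdims=True)           # first-order, per band
    t_std = target_specs.std(axis=(0, 2), keepdims=True) + eps       # second-order, per band
    z = (target_specs - t_mean) / t_std                              # standardise the target bands
    return z * src_std[None, :, None] + src_mean[None, :, None]      # re-colour with source statistics

# Source statistics would be computed once from the training (source-device) spectrograms:
source = np.random.randn(100, 40, 500) * 2.0 + 1.0                   # illustrative log-mel batch
target = np.random.randn(20, 40, 500) * 0.5 - 3.0                    # mismatched recording device
adapted = bandwise_match(target, source.mean(axis=(0, 2)), source.std(axis=(0, 2)))
print(adapted.mean(), adapted.std())                                  # now close to the source statistics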
Joint Design of Radar Transmit Waveform and Mismatched Filter with Low Sidelobes. Jing, Y.; Liang, J.; Vorobyov, S. A.; Fan, X.; and Zhou, D. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1936-1940, Aug 2020.
@InProceedings{9287535,\n  author = {Y. Jing and J. Liang and S. A. Vorobyov and X. Fan and D. Zhou},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Joint Design of Radar Transmit Waveform and Mismatched Filter with Low Sidelobes},\n  year = {2020},\n  pages = {1936-1940},\n  abstract = {The paper focuses on joint design of transmit waveform and mismatched filter to achieve low sidelobe level for improving the resolution of pulse compression (PC). An Lp-norm, P ≥ 1, of the power ratio of sidelobe to mainlobe levels is used in the corresponding PC optimization problem as a metric. The use of Lp -norm minimization contains as special cases the integrated sidelobe level and peak sidelobe level (PSL) minimization problems which corresponds to specific selections of different p values. The main contribution of this work is the development of a new iterative algorithm to solve the aforementioned optimization problem. It is based on using Dinkelbach’s scheme together with majorization minimization method. The computational complexity of the proposed algorithm is also analyzed. Numerical examples demonstrate that waveforms and mismatched filters designed by using the proposed method produce lower PSL than the existing counterparts.},\n  keywords = {Measurement;Minimization methods;Signal processing algorithms;Radar;Filtering algorithms;Signal processing;Optimization;Transmit waveform;receive filter;sidelobe levels;Dinkelbach algorithm;majorization minimization},\n  doi = {10.23919/Eusipco47968.2020.9287535},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001936.pdf},\n}\n\n
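The entry above minimises an Lp-norm of the sidelobe-to-mainlobe power ratio of the waveform/filter output. The numpy sketch below only evaluates the two limiting cases of that metric, PSL (p -> infinity) and ISL (p = 1), for a random-phase waveform with its matched filter as a baseline receive filter; the Dinkelbach/majorization-minimization design loop itself is not reproduced.

import numpy as np

def sidelobe_metrics(s, h):
    # PSL and ISL (in dB) of the pulse-compression output of waveform s through filter h.
    r = np.convolve(s, h, mode="full")
    k0 = np.argmax(np.abs(r))                        # mainlobe location
    main = np.abs(r[k0]) ** 2
    side = np.abs(np.delete(r, k0)) ** 2
    return 10 * np.log10(side.max() / main), 10 * np.log10(side.sum() / main)

rng = np.random.default_rng(0)
s = np.exp(1j * 2 * np.pi * rng.random(64))          # unimodular random-phase waveform (illustrative)
h = np.conj(s[::-1])                                 # matched filter as baseline receive filter
print(sidelobe_metrics(s, h))                        # a mismatched filter trades SNR loss for lower PSL/ISL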
The Benefits of Side Information for Structured Phase Retrieval. Asif, M. S.; and Hegde, C. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 775-778, Aug 2020.
@InProceedings{9287536,\n  author = {M. S. Asif and C. Hegde},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {The Benefits of Side Information for Structured Phase Retrieval},\n  year = {2020},\n  pages = {775-778},\n  abstract = {Phase retrieval, or signal recovery from magnitude-only measurements, is a challenging signal processing problem. Recent progress has revealed that measurement- and computational-complexity challenges can be alleviated if the underlying signal belongs to certain low-dimensional model families, including sparsity, low-rank, or neural generative models. However, the remaining bottleneck in most of these approaches is the requirement of a carefully chosen initial signal estimate. In this paper, we assume that a portion of the signal is already known a priori as {"}side information{"} (this assumption is natural in applications such as holographic coherent diffraction imaging). When such side information is available, we show that a much simpler initialization can provably succeed with considerably reduced costs. We supplement our theory with a range of simulation results.},\n  keywords = {Phase measurement;Diffraction;Computational modeling;Simulation;Imaging;Europe;Signal processing},\n  doi = {10.23919/Eusipco47968.2020.9287536},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000775.pdf},\n}\n\n
Online Hyperparameter Search Interleaved with Proximal Parameter Updates. Lopez-Ramos, L. M.; and Beferull-Lozano, B. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2085-2089, Aug 2020.
@InProceedings{9287537,\n  author = {L. M. Lopez-Ramos and B. Beferull-Lozano},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Online Hyperparameter Search Interleaved with Proximal Parameter Updates},\n  year = {2020},\n  pages = {2085-2089},\n  abstract = {There is a clear need for efficient hyperparameter optimization (HO) algorithms for statistical learning, since commonly applied search methods (such as grid search with N-fold cross-validation) are inefficient and/or approximate. Previously existing gradient-based HO algorithms that rely on the smoothness of the cost function cannot be applied in problems such as Lasso regression. In this contribution, we develop a HO method that relies on the structure of proximal gradient methods and does not require a smooth cost function. Such a method is applied to Leave-one-out (LOO)-validated Lasso and Group Lasso, and an online variant is proposed. Numerical experiments corroborate the convergence of the proposed methods to stationary points of the LOO validation error curve, and the improved efficiency and stability of the online algorithm.},\n  keywords = {Search methods;Statistical learning;Signal processing algorithms;Signal processing;Power system stability;Cost function;Approximation algorithms;Hyperparameter optimization;regression;online learning;proximal gradient descent},\n  doi = {10.23919/Eusipco47968.2020.9287537},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002085.pdf},\n}\n\n
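The entry above interleaves hyperparameter updates (e.g., the Lasso regularisation weight) with proximal parameter updates. The inner proximal-gradient step for the Lasso is the standard ISTA iteration sketched below; the paper's outer LOO-driven hyperparameter search is only indicated by a comment, and the problem sizes are illustrative.

import numpy as np

def soft_threshold(v, tau):
    """Proximal operator of tau * ||.||_1 (soft-thresholding)."""
    return np.sign(v) * np.maximum(np.abs(v) - tau, 0.0)

def lasso_ista(A, y, lam, n_iter=500):
    """Proximal-gradient (ISTA) solver for 0.5*||Ax - y||^2 + lam*||x||_1."""
    step = 1.0 / np.linalg.norm(A, 2) ** 2           # 1 / Lipschitz constant of the smooth part
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        grad = A.T @ (A @ x - y)
        x = soft_threshold(x - step * grad, step * lam)
    return x

rng = np.random.default_rng(1)
A = rng.standard_normal((50, 100))
x_true = np.zeros(100); x_true[:5] = rng.standard_normal(5)
y = A @ x_true + 0.01 * rng.standard_normal(50)
# An outer loop in the spirit of the paper would now adjust lam using LOO validation,
# interleaving such proximal updates with hyperparameter update steps.
print(np.count_nonzero(lasso_ista(A, y, lam=0.1)))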
\n
\n\n\n
\n There is a clear need for efficient hyperparameter optimization (HO) algorithms for statistical learning, since commonly applied search methods (such as grid search with N-fold cross-validation) are inefficient and/or approximate. Previously existing gradient-based HO algorithms that rely on the smoothness of the cost function cannot be applied in problems such as Lasso regression. In this contribution, we develop a HO method that relies on the structure of proximal gradient methods and does not require a smooth cost function. Such a method is applied to Leave-one-out (LOO)-validated Lasso and Group Lasso, and an online variant is proposed. Numerical experiments corroborate the convergence of the proposed methods to stationary points of the LOO validation error curve, and the improved efficiency and stability of the online algorithm.\n
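The proximal machinery referred to in the abstract above can be illustrated with a minimal, self-contained sketch (all names and parameter values are illustrative, not the authors' code): ISTA-style proximal gradient iterations for the Lasso, wrapped in the leave-one-out validation loop that such a hyperparameter search is designed to make efficient.

```python
import numpy as np

def soft_threshold(v, t):
    """Proximal operator of t * ||.||_1 (soft-thresholding)."""
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def lasso_ista(A, y, lam, n_iter=500):
    """Solve min_w 0.5 * ||A w - y||^2 + lam * ||w||_1 by proximal gradient (ISTA)."""
    step = 1.0 / np.linalg.norm(A, 2) ** 2      # 1 / Lipschitz constant of the smooth part
    w = np.zeros(A.shape[1])
    for _ in range(n_iter):
        grad = A.T @ (A @ w - y)
        w = soft_threshold(w - step * grad, step * lam)
    return w

def loo_error(A, y, lam):
    """Leave-one-out validation error of the Lasso for a given hyperparameter lam."""
    errs = []
    for i in range(len(y)):
        mask = np.arange(len(y)) != i
        w = lasso_ista(A[mask], y[mask], lam)
        errs.append((A[i] @ w - y[i]) ** 2)
    return float(np.mean(errs))

# Grid search over lam: the inefficient baseline that an online,
# proximal-based hyperparameter search aims to replace.
rng = np.random.default_rng(0)
A = rng.standard_normal((50, 20))
w_true = rng.standard_normal(20) * (rng.random(20) < 0.2)
y = A @ w_true + 0.1 * rng.standard_normal(50)
for lam in (0.01, 0.1, 1.0):
    print(lam, loo_error(A, y, lam))
```

The grid loop at the end is exactly the exhaustive baseline the paper argues against; its contribution is to search the hyperparameter online by exploiting the structure of these proximal updates rather than re-solving the problem on a grid.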
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Salt Dome Detection Using Context-Aware Saliency.\n \n \n \n \n\n\n \n Lawal, A.; Mayyala, Q.; Zerguine, A.; and Beghdadi, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1906-1910, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"SaltPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287538,\n  author = {A. Lawal and Q. Mayyala and A. Zerguine and A. Beghdadi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Salt Dome Detection Using Context-Aware Saliency},\n  year = {2020},\n  pages = {1906-1910},\n  abstract = {This work presents a method for salt dome detection in seismic images based on a Context-Aware Saliency (CAS) detection model. Seismic data can easily add up to hundred of gigabytes and terabytes in size. However, the key features or structural information that are of interest to the seismic interpreters are quite few. These features include salt domes, fault and other geological features that have the potential of indicating the presence of oil reservoir. A new method for extracting the most perceptual relevant features in seismic images based on the CAS model is proposed. The efficiency of this method in detecting the most salient structures in a seismic image such as salt dome is demonstrated through a series of experiment on real data set with various spatial contents.},\n  keywords = {Oils;Geology;Signal processing;Feature extraction;Reservoirs;Tuning;Geologic measurements},\n  doi = {10.23919/Eusipco47968.2020.9287538},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001906.pdf},\n}\n\n
\n
\n\n\n
\n This work presents a method for salt dome detection in seismic images based on a Context-Aware Saliency (CAS) detection model. Seismic data can easily add up to hundreds of gigabytes or even terabytes in size. However, the key features or structural information of interest to seismic interpreters are quite few. These features include salt domes, faults and other geological features that can indicate the presence of oil reservoirs. A new method for extracting the most perceptually relevant features in seismic images based on the CAS model is proposed. The efficiency of this method in detecting the most salient structures in a seismic image, such as salt domes, is demonstrated through a series of experiments on a real data set with various spatial contents.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n BP-DIP: A Backprojection based Deep Image Prior.\n \n \n \n \n\n\n \n Zukerman, J.; Tirer, T.; and Giryes, R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 675-679, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"BP-DIP:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287540,\n  author = {J. Zukerman and T. Tirer and R. Giryes},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {BP-DIP: A Backprojection based Deep Image Prior},\n  year = {2020},\n  pages = {675-679},\n  abstract = {Deep neural networks are a very powerful tool for many computer vision tasks, including image restoration, exhibiting state-of-the-art results. However, the performance of deep learning methods tends to drop once the observation model used in training mismatches the one in test time. In addition, most deep learning methods require vast amounts of training data, which are not accessible in many applications. To mitigate these disadvantages, we propose to combine two image restoration approaches: (i) Deep Image Prior (DIP), which trains a convolutional neural network (CNN) from scratch in test time using the given degraded image. It does not require any training data and builds on the implicit prior imposed by the CNN architecture; and (ii) a backprojection (BP) fidelity term, which is an alternative to the standard least squares loss that is usually used in previous DIP works. We demonstrate the performance of the proposed method, termed BP-DIP, on the deblurring task and show its advantages over the plain DIP, with both higher PSNR values and better inference run-time.},\n  keywords = {Deep learning;Training;Training data;Tools;Image restoration;Task analysis;Electronics packaging;Deep learning;loss functions;image deblurring},\n  doi = {10.23919/Eusipco47968.2020.9287540},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000675.pdf},\n}\n\n
\n
\n\n\n
\n Deep neural networks are a very powerful tool for many computer vision tasks, including image restoration, exhibiting state-of-the-art results. However, the performance of deep learning methods tends to drop once the observation model used in training differs from the one at test time. In addition, most deep learning methods require vast amounts of training data, which are not accessible in many applications. To mitigate these disadvantages, we propose to combine two image restoration approaches: (i) Deep Image Prior (DIP), which trains a convolutional neural network (CNN) from scratch at test time using the given degraded image. It does not require any training data and builds on the implicit prior imposed by the CNN architecture; and (ii) a backprojection (BP) fidelity term, which is an alternative to the standard least squares loss that is usually used in previous DIP works. We demonstrate the performance of the proposed method, termed BP-DIP, on the deblurring task and show its advantages over the plain DIP, with both higher PSNR values and better inference run-time.\n
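For readers unfamiliar with the backprojection fidelity term mentioned above, the following minimal numpy sketch contrasts it with the standard least-squares loss for a linear observation model y = Ax (illustrative only: the operator A, its pseudo-inverse and all sizes are toy assumptions, and the real method plugs such a term into DIP training rather than evaluating it in isolation).

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((40, 80))                 # toy linear degradation operator
x_true = rng.standard_normal(80)
y = A @ x_true                                    # noiseless observation
x_est = x_true + 0.1 * rng.standard_normal(80)    # stand-in for a network output

# Standard least-squares fidelity: ||y - A x||^2
ls_loss = float(np.sum((y - A @ x_est) ** 2))

# Backprojection fidelity: ||A^+ (y - A x)||^2, with A^+ the Moore-Penrose
# pseudo-inverse; the residual is measured after being mapped back to the signal domain.
A_pinv = np.linalg.pinv(A)
bp_loss = float(np.sum((A_pinv @ (y - A @ x_est)) ** 2))

print("least squares:", ls_loss, "backprojection:", bp_loss)
```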
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Analyzing the impact of speaker localization errors on speech separation for automatic speech recognition.\n \n \n \n \n\n\n \n Sivasankaran, S.; Vincent, E.; and Fohr, D.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 346-350, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AnalyzingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287541,\n  author = {S. Sivasankaran and E. Vincent and D. Fohr},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Analyzing the impact of speaker localization errors on speech separation for automatic speech recognition},\n  year = {2020},\n  pages = {346-350},\n  abstract = {We investigate the effect of speaker localization on the performance of speech recognition systems in a multispeaker, multichannel environment. Given the speaker location information, speech separation is performed in three stages. In the first stage, a simple delay-and-sum (DS) beamformer is used to enhance the signal impinging from the speaker location which is then used to estimate a time-frequency mask corresponding to the localized speaker using a neural network. This mask is used to compute the second order statistics and to derive an adaptive beamformer in the third stage. We generated a multichannel, multispeaker, reverberated, noisy dataset inspired from the well studied WSJ0-2mix and study the performance of the proposed pipeline in terms of the word error rate (WER). An average WER of 29.4% was achieved using the ground truth localization information and 42.4% using the localization information estimated via GCC-PHAT. Though higher signal-to-interference ratio (SIR) between the speakers was found to positively impact the speech separation performance, equivalent performances were obtained for mixtures with lower SIR values when the speakers are well separated in space.},\n  keywords = {Time-frequency analysis;Pipelines;Neural networks;Signal processing;Noise measurement;Speech processing;Signal to noise ratio;Multichannel speech separation;WSJ0-2mix reverberated},\n  doi = {10.23919/Eusipco47968.2020.9287541},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000346.pdf},\n}\n\n
\n
\n\n\n
\n We investigate the effect of speaker localization on the performance of speech recognition systems in a multispeaker, multichannel environment. Given the speaker location information, speech separation is performed in three stages. In the first stage, a simple delay-and-sum (DS) beamformer is used to enhance the signal impinging from the speaker location, which is then used to estimate a time-frequency mask corresponding to the localized speaker using a neural network. This mask is used to compute the second-order statistics and to derive an adaptive beamformer in the third stage. We generated a multichannel, multispeaker, reverberated, noisy dataset inspired by the well-studied WSJ0-2mix and study the performance of the proposed pipeline in terms of the word error rate (WER). An average WER of 29.4% was achieved using the ground truth localization information and 42.4% using the localization information estimated via GCC-PHAT. Though a higher signal-to-interference ratio (SIR) between the speakers was found to positively impact the speech separation performance, equivalent performances were obtained for mixtures with lower SIR values when the speakers are well separated in space.\n
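The GCC-PHAT localization step mentioned in the abstract can be sketched in a few lines of numpy (a generic textbook implementation, not the authors' pipeline; the sampling rate and the toy 3-sample delay are illustrative).

```python
import numpy as np

def gcc_phat(sig, ref, fs, max_tau=None):
    """Estimate the time delay of `sig` relative to `ref` using GCC-PHAT."""
    n = sig.size + ref.size
    SIG = np.fft.rfft(sig, n=n)
    REF = np.fft.rfft(ref, n=n)
    R = SIG * np.conj(REF)
    R /= np.abs(R) + 1e-12                      # PHAT weighting: keep phase only
    cc = np.fft.irfft(R, n=n)
    max_shift = n // 2 if max_tau is None else min(int(fs * max_tau), n // 2)
    cc = np.concatenate((cc[-max_shift:], cc[:max_shift + 1]))
    shift = np.argmax(np.abs(cc)) - max_shift
    return shift / fs                            # delay in seconds

# Toy example: a 3-sample delay at fs = 16 kHz.
fs = 16000
rng = np.random.default_rng(0)
x = rng.standard_normal(1024)
y = np.roll(x, 3)
print(gcc_phat(y, x, fs) * fs)                   # approximately 3 samples
```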
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n WaveNet based architectures for denoising periodic discontinuous signals and application to friction signals.\n \n \n \n \n\n\n \n Rio, J.; Momey, F.; Ducottet, C.; and Alata, O.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1580-1584, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"WaveNetPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287543,\n  author = {J. Rio and F. Momey and C. Ducottet and O. Alata},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {WaveNet based architectures for denoising periodic discontinuous signals and application to friction signals},\n  year = {2020},\n  pages = {1580-1584},\n  abstract = {In this paper, we introduce a deep learning model based on Wavenet to denoise periodic signals containing some strong discontinuities, where the dataset used for training contains only synthetic data. We introduce a new cost function using a total variation term. The synthetic data which contain strong discontinuities, are generated as the sum of a sine wave, a square signal and a white gaussian noise. This simple model is very time-efficient to compute and allows us to perform data generation for each training of the architecture instead of physically storing the dataset. We specifically apply this model to real friction signals obtained through a rotating tribological system. We also compared our method with an improved TV denoising algorithm.},\n  keywords = {Training;Deep learning;Friction;Computational modeling;Noise reduction;Computer architecture;Data models;Denoising;Wavenet;Total variation;Synthetic data;Friction signals},\n  doi = {10.23919/Eusipco47968.2020.9287543},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001580.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we introduce a deep learning model based on WaveNet to denoise periodic signals containing some strong discontinuities, where the dataset used for training contains only synthetic data. We introduce a new cost function using a total variation term. The synthetic data, which contain strong discontinuities, are generated as the sum of a sine wave, a square wave and white Gaussian noise. This simple model is very time-efficient to compute and allows us to perform data generation for each training of the architecture instead of physically storing the dataset. We specifically apply this model to real friction signals obtained through a rotating tribological system. We also compare our method with an improved TV denoising algorithm.\n
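A minimal sketch of the data-generation and loss ideas described above (illustrative assumptions throughout: signal length, frequencies, noise level and the TV weight `lam` are made up, and in training the loss would be applied to a WaveNet-style network output rather than to the raw noisy signal).

```python
import numpy as np

def synth_example(n=1024, fs=1024.0, rng=None):
    """Toy training pair: clean = sine + square wave, noisy = clean + white Gaussian noise."""
    rng = rng or np.random.default_rng()
    t = np.arange(n) / fs
    f = rng.uniform(2.0, 20.0)
    clean = np.sin(2 * np.pi * f * t) + np.sign(np.sin(2 * np.pi * f * t + rng.uniform(0, np.pi)))
    noisy = clean + 0.3 * rng.standard_normal(n)
    return clean, noisy

def tv_loss(denoised, clean, lam=0.1):
    """MSE data term plus a total-variation penalty on the candidate denoised output."""
    mse = np.mean((denoised - clean) ** 2)
    tv = np.mean(np.abs(np.diff(denoised)))
    return float(mse + lam * tv)

# Here the raw noisy signal stands in for the network output, just to exercise the loss.
clean, noisy = synth_example(rng=np.random.default_rng(0))
print(tv_loss(noisy, clean))
```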
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Controlled accuracy for discrete Chebyshev polynomials.\n \n \n \n \n\n\n \n den Brinker , A. C.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2279-2283, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ControlledPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287544,\n  author = {A. C. {den Brinker}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Controlled accuracy for discrete Chebyshev polynomials},\n  year = {2020},\n  pages = {2279-2283},\n  abstract = {An algorithm is proposed for stable determination of the normalized discrete Chebyshev polynomials. This is achieved by adaptively restricting the range over which the recurrence relation or difference equation is applied. The adaptation of the range is controlled by a factor indicating how much the normalized basis functions are allowed to deviate from unit norm.},\n  keywords = {Difference equations;Signal processing algorithms;Europe;Chebyshev approximation;Signal processing;Orthogonal polynomials;discrete Chebyshev polynomials;numerical accuracy},\n  doi = {10.23919/Eusipco47968.2020.9287544},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002279.pdf},\n}\n\n
\n
\n\n\n
\n An algorithm is proposed for stable determination of the normalized discrete Chebyshev polynomials. This is achieved by adaptively restricting the range over which the recurrence relation or difference equation is applied. The adaptation of the range is controlled by a factor indicating how much the normalized basis functions are allowed to deviate from unit norm.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Deep Learning for LiDAR Waveforms with Multiple Returns.\n \n \n \n \n\n\n \n Aßmann, A.; Stewart, B.; and Wallace, A. M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1571-1575, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DeepPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287545,\n  author = {A. Aßmann and B. Stewart and A. M. Wallace},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Deep Learning for LiDAR Waveforms with Multiple Returns},\n  year = {2020},\n  pages = {1571-1575},\n  abstract = {We present LiDARNet, a novel data driven approach to LiDAR waveform processing utilising convolutional neural networks to extract depth information. To effectively leverage deep learning, an efficient LiDAR toolchain was developed, which can generate realistic waveform datasets based on either specific experimental parameters or synthetic scenes at scale. This enables us to generate a large volume of waveforms in varying conditions with meaningful underlying data. To validate our simulation approach, we model a super resolution benchmark and cross-validate the network with real unseen data. We demonstrate the ability to resolve peaks in close proximity, as well as to extract multiple returns from waveforms with low signal-to-noise ratio simultaneously with over 99% accuracy. This approach is fast, flexible and highly parallelizable for arrayed imagers. We provide explainability in the deep learning process by matching intermediate outputs to a robust underlying signal model.},\n  keywords = {Deep learning;Laser radar;Three-dimensional displays;Benchmark testing;Data models;Data mining;Signal resolution;LiDAR;Deep Learning;Convolutional Neural Networks;Super-Resolution;Time-of-Flight Imaging},\n  doi = {10.23919/Eusipco47968.2020.9287545},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001571.pdf},\n}\n\n
\n
\n\n\n
\n We present LiDARNet, a novel data-driven approach to LiDAR waveform processing utilising convolutional neural networks to extract depth information. To effectively leverage deep learning, an efficient LiDAR toolchain was developed, which can generate realistic waveform datasets based on either specific experimental parameters or synthetic scenes at scale. This enables us to generate a large volume of waveforms in varying conditions with meaningful underlying data. To validate our simulation approach, we model a super-resolution benchmark and cross-validate the network with real unseen data. We demonstrate the ability to resolve peaks in close proximity, as well as to extract multiple returns from waveforms with low signal-to-noise ratio simultaneously with over 99% accuracy. This approach is fast, flexible and highly parallelizable for arrayed imagers. We provide explainability in the deep learning process by matching intermediate outputs to a robust underlying signal model.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Iterative Multichannel Wiener Filter Based on a Kronecker Product Decomposition.\n \n \n \n \n\n\n \n Benesty, J.; Paleologu, C.; Oprea, C. -.; and Ciochina, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 211-215, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287546,\n  author = {J. Benesty and C. Paleologu and C. -C. Oprea and S. Ciochina},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {An Iterative Multichannel Wiener Filter Based on a Kronecker Product Decomposition},\n  year = {2020},\n  pages = {211-215},\n  abstract = {Multiple-input single-output (MISO) system identification problems appear in the context of many important applications. Due to their nature, they are usually addressed based on multichannel algorithms. However, the identification of long length impulse responses (e.g., like in echo cancellation) raises significant challenges, especially in terms of complexity and accuracy of the solution. In this paper, we develop an iterative multichannel Wiener filter for such MISO system identification scenarios. This algorithm is based on a Kronecker product decomposition of the impulse response, in conjunction with low- rank approximations. Simulation results indicate a good accuracy of the proposed solution, even when a small amount of data is available for the estimation of the statistics.},\n  keywords = {Wiener filters;Echo cancellers;Simulation;Signal processing algorithms;MISO communication;Approximation algorithms;System identification;Echo cancellation;Kronecker product decomposition;low-rank approximation;multichannel Wiener filter;multiple-input single-output (MISO) system identification},\n  doi = {10.23919/Eusipco47968.2020.9287546},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000211.pdf},\n}\n\n
\n
\n\n\n
\n Multiple-input single-output (MISO) system identification problems appear in the context of many important applications. Due to their nature, they are usually addressed based on multichannel algorithms. However, the identification of long impulse responses (e.g., in echo cancellation) raises significant challenges, especially in terms of complexity and accuracy of the solution. In this paper, we develop an iterative multichannel Wiener filter for such MISO system identification scenarios. This algorithm is based on a Kronecker product decomposition of the impulse response, in conjunction with low-rank approximations. Simulation results indicate good accuracy of the proposed solution, even when a small amount of data is available for the estimation of the statistics.\n
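The nearest-Kronecker-product idea underlying such a decomposition can be sketched as follows (a generic rank-1 SVD construction, not the paper's iterative Wiener filter; the factor lengths 8 and 64 are illustrative).

```python
import numpy as np

def nearest_kronecker(h, L1, L2):
    """Best (least-squares) approximation h ~ kron(h2, h1) with len(h1)=L1, len(h2)=L2,
    obtained from the rank-1 SVD of the reshaped impulse response."""
    H = h.reshape(L2, L1)                       # H = outer(h2, h1) when h is exactly separable
    U, s, Vt = np.linalg.svd(H, full_matrices=False)
    h2 = np.sqrt(s[0]) * U[:, 0]
    h1 = np.sqrt(s[0]) * Vt[0, :]
    return h1, h2

# Toy check with an exactly separable impulse response of length 8 * 64 = 512.
rng = np.random.default_rng(0)
h1_true = rng.standard_normal(8)
h2_true = rng.standard_normal(64)
h = np.kron(h2_true, h1_true)
h1, h2 = nearest_kronecker(h, L1=8, L2=64)
print(np.linalg.norm(h - np.kron(h2, h1)))      # ~0: exact reconstruction in the separable case
```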
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n IoT-TD: IoT Dataset for Multiple Model BLE-based Indoor Localization/Tracking.\n \n \n \n \n\n\n \n Salimibeni, M.; Hajiakhondi-Meybodi, Z.; Malekzadeh, P.; Atashi, M.; Plataniotis, K. N.; and Mohammadi, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1697-1701, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"IoT-TD:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287547,\n  author = {M. Salimibeni and Z. Hajiakhondi-Meybodi and P. Malekzadeh and M. Atashi and K. N. Plataniotis and A. Mohammadi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {IoT-TD: IoT Dataset for Multiple Model BLE-based Indoor Localization/Tracking},\n  year = {2020},\n  pages = {1697-1701},\n  abstract = {Bluetooth Low Energy (BLE) is one of the key enabling technologies of the emerging Internet of Things (IoT) concept. When it comes to BLE-based dynamic indoor tracking, however, due to drastic fluctuations of the Received Signal Strength Indicator (RSSI), highly acceptable accuracies are not yet achieved. Although very recent introduction of BLE v 5.1 promises prosperous future for BLE-based dynamic tracking, the following two key issues are in the path: (i) Despite of being in the age of big data with huge emphasis on reproducibility of research, there is no unified dataset with precise ground truth available for performing dynamic BLE-based tracking, and; (ii) The main focus of existing works are on utilization of stand-alone models. The paper addresses these gaps. At one hand, we introduce a reliable dataset, referred to as the IoT-TD, leveraging specific set of four optical cameras to provide ground truth with millimeter accuracies. The introduced IoT-TD dataset consists of RSSI values collected from five BLE sensors together with synchronized Inertial Measurement Unit (IMU) signals from the target’s mobile device. On the other hand, the paper introduces a multiple-model dynamic estimation framework coupling RSSI-based particle filtering with IMU-based Pedestrian Dead Reckoning (PDR). Experimental results based on the IoT-TD dataset corroborate effectiveness of multiple modeling fusion frameworks for providing enhanced BLE-based tracking accuracies.},\n  keywords = {Target tracking;Internet of Things;Synchronization;Received signal strength indicator;Optical sensors;State estimation;Optical signal processing;Indoor Tracking;Internet of Things (IoT);Pedestrian Dead Reckoning;Bluetooth Low Energy (BLE)},\n  doi = {10.23919/Eusipco47968.2020.9287547},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001697.pdf},\n}\n\n
\n
\n\n\n
\n Bluetooth Low Energy (BLE) is one of the key enabling technologies of the emerging Internet of Things (IoT) concept. When it comes to BLE-based dynamic indoor tracking, however, due to drastic fluctuations of the Received Signal Strength Indicator (RSSI), highly acceptable accuracies are not yet achieved. Although the very recent introduction of BLE v5.1 promises a prosperous future for BLE-based dynamic tracking, two key issues stand in the way: (i) despite being in the age of big data, with a huge emphasis on reproducibility of research, there is no unified dataset with precise ground truth available for dynamic BLE-based tracking; and (ii) the main focus of existing works is on the utilization of stand-alone models. This paper addresses these gaps. On the one hand, we introduce a reliable dataset, referred to as IoT-TD, leveraging a specific set of four optical cameras to provide ground truth with millimeter accuracy. The introduced IoT-TD dataset consists of RSSI values collected from five BLE sensors together with synchronized Inertial Measurement Unit (IMU) signals from the target’s mobile device. On the other hand, the paper introduces a multiple-model dynamic estimation framework coupling RSSI-based particle filtering with IMU-based Pedestrian Dead Reckoning (PDR). Experimental results based on the IoT-TD dataset corroborate the effectiveness of multiple-model fusion frameworks in providing enhanced BLE-based tracking accuracy.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Transfer learning from speech to music: towards language-sensitive emotion recognition models.\n \n \n \n \n\n\n \n Gómez Cañón, J. S.; Cano, E.; Herrera, P.; and Gómez, E.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 136-140, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"TransferPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287548,\n  author = {J. S. {Gómez Cañón} and E. Cano and P. Herrera and E. Gómez},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Transfer learning from speech to music: towards language-sensitive emotion recognition models},\n  year = {2020},\n  pages = {136-140},\n  abstract = {In this study, we address emotion recognition using unsupervised feature learning from speech data, and test its transferability to music. Our approach is to pre-train models using speech in English and Mandarin, and then fine-tune them with excerpts of music labeled with categories of emotion. Our initial hypothesis is that features automatically learned from speech should be transferable to music. Namely, we expect the intra-linguistic setting (e.g., pre-training on speech in English and fine-tuning on music in English) should result in improved performance over the cross-linguistic setting (e.g., pre-training on speech in English and fine-tuning on music in Mandarin). Our results confirm previous research on cross-domain transferability, and encourage research towards language-sensitive Music Emotion Recognition (MER) models.},\n  keywords = {Emotion recognition;Convolution;Speech recognition;Feature extraction;Speech processing;Task analysis;Unsupervised learning;Sparse convolutional autoencoder;speech emotion recognition;music emotion recognition;unsupervised learning;transfer learning;multi-task learning},\n  doi = {10.23919/Eusipco47968.2020.9287548},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000136.pdf},\n}\n\n
\n
\n\n\n
\n In this study, we address emotion recognition using unsupervised feature learning from speech data, and test its transferability to music. Our approach is to pre-train models using speech in English and Mandarin, and then fine-tune them with excerpts of music labeled with categories of emotion. Our initial hypothesis is that features automatically learned from speech should be transferable to music. Namely, we expect the intra-linguistic setting (e.g., pre-training on speech in English and fine-tuning on music in English) should result in improved performance over the cross-linguistic setting (e.g., pre-training on speech in English and fine-tuning on music in Mandarin). Our results confirm previous research on cross-domain transferability, and encourage research towards language-sensitive Music Emotion Recognition (MER) models.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Quadratically Convergent Proximal Algorithm For Nonnegative Tensor Decomposition.\n \n \n \n \n\n\n \n Vervliet, N.; Themelis, A.; Patrinos, P.; and De Lathauwer, L.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1020-1024, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287549,\n  author = {N. Vervliet and A. Themelis and P. Patrinos and L. {De Lathauwer}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Quadratically Convergent Proximal Algorithm For Nonnegative Tensor Decomposition},\n  year = {2020},\n  pages = {1020-1024},\n  abstract = {The decomposition of tensors into simple rank-1 terms is key in a variety of applications in signal processing, data analysis and machine learning. While this canonical polyadic decomposition (CPD) is unique under mild conditions, including prior knowledge such as nonnegativity can facilitate interpretation of the components. Inspired by the effectiveness and efficiency of Gauss-Newton (GN) for unconstrained CPD, we derive a proximal, semismooth GN type algorithm for non-negative tensor factorization. Global convergence to local minima is achieved via backtracking on the forward-backward envelope function. If the algorithm converges to a global optimum, we show that Q-quadratic rates are obtained in the exact case. Such fast rates are verified experimentally, and we illustrate that using the GN step significantly reduces number of (expensive) gradient computations compared to proximal gradient descent.},\n  keywords = {Tensors;Machine learning algorithms;Data analysis;Signal processing algorithms;Europe;Machine learning;Signal processing;nonnegative tensor factorization;canonical polyadic decomposition;proximal methods;Gauss-Newton},\n  doi = {10.23919/Eusipco47968.2020.9287549},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001020.pdf},\n}\n\n
\n
\n\n\n
\n The decomposition of tensors into simple rank-1 terms is key in a variety of applications in signal processing, data analysis and machine learning. While this canonical polyadic decomposition (CPD) is unique under mild conditions, including prior knowledge such as nonnegativity can facilitate interpretation of the components. Inspired by the effectiveness and efficiency of Gauss-Newton (GN) for unconstrained CPD, we derive a proximal, semismooth GN-type algorithm for nonnegative tensor factorization. Global convergence to local minima is achieved via backtracking on the forward-backward envelope function. If the algorithm converges to a global optimum, we show that Q-quadratic rates are obtained in the exact case. Such fast rates are verified experimentally, and we illustrate that using the GN step significantly reduces the number of (expensive) gradient computations compared to proximal gradient descent.\n
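As a simple point of reference for the decomposition model above, here is a nonnegative CPD baseline by projected alternating least squares (this is not the paper's proximal Gauss-Newton algorithm; tensor sizes, rank and iteration count are illustrative).

```python
import numpy as np

def nonneg_cpd_als(T, R, n_iter=200, rng=None):
    """Nonnegative CPD of a 3-way tensor by alternating least squares with projection
    onto the nonnegative orthant: T ~ sum_r a_r o b_r o c_r with A, B, C >= 0."""
    rng = rng or np.random.default_rng()
    I, J, K = T.shape
    A, B, C = (rng.random((n, R)) for n in (I, J, K))
    for _ in range(n_iter):
        A = np.maximum(np.einsum('ijk,jr,kr->ir', T, B, C) @ np.linalg.pinv((B.T @ B) * (C.T @ C)), 0)
        B = np.maximum(np.einsum('ijk,ir,kr->jr', T, A, C) @ np.linalg.pinv((A.T @ A) * (C.T @ C)), 0)
        C = np.maximum(np.einsum('ijk,ir,jr->kr', T, A, B) @ np.linalg.pinv((A.T @ A) * (B.T @ B)), 0)
    return A, B, C

# Toy check: approximate a random rank-3 nonnegative tensor.
rng = np.random.default_rng(0)
A0, B0, C0 = rng.random((6, 3)), rng.random((7, 3)), rng.random((8, 3))
T = np.einsum('ir,jr,kr->ijk', A0, B0, C0)
A, B, C = nonneg_cpd_als(T, R=3, rng=rng)
print(np.linalg.norm(T - np.einsum('ir,jr,kr->ijk', A, B, C)) / np.linalg.norm(T))
```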
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Online proximal gradient for learning graphs from streaming signals.\n \n \n \n \n\n\n \n Shafipour, R.; and Mateos, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 865-869, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"OnlinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287550,\n  author = {R. Shafipour and G. Mateos},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Online proximal gradient for learning graphs from streaming signals},\n  year = {2020},\n  pages = {865-869},\n  abstract = {We leverage proximal gradient iterations to develop an online graph learning algorithm from streaming network data. Our goal is to track the (possibly) time-varying network topology, and effect memory and computational savings by processing the data on-the-fly as they are acquired. The setup entails observations modeled as stationary graph signals generated by local diffusion dynamics on the unknown network. Moreover, we may have a priori information on the presence or absence of a few edges as in the link prediction problem. The stationarity assumption implies that the observations’ covariance matrix and the so-called graph shift operator (GSO – a matrix encoding the graph topology) commute under mild requirements. This motivates formulating the topology inference task as an inverse problem, whereby one searches for a (e.g., sparse) GSO that is structurally admissible and approximately commutes with the observations’ empirical covariance matrix. For streaming data said covariance can be updated recursively, and we show online proximal gradient iterations can be brought to bear to efficiently track the time-varying solution of the inverse problem with quantifiable guarantees. Specifically, we derive conditions under which the GSO recovery cost is strongly convex and use this property to prove that the online algorithm converges to within a neighborhood of the optimal time-varying batch solution. Preliminary numerical tests illustrate the effectiveness of the proposed graph learning approach in adapting to streaming information and tracking changes in the sought dynamic network.},\n  keywords = {Network topology;Inverse problems;Signal processing algorithms;Search problems;Topology;Covariance matrices;Task analysis;Network topology inference;graph signal processing;proximal gradient algorithm;online optimization},\n  doi = {10.23919/Eusipco47968.2020.9287550},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000865.pdf},\n}\n\n
\n
\n\n\n
\n We leverage proximal gradient iterations to develop an online graph learning algorithm from streaming network data. Our goal is to track the (possibly) time-varying network topology, and effect memory and computational savings by processing the data on-the-fly as they are acquired. The setup entails observations modeled as stationary graph signals generated by local diffusion dynamics on the unknown network. Moreover, we may have a priori information on the presence or absence of a few edges as in the link prediction problem. The stationarity assumption implies that the observations’ covariance matrix and the so-called graph shift operator (GSO – a matrix encoding the graph topology) commute under mild requirements. This motivates formulating the topology inference task as an inverse problem, whereby one searches for a (e.g., sparse) GSO that is structurally admissible and approximately commutes with the observations’ empirical covariance matrix. For streaming data said covariance can be updated recursively, and we show online proximal gradient iterations can be brought to bear to efficiently track the time-varying solution of the inverse problem with quantifiable guarantees. Specifically, we derive conditions under which the GSO recovery cost is strongly convex and use this property to prove that the online algorithm converges to within a neighborhood of the optimal time-varying batch solution. Preliminary numerical tests illustrate the effectiveness of the proposed graph learning approach in adapting to streaming information and tracking changes in the sought dynamic network.\n
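A minimal sketch of the kind of proximal iteration described above (illustrative only: the commutation penalty, step-size rule and soft-thresholding below are generic choices, not the paper's exact online algorithm or its guarantees).

```python
import numpy as np

def soft_threshold(M, t):
    return np.sign(M) * np.maximum(np.abs(M) - t, 0.0)

def graph_prox_step(S, C, step, lam=1e-3):
    """One proximal-gradient step on 0.5 * ||S C - C S||_F^2 + lam * ||S||_1 (off-diagonal):
    pull the graph shift operator S toward commuting with the covariance C while keeping
    it sparse. In an online variant, C would itself be updated recursively from each new
    graph signal x, e.g. C = (1 - gamma) * C + gamma * np.outer(x, x)."""
    E = S @ C - C @ S
    grad = E @ C - C @ E                      # gradient of the commutation penalty
    S = S - step * grad
    off = ~np.eye(S.shape[0], dtype=bool)
    S[off] = soft_threshold(S[off], step * lam)
    return 0.5 * (S + S.T)                    # keep the estimate symmetric

# Toy check: perturb a shift operator that commutes with C and watch the penalty shrink.
rng = np.random.default_rng(0)
n = 10
A = np.triu((rng.random((n, n)) < 0.3).astype(float), 1)
A = A + A.T                                                 # ground-truth graph
C = np.linalg.matrix_power(np.eye(n) + 0.3 * A, 4)          # commutes with A by construction
S = A + 0.05 * rng.standard_normal((n, n))
S = 0.5 * (S + S.T)
step = 0.25 / np.linalg.norm(C, 2) ** 2                     # safe step for the smooth part
penalty = lambda S: 0.5 * np.linalg.norm(S @ C - C @ S, "fro") ** 2
print("before:", penalty(S))
for _ in range(500):
    S = graph_prox_step(S, C, step)
print("after:", penalty(S))                                  # smaller than before
```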
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hardware Architecture for the Regular Interpolation Filter of the AV1 Video Coding Standard.\n \n \n \n \n\n\n \n Freitas, D.; da Silva , R.; Siqueira, Í.; Diniz, C. M.; Reis, R. A. L.; and Grellert, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 560-564, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"HardwarePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287551,\n  author = {D. Freitas and R. {da Silva} and Í. Siqueira and C. M. Diniz and R. A. L. Reis and M. Grellert},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Hardware Architecture for the Regular Interpolation Filter of the AV1 Video Coding Standard},\n  year = {2020},\n  pages = {560-564},\n  abstract = {This work proposes a hardware architecture for fractional-pixel interpolation filter defined in the royalty-free AV1 video coding standard. Analysis conducted in this work shows that the AV1 Regular family of filters has the highest usage especially when considering high resolution videos. The proposed architecture implements the 15 interpolation filters of the AV1 Regular family and is capable to interpolate videos of up to 8K video resolution at 120 fps. The proposed architecture achieves the highest throughput compared to related works.},\n  keywords = {Video coding;Interpolation;Computer architecture;Parallel processing;Filtering algorithms;Hardware;Standards;Video coding;AV1;interpolation filter;hard-ware;architecture},\n  doi = {10.23919/Eusipco47968.2020.9287551},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000560.pdf},\n}\n\n
\n
\n\n\n
\n This work proposes a hardware architecture for the fractional-pixel interpolation filter defined in the royalty-free AV1 video coding standard. Analysis conducted in this work shows that the AV1 Regular family of filters has the highest usage, especially for high-resolution videos. The proposed architecture implements the 15 interpolation filters of the AV1 Regular family and is capable of interpolating videos at resolutions of up to 8K at 120 fps. The proposed architecture achieves the highest throughput compared to related works.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Analysis of Phonetic Dependence of Segmentation Errors in Speaker Diarization.\n \n \n \n \n\n\n \n McKnight, S. W.; Hogg, A. O. T.; and Naylor, P. A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 381-385, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AnalysisPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287552,\n  author = {S. W. McKnight and A. O. T. Hogg and P. A. Naylor},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Analysis of Phonetic Dependence of Segmentation Errors in Speaker Diarization},\n  year = {2020},\n  pages = {381-385},\n  abstract = {Evaluation of speaker segmentation and diarization normally makes use of forgiveness collars around ground truth speaker segment boundaries such that estimated speaker segment boundaries with such collars are considered completely correct. This paper shows that the popular recent approach of removing forgiveness collars from speaker diarization evaluation tools can unfairly penalize speaker diarization systems that correctly estimate speaker segment boundaries. The uncertainty in identifying the start and/or end of a particular phoneme means that the ground truth segmentation is not perfectly accurate, and even trained human listeners are unable to identify phoneme boundaries with full consistency. This research analyses the phoneme dependence of this uncertainty, and shows that it depends on (i) whether the phoneme being detected is at the start or end of an utterance and (ii) what the phoneme is, so that the use of a uniform forgiveness collar is inadequate. This analysis is expected to point the way towards more indicative and repeatable assessment of the performance of speaker diarization systems.},\n  keywords = {Uncertainty;Europe;Tools;Signal processing;Phonetics;Labeling;Speaker diarization;forgiveness collar;phoneme boundary;diarization scoring},\n  doi = {10.23919/Eusipco47968.2020.9287552},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000381.pdf},\n}\n\n
\n
\n\n\n
\n Evaluation of speaker segmentation and diarization normally makes use of forgiveness collars around ground truth speaker segment boundaries such that estimated speaker segment boundaries with such collars are considered completely correct. This paper shows that the popular recent approach of removing forgiveness collars from speaker diarization evaluation tools can unfairly penalize speaker diarization systems that correctly estimate speaker segment boundaries. The uncertainty in identifying the start and/or end of a particular phoneme means that the ground truth segmentation is not perfectly accurate, and even trained human listeners are unable to identify phoneme boundaries with full consistency. This research analyses the phoneme dependence of this uncertainty, and shows that it depends on (i) whether the phoneme being detected is at the start or end of an utterance and (ii) what the phoneme is, so that the use of a uniform forgiveness collar is inadequate. This analysis is expected to point the way towards more indicative and repeatable assessment of the performance of speaker diarization systems.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Collaborative Learning based Symbol Detection in Massive MIMO.\n \n \n \n \n\n\n \n Datta, A.; Deo, M. T.; and Bhatia, V.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1678-1682, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"CollaborativePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287554,\n  author = {A. Datta and M. T. Deo and V. Bhatia},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Collaborative Learning based Symbol Detection in Massive MIMO},\n  year = {2020},\n  pages = {1678-1682},\n  abstract = {Massive multiple-input multiple-output (MIMO) system is a core technology to realize high-speed data for 5G and beyond systems. Though machine learning-based MIMO detection techniques outperform conventional symbol detection techniques, in large user massive MIMO, they suffer from maintaining an optimal bias-variance trade-off to yield optimal performance from an individual model. Hence, in this article, collaborative learning based low complexity detection technique is proposed for uplink symbol detection in large user massive MIMO systems. The proposed detection technique strategically ensembles multiple fully connected neural network models utilizing iterative meta-predictor and reduces the final estimation error by smoothing the variance associated with individual estimation errors. Simulations are carried out to validate the performance of the proposed detection technique under both perfect and imperfect channel state information scenarios. Simulation results reveal that the proposed detection technique achieves a lower bit error rate while maintaining a low computational complexity as compared to several existing uplink massive MIMO detection techniques.},\n  keywords = {Massive MIMO;collaborative learning;deep learning;maximum likelihood},\n  doi = {10.23919/Eusipco47968.2020.9287554},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001678.pdf},\n}\n\n
\n
\n\n\n
\n Massive multiple-input multiple-output (MIMO) is a core technology for realizing high-speed data in 5G and beyond systems. Though machine learning-based MIMO detection techniques outperform conventional symbol detection techniques, in large-user massive MIMO they struggle to maintain the bias-variance trade-off needed to obtain optimal performance from an individual model. Hence, in this article, a collaborative learning-based low-complexity detection technique is proposed for uplink symbol detection in large-user massive MIMO systems. The proposed detection technique strategically ensembles multiple fully connected neural network models utilizing an iterative meta-predictor and reduces the final estimation error by smoothing the variance associated with individual estimation errors. Simulations are carried out to validate the performance of the proposed detection technique under both perfect and imperfect channel state information scenarios. Simulation results reveal that the proposed detection technique achieves a lower bit error rate while maintaining a low computational complexity as compared to several existing uplink massive MIMO detection techniques.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Head Orientation Estimation from Multiple Microphone Arrays.\n \n \n \n \n\n\n \n Felsheim, R. C.; Brendel, A.; Naylor, P. A.; and Kellermann, W.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 491-495, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"HeadPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287555,\n  author = {R. C. Felsheim and A. Brendel and P. A. Naylor and W. Kellermann},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Head Orientation Estimation from Multiple Microphone Arrays},\n  year = {2020},\n  pages = {491-495},\n  abstract = {Knowledge of head orientation is important for various audio signal processing tasks involving human speakers, including speech enhancement and attention tracking. Most of the methods estimate the head orientation using video information which, however, is not always available. In this work, two known audio features for head orientation estimation are reviewed and three new features are proposed. Furthermore, all evaluated features have been combined in two different ways: with a linear combination and a small artificial neural network. The resulting algorithms are able to detect the head orientation in our experiments with high precision and show superior performance over state-of-the-art methods.},\n  keywords = {Estimation;Signal processing algorithms;Europe;Artificial neural networks;Speech enhancement;Microphone arrays;Task analysis;acoustic head orientation estimation;OGCF;HLBR;CDR},\n  doi = {10.23919/Eusipco47968.2020.9287555},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000491.pdf},\n}\n\n
\n
\n\n\n
\n Knowledge of head orientation is important for various audio signal processing tasks involving human speakers, including speech enhancement and attention tracking. Most of the methods estimate the head orientation using video information which, however, is not always available. In this work, two known audio features for head orientation estimation are reviewed and three new features are proposed. Furthermore, all evaluated features have been combined in two different ways: with a linear combination and a small artificial neural network. The resulting algorithms are able to detect the head orientation in our experiments with high precision and show superior performance over state-of-the-art methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exact Algebraic Blind Source Separation Using Side Information.\n \n \n \n \n\n\n \n Weiss, A.; and Yeredor, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1941-1945, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ExactPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287556,\n  author = {A. Weiss and A. Yeredor},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Exact Algebraic Blind Source Separation Using Side Information},\n  year = {2020},\n  pages = {1941-1945},\n  abstract = {Classical Blind Source Separation (BSS) methods rarely attain exact separation, even under noiseless conditions. In addition, they often rely on particular structural or statistical assumptions (e.g., mutual independence) regarding the sources. In this work we consider a (realistic) {"}twist{"} in the classical linear BSS plot, which, quite surprisingly, not only enables perfect separation (under noiseless conditions), but is also free of any assumptions (except for regularity assumptions) regarding the sources or the mixing matrix. In particular, we consider the standard linear mixture model, augmented by a single ancillary, unknown linear mixture of some known linear transformations of the sources. We derive a closed-form expression for an exact algebraic solution, free of any statistical considerations whatsoever, attaining perfect separation in the noiseless case. In addition, we propose a well-behaved solution for the same model in the presence of noise or other measurement inaccuracies. Our derivations are corroborated by several simulation results.},\n  keywords = {Closed-form solutions;Simulation;Mixture models;Signal processing;Blind source separation;Noise measurement;Standards;Blind source separation;algebraic methods;side information;total least squares},\n  doi = {10.23919/Eusipco47968.2020.9287556},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001941.pdf},\n}\n\n
\n
\n\n\n
\n Classical Blind Source Separation (BSS) methods rarely attain exact separation, even under noiseless conditions. In addition, they often rely on particular structural or statistical assumptions (e.g., mutual independence) regarding the sources. In this work we consider a (realistic) \"twist\" in the classical linear BSS plot, which, quite surprisingly, not only enables perfect separation (under noiseless conditions), but is also free of any assumptions (except for regularity assumptions) regarding the sources or the mixing matrix. In particular, we consider the standard linear mixture model, augmented by a single ancillary, unknown linear mixture of some known linear transformations of the sources. We derive a closed-form expression for an exact algebraic solution, free of any statistical considerations whatsoever, attaining perfect separation in the noiseless case. In addition, we propose a well-behaved solution for the same model in the presence of noise or other measurement inaccuracies. Our derivations are corroborated by several simulation results.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust Fast Subclass Discriminant Analysis.\n \n \n \n \n\n\n \n Chumachenko, K.; Iosifidis, A.; and Gabbouj, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1397-1401, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287557,\n  author = {K. Chumachenko and A. Iosifidis and M. Gabbouj},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust Fast Subclass Discriminant Analysis},\n  year = {2020},\n  pages = {1397-1401},\n  abstract = {In this paper, we propose novel methods to address the challenges of dimensionality reduction related to potential outlier classes and imbalanced classes often present in data. In particular, we propose extensions to Fast Subclass Discriminant Analysis and Subclass Discriminant Analysis that allow to put more attention on uder-represented classes or classes that are likely to be confused with each other. Furthermore, the kernelized variants of the proposed algorithms are presented. The proposed methods lead to faster training time and improved accuracy as shown by experiments on eight datasets of different domains, tasks, and sizes.},\n  keywords = {Training;Dimensionality reduction;Signal processing algorithms;Europe;Signal processing;Robustness;Task analysis;subclass discriminant analysis;subspace learning;dimensionality reduction},\n  doi = {10.23919/Eusipco47968.2020.9287557},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001397.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose novel methods to address the challenges of dimensionality reduction related to potential outlier classes and imbalanced classes often present in data. In particular, we propose extensions to Fast Subclass Discriminant Analysis and Subclass Discriminant Analysis that allow more attention to be put on under-represented classes or classes that are likely to be confused with each other. Furthermore, the kernelized variants of the proposed algorithms are presented. The proposed methods lead to faster training time and improved accuracy, as shown by experiments on eight datasets of different domains, tasks, and sizes.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Detection of Package Edges in Distance Maps.\n \n \n \n \n\n\n \n Vasileva, E.; Avramovski, N.; and Ivanovski, Z.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 600-604, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DetectionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287558,\n  author = {E. Vasileva and N. Avramovski and Z. Ivanovski},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Detection of Package Edges in Distance Maps},\n  year = {2020},\n  pages = {600-604},\n  abstract = {This paper presents a CNN-based algorithm for detecting package edges in a scene represented with a distance map (range image), trained on a custom dataset of packaging scenarios. The proposed algorithm represents the basis for package recognition for automatic trailer loading/unloading. The main focus of this paper is designing a semantic segmentation CNN model capable of detecting different types of package edges in a distance map containing distance errors characteristic of Time-of-Flight (ToF) scanning, and differentiating box edges from edges belonging to other types of packaging objects (bags, irregular objects, etc.). The proposed CNN is optimized for training with a limited number of samples containing heavily imbalanced classes. Generating a binary mask of edges with 1-pixel thickness from the probability maps outputted from the CNN is achieved through a custom non-maximum suppression-based edge thinning algorithm. The proposed algorithm shows promising results in detecting box edges.},\n  keywords = {Training;Visualization;Image edge detection;Semantics;Signal processing algorithms;Packaging;Signal processing;edge detection;semantic segmentation;depth maps;CNN;package recognition;automatic unloading},\n  doi = {10.23919/Eusipco47968.2020.9287558},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000600.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a CNN-based algorithm for detecting package edges in a scene represented with a distance map (range image), trained on a custom dataset of packaging scenarios. The proposed algorithm represents the basis for package recognition for automatic trailer loading/unloading. The main focus of this paper is designing a semantic segmentation CNN model capable of detecting different types of package edges in a distance map containing distance errors characteristic of Time-of-Flight (ToF) scanning, and differentiating box edges from edges belonging to other types of packaging objects (bags, irregular objects, etc.). The proposed CNN is optimized for training with a limited number of samples containing heavily imbalanced classes. Generating a binary mask of edges with 1-pixel thickness from the probability maps output by the CNN is achieved through a custom non-maximum suppression-based edge thinning algorithm. The proposed algorithm shows promising results in detecting box edges.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Epileptic Seizure Detection and Anticipation using Deep Learning with Ordered Encoding of Spectrogram Features.\n \n \n \n \n\n\n \n Ranjan Sahu, S.; Krishna Sai Subrahmanyam Gorthi, R.; and Gorthi, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1065-1069, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"EpilepticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287559,
  author = {S. {Ranjan Sahu} and R. {Krishna Sai Subrahmanyam Gorthi} and S. Gorthi},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Epileptic Seizure Detection and Anticipation using Deep Learning with Ordered Encoding of Spectrogram Features},
  year = {2020},
  pages = {1065-1069},
  abstract = {Electroencephalogram (EEG) signals of the brain play a vital role in the detection of epileptic seizures. This paper proposes a new spectrogram-based deep learning method for the detection and anticipation of epileptic seizures. Unlike the existing methods, the proposed method formulates the feature descriptor such that it retains the neighborhood order of spectrograms both in time and frequency, while significantly reducing the dimensionality of the feature descriptor. The spectrogram in each of the 18 EEG channels is constructed by dividing each EEG signal into 3 time-blocks and 19 frequency-blocks. The mean magnitude value of each of these blocks is computed, thereby compactly representing the input EEG signal by a 3D tensor of size 18×19×3. This tensor descriptor is given as an input to the proposed convolutional neural network for learning high-level features. Evaluations are performed on a publicly available EEG dataset of 23 patients and the results from the proposed method are compared with 9 other existing methods. Further, a five-class classification is performed using the proposed method for the anticipation of seizures. The proposed method is found to outperform the existing state-of-the-art methods both in detection and anticipation of epileptic seizures.},
  keywords = {Deep learning;Time-frequency analysis;Tensors;Three-dimensional displays;Electroencephalography;Spectrogram;Biological neural networks;Epileptic Seizures;Seizure Detection;Seizure Anticipation;Electroencephalogram (EEG) Signal;Multi-channel EEG;Spectrogram;Deep Learning;Convolutional Neural Network (CNN)},
  doi = {10.23919/Eusipco47968.2020.9287559},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001065.pdf},
}
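The descriptor described above, the mean spectrogram magnitude over 19 frequency blocks and 3 time blocks per channel giving an 18×19×3 tensor, can be sketched as follows. The sampling rate, window length and block edges are illustrative assumptions, not the paper's exact settings.

import numpy as np
from scipy.signal import spectrogram

def block_mean_descriptor(eeg, fs=256, n_freq_blocks=19, n_time_blocks=3):
    """Compact (channels x 19 x 3) descriptor: mean spectrogram magnitude per
    frequency block and time block (window length and block edges assumed)."""
    n_ch = eeg.shape[0]
    desc = np.zeros((n_ch, n_freq_blocks, n_time_blocks))
    for c in range(n_ch):
        f, t, S = spectrogram(eeg[c], fs=fs, nperseg=fs)   # per-channel spectrogram
        S = np.abs(S)
        f_edges = np.linspace(0, len(f), n_freq_blocks + 1, dtype=int)
        t_edges = np.linspace(0, len(t), n_time_blocks + 1, dtype=int)
        for i in range(n_freq_blocks):
            for j in range(n_time_blocks):
                block = S[f_edges[i]:f_edges[i + 1], t_edges[j]:t_edges[j + 1]]
                desc[c, i, j] = block.mean() if block.size else 0.0
    return desc

# e.g. an 18-channel, 30 s recording at 256 Hz -> an 18x19x3 tensor
x = np.random.randn(18, 30 * 256)
print(block_mean_descriptor(x).shape)   # (18, 19, 3)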
Positive Semidefinite Matrix Factorization Based on Truncated Wirtinger Flow. Lahat, D.; and Févotte, C. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1035-1039, Aug 2020.
@InProceedings{9287560,
  author = {D. Lahat and C. Févotte},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Positive Semidefinite Matrix Factorization Based on Truncated Wirtinger Flow},
  year = {2020},
  pages = {1035-1039},
  abstract = {This paper deals with algorithms for positive semidefinite matrix factorization (PSDMF). PSDMF is a recently-proposed extension of nonnegative matrix factorization with applications in combinatorial optimization, among others. In this paper, we focus on improving the local convergence of an alternating block gradient (ABC) method for PSDMF in a noise-free setting by replacing the quadratic objective function with the Poisson log-likelihood. This idea is based on truncated Wirtinger flow (TWF), a phase retrieval (PR) method that trims outliers in the gradient and thus regularizes it. Our motivation is a recent result linking PR with PSDMF. Our numerical experiments validate that the numerical benefits of TWF may carry over to PSDMF despite the more challenging setting, when initialized within its region of convergence. We then extend TWF from PR to affine rank minimization (ARM), and show that although the outliers are no longer an issue in the ARM setting, PSDMF with the new objective function may still achieve a smaller error for the same number of iterations. In a broader view, our results indicate that a proper choice of objective function may enhance convergence of matrix (or tensor) factorization methods.},
  keywords = {Tensors;Signal processing algorithms;Signal processing;Linear programming;Minimization;Optimization;Convergence;Positive semidefinite factorization;nonnegative factorizations;phase retrieval;low-rank approximations;optimization},
  doi = {10.23919/Eusipco47968.2020.9287560},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001035.pdf},
}
MPG-Net: Multi-Prediction Guided Network for Segmentation of Retinal Layers in OCT Images. Fu, Z.; Sun, Y.; Zhang, X.; Stainton, S.; Barney, S.; Hogg, J.; Innes, W.; and Dlay, S. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1299-1303, Aug 2020.
@InProceedings{9287561,
  author = {Z. Fu and Y. Sun and X. Zhang and S. Stainton and S. Barney and J. Hogg and W. Innes and S. Dlay},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {MPG-Net: Multi-Prediction Guided Network for Segmentation of Retinal Layers in OCT Images},
  year = {2020},
  pages = {1299-1303},
  abstract = {Optical coherence tomography (OCT) is a commonly-used method of extracting high resolution retinal information. Moreover, there is an increasing demand for automated retinal layer segmentation, which facilitates retinal disease diagnosis. In this paper, we propose a novel multi-prediction guided attention network (MPG-Net) for automated retinal layer segmentation in OCT images. The proposed method consists of two major steps to strengthen the discriminative power of a U-shape Fully convolutional network (FCN) for reliable automated segmentation. Firstly, the feature refinement module which adaptively re-weights the feature channels is exploited in the encoder to capture more informative features and discard information in irrelevant regions. Furthermore, we propose a multi-prediction guided attention mechanism which provides pixel-wise semantic prediction guidance to better recover the segmentation mask at each scale. This mechanism which transforms the deep supervision to supervised attention is able to guide feature aggregation with more semantic information between intermediate layers. Experiments on the publicly available Duke OCT dataset confirm the effectiveness of the proposed method as well as an improved performance over other state-of-the-art approaches.},
  keywords = {biomedical optical imaging;diseases;eye;image classification;image segmentation;medical image processing;optical tomography;MPG-Net;retinal layers;OCT images;optical coherence tomography;high resolution retinal information;automated retinal layer segmentation;retinal disease diagnosis;U-shape Fully convolutional network;reliable automated segmentation;feature refinement module;informative features;multiprediction guided attention mechanism;pixel-wise semantic prediction guidance;intermediate layers;Duke OCT dataset;Image segmentation;Convolution;Optical coherence tomography;Semantics;Transforms;Retina;Reliability;Optical coherence tomography (OCT);retinal layer segmentation;fully convolutional network (FCN);self attention;semantic prediction},
  doi = {10.23919/Eusipco47968.2020.9287561},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001299.pdf},
}
Improved Patch-based View Rendering for Focused Plenoptic Cameras with Extended Depth-of-Field. Filipe, J. N.; Assuncao, P. A. A.; Tavora, L. M. N.; Fonseca-Pinto, R.; Thomaz, L. A.; and Faria, S. M. M. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 680-684, Aug 2020.
@InProceedings{9287562,
  author = {J. N. Filipe and P. A. A. Assuncao and L. M. N. Tavora and R. Fonseca-Pinto and L. A. Thomaz and S. M. M. Faria},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Improved Patch-based View Rendering for Focused Plenoptic Cameras with Extended Depth-of-Field},
  year = {2020},
  pages = {680-684},
  abstract = {This paper proposes a patch-based framework to improve view rendering from light fields captured by focused plenoptic cameras with extended depth-of-field. A three-step approach is performed to minimise discontinuities across neighbouring patches and to maximise the information captured from different types of micro-lens. Firstly, the useful patch size of each micro-image is determined to limit the contribution of low light intensity regions into the rendered images. Secondly, anisotropic inpainting is applied to smooth the discontinuities between neighbouring patches. Thirdly, a multi-focus image fusion algorithm is used to efficiently combine the information captured by the different types of micro-lens comprising plenoptic cameras with extended depth-of-field. Significant improvements are obtained in the rendered views, by reducing visible discontinuities between adjacent micro-images, while at the same time increasing the average sharpness of image content by 20%, measured by the Energy of Laplacian (i.e., a focus metric), in comparison to the conventional rendering approach, which always selects the best focused lens type.},
  keywords = {image fusion;image resolution;image restoration;image sensors;microlenses;rendering (computer graphics);depth-of-field;visible discontinuities;adjacent microimages;image content;focus metric;conventional rendering approach;focused lens type;improved patch-based view rendering;focused plenoptic cameras;patch-based framework;light fields;three-step approach;neighbouring patches;patch size;microimage;rendered images;multifocus image fusion algorithm;microlens comprising plenoptic cameras;light intensity regions;Simulation;Signal processing algorithms;Rendering (computer graphics);Cameras;Time measurement;Image fusion;Lenses;Patch-based rendering;Focused plenoptic camera;Extended depth-of-field},
  doi = {10.23919/Eusipco47968.2020.9287562},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000680.pdf},
}
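The 20% sharpness gain quoted above is measured with the Energy of Laplacian focus metric. A minimal implementation of that metric is given below; how the paper normalises or averages it over views is not specified in the abstract.

import numpy as np
from scipy.ndimage import laplace

def energy_of_laplacian(img):
    """Energy of Laplacian (EoL) focus/sharpness metric: sum of squared
    Laplacian responses over the image. Higher values indicate sharper content."""
    lap = laplace(img.astype(np.float64))
    return float(np.sum(lap ** 2))

# relative sharpness gain of a rendered view over a reference rendering:
# gain_percent = 100 * (energy_of_laplacian(view) / energy_of_laplacian(reference) - 1)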
Subjective Quality Evaluation of Light Field Data Under Coding Distortions. Palma, E.; Battisti, F.; Carli, M.; Astola, P.; and Tabus, I. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 526-530, Aug 2020.
@InProceedings{9287564,
  author = {E. Palma and F. Battisti and M. Carli and P. Astola and I. Tabus},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Subjective Quality Evaluation of Light Field Data Under Coding Distortions},
  year = {2020},
  pages = {526-530},
  abstract = {This contribution presents the subjective evaluation of the compressed light field datasets obtained with four state-of-the-art codecs: two from the JPEG Pleno Light Field Verification Model and two recent methods for which codecs are publicly available. To the best of our knowledge, currently no subjective testing has been carried out to compare the performances of the four considered codecs. The evaluation methodology is based on Bradley-Terry scores, obtained from pairwise comparisons of the four codecs at four target bit-rates, for four light field datasets. The subset of pairs for which the comparisons are performed is selected according to the square design method, under two design variants, resulting in two datasets of subjective results. The analysis of the collected data, obtained by ranking the subjective scores of the codecs at various bitrates, shows high correlation with the available objective quality metrics.},
  keywords = {Measurement;Codecs;Correlation;Image coding;Distortion;Light fields;Encoding},
  doi = {10.23919/Eusipco47968.2020.9287564},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000526.pdf},
}
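Bradley-Terry scores of the kind used above are typically estimated from pairwise preference counts with the classic minorize-maximize (Zermelo) iteration; a small sketch with toy counts for four codecs follows. The paper's actual fitting procedure and square-design pair selection are not reproduced here.

import numpy as np

def bradley_terry(wins, n_iter=200):
    """Estimate Bradley-Terry worth parameters from a pairwise win matrix.
    wins[i, j] = number of times item i was preferred over item j.
    Classic MM update (a generic sketch, not the paper's exact procedure)."""
    wins = np.asarray(wins, dtype=float)
    n = wins.shape[0]
    comparisons = wins + wins.T                  # total comparisons per pair
    p = np.ones(n)
    for _ in range(n_iter):
        new_p = np.zeros(n)
        for i in range(n):
            denom = sum(comparisons[i, j] / (p[i] + p[j]) for j in range(n) if j != i)
            new_p[i] = wins[i].sum() / denom if denom > 0 else p[i]
        p = new_p / new_p.sum()                  # normalize for identifiability
    return p

# four codecs, toy pairwise preference counts
wins = [[0, 12, 15, 9], [8, 0, 11, 10], [5, 9, 0, 7], [11, 10, 13, 0]]
print(bradley_terry(wins))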
Methods to Improve the Robustness of Right Whale Detection using CNNs in Changing Conditions. Vickers, W.; Milner, B.; Gorpincenko, A.; and Lee, R. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 106-110, Aug 2020.
@InProceedings{9287565,
  author = {W. Vickers and B. Milner and A. Gorpincenko and R. Lee},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Methods to Improve the Robustness of Right Whale Detection using CNNs in Changing Conditions},
  year = {2020},
  pages = {106-110},
  abstract = {This paper is concerned with developing a method of detecting right whales from autonomous surface vehicles (ASVs) that is robust to changing operating conditions. A baseline convolutional neural network (CNN) is first trained using data taken from a single operating condition. Its detection accuracy is then found to degrade when applied to different operating conditions. Two methods are then investigated to restore performance using just a single model. The first method is an augmented training approach where progressively more data from the new condition is mixed with the original data. The second method uses unsupervised adaptation to adapt the original model to the new conditions. Evaluation under changing environmental and noise conditions reveals the model produced from augmented training data to achieve higher detection accuracy across all conditions than the adapted model. However, the adapted model does not require label data from the new environment and in these situations is a more realistic solution.},
  keywords = {Training;Adaptation models;Whales;Training data;Signal processing;Data models;Surface treatment;cetacean detection;autonomous surface vehicles;passive acoustic monitoring;CNN;augmentation;adaptation},
  doi = {10.23919/Eusipco47968.2020.9287565},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000106.pdf},
}
Application of the Singular Spectrum Analysis to the time variations of the amplitude of Schumann resonance measurements. Rodríguez-Camacho, J.; Blanco-Navarro, D.; Gómez-Lopera, J. F.; and Carrión, M. C. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2215-2219, Aug 2020.
@InProceedings{9287566,
  author = {J. Rodríguez-Camacho and D. Blanco-Navarro and J. F. Gómez-Lopera and M. C. Carrión},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Application of the Singular Spectrum Analysis to the time variations of the amplitude of Schumann resonance measurements},
  year = {2020},
  pages = {2215-2219},
  abstract = {The Singular Spectrum Analysis technique has been applied to the amplitude of the first Schumann resonance measured at the Sierra Nevada station (Spain) in order to obtain the principal components of its time evolution. The results of this study confirm the appearance of the annual and semiannual components that have been pointed out for this resonance and also reveal other components corresponding to 45 to 120 days variations, which match the variations of atmospheric waves like the Madden-Julian oscillation and the Kelvin waves. A preprocessing of the measurements is required since the station has experienced some technical failures and there are thus some gaps in the measured data. The application of the technique has been made taking one data point per month and one data point per day, and the results have been compared.},
  keywords = {atmospheric movements;atmospheric techniques;Schumann resonance measurements;singular spectrum analysis technique;Sierra Nevada station;Spain;atmospheric waves;Madden-Julian oscillation;Kelvin waves;Time-frequency analysis;Kelvin;Time series analysis;Signal processing;Time measurement;Spectral analysis;Oscillators;Schumann resonance;Singular Spectrum Analysis;Principal Components},
  doi = {10.23919/Eusipco47968.2020.9287566},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002215.pdf},
}
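A basic Singular Spectrum Analysis decomposition of the kind applied above (embedding into a trajectory matrix, SVD, and diagonal averaging of the leading rank-one terms) can be sketched as follows; the window length and component grouping used for the Schumann-resonance series are not reproduced here.

import numpy as np

def ssa_components(x, window, n_components):
    """Basic SSA: embed the series into an L x K trajectory matrix, take its SVD
    and reconstruct each leading component by diagonal averaging (Hankelization).
    Generic sketch of the technique only."""
    x = np.asarray(x, dtype=float)
    N, L = len(x), window
    K = N - L + 1
    X = np.column_stack([x[i:i + L] for i in range(K)])     # trajectory matrix
    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    comps = []
    for k in range(n_components):
        Xk = s[k] * np.outer(U[:, k], Vt[k])                # rank-one piece
        comp = np.array([np.mean(np.diag(Xk[:, ::-1], d))   # diagonal averaging
                         for d in range(K - 1, -L, -1)])
        comps.append(comp)
    return np.array(comps)

# toy use: extract a slow oscillation from a noisy monthly series
t = np.arange(600)
x = np.sin(2 * np.pi * t / 12) + 0.3 * np.random.randn(600)
slow = ssa_components(x, window=120, n_components=2).sum(axis=0)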
Improving Energy Compaction of Adaptive Fourier Decomposition. Borowicz, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2348-2352, Aug 2020.
@InProceedings{9287567,
  author = {A. Borowicz},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Improving Energy Compaction of Adaptive Fourier Decomposition},
  year = {2020},
  pages = {2348-2352},
  abstract = {Adaptive Fourier decomposition (AFD) provides an expansion of an analytic function into a sum of basic signals, called mono-components. Unlike the Fourier series decomposition, the AFD is based on an adaptive rational orthogonal system, hence it is better suited for analyzing non-stationary data. The most popular algorithm for the AFD decomposes any signal in such a way that the energy of the low-frequency components is maximized. Unfortunately, this results in poor energy compaction of high-frequency components. In this paper, we develop a novel algorithm for the AFD. The key idea is to maximize the energy of any components no matter how big or small the corresponding frequencies are. A comparative evaluation was conducted of the signal reconstruction efficiency of the proposed approach and several conventional algorithms by using speech recordings. The experimental results show that with the new algorithm, it is possible to get a better performance in terms of the reconstruction quality and energy compaction property.},
  keywords = {Signal processing algorithms;Europe;Signal reconstruction;Compaction;Fourier series;Speech processing;Optimization;Fourier series;Takenaka-Malmquist system;mono-components;adaptive decomposition;energy compaction},
  doi = {10.23919/Eusipco47968.2020.9287567},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002348.pdf},
}
Nonparametric Adaptive Value-at-Risk Quantification Based on the Multiscale Energy Distribution of Asset Returns. Tzagkarakis, G.; Maurer, F.; and Dionysopoulos, T. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2393-2397, Aug 2020.
@InProceedings{9287568,
  author = {G. Tzagkarakis and F. Maurer and T. Dionysopoulos},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Nonparametric Adaptive Value-at-Risk Quantification Based on the Multiscale Energy Distribution of Asset Returns},
  year = {2020},
  pages = {2393-2397},
  abstract = {Quantifying risk is pivotal for every financial institution, with the temporal dimension being the key aspect for all the well-established risk measures. However, exploiting the frequency information conveyed by financial data could yield improved insights about the inherent risk evolution in a joint time-frequency fashion. Nevertheless, the great majority of risk managers make no explicit distinction between the information captured by patterns of different frequency content, while relying on the full time-resolution data, regardless of the trading horizon. To address this problem, a novel value-at-risk (VaR) quantification method is proposed, which combines nonlinearly the time-evolving energy profile of returns series at multiple frequency scales, determined by the predefined trading horizon. Most importantly, our proposed method can be coupled with any quantile-based risk measure to enhance its performance. Experimental evaluation with real data reveals an increased robustness of our method in efficiently controlling under-/over-estimated VaR values.},
  keywords = {Time-frequency analysis;Reactive power;Europe;Signal processing;Robustness;Risk quantification;adaptive value-at-risk;time-scale decompositions;energy distribution},
  doi = {10.23919/Eusipco47968.2020.9287568},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002393.pdf},
}
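For reference, the quantile-based measure that the proposed method builds on reduces, in its plainest form, to the empirical alpha-quantile of the return sample (historical VaR). The multiscale energy weighting is the paper's contribution and is not reproduced in this sketch.

import numpy as np

def historical_var(returns, alpha=0.05):
    """Plain historical Value-at-Risk: the empirical alpha-quantile of returns,
    reported as a positive loss figure. Baseline quantile measure only; the
    paper's multiscale energy weighting is not included."""
    returns = np.asarray(returns, dtype=float)
    return -np.quantile(returns, alpha)

# e.g. daily returns -> 95% one-day VaR
r = np.random.normal(0.0, 0.01, size=1000)
print(round(historical_var(r, alpha=0.05), 4))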
Blind Equalization Via Polynomial Optimization. Jiang, X.; Zeng, W.-J.; Chen, J.; Zoubir, A. M.; and Liu, X. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1946-1950, Aug 2020.
@InProceedings{9287570,
  author = {X. Jiang and W. -J. Zeng and J. Chen and A. M. Zoubir and X. Liu},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Blind Equalization Via Polynomial Optimization},
  year = {2020},
  pages = {1946-1950},
  abstract = {A polynomial optimization based blind equalizer (POBE) is proposed. Different from the popular constant modulus algorithm and its variants, the POBE adopts an eighth-order multivariate polynomial as the loss function. Since the loss function is sensitive to phase rotation, the POBE can achieve automatic carrier phase recovery. A gradient descent method with optimal step size is developed for solving the optimization problem. We reveal that this optimal step size is one root of a seventh-order univariate polynomial and hence, can be computed easily. Compared with the blind equalizers based on stochastic gradient descent with empirical step size, which suffer from slow convergence or even divergence, the POBE significantly accelerates the convergence rate. Moreover, it attains a much lower inter-symbol interference (ISI), resulting in a noticeable improvement of equalization performance. Simulation results demonstrate the superiority of POBE over several representative blind equalizers.},
  keywords = {Blind equalizers;Sensitivity;Simulation;Signal processing algorithms;Signal processing;Optimization;Convergence;Blind equalization;carrier phase recovery;constant modulus algorithm;polynomial optimization;root finding},
  doi = {10.23919/Eusipco47968.2020.9287570},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001946.pdf},
}
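For context, the constant modulus algorithm that the POBE is contrasted with minimizes E[(|y|^2 - R2)^2] by stochastic gradient descent on an FIR equalizer; a minimal sketch is below. The POBE itself replaces this loss with an eighth-order multivariate polynomial and computes the exact step size as a root of a seventh-order polynomial, which is not reproduced here.

import numpy as np

def cma_equalizer(x, n_taps=11, mu=1e-3, R2=1.0):
    """Constant modulus algorithm (the classical baseline): stochastic-gradient
    updates of an FIR equalizer minimizing E[(|y|^2 - R2)^2]. x is the complex
    received sequence. Illustrative sketch only."""
    w = np.zeros(n_taps, dtype=complex)
    w[n_taps // 2] = 1.0 + 0j                      # centre-spike initialization
    y = np.zeros(len(x), dtype=complex)
    for n in range(n_taps, len(x)):
        u = x[n - n_taps:n][::-1]                  # regressor (most recent sample first)
        y[n] = np.vdot(w, u)                       # equalizer output  w^H u
        e = y[n] * (np.abs(y[n]) ** 2 - R2)        # CMA error term
        w -= mu * np.conj(e) * u                   # stochastic gradient step
    return w, y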
Feature-based Response Prediction to Immunotherapy of late-stage Melanoma Patients Using PET/MR Imaging. Liebgott, A.; Gatidis, S.; Vu, V. C.; Haueise, T.; Nikolaou, K.; and Yang, B. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1229-1233, Aug 2020.
@InProceedings{9287571,
  author = {A. Liebgott and S. Gatidis and V. C. Vu and T. Haueise and K. Nikolaou and B. Yang},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Feature-based Response Prediction to Immunotherapy of late-stage Melanoma Patients Using PET/MR Imaging},
  year = {2020},
  pages = {1229-1233},
  abstract = {The treatment of malignant melanoma with immunotherapy is a promising approach to treat advanced stages of the disease. However, the treatment can cause serious side effects and not every patient responds to it. This means that crucial time may be wasted on an ineffective treatment. Assessment of the possible therapy response is hence an important research issue. The research presented in this study focuses on the investigation of the potential of medical imaging and machine learning to solve this task. To this end, we extracted image features from multi-modal images and trained a classifier to differentiate non-responsive patients from responsive ones.},
  keywords = {cancer;diseases;feature extraction;learning (artificial intelligence);medical image processing;patient diagnosis;radiation therapy;skin;tumours;important research issue;possible therapy response;ineffective treatment;patient responds;serious side effects;disease;advanced stages;malignant melanoma;late-stage melanoma patients;immunotherapy;feature-based response prediction;responsive ones;nonresponsive patients;multimodal images;image features;machine learning;medical imaging;Support vector machines;Liver;Melanoma;Signal processing;Feature extraction;Task analysis;Immune system;Support Vector Machine;Random Forest;PET/MR imaging;Therapy Response Prediction},
  doi = {10.23919/Eusipco47968.2020.9287571},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001229.pdf},
}
Joint-Diagonalizability-Constrained Multichannel Nonnegative Matrix Factorization Based on Multivariate Complex Sub-Gaussian Distribution. Kamo, K.; Kubo, Y.; Takamune, N.; Kitamura, D.; Saruwatari, H.; Takahashi, Y.; and Kondo, K. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 890-894, Aug 2020.
@InProceedings{9287572,
  author = {K. Kamo and Y. Kubo and N. Takamune and D. Kitamura and H. Saruwatari and Y. Takahashi and K. Kondo},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Joint-Diagonalizability-Constrained Multichannel Nonnegative Matrix Factorization Based on Multivariate Complex Sub-Gaussian Distribution},
  year = {2020},
  pages = {890-894},
  abstract = {In this paper, we address a statistical model extension of multichannel nonnegative matrix factorization (MNMF) for blind source separation, and we propose a new parameter update algorithm used in the sub-Gaussian model. MNMF employs full-rank spatial covariance matrices and can simulate situations in which the reverberation is strong and the sources are not point sources. In conventional MNMF, spectrograms of observed signals are assumed to follow a multivariate Gaussian distribution. In this paper, first, to extend the MNMF model, we introduce the multivariate generalized Gaussian distribution as the multivariate sub-Gaussian distribution. Since the cost function of MNMF based on this multivariate sub-Gaussian model is difficult to minimize, we additionally introduce the joint-diagonalizability constraint in spatial covariance matrices to MNMF similarly to FastMNMF, and transform the cost function to the form to which we can apply the auxiliary functions to derive the valid parameter update rules. Finally, from blind source separation experiments, we show that the proposed method outperforms the conventional methods in source-separation accuracy.},
  keywords = {Signal processing algorithms;Transforms;Gaussian distribution;Cost function;Blind source separation;Covariance matrices;Spectrogram;blind source separation;spatial covariance matrix;joint diagonalizability;sub-Gaussian distribution},
  doi = {10.23919/Eusipco47968.2020.9287572},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000890.pdf},
}
Second-Order Horizontal Synchrosqueezing of the S-transform: a Specific Wavelet Case Study. Fourer, D.; and Auger, F. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2200-2204, Aug 2020.
@InProceedings{9287573,
  author = {D. Fourer and F. Auger},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Second-Order Horizontal Synchrosqueezing of the S-transform: a Specific Wavelet Case Study},
  year = {2020},
  pages = {2200-2204},
  abstract = {We address the problem of computing efficient time-frequency and time-scale representations of non-stationary multicomponent signals. Recently, a new time-reassigned synchrosqueezing method designed for the short-time Fourier transform (STFT) was introduced to improve the energy concentration of impulsive or strongly modulated signals. Following this idea, we now propose to extend this approach to the Stockwell transform (S-transform) that is related to the Morlet wavelet transform and which provides time-frequency representations with a frequency-dependent resolution. In this study, we derive an enhanced second-order group delay estimator designed for the S-transform to deal with strongly amplitude- and frequency-modulated signals. Hence, this estimator is used to obtain a novel horizontal synchrosqueezing transform that is evaluated in numerical experiments involving multicomponent non-stationary signals.},
  keywords = {Time-frequency analysis;Frequency modulation;Fourier transforms;Europe;Frequency estimation;Delays;Signal resolution;S-transform;wavelet;time-frequency;horizontal synchrosqueezing;group-delay estimation},
  doi = {10.23919/Eusipco47968.2020.9287573},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002200.pdf},
}
Offline Training for Memristor-based Neural Networks. Boquet, G.; Macias, E.; Morell, A.; Serrano, J.; Miranda, E.; and Vicario, J. L. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1547-1551, Aug 2020.
@InProceedings{9287574,
  author = {G. Boquet and E. Macias and A. Morell and J. Serrano and E. Miranda and J. L. Vicario},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Offline Training for Memristor-based Neural Networks},
  year = {2020},
  pages = {1547-1551},
  abstract = {Neuromorphic systems based on Hardware Neural Networks (HNN) are expected to be an energy-efficient computing architecture for solving complex tasks. Due to the variability common to all nano-electronic devices, HNN success depends on the development of reliable weight storage or mitigation techniques against weight variation. In this manuscript, we propose a neural network training technique to mitigate the impact of device-to-device variation due to conductance imperfections at weight import in offline-learning. To that aim, we propose to add said variation to the weights during training in order to force the network to learn robust computations against that variation. Then, we experiment using a neural network architecture with quantized weights adapted to the design constraints imposed by memristive devices. Finally, we validate our proposal against real-world road traffic data and the MNIST image data set, showing improvements on the classification metrics.},
  keywords = {Training;Performance evaluation;Neuromorphics;Roads;Neural networks;Computer architecture;Reliability;Neuromorphic;Deep learning;RRAM;Memristor;Traffic forecasting},
  doi = {10.23919/Eusipco47968.2020.9287574},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001547.pdf},
}
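The training trick described above, injecting the expected device-to-device conductance variation into the weights at every training step so that the learned solution tolerates it at weight import, can be sketched with a tiny softmax classifier. The multiplicative Gaussian variation model and its magnitude are assumptions made for illustration, not the paper's measured device statistics.

import numpy as np

def train_with_weight_variation(X, y, n_classes, sigma=0.1, lr=0.1, epochs=200, seed=0):
    """Softmax classifier trained with multiplicative Gaussian perturbations on the
    weights at every step, mimicking memristor conductance variation at weight import."""
    rng = np.random.default_rng(seed)
    W = np.zeros((X.shape[1], n_classes))
    Y = np.eye(n_classes)[y]                                          # one-hot targets
    for _ in range(epochs):
        W_noisy = W * (1.0 + sigma * rng.standard_normal(W.shape))    # simulated device variation
        logits = X @ W_noisy
        p = np.exp(logits - logits.max(axis=1, keepdims=True))
        p /= p.sum(axis=1, keepdims=True)
        grad = X.T @ (p - Y) / len(X)     # cross-entropy gradient at the perturbed weights
        W -= lr * grad                    # applied to the nominal weights (noise-injection training)
    return W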
Signal-Adapted Analytic Wavelet Packets in Arbitrary Dimensions. Matthias, B.; Maximilian, S.; and Fernando, P. L. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2230-2234, Aug 2020.
@InProceedings{9287575,
  author = {B. Matthias and S. Maximilian and P. L. Fernando},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Signal-Adapted Analytic Wavelet Packets in Arbitrary Dimensions},
  year = {2020},
  pages = {2230-2234},
  abstract = {The analytic wavelet packet transform, based on the dual-tree approach, represents a complex-valued extension of the wavelet packet transform. A generalization to multiple dimensions can be realized using fully separable wavelet trees, but this restricts the possible subband combinations. To overcome these limitations, we present a flexible framework to calculate N-D analytic wavelet packets with configurable decomposition structures and filter types. By introducing a new subband notation for the nodes of the N-D wavelet binary tree, both anisotropic and isotropic decomposition structures can be realized. Based on this subband notation, a full frame in N dimensions is defined and combined with an optimal basis selection, which we generalized to arbitrary dimensions, to find signal-adapted decomposition structures. As a multi-dimensional example, the framework is applied to the compression and denoising of a 4D light field. The results are evaluated in terms of the PSNR and SSIM and compared with the discrete cosine transform.},
  keywords = {Noise reduction;Signal processing;Wavelet analysis;Wavelet packets;Light fields;Discrete wavelet transforms;Standards;analytic wavelet packets;signal-adapted filtering;signal compression;signal denoising},
  doi = {10.23919/Eusipco47968.2020.9287575},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002230.pdf},
}
Transform Learning Assisted Graph Signal Processing for Low Rate Electrical Load Disaggregation. Kumar, K.; and Chandra, M. G. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1673-1677, Aug 2020.
@InProceedings{9287576,
  author = {K. Kumar and M. G. Chandra},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Transform Learning Assisted Graph Signal Processing for Low Rate Electrical Load Disaggregation},
  year = {2020},
  pages = {1673-1677},
  abstract = {Separating the individual sources with less differentiating features and random time overlaps in a superposition is a challenging problem that arises in many scenarios. Disaggregating low sampled smart meter data is one such important and interesting use case. In this paper, a novel joint optimization formulation using Transform Learning (TL) assisted with Graph Signal Processing (GSP) is presented to reconstruct the individual operational waveforms of the electrical loads. Data-driven transforms are utilized to learn the individual load characteristics. Treating the transform coefficients as load activations, graph signal smoothness is exploited to estimate the coefficients in the test phase using the coefficients learnt during the training phase. The requisite optimization formulation and the derivation of the necessary update steps are presented. The efficacy of the proposal is demonstrated by the load identification and consumption estimation results obtained for residential load disaggregation, considering both real and simulated data along with comparisons against some of the recent works in this domain.},
  keywords = {Training;Source separation;Transforms;Smart meters;Timing;Sensors;Optimization;Transform Learning;Dictionary Learning;Graph Signal Processing;Electrical Load Disaggregation},
  doi = {10.23919/Eusipco47968.2020.9287576},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001673.pdf},
}
Energy Separation Based Features for Replay Spoof Detection for Voice Assistant. Prajapati, G. P.; Kamble, M. R.; and Patil, H. A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 386-390, Aug 2020.
@InProceedings{9287577,
  author = {G. P. Prajapati and M. R. Kamble and H. A. Patil},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Energy Separation Based Features for Replay Spoof Detection for Voice Assistant},
  year = {2020},
  pages = {386-390},
  abstract = {Voice Assistant (VA) nowadays plays a very important role in smart home applications. However, along with this ease of use, the VA also brings security issues, such as the possibility of being attacked by replay, hidden voice commands, etc. This paper presents a replay Spoof Speech Detection (SSD) system for VA using Energy Separation Algorithm (ESA)-based features to capture Instantaneous Amplitude and Frequency Cepstral Coefficients (i.e., ESA-IACC and ESA-IFCC), and a Gaussian Mixture Model (GMM) as a pattern classifier. The Teager Energy Operator (TEO) has the characteristic of suppressing noise and hence, it is robust to noise sensitivity. For noisy acoustic environments, the ESA-based features that employ TEO perform well compared to the clean environment. We performed the experiments on the ReMASC database, which contains four different acoustic environments. The proposed features performed better in clean and noisy environments. In addition, to obtain possible complementary information, we performed score-level fusion of ESA-IACC and ESA-IFCC that resulted in a low Equal Error Rate (EER) for different environments. Furthermore, we compared our proposed feature sets with Constant-Q Cepstral Coefficients (CQCC), and Linear Frequency Cepstral Coefficients (LFCC), resulting in a relative improvement of approximately 21.88 % for clean environments and 66.34 % for noisy environments (in EER), respectively.},
  keywords = {Voice activity detection;Cepstral analysis;Signal processing algorithms;Smart homes;Signal processing;Feature extraction;Noise measurement;Replay Spoofing;Voice Assistant (VA);Teager Energy Operator (TEO);Energy Separation Algorithm (ESA)},
  doi = {10.23919/Eusipco47968.2020.9287577},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000386.pdf},
}
\n
\n\n\n
\n Voice Assistants (VA) nowadays play a very important role in smart home applications. However, along with convenience, the VA also brings security issues, such as the possibility of being attacked by replay or hidden voice commands. This paper presents a replay Spoof Speech Detection (SSD) system for VA using Energy Separation Algorithm (ESA)-based features to capture Instantaneous Amplitude and Frequency Cepstral Coefficients (i.e., ESA-IACC and ESA-IFCC), and a Gaussian Mixture Model (GMM) as a pattern classifier. The Teager Energy Operator (TEO) has the ability to suppress noise and is therefore robust to noise. In noisy acoustic environments, the ESA-based features that employ the TEO perform well compared to the clean environment. We performed the experiments on the ReMASC database, which contains four different acoustic environments. The proposed features performed better in clean and noisy environments. In addition, to obtain possible complementary information, we performed score-level fusion of ESA-IACC and ESA-IFCC, which resulted in a low Equal Error Rate (EER) for different environments. Furthermore, we compared our proposed feature sets with Constant-Q Cepstral Coefficients (CQCC) and Linear Frequency Cepstral Coefficients (LFCC), resulting in relative improvements (in EER) of approximately 21.88% for clean environments and 66.34% for noisy environments, respectively.\n
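The Teager Energy Operator and the energy separation step mentioned above have standard textbook forms; a minimal sketch (which may differ from the exact ESA variant used in the paper) is:

```python
import numpy as np

def teager(x):
    # Discrete Teager Energy Operator: psi[n] = x[n]^2 - x[n-1]*x[n+1]
    return x[1:-1]**2 - x[:-2] * x[2:]

def desa2(x):
    """DESA-2-style energy separation (textbook form): instantaneous amplitude
    and frequency (rad/sample) from the TEO of the signal and of its central
    difference. Not necessarily the exact variant used in the paper."""
    y = x[2:] - x[:-2]                               # y[n] = x[n+1] - x[n-1]
    px = np.maximum(teager(x)[1:-1], 1e-12)          # aligned with teager(y)
    py = np.maximum(teager(y), 1e-12)
    omega = 0.5 * np.arccos(np.clip(1 - py / (2 * px), -1.0, 1.0))
    amp = 2 * px / np.sqrt(py)
    return amp, omega

# sanity check on a pure tone: amplitude ~2, frequency ~0.3 rad/sample
n = np.arange(512)
a_hat, w_hat = desa2(2.0 * np.cos(0.3 * n))
print(a_hat.mean(), w_hat.mean())
```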
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Affine Intra-prediction for Versatile Video Coding.\n \n \n \n \n\n\n \n Adhuran, J.; Fernando, A.; Kulupana, G.; and Blasi, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 545-549, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AffinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287579,\n  author = {J. Adhuran and A. Fernando and G. Kulupana and S. Blasi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Affine Intra-prediction for Versatile Video Coding},\n  year = {2020},\n  pages = {545-549},\n  abstract = {New algorithms are being investigated in the context of next generation video coding to achieve higher compression efficiency. In particular, intra block copy is well known to accurately predict screen content and artificially generated content where patterns and edges may repeat within the frame. On the other hand, such type of video content is often subject to geometrical transformations such as transitions, rotations, zooms etc, which may not be accurately captured by simply copying neighbouring pixels. A new intra-prediction scheme is presented in this paper whereby blocks of samples from already reconstructed areas are processed by means of an affine-type transformation. This allows more accurate prediction of blocks which improve compression efficiency in a variety of conditions. Experiments in the context of Versatile Video Coding standard show the proposed method can improve intra-coding compression efficiency by 2.01% BD-rates on average, and up to 4.81%, with negligible impact on the decoder complexity.},\n  keywords = {Video coding;Signal processing algorithms;Tools;Encoding;Complexity theory;Next generation networking;Standards;VVC;intra-coding;affine;intra block copy},\n  doi = {10.23919/Eusipco47968.2020.9287579},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000545.pdf},\n}\n\n
\n
\n\n\n
\n New algorithms are being investigated in the context of next generation video coding to achieve higher compression efficiency. In particular, intra block copy is well known to accurately predict screen content and artificially generated content where patterns and edges may repeat within the frame. On the other hand, such video content is often subject to geometric transformations such as translations, rotations, zooms, etc., which may not be accurately captured by simply copying neighbouring pixels. A new intra-prediction scheme is presented in this paper whereby blocks of samples from already reconstructed areas are processed by means of an affine-type transformation. This allows more accurate prediction of blocks, which improves compression efficiency in a variety of conditions. Experiments in the context of the Versatile Video Coding standard show the proposed method can improve intra-coding compression efficiency by 2.01% BD-rate on average, and up to 4.81%, with negligible impact on decoder complexity.\n
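A toy sketch of the core idea, sampling already-reconstructed reference samples through an affine map with bilinear interpolation (parameters A and b are illustrative; the actual VVC integration, parameter search, and signalling are not modelled here):

```python
import numpy as np

def affine_intra_predict(ref, A, b, bh, bw):
    """Toy affine-style prediction: sample an already-reconstructed reference
    area `ref` at affinely mapped positions with bilinear interpolation.
    A (2x2) and b (2,) are illustrative parameters, not VVC syntax elements."""
    ys, xs = np.mgrid[0:bh, 0:bw].astype(float)
    sx = np.clip(A[0, 0]*xs + A[0, 1]*ys + b[0], 0, ref.shape[1] - 1.001)
    sy = np.clip(A[1, 0]*xs + A[1, 1]*ys + b[1], 0, ref.shape[0] - 1.001)
    x0, y0 = sx.astype(int), sy.astype(int)
    fx, fy = sx - x0, sy - y0
    return (ref[y0, x0]*(1-fx)*(1-fy) + ref[y0, x0+1]*fx*(1-fy)
            + ref[y0+1, x0]*(1-fx)*fy + ref[y0+1, x0+1]*fx*fy)
```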
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Neighborhood-aware autoencoder for missing value imputation.\n \n \n \n \n\n\n \n Aidos, H.; and Tomás, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1542-1546, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Neighborhood-awarePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287580,\n  author = {H. Aidos and P. Tomás},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Neighborhood-aware autoencoder for missing value imputation},\n  year = {2020},\n  pages = {1542-1546},\n  abstract = {Missing values are a fundamental issue in many applications by constraining the application of different learning methods or by impairing the attained results. Many solutions have been proposed by relying on statistical or machine learning techniques. However, in most cases, the results are not yet satisfactory. Hence, motivated by the advent of deep learning, different solutions have also been proposed, such as by adopting autoencoders and adversarial training. However, in most of these solutions, the results are impaired by the network structure and training strategy, constraining the accuracy of missing value imputation. In this paper, we revisit autoencoder networks and show that through a careful selection of network structure and optimization strategy we outperform other deep learning solutions. We further study the impact of a previously proposed technique, stochastic corruption of inputs, to show that when the network is well designed and trained, it actually impairs the results.},\n  keywords = {Deep learning;Training;Learning systems;Europe;Signal processing;Tuning;Optimization;Missing values;deep learning;autoencoder architecture},\n  doi = {10.23919/Eusipco47968.2020.9287580},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001542.pdf},\n}\n\n
\n
\n\n\n
\n Missing values are a fundamental issue in many applications, constraining the use of different learning methods or impairing the attained results. Many solutions have been proposed relying on statistical or machine learning techniques. However, in most cases, the results are not yet satisfactory. Hence, motivated by the advent of deep learning, different solutions have also been proposed, such as adopting autoencoders and adversarial training. However, in most of these solutions, the results are impaired by the network structure and training strategy, constraining the accuracy of missing value imputation. In this paper, we revisit autoencoder networks and show that, through a careful selection of network structure and optimization strategy, we outperform other deep learning solutions. We further study the impact of a previously proposed technique, stochastic corruption of inputs, and show that when the network is well designed and trained, it actually impairs the results.\n
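One detail worth making explicit is that an imputation autoencoder is typically trained with a loss evaluated only on observed entries; a minimal sketch of such a masked loss (toy data, stand-in network output) is:

```python
import numpy as np

# Minimal sketch (not the paper's network): the reconstruction loss is computed
# only where values are observed, so missing entries never contribute gradient signal.
rng = np.random.default_rng(1)
X = rng.standard_normal((8, 5))              # ground-truth data
mask = rng.random(X.shape) > 0.2             # True where a value is observed
X_in = np.where(mask, X, 0.0)                # zero-fill missing inputs
X_hat = X_in + 0.1 * rng.standard_normal(X.shape)   # stand-in for the network output

masked_mse = np.sum(mask * (X_hat - X)**2) / mask.sum()
print(masked_mse)
```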
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reconstruction of Finite Rate of Innovation Spherical Signals in the Presence of Noise Using Deep Learning Architecture.\n \n \n \n \n\n\n \n Tarar, M. O.; and Khalid, Z.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1487-1491, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ReconstructionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287581,\n  author = {M. O. Tarar and Z. Khalid},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Reconstruction of Finite Rate of Innovation Spherical Signals in the Presence of Noise Using Deep Learning Architecture},\n  year = {2020},\n  pages = {1487-1491},\n  abstract = {We propose a method for the accurate reconstruction (recovery of parameters) of non-bandlimited finite rate of innovation (FRI) signals on the sphere from its measurements contaminated by additive isotropic noise. We propose a framework that takes the optimal number of noisy measurements and employs autoencoder (deep learning architecture) to enhance the signal consisting of a finite number of Diracs before estimating the parameters using the annihilating filter method. We use convolutional and fully connected autoencoders for signal enhancement in the spatial and spectral domains respectively. We analyse the denoising performance of both the overcomplete and undercomplete autoencoders and demonstrate the superior performance, measured as a gain in the signal to noise ratio (SNR) of the output signal, of the fully connected overcomplete autoencoder that filters the signal in the spectral domain. Through numerical experiments, we demonstrate the improvement enabled by the proposed framework in the accuracy of recovery of the parameters of the FRI signal.},\n  keywords = {Deep learning;Technological innovation;Noise reduction;Pollution measurement;Noise measurement;Spectral analysis;Signal to noise ratio;Finite rate of innovation;sphere;spherical harmonics;isotropic noise;autoencoders;deep learning},\n  doi = {10.23919/Eusipco47968.2020.9287581},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001487.pdf},\n}\n\n
\n
\n\n\n
\n We propose a method for the accurate reconstruction (recovery of parameters) of non-bandlimited finite rate of innovation (FRI) signals on the sphere from measurements contaminated by additive isotropic noise. We propose a framework that takes the optimal number of noisy measurements and employs an autoencoder (a deep learning architecture) to enhance the signal, consisting of a finite number of Diracs, before estimating the parameters using the annihilating filter method. We use convolutional and fully connected autoencoders for signal enhancement in the spatial and spectral domains, respectively. We analyse the denoising performance of both overcomplete and undercomplete autoencoders and demonstrate the superior performance, measured as a gain in the signal-to-noise ratio (SNR) of the output signal, of the fully connected overcomplete autoencoder that filters the signal in the spectral domain. Through numerical experiments, we demonstrate the improvement enabled by the proposed framework in the accuracy of recovery of the parameters of the FRI signal.\n
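For readers unfamiliar with the annihilating filter method referenced above, a minimal 1-D, noise-free analogue (on the real line rather than the sphere) is sketched below; it recovers K Dirac locations from 2K+1 Fourier-series coefficients:

```python
import numpy as np

# 1-D, noise-free annihilating-filter sketch: recover K Dirac locations on [0, tau)
# from 2K+1 Fourier-series coefficients. This is the textbook step, not the
# paper's spherical/denoised pipeline.
tau, K = 1.0, 3
t_true = np.array([0.12, 0.47, 0.80])
a_true = np.array([1.0, -0.5, 2.0])
m = np.arange(-K, K + 1)
X = (a_true[None, :] * np.exp(-2j*np.pi*np.outer(m, t_true)/tau)).sum(axis=1)

# Toeplitz system whose null vector is the annihilating filter h (length K+1).
A = np.array([[X[K + i - l] for l in range(K + 1)] for i in range(K + 1)])
h = np.linalg.svd(A)[2][-1].conj()
u = np.roots(h)                                   # u_k = exp(-j 2*pi t_k / tau)
t_hat = np.sort((-np.angle(u) * tau / (2*np.pi)) % tau)
print(t_hat)                                      # ~ [0.12, 0.47, 0.80]
```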
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Robust Deep Unfolded Network for Sparse Signal Recovery from Noisy Binary Measurements.\n \n \n \n \n\n\n \n Yang, Y.; Xiao, P.; Liao, B.; and Deligiannis, N.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2060-2064, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287582,\n  author = {Y. Yang and P. Xiao and B. Liao and N. Deligiannis},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Robust Deep Unfolded Network for Sparse Signal Recovery from Noisy Binary Measurements},\n  year = {2020},\n  pages = {2060-2064},\n  abstract = {We propose a novel deep neural network, coined DeepFPC -ℓ2, for solving the 1-bit compressed sensing problem. The network is designed by unfolding the iterations of the fixed-point continuation (FPC) algorithm with one-sided ℓ2-norm (FPC-ℓ2). The DeepFPC-ℓ2 method shows higher signal reconstruction accuracy and convergence speed than the traditional FPC-ℓ2 algorithm. Furthermore, we compare its robustness to noise with the previously proposed DeepFPC network—which stemmed from unfolding the FPC-ℓ1 algorithm—for different signal to noise ratio (SNR) and sign-flipped ratio (flip ratio) scenarios. We show that the proposed network has better noise immunity than the previous DeepFPC method. This result indicates that the robustness of a deep-unfolded neural network is related with that of the algorithm it stems from.},\n  keywords = {Neural networks;Signal processing algorithms;Robustness;Pollution measurement;Noise measurement;Signal to noise ratio;Convergence;1-bit compressed sensing;Deep unfolding;Gaussian noise;sign flipping},\n  doi = {10.23919/Eusipco47968.2020.9287582},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002060.pdf},\n}\n\n
\n
\n\n\n
\n We propose a novel deep neural network, coined DeepFPC-ℓ2, for solving the 1-bit compressed sensing problem. The network is designed by unfolding the iterations of the fixed-point continuation (FPC) algorithm with one-sided ℓ2-norm (FPC-ℓ2). The DeepFPC-ℓ2 method shows higher signal reconstruction accuracy and convergence speed than the traditional FPC-ℓ2 algorithm. Furthermore, we compare its robustness to noise with the previously proposed DeepFPC network—which stemmed from unfolding the FPC-ℓ1 algorithm—for different signal-to-noise ratio (SNR) and sign-flipped ratio (flip ratio) scenarios. We show that the proposed network has better noise immunity than the previous DeepFPC method. This result indicates that the robustness of a deep-unfolded neural network is related to that of the algorithm it stems from.\n
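As context for the unfolding, one plain (un-unfolded) FPC-type iteration with a one-sided ℓ2 data term can be sketched as follows; this is a generic textbook-style step, not the DeepFPC-ℓ2 network itself:

```python
import numpy as np

def soft(v, t):
    # soft-thresholding, the proximal operator of the l1 norm
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def fpc_l2_step(x, A, y, tau=0.2, mu=0.05):
    """One generic FPC-style iteration for 1-bit CS with a one-sided l2 data term:
    gradient step on (1/2)*sum(max(-y*(A x), 0)^2), then soft-thresholding.
    The unit-norm renormalisation reflects the scale ambiguity of 1-bit measurements.
    A sketch only; the unfolded network learns its own step sizes/thresholds."""
    r = np.maximum(-y * (A @ x), 0.0)          # violated sign constraints
    grad = -(A.T @ (y * r))
    x = soft(x - tau * grad, tau * mu)
    return x / max(np.linalg.norm(x), 1e-12)
```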
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Deep Neural Network based Distance Estimation for Geometry Calibration in Acoustic Sensor Networks.\n \n \n \n \n\n\n \n Gburrek, T.; Schmalenstroeer, J.; Brendel, A.; Kellermann, W.; and Haeb-Umbach, R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 196-200, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DeepPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287583,\n  author = {T. Gburrek and J. Schmalenstroeer and A. Brendel and W. Kellermann and R. Haeb-Umbach},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Deep Neural Network based Distance Estimation for Geometry Calibration in Acoustic Sensor Networks},\n  year = {2020},\n  pages = {196-200},\n  abstract = {We present an approach to deep neural network based (DNN-based) distance estimation in reverberant rooms for supporting geometry calibration tasks in wireless acoustic sensor networks. Signal diffuseness information from acoustic signals is aggregated via the coherent-to-diffuse power ratio to obtain a distance-related feature, which is mapped to a source-to-microphone distance estimate by means of a DNN. This information is then combined with direction-of-arrival estimates from compact microphone arrays to infer the geometry of the sensor network. Unlike many other approaches to geometry calibration, the proposed scheme does only require that the sampling clocks of the sensor nodes are roughly synchronized. In simulations we show that the proposed DNN-based distance estimator generalizes to unseen acoustic environments and that precise estimates of the sensor node positions are obtained.},\n  keywords = {Geometry;Wireless sensor networks;Neural networks;Estimation;Acoustic sensors;Acoustics;Calibration;DNN;CDR;acoustic distance estimation;geometry calibration},\n  doi = {10.23919/Eusipco47968.2020.9287583},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000196.pdf},\n}\n\n
\n
\n\n\n
\n We present an approach to deep neural network based (DNN-based) distance estimation in reverberant rooms for supporting geometry calibration tasks in wireless acoustic sensor networks. Signal diffuseness information from acoustic signals is aggregated via the coherent-to-diffuse power ratio to obtain a distance-related feature, which is mapped to a source-to-microphone distance estimate by means of a DNN. This information is then combined with direction-of-arrival estimates from compact microphone arrays to infer the geometry of the sensor network. Unlike many other approaches to geometry calibration, the proposed scheme only requires that the sampling clocks of the sensor nodes are roughly synchronized. In simulations we show that the proposed DNN-based distance estimator generalizes to unseen acoustic environments and that precise estimates of the sensor node positions are obtained.\n
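To see why the coherent-to-diffuse power ratio (CDR) is a distance-related feature, a toy closed-form mapping based on the classical critical-distance model (DRR ≈ (r_crit/r)², with CDR standing in for DRR) is sketched below; the paper learns this mapping with a DNN rather than assuming it, and r_crit is room-dependent:

```python
import numpy as np

def toy_distance_from_cdr(cdr, r_crit=1.0):
    """Toy illustration only: under a diffuse-field model the direct-to-reverberant
    ratio falls off as (r_crit / r)^2, so a CDR-like value can be inverted to a
    rough distance. r_crit (the reverberation radius) is a room-dependent assumption."""
    return r_crit / np.sqrt(np.maximum(cdr, 1e-6))

print(toy_distance_from_cdr(np.array([4.0, 1.0, 0.25])))   # ~ [0.5, 1.0, 2.0] * r_crit
```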
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multiplication-Free Detection Algorithm of the Primary Synchronization Signal in LTE.\n \n \n \n \n\n\n \n Nassralla, M. H.; Ayoub, H.; Akl, N.; Jichi, R.; and Mansour, M. M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1772-1776, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Multiplication-FreePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287585,\n  author = {M. H. Nassralla and H. Ayoub and N. Akl and R. Jichi and M. M. Mansour},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Multiplication-Free Detection Algorithm of the Primary Synchronization Signal in LTE},\n  year = {2020},\n  pages = {1772-1776},\n  abstract = {Frame synchronization is an important functionality that should be supported in the design of an LTE baseband receiver. Detecting the start of a frame is regularly repeated during transmission and to identify the cells of the network. Typically, the synchronization is performed using correlation-based methods that incur a large number of multiplications and thus increase the power consumption and hardware complexity. Approaches to reduce the rate of multiplications come at the expense of reduced performance of the algorithm. In this paper, we present a multiplication-free synchronization algorithm that is based on K-means clustering and distributed arithmetic and uses the primary synchronization signal (PSS). We also show through simulations that the immense improvement in power consumption and hardware complexity does not entail a degradation in performance compared to current synchronization techniques.},\n  keywords = {Power demand;Signal processing algorithms;Clustering algorithms;Hardware;Complexity theory;Synchronization;Long Term Evolution;3GPP LTE;PSS;synchronization;K-means clustering;distributed arithmetic},\n  doi = {10.23919/Eusipco47968.2020.9287585},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001772.pdf},\n}\n\n
\n
\n\n\n
\n Frame synchronization is an important functionality that should be supported in the design of an LTE baseband receiver. Detecting the start of a frame is repeated regularly during transmission and is also used to identify the cells of the network. Typically, the synchronization is performed using correlation-based methods that incur a large number of multiplications and thus increase the power consumption and hardware complexity. Approaches to reduce the number of multiplications come at the expense of reduced algorithm performance. In this paper, we present a multiplication-free synchronization algorithm that is based on K-means clustering and distributed arithmetic and uses the primary synchronization signal (PSS). We also show through simulations that the immense improvement in power consumption and hardware complexity does not entail a degradation in performance compared to current synchronization techniques.\n
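A generic sketch of how clustering the reference coefficients trades multiplications for additions in a correlation (the paper's distributed-arithmetic realisation and the actual PSS sequences are not reproduced here):

```python
import numpy as np

def clustered_correlation(x, coeffs, centroids, labels):
    """Toy illustration: the reference sequence `coeffs` is pre-quantised to a few
    K-means `centroids` (labels[n] gives the centroid index of coeffs[n]); per lag,
    received samples are first summed per cluster and only then multiplied by the
    K centroid values, so each lag costs K multiplications instead of N."""
    N = len(coeffs)
    L = len(x) - N + 1
    out = np.zeros(L)
    for lag in range(L):
        seg = x[lag:lag + N]
        sums = np.array([seg[labels == k].sum() for k in range(len(centroids))])
        out[lag] = sums @ centroids
    return out
```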
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Modulo Radon Transform and its Inversion.\n \n \n \n \n\n\n \n Bhandari, A.; Beckmann, M.; and Krahmer, F.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 770-774, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287586,\n  author = {A. Bhandari and M. Beckmann and F. Krahmer},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {The Modulo Radon Transform and its Inversion},\n  year = {2020},\n  pages = {770-774},\n  abstract = {In this paper, we introduce the Modulo Radon Transform (MRT) which is complemented by an inversion algorithm. The MRT generalizes the conventional Radon Transform and is obtained via computing modulo of the line integral of a two-dimensional function at a given angle. Since the modulo operation has an aliasing effect on the range of a function, the recorded MRT sinograms are always bounded, thus avoiding information loss arising from saturation or clipping effects. This paves a new pathway for imaging applications such as high dynamic range tomography, a topic that is in its early stages of development. By capitalizing on the recent results on Unlimited Sensing architecture, we prove that the Modulo Radon Transform can be inverted when the resultant (discrete/continuous) measurements map to a band-limited function. Thus, the MRT leads to new possibilities for both conceptualization of inversion algorithms as well as development of new hardware, for instance, for single-shot high dynamic range tomography.},\n  keywords = {Heuristic algorithms;Radon;Signal processing algorithms;Transforms;Tomography;Dynamic range;Image reconstruction;Computational imaging;computer tomography;filtered back projection;modulo;sampling and Radon transform},\n  doi = {10.23919/Eusipco47968.2020.9287586},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000770.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we introduce the Modulo Radon Transform (MRT), which is complemented by an inversion algorithm. The MRT generalizes the conventional Radon Transform and is obtained by computing the modulo of the line integral of a two-dimensional function at a given angle. Since the modulo operation has an aliasing effect on the range of a function, the recorded MRT sinograms are always bounded, thus avoiding information loss arising from saturation or clipping effects. This paves a new pathway for imaging applications such as high dynamic range tomography, a topic that is in its early stages of development. By capitalizing on recent results on the Unlimited Sensing architecture, we prove that the Modulo Radon Transform can be inverted when the resultant (discrete/continuous) measurements map to a band-limited function. Thus, the MRT leads to new possibilities both for the conceptualization of inversion algorithms and for the development of new hardware, for instance, for single-shot high dynamic range tomography.\n
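The modulo non-linearity at the heart of the MRT can be written as a centered fold into [−λ, λ); a minimal sketch of the forward folding (inversion is not shown) is:

```python
import numpy as np

def centered_modulo(t, lam):
    """Centered modulo non-linearity used in unlimited sensing: folds any value
    into [-lam, lam). Applied to Radon projections it yields bounded 'folded'
    sinograms; this sketch only shows the forward folding, not the inversion."""
    return 2 * lam * (np.mod(t / (2 * lam) + 0.5, 1.0) - 0.5)

sinogram = np.array([0.3, 2.7, -4.1, 9.9])     # hypothetical line-integral values
print(centered_modulo(sinogram, lam=1.0))      # all outputs lie in [-1, 1)
```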
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dictionary-Based Tensor-Train Sparse Coding.\n \n \n \n \n\n\n \n Boudehane, A.; Zniyed, Y.; Tenenhaus, A.; Le Brusquet, L.; and Boyer, R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1000-1004, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Dictionary-BasedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287587,\n  author = {A. Boudehane and Y. Zniyed and A. Tenenhaus and L. {Le Brusquet} and R. Boyer},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Dictionary-Based Tensor-Train Sparse Coding},\n  year = {2020},\n  pages = {1000-1004},\n  abstract = {Multidimensional signal processing is receiving a lot of interest recently due to the wide spread appearance of multidimensional signals in different applications of data science. Many of these fields rely on prior knowledge of particular properties, such as sparsity for instance, in order to enhance the performance and the efficiency of the estimation algorithms. However, these multidimensional signals are, often, structured into high-order tensors, where the computational complexity and storage requirements become an issue for growing tensor orders. In this paper, we present a sparse-based Joint dImensionality Reduction And Factors rEtrieval (JIRAFE). More specifically, we assume that an arbitrary factor admits a decomposition into a redundant dictionary coded as a sparse matrix, called the sparse coding matrix. The goal is to estimate the sparse coding matrix in the Tensor-Train model framework.},\n  keywords = {Tensors;Signal processing algorithms;Estimation;Encoding;Sparse matrices;Matrix decomposition;Computational complexity;Sparse coding;High-order tensors;Tensor train;Constrained tensor decomposition;Fast algorithms},\n  doi = {10.23919/Eusipco47968.2020.9287587},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001000.pdf},\n}\n\n
\n
\n\n\n
\n Multidimensional signal processing has received a lot of interest recently due to the widespread appearance of multidimensional signals in different applications of data science. Many of these fields rely on prior knowledge of particular properties, such as sparsity for instance, in order to enhance the performance and the efficiency of the estimation algorithms. However, these multidimensional signals are often structured into high-order tensors, where the computational complexity and storage requirements become an issue for growing tensor orders. In this paper, we present a sparse-based Joint dImensionality Reduction And Factors rEtrieval (JIRAFE) scheme. More specifically, we assume that an arbitrary factor admits a decomposition into a redundant dictionary coded by a sparse matrix, called the sparse coding matrix. The goal is to estimate the sparse coding matrix in the Tensor-Train model framework.\n
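For reference, the unconstrained Tensor-Train decomposition itself can be computed with the standard TT-SVD sweep sketched below; the paper's dictionary/sparsity constraints are not included:

```python
import numpy as np

def tt_svd(T, ranks):
    """Plain TT-SVD sketch: factor a d-way array into TT cores with given internal
    ranks (len(ranks) == d-1). This is the generic decomposition, not the paper's
    dictionary-constrained variant."""
    dims = T.shape
    d = len(dims)
    cores, r_prev = [], 1
    C = T.reshape(r_prev * dims[0], -1)
    for k in range(d - 1):
        U, S, Vh = np.linalg.svd(C, full_matrices=False)
        r = min(ranks[k], len(S))
        cores.append(U[:, :r].reshape(r_prev, dims[k], r))
        C = (S[:r, None] * Vh[:r]).reshape(r * dims[k + 1], -1)
        r_prev = r
    cores.append(C.reshape(r_prev, dims[-1], 1))
    return cores                                   # core k has shape (r_{k-1}, n_k, r_k)
```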
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Pupil Diameter Estimation in Visible Light.\n \n \n \n \n\n\n \n Ricciuti, M.; and Gambi, E.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1244-1248, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"PupilPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287588,\n  author = {M. Ricciuti and E. Gambi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Pupil Diameter Estimation in Visible Light},\n  year = {2020},\n  pages = {1244-1248},\n  abstract = {Pupil size is a valuable source of information since it can reveal the emotional state, fatigue and ageing process. A lot of research has been carried out in this area with clinical and even psychiatric validity, since the fluctuations in the size of the pupil are closely linked to the autonomic nervous system. The pupil size analysis of oscillations due to contraction and dilation could be a useful instrument for diagnosis of disorders related to their own control mechanisms and an index of neurological disease affecting other nerve centres. Pupillography is the pupil size clinical examination which involves the use of infrared light, which allows performing an optical analysis of the pupil, varying the light conditions and measuring the pupillary diameter in different luminance levels. The aim of the proposed work is to exploit video processing techniques in visible light to calculate the pupil diameter and analyse the pupil diameter changing as a result of the lighting conditions variation.},\n  keywords = {biomedical optical imaging;diseases;eye;neurophysiology;optical analysis;light conditions;video processing techniques;visible light;pupil diameter estimation;emotional state;fatigue;ageing process;clinical validity;nervous system;dilation;pupil size clinical examination;infrared light;psychiatric validity;Lighting;Optical variables measurement;Size measurement;Fatigue;Pupils;Heart rate variability;Oscillators},\n  doi = {10.23919/Eusipco47968.2020.9287588},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001244.pdf},\n}\n\n
\n
\n\n\n
\n Pupil size is a valuable source of information since it can reveal emotional state, fatigue and the ageing process. A lot of research has been carried out in this area, with clinical and even psychiatric validity, since fluctuations in the size of the pupil are closely linked to the autonomic nervous system. The analysis of pupil size oscillations due to contraction and dilation could be a useful instrument for the diagnosis of disorders related to the pupil's own control mechanisms and an index of neurological disease affecting other nerve centres. Pupillography is the clinical examination of pupil size which involves the use of infrared light, allowing an optical analysis of the pupil by varying the light conditions and measuring the pupillary diameter at different luminance levels. The aim of the proposed work is to exploit video processing techniques in visible light to calculate the pupil diameter and analyse how the pupil diameter changes as a result of variations in the lighting conditions.\n
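A toy illustration of one way to turn a visible-light eye crop into a diameter estimate (threshold the dark pupil region, convert the area to an equivalent-circle diameter); the paper's actual processing chain is more elaborate:

```python
import numpy as np

def pupil_diameter_px(gray_eye_crop, threshold=40):
    """Toy estimate: dark pixels below `threshold` are treated as the pupil and
    the diameter of a circle with the same area is returned (in pixels).
    Illustrative only; segmentation in visible light normally needs more care."""
    mask = gray_eye_crop < threshold
    area = mask.sum()
    return 2.0 * np.sqrt(area / np.pi)
```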
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Trace Ratio Optimization in Fully-Connected Sensor Networks.\n \n \n \n \n\n\n \n Musluoglu, C. A.; and Bertrand, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1991-1995, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287589,\n  author = {C. A. Musluoglu and A. Bertrand},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed Trace Ratio Optimization in Fully-Connected Sensor Networks},\n  year = {2020},\n  pages = {1991-1995},\n  abstract = {The trace ratio optimization problem consists of maximizing a ratio between two trace operators and often appears in dimensionality reduction problems for denoising or discriminant analysis. In this paper, we propose a distributed and adaptive algorithm to solve the trace ratio optimization problem over network-wide covariance matrices, which capture the spatial correlation across sensors in a wireless sensor network. We focus on fully-connected network topologies, in which case the distributed algorithm reduces the communication bottleneck by only sharing a compressed version of the observed signals at each given node. Despite this compression, the algorithm can be shown to converge to the maximal trace ratio as if all nodes would have access to all signals in the network. We provide simulation results to demonstrate the convergence and optimality properties of the proposed algorithm.},\n  keywords = {Wireless sensor networks;Simulation;Noise reduction;Signal processing algorithms;Signal processing;Sensors;Optimization;Dimensionality reduction;distributed optimization;trace ratio;discriminant analysis;SNR optimization;wireless sensor networks},\n  doi = {10.23919/Eusipco47968.2020.9287589},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001991.pdf},\n}\n\n
\n
\n\n\n
\n The trace ratio optimization problem consists of maximizing a ratio between two trace operators and often appears in dimensionality reduction problems for denoising or discriminant analysis. In this paper, we propose a distributed and adaptive algorithm to solve the trace ratio optimization problem over network-wide covariance matrices, which capture the spatial correlation across sensors in a wireless sensor network. We focus on fully-connected network topologies, in which case the distributed algorithm reduces the communication bottleneck by only sharing a compressed version of the observed signals at each given node. Despite this compression, the algorithm can be shown to converge to the maximal trace ratio as if all nodes would have access to all signals in the network. We provide simulation results to demonstrate the convergence and optimality properties of the proposed algorithm.\n
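For context, the centralised trace ratio problem that the network solves in a distributed fashion can be tackled with the standard iterative scheme sketched below (A and B are the two covariance-like matrices, assumed symmetric with B positive definite; the distributed, compressed-signal exchange of the paper is not modelled):

```python
import numpy as np

def trace_ratio(A, B, q, iters=50, seed=0):
    """Centralised iterative trace-ratio solver: maximise trace(W.T A W)/trace(W.T B W)
    over W with q orthonormal columns by alternating between updating the ratio rho
    and taking the top-q eigenvectors of A - rho*B."""
    n = A.shape[0]
    W = np.linalg.qr(np.random.default_rng(seed).standard_normal((n, q)))[0]
    for _ in range(iters):
        rho = np.trace(W.T @ A @ W) / np.trace(W.T @ B @ W)
        W = np.linalg.eigh(A - rho * B)[1][:, -q:]   # eigenvectors of the q largest eigenvalues
    rho = np.trace(W.T @ A @ W) / np.trace(W.T @ B @ W)
    return W, rho
```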
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic Extraction of Spontaneous Cries of Preterm Newborns in Neonatal Intensive Care Units.\n \n \n \n \n\n\n \n Cabon, S.; Met-Montot, B.; Porée, F.; Rosec, O.; Simon, A.; and Carrault, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1200-1204, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287590,\n  author = {S. Cabon and B. Met-Montot and F. Porée and O. Rosec and A. Simon and G. Carrault},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Automatic Extraction of Spontaneous Cries of Preterm Newborns in Neonatal Intensive Care Units},\n  year = {2020},\n  pages = {1200-1204},\n  abstract = {Cry analysis has been proven to be an inescapable tool to evaluate the development of preterm infants. However, to date, only a few authors proposed to automatically extract spontaneous cry events in the real context of Neonatal Intensive Care Units. In fact, this is challenging since a wide variety of sounds can also occur (e.g., alarms, adult voice). In this communication, a new method for spontaneous cry extraction from real life recordings of long duration is presented. A strategy based on an initial segmentation between silence and sound events, followed by a classification of the resulting audio segments into two classes (cry and non-cry) is proposed. To build the classification model, 198 cry events coming from 21 newborns and 439 non-cry events, representing the richness of the clinical sound environment were annotated. Then, a set of features, including Mel-Frequency Cepstral Coefficients, was computed in order to describe each audio segment. It was obtained after Harmonic plus Noise analysis which is commonly used for speech synthesis although never applied for newborn cry analysis. Finally, six machine learning approaches have been compared. K-Nearest Neighbours approach showed an accuracy of 94.1%. To experience the precision of the retained classifier, 412 hours of recordings of 23 newborns were also automatically processed. Results show that despite a difficult clinical context an automatic extraction of cry is achievable. This supports the idea that a new generation of non-invasive monitoring of neuro-behavioral development of premature newborns could emerge.},\n  keywords = {Pediatrics;Machine learning;Tools;Signal processing;Harmonic analysis;Speech synthesis;Monitoring;audio processing;spontaneous cries;prematurity;newborns;Neonatal Intensive Care Units;neuro-behavioral development;Harmonic plus Noise Analysis},\n  doi = {10.23919/Eusipco47968.2020.9287590},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001200.pdf},\n}\n\n
\n
\n\n\n
\n Cry analysis has been proven to be an indispensable tool to evaluate the development of preterm infants. However, to date, only a few authors have proposed to automatically extract spontaneous cry events in the real context of Neonatal Intensive Care Units. In fact, this is challenging since a wide variety of other sounds can also occur (e.g., alarms, adult voices). In this communication, a new method for spontaneous cry extraction from real-life recordings of long duration is presented. A strategy based on an initial segmentation between silence and sound events, followed by a classification of the resulting audio segments into two classes (cry and non-cry), is proposed. To build the classification model, 198 cry events coming from 21 newborns and 439 non-cry events, representing the richness of the clinical sound environment, were annotated. Then, a set of features, including Mel-Frequency Cepstral Coefficients, was computed in order to describe each audio segment. It was obtained after Harmonic plus Noise analysis, which is commonly used for speech synthesis although never applied to newborn cry analysis. Finally, six machine learning approaches were compared. The K-Nearest Neighbours approach showed an accuracy of 94.1%. To assess the precision of the retained classifier, 412 hours of recordings from 23 newborns were also automatically processed. Results show that despite a difficult clinical context, automatic extraction of cries is achievable. This supports the idea that a new generation of non-invasive monitoring of the neuro-behavioral development of premature newborns could emerge.\n
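A minimal cry/non-cry classification sketch in the spirit of the abstract, using MFCC statistics and a k-NN classifier (the paper's Harmonic plus Noise analysis features and segmentation front-end are not reproduced; file paths and labels are assumed to come from the annotated segments):

```python
import numpy as np
import librosa
from sklearn.neighbors import KNeighborsClassifier

def segment_features(path):
    """Per-segment feature vector: mean and std of 13 MFCCs. A simplified stand-in
    for the feature set described in the abstract."""
    y, sr = librosa.load(path, sr=16000)
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
    return np.concatenate([mfcc.mean(axis=1), mfcc.std(axis=1)])

# `train_paths` / `train_labels` are hypothetical and would come from the annotated
# cry / non-cry segments:
# X = np.stack([segment_features(p) for p in train_paths])
# clf = KNeighborsClassifier(n_neighbors=5).fit(X, train_labels)
```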
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Estimation of Directed Dependencies in Time Series Using Conditional Mutual Information and Non-linear Prediction.\n \n \n \n \n\n\n \n Baboukani, P. S.; Graversen, C.; and Østergaard, J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2388-2392, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"EstimationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287592,\n  author = {P. S. Baboukani and C. Graversen and J. Østergaard},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Estimation of Directed Dependencies in Time Series Using Conditional Mutual Information and Non-linear Prediction},\n  year = {2020},\n  pages = {2388-2392},\n  abstract = {It is well-known that estimation of the directed dependency between high-dimensional data sequences suffers from the {"}curse of dimensionality{"} problem. To reduce the dimensionality of the data, and thereby improve the accuracy of the estimation, we propose a new progressive input variable selection technique. Specifically, in each iteration, the remaining input variables are ranked according to a weighted sum of the amount of new information provided by the variable and the variable's prediction accuracy. Then, the highest ranked variable is included, if it is significant enough to improve the accuracy of the prediction. A simulation study on synthetic non-linear autoregressive and Henon maps data, shows a significant improvement over existing estimator, especially in the case of small amounts of high-dimensional and highly correlated data.},\n  keywords = {autoregressive processes;Henon mapping;iterative methods;sequences;statistical analysis;time series;Henon maps data;high-dimensional correlated data;weighted sum;progressive input variable selection technique;curse of dimensionality problem;high-dimensional data sequences;directed dependency;nonlinear prediction;conditional mutual information;time series;Input variables;Time series analysis;Estimation;Europe;Signal processing;Data models;Mutual information;directed dependency;input variable selection;non-linear prediction;conditional mutual information},\n  doi = {10.23919/Eusipco47968.2020.9287592},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002388.pdf},\n}\n\n
\n
\n\n\n
\n It is well-known that estimation of the directed dependency between high-dimensional data sequences suffers from the \"curse of dimensionality\" problem. To reduce the dimensionality of the data, and thereby improve the accuracy of the estimation, we propose a new progressive input variable selection technique. Specifically, in each iteration, the remaining input variables are ranked according to a weighted sum of the amount of new information provided by the variable and the variable's prediction accuracy. Then, the highest ranked variable is included, if it is significant enough to improve the accuracy of the prediction. A simulation study on synthetic non-linear autoregressive and Henon map data shows a significant improvement over an existing estimator, especially in the case of small amounts of high-dimensional and highly correlated data.\n
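A simplified greedy forward-selection loop in the spirit of the abstract is sketched below; it ranks candidates only by cross-validated non-linear prediction improvement and omits the paper's information-theoretic ranking term:

```python
import numpy as np
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import cross_val_score

def greedy_select(X, y, max_vars=5, tol=1e-3):
    """Simplified sketch: at each step add the candidate input variable that most
    improves cross-validated k-NN prediction of y, stopping when the improvement
    is negligible. Not the paper's conditional-mutual-information criterion."""
    selected, best = [], -np.inf
    for _ in range(max_vars):
        scores = {}
        for j in range(X.shape[1]):
            if j in selected:
                continue
            cols = selected + [j]
            scores[j] = cross_val_score(KNeighborsRegressor(5), X[:, cols], y, cv=3).mean()
        j_best = max(scores, key=scores.get)
        if scores[j_best] - best < tol:
            break
        selected.append(j_best)
        best = scores[j_best]
    return selected
```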
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exploiting the scaling indetermination of bi-linear models in inverse problems.\n \n \n \n \n\n\n \n Thé, S.; Thiébaut, É.; Denis, L.; and Soulez, F.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2358-2362, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ExploitingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287593,\n  author = {S. Thé and É. Thiébaut and L. Denis and F. Soulez},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Exploiting the scaling indetermination of bi-linear models in inverse problems},\n  year = {2020},\n  pages = {2358-2362},\n  abstract = {Many inverse problems in imaging require estimating the parameters of a bi-linear model, e.g., the crisp image and the blur in blind deconvolution. In all these models, there is a scaling indetermination: multiplication of one term by an arbitrary factor can be compensated for by dividing the other by the same factor.To solve such inverse problems and identify each term of the bi-linear model, reconstruction methods rely on prior models that enforce some form of regularity. If these regularization terms verify a homogeneity property, the optimal scaling with respect to the regularization functions can be determined. This has two benefits: hyper-parameter tuning is simplified (a single parameter needs to be chosen) and the computation of the maximum a posteriori estimate is more efficient.Illustrations on a blind deconvolution problem are given with an unsupervised strategy to tune the hyper-parameter.},\n  keywords = {Deconvolution;Computational modeling;Image edge detection;Signal processing algorithms;Signal processing;Reconstruction algorithms;Tuning;Inverse problem;Bi-linear models;Scaling indetermination;Blind deconvolution},\n  doi = {10.23919/Eusipco47968.2020.9287593},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002358.pdf},\n}\n\n
\n
\n\n\n
\n Many inverse problems in imaging require estimating the parameters of a bi-linear model, e.g., the crisp image and the blur in blind deconvolution. In all these models, there is a scaling indetermination: multiplication of one term by an arbitrary factor can be compensated for by dividing the other by the same factor. To solve such inverse problems and identify each term of the bi-linear model, reconstruction methods rely on prior models that enforce some form of regularity. If these regularization terms satisfy a homogeneity property, the optimal scaling with respect to the regularization functions can be determined. This has two benefits: hyper-parameter tuning is simplified (a single parameter needs to be chosen) and the computation of the maximum a posteriori estimate is more efficient. Illustrations on a blind deconvolution problem are given with an unsupervised strategy to tune the hyper-parameter.\n
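When the two regularizers are homogeneous of degrees p and q, the optimal scaling admits a closed form; a sketch (our notation, consistent with the homogeneity argument above but not taken from the paper) is:

```python
import numpy as np

def optimal_scale(J1_x, J2_y, mu, nu, p, q):
    """If J1 is homogeneous of degree p and J2 of degree q, the scale alpha that
    minimises mu*alpha**p*J1(x) + nu*alpha**(-q)*J2(y) is obtained by setting the
    derivative to zero: alpha = (q*nu*J2(y) / (p*mu*J1(x)))**(1/(p+q)).
    Notation and parameterisation are ours, for illustration only."""
    return (q * nu * J2_y / (p * mu * J1_x)) ** (1.0 / (p + q))

print(optimal_scale(J1_x=2.0, J2_y=8.0, mu=1.0, nu=1.0, p=2, q=2))   # -> sqrt(2)
```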
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n DoA Estimation via Unlimited Sensing.\n \n \n \n \n\n\n \n Fernández-Menduiña, S.; Krahmer, F.; Leus, G.; and Bhandari, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1866-1870, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DoAPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287595,\n  author = {S. Fernández-Menduiña and F. Krahmer and G. Leus and A. Bhandari},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {DoA Estimation via Unlimited Sensing},\n  year = {2020},\n  pages = {1866-1870},\n  abstract = {Direction-of-arrival (DoA) estimation is a mature topic with decades of history. Despite the progress in the field, very few papers have looked at the problem of DoA estimation with unknown dynamic range. Consider the case of space exploration or near-field and far-field emitters. In such settings, the amplitude of the impinging wavefront can be much higher than the maximum recordable range of the sensor, resulting in information loss via clipping or sensor saturation. In this paper, we present a novel sensing approach for DoA estimation that exploits hardware-software co-design and is pivoted around the theme of unlimited sensing. On the hardware front, we capitalize on a radically new breed of analog-to-digital converters (ADCs) which, instead of saturating, produce modulo measurements. On the algorithmic front, we develop a mathematically guaranteed DoA estimation technique which is non-iterative and backwards compatible with existing DoA algorithms. Our computer experiments show the efficiency of our approach by estimating DoAs from signals which are orders of magnitude higher than the ADC threshold. Hence, our work paves a new path for inverse problems linked with DoA estimation and at the same time provides guidelines for new hardware development.},\n  keywords = {Direction-of-arrival estimation;Array signal processing;Estimation;Signal processing algorithms;Dynamic range;Hardware;Sensors;Direction of arrival (DoA) estimation;multi-channel;non-linear sensing;sensor arrays;sampling theory},\n  doi = {10.23919/Eusipco47968.2020.9287595},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001866.pdf},\n}\n\n
\n
\n\n\n
\n Direction-of-arrival (DoA) estimation is a mature topic with decades of history. Despite the progress in the field, very few papers have looked at the problem of DoA estimation with unknown dynamic range. Consider the case of space exploration or near-field and far-field emitters. In such settings, the amplitude of the impinging wavefront can be much higher than the maximum recordable range of the sensor, resulting in information loss via clipping or sensor saturation. In this paper, we present a novel sensing approach for DoA estimation that exploits hardware-software co-design and is pivoted around the theme of unlimited sensing. On the hardware front, we capitalize on a radically new breed of analog-to-digital converters (ADCs) which, instead of saturating, produce modulo measurements. On the algorithmic front, we develop a mathematically guaranteed DoA estimation technique which is non-iterative and backwards compatible with existing DoA algorithms. Our computer experiments show the efficiency of our approach by estimating DoAs from signals which are orders of magnitude higher than the ADC threshold. Hence, our work paves a new path for inverse problems linked with DoA estimation and at the same time provides guidelines for new hardware development.\n
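Since the abstract stresses backward compatibility with existing DoA estimators once the modulo measurements are unfolded, a compact version of one such conventional estimator (standard narrowband MUSIC for a uniform linear array) is sketched below; it operates on ordinary, already-unfolded snapshots and is not the paper's recovery algorithm:

```python
import numpy as np

def music_spectrum(X, n_src, d_over_lambda=0.5):
    """Standard narrowband MUSIC for a uniform linear array.
    X: (sensors x snapshots) complex matrix of conventional (unfolded) measurements."""
    M = X.shape[0]
    R = X @ X.conj().T / X.shape[1]                    # sample covariance
    En = np.linalg.eigh(R)[1][:, :M - n_src]           # noise subspace (smallest eigenvalues)
    grid = np.linspace(-90.0, 90.0, 361)
    steering = np.exp(-2j * np.pi * d_over_lambda
                      * np.arange(M)[:, None] * np.sin(np.deg2rad(grid)))
    return grid, 1.0 / np.linalg.norm(En.conj().T @ steering, axis=0)**2
```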
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Rao Test With Improved Robustness for Range-Spread Target Detection.\n \n \n \n \n\n\n \n Sun, S.; Liu, J.; and Liu, W.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1916-1920, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RaoPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287596,\n  author = {S. Sun and J. Liu and W. Liu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Rao Test With Improved Robustness for Range-Spread Target Detection},\n  year = {2020},\n  pages = {1916-1920},\n  abstract = {This paper deals with the problem of detecting range-spread targets in Gaussian noise with unknown covariance matrix. We model the received signal under the signal-plus-noise hypothesis as the sum of noise, useful target echoes and fictitious signals, which makes the signal-plus-noise hypothesis more plausible in the mismatched case. An adaptive detector is designed according to the Rao test. We prove the proposed Rao test exhibits constant false alarm rate property against the covariance matrix. Numerical examples show that the proposed Rao outperforms its counterparts in the mismatched case.},\n  keywords = {Adaptation models;Gaussian noise;Simulation;Object detection;Detectors;Robustness;Covariance matrices;Adaptive detection;distributed targets;constant false alarm rate;steering vector mismatch;Rao test},\n  doi = {10.23919/Eusipco47968.2020.9287596},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001916.pdf},\n}\n\n
\n
\n\n\n
\n This paper deals with the problem of detecting range-spread targets in Gaussian noise with an unknown covariance matrix. We model the received signal under the signal-plus-noise hypothesis as the sum of noise, useful target echoes and fictitious signals, which makes the signal-plus-noise hypothesis more plausible in the mismatched case. An adaptive detector is designed according to the Rao test. We prove that the proposed Rao test exhibits the constant false alarm rate property with respect to the covariance matrix. Numerical examples show that the proposed Rao test outperforms its counterparts in the mismatched case.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Related Inference: A Supervised Learning Approach to Detect Signal Variation in Genome Data.\n \n \n \n \n\n\n \n Banuelos, M.; DeGuchy, O.; Sindi, S.; and Marcia, R. F.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1215-1219, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RelatedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287597,\n  author = {M. Banuelos and O. DeGuchy and S. Sindi and R. F. Marcia},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Related Inference: A Supervised Learning Approach to Detect Signal Variation in Genome Data},\n  year = {2020},\n  pages = {1215-1219},\n  abstract = {The human genome, composed of nucleotides, is represented by a long sequence of the letters A,C,G,T. Typically, organisms in the same species have similar genomes that differ by only a few sequences of varying lengths at varying positions. These differences can be observed in the form of regions where letters are inserted, deleted or inverted. These anomalies are known as structural variants (SVs) and are difficult to detect. The standard approach for identifying SVs involves comparing fragments of DNA from the genome of interest and comparing them to a reference genome. This process is usually complicated by errors produced in both the sequencing and mapping process which may result in an increase in false positive detections. In this work we propose two different approaches for reducing the number of false positives. We focus our attention on refining deletions detected by the popular SV tool delly. In particular, we consider the ability of simultaneously considering sequencing data from a parent and a child using a neural network and gradient boosting as a post-processing step. We compare the performance of each method on simulated and real parent-child data and show that including related individuals in training data greatly improves the ability to detect true SVs.},\n  keywords = {Sequential analysis;Refining;Genomics;Training data;Tools;Bioinformatics;Standards;Computational genomics;structural variants;machine learning;deep learning},\n  doi = {10.23919/Eusipco47968.2020.9287597},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001215.pdf},\n}\n\n
\n
\n\n\n
\n The human genome, composed of nucleotides, is represented by a long sequence of the letters A, C, G, T. Typically, organisms in the same species have similar genomes that differ by only a few sequences of varying lengths at varying positions. These differences can be observed in the form of regions where letters are inserted, deleted or inverted. These anomalies are known as structural variants (SVs) and are difficult to detect. The standard approach for identifying SVs involves taking fragments of DNA from the genome of interest and comparing them to a reference genome. This process is usually complicated by errors produced in both the sequencing and mapping processes, which may result in an increase in false positive detections. In this work we propose two different approaches for reducing the number of false positives. We focus our attention on refining deletions detected by the popular SV tool delly. In particular, we consider simultaneously using sequencing data from a parent and a child, with a neural network and gradient boosting as a post-processing step. We compare the performance of each method on simulated and real parent-child data and show that including related individuals in the training data greatly improves the ability to detect true SVs.\n
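A sketch of the gradient-boosting post-processing idea, re-scoring candidate deletions with features from both child and parent (feature names are illustrative, not delly's actual output fields):

```python
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier

# Illustrative post-processing: candidate deletions from an SV caller are re-scored
# by a supervised model that sees features from both the child and a parent at the
# same locus. Columns below are hypothetical examples of such features:
#   X[:, 0] child read support,  X[:, 1] child mapping quality,
#   X[:, 2] parent read support, X[:, 3] parent mapping quality
# y: 1 = true deletion, 0 = false positive (from annotated or simulated data)
def train_deletion_filter(X, y):
    return GradientBoostingClassifier().fit(X, y)

# keep = train_deletion_filter(X_train, y_train).predict(X_candidates) == 1
```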
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Activity Recognition using Ultra Wide Band Range-Time Scan.\n \n \n \n \n\n\n \n Chowdhury, A.; Das, T.; Rani, S.; Khasnobish, A.; and Chakravarty, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1338-1342, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ActivityPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287598,\n  author = {A. Chowdhury and T. Das and S. Rani and A. Khasnobish and T. Chakravarty},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Activity Recognition using Ultra Wide Band Range-Time Scan},\n  year = {2020},\n  pages = {1338-1342},\n  abstract = {Automated detection of Activities of Daily Living (ADL) is gaining importance for its application in smart living, elderly car, healthcare, to name a few. In this paper a novel automated method for detection of human activity is proposed using range time scan data of Ultra Wide Band (UWB) radar. Unlike earlier methods of activity recognition using multi sensor fusion and multi radar setup, we have used a single UWB radar in monostatic mode. In this work, intensity of the reflected UWB signal is used to quantify the amount of scattering leading to formation of range time data matrix. Relevant feature extraction from the range time data via two-dimensional two-directional principal component analysis (2D2D-PCA) is carried out. These features are subsequently used by a random subspace ensemble classifier with k-nearest neighbors algorithm. The proposed method is highly efficient with an average training and testing accuracy of 91.4% and 86.4%, respectively, even on unknown subjects. Moreover the technique resulted in average precision and recall of 0.87 and 0.84, respectively.},\n  keywords = {Training;Signal processing algorithms;Activity recognition;Signal processing;Sensor fusion;Ultra wideband radar;Testing;UWB radar;Activity detection;range time scan;2D2DPCA;Subspace kNN},\n  doi = {10.23919/Eusipco47968.2020.9287598},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001338.pdf},\n}\n\n
\n
\n\n\n
\n Automated detection of Activities of Daily Living (ADL) is gaining importance for its application in smart living, elderly care and healthcare, to name a few. In this paper a novel automated method for the detection of human activity is proposed using range-time scan data of an Ultra Wide Band (UWB) radar. Unlike earlier methods of activity recognition using multi-sensor fusion and multi-radar setups, we have used a single UWB radar in monostatic mode. In this work, the intensity of the reflected UWB signal is used to quantify the amount of scattering, leading to the formation of a range-time data matrix. Relevant feature extraction from the range-time data via two-dimensional two-directional principal component analysis (2D2D-PCA) is carried out. These features are subsequently used by a random subspace ensemble classifier with the k-nearest neighbors algorithm. The proposed method is highly efficient, with average training and testing accuracies of 91.4% and 86.4%, respectively, even on unknown subjects. Moreover, the technique resulted in an average precision and recall of 0.87 and 0.84, respectively.\n
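The two-dimensional two-directional PCA step named above is a generic technique; a minimal sketch operating on a stack of range-time maps is:

```python
import numpy as np

def two_directional_2dpca(A, k_row, k_col):
    """Generic (2D)2-PCA sketch: A has shape (N, H, W), e.g. N range-time maps.
    Returns reduced features Z.T @ A_i @ X of shape (k_row, k_col) per sample.
    This is the standard technique, independent of the paper's radar setup."""
    D = A - A.mean(axis=0)
    G_col = np.einsum('nij,nik->jk', D, D) / len(A)   # (W x W) column scatter
    G_row = np.einsum('nji,nki->jk', D, D) / len(A)   # (H x H) row scatter
    X = np.linalg.eigh(G_col)[1][:, -k_col:]          # top column projections
    Z = np.linalg.eigh(G_row)[1][:, -k_row:]          # top row projections
    return np.einsum('hr,nhw,wc->nrc', Z, D, X)       # Z^T D_i X for each sample
```
The reduced feature maps could then be flattened and fed to a random-subspace k-NN ensemble as described in the abstract.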
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Classifying Imaginary Vowels from Frontal Lobe EEG via Deep Learning.\n \n \n \n \n\n\n \n Parhi, M.; and Tewfik, A. H.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1195-1199, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ClassifyingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287599,\n  author = {M. Parhi and A. H. Tewfik},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Classifying Imaginary Vowels from Frontal Lobe EEG via Deep Learning},\n  year = {2020},\n  pages = {1195-1199},\n  abstract = {Brain-Computer Interface (BCI) is a promising technology for individuals who suffer from motor or speech disabilities due to the process of decoding brain signals. This paper uses a dataset for imagined speech to classify vowels based on the neurological areas of the brain. The normalized cross-correlation matrices between two electrodes are used as features. We demonstrate that by using the EEG from the frontal region of the brain, we obtain higher than 85 percent accuracy for correct vowel decoding by using two types of neural networks: convolutional neural network (CNN) and long short-term memory (LSTM). This accuracy is higher than previous studies that have classified the dataset using the entire brain region. This work shows great promise for task decoding where the physiological regions of the brain associated with specific tasks are exploited. The proposed approach has the potential to be deployed in future BCI applications.},\n  keywords = {Signal processing algorithms;Signal processing;Electroencephalography;Real-time systems;Decoding;Task analysis;Speech processing},\n  doi = {10.23919/Eusipco47968.2020.9287599},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001195.pdf},\n}\n\n
\n
\n\n\n
\n    Brain-Computer Interface (BCI) technology, which decodes brain signals, is promising for individuals who suffer from motor or speech disabilities. This paper uses a dataset for imagined speech to classify vowels based on the neurological areas of the brain. The normalized cross-correlation matrices between pairs of electrodes are used as features. We demonstrate that by using the EEG from the frontal region of the brain, we obtain higher than 85 percent accuracy for correct vowel decoding with two types of neural networks: a convolutional neural network (CNN) and a long short-term memory (LSTM) network. This accuracy is higher than that of previous studies that classified the dataset using the entire brain region. This work shows great promise for task decoding where the physiological regions of the brain associated with specific tasks are exploited. The proposed approach has the potential to be deployed in future BCI applications.\n
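A minimal sketch of one way such features could be computed, assuming the zero-lag (Pearson) normalized cross-correlation between channels and a hypothetical set of frontal-channel indices; the paper's exact feature construction may differ.

```python
# Sketch: normalized cross-correlation feature matrix between EEG electrodes
# for one trial. Restricting to frontal channels and using the zero-lag
# (Pearson) correlation are simplifying assumptions.
import numpy as np

def channel_correlation_features(eeg, frontal_idx=None):
    """eeg: (n_channels, n_samples) array for one imagined-vowel trial."""
    if frontal_idx is not None:
        eeg = eeg[frontal_idx]
    return np.corrcoef(eeg)              # (n_sel, n_sel) matrix in [-1, 1]

trial = np.random.randn(64, 512)          # placeholder: 64 channels, 512 samples
features = channel_correlation_features(trial, frontal_idx=list(range(16)))
print(features.shape)                      # e.g. (16, 16) input image for a CNN
```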
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Iterative Channel Estimation for Large Scale MIMO with Highly Quantized Measurements in 5G.\n \n \n \n \n\n\n \n Zhang, Z.; McGuire, M.; and Sima, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1643-1647, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"IterativePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287600,\n  author = {Z. Zhang and M. McGuire and M. Sima},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Iterative Channel Estimation for Large Scale MIMO with Highly Quantized Measurements in 5G},\n  year = {2020},\n  pages = {1643-1647},\n  abstract = {Large-scale MIMO systems offer high spectral efficiency with excellent error performance at low power so long as accurate channel estimates are available. When channel estimation is performed using only pilot signals, undesirably long pilot sequences are needed to achieve the required accuracy. This paper describes an iterative receiver algorithm where detected/decoded data symbols extend the pilot sequences as virtual pilot signals. By using extrinsic feedback, where only information on how the error correction code decoder modifies a posteriori bit probabilities from the detector output is fed back to the channel estimation and detection system, the errors made by the detector and channel estimator do not lead to instability. The proposed system is able to estimate time domain multipath channels with high accuracy. Communications with this system only requires 0.5 dB more power than the system using ideal channel state information, and about 2.5 dB less power than the system that estimates the channel using only the pilot signal. The receiver is also able to operate with coarsely quantized measurements so that low cost receivers can be used at each antenna.},\n  keywords = {Antenna measurements;Channel estimation;Signal processing algorithms;Receiving antennas;Iterative algorithms;Pollution measurement;MIMO communication;Massive MIMO;Channel estimation;Iterative algorithms;5G mobile communications},\n  doi = {10.23919/Eusipco47968.2020.9287600},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001643.pdf},\n}\n\n
\n
\n\n\n
\n    Large-scale MIMO systems offer high spectral efficiency with excellent error performance at low power, as long as accurate channel estimates are available. When channel estimation is performed using only pilot signals, undesirably long pilot sequences are needed to achieve the required accuracy. This paper describes an iterative receiver algorithm where detected/decoded data symbols extend the pilot sequences as virtual pilot signals. By using extrinsic feedback, where only information on how the error correction code decoder modifies the a posteriori bit probabilities from the detector output is fed back to the channel estimation and detection system, the errors made by the detector and channel estimator do not lead to instability. The proposed system is able to estimate time-domain multipath channels with high accuracy. Communication with this system requires only 0.5 dB more power than a system using ideal channel state information, and about 2.5 dB less power than a system that estimates the channel using only the pilot signal. The receiver is also able to operate with coarsely quantized measurements so that low-cost receivers can be used at each antenna.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Gradient of Mutual Information in Linear Vector Gaussian Channels in the Presence of Input Noise.\n \n \n \n \n\n\n \n Coutts, F. K.; Thompson, J.; and Mulgrew, B.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2264-2268, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"GradientPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287602,\n  author = {F. K. Coutts and J. Thompson and B. Mulgrew},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Gradient of Mutual Information in Linear Vector Gaussian Channels in the Presence of Input Noise},\n  year = {2020},\n  pages = {2264-2268},\n  abstract = {This paper considers a general linear vector Gaussian channel with arbitrary signalling in the presence of Gaussian or Gaussian mixture input noise — i.e., noise added to a desired signal prior to its measurement. Generalising the fundamental relationship unveiled by Guo and extended by Palomar, we show for this scenario that the gradient of the mutual information between a desired signal — or its discrete class label — and a measured output with respect to the measurement matrix can be expressed in a novel form without a requirement for the approximations made in previous papers. We demonstrate that the derived expressions can outperform approximate gradient terms when integrated within a gradient ascent multi-objective optimisation approach.},\n  keywords = {Simulation;Europe;Signal processing;Noise measurement;Optimization;Mutual information},\n  doi = {10.23919/Eusipco47968.2020.9287602},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002264.pdf},\n}\n\n
\n
\n\n\n
\n This paper considers a general linear vector Gaussian channel with arbitrary signalling in the presence of Gaussian or Gaussian mixture input noise — i.e., noise added to a desired signal prior to its measurement. Generalising the fundamental relationship unveiled by Guo and extended by Palomar, we show for this scenario that the gradient of the mutual information between a desired signal — or its discrete class label — and a measured output with respect to the measurement matrix can be expressed in a novel form without a requirement for the approximations made in previous papers. We demonstrate that the derived expressions can outperform approximate gradient terms when integrated within a gradient ascent multi-objective optimisation approach.\n
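For reference, the classical relation that this work generalizes (due to Palomar and Verdú) states that, for a linear vector Gaussian channel with a directly observed input, the gradient of the mutual information with respect to the channel matrix equals the channel matrix times the MMSE matrix; the paper's extension to inputs observed in noise is not reproduced here.

```latex
% Classical gradient of mutual information for y = Hx + n, n ~ CN(0, I)
% (Palomar & Verdu); the paper derives the corresponding expression when
% the input x is itself corrupted by (mixture-)Gaussian noise.
\nabla_{\mathbf{H}}\, I(\mathbf{x};\mathbf{y}) = \mathbf{H}\,\mathbf{E},
\qquad
\mathbf{E} = \mathbb{E}\!\left[(\mathbf{x}-\mathbb{E}[\mathbf{x}\mid\mathbf{y}])
(\mathbf{x}-\mathbb{E}[\mathbf{x}\mid\mathbf{y}])^{\mathsf{H}}\right].
```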
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Modelling a Microscope as Low Dimensional Subspace of Operators.\n \n \n \n \n\n\n \n Debarnot, V.; Escande, P.; Mangeat, T.; and Weiss, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 765-769, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ModellingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287603,\n  author = {V. Debarnot and P. Escande and T. Mangeat and P. Weiss},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Modelling a Microscope as Low Dimensional Subspace of Operators},\n  year = {2020},\n  pages = {765-769},\n  abstract = {We propose a novel approach to calibrate a microscope. Instead of seeking a single linear integral operator (e.g. a convolution with a point spread function) that describes its action, we propose to describe it as a low-dimensional linear subspace of operators. By doing so, we are able to capture its variations with respect to multiple factors such as changes of temperatures and refraction indexes, tilts of optical elements or different states of spatial light modulator. While richer than usual, this description however suffers from a serious limitation: it cannot be used directly to solve the typical inverse problems arising in computational imaging. As a second contribution, we therefore design an original algorithm to identify the operator from the image of a few isolated spikes. This can be achieved experimentally by adding fluorescent micro-beads around the sample. We demonstrate the potential of the approach on a challenging deblurring problem.Important note: this paper is an abridged version of a preprint [3] by the same authors, submitted for a journal publication.},\n  keywords = {fluorescence;image reconstruction;image restoration;inverse problems;optical transfer function;refractive index;spatial light modulators;challenging deblurring problem;low dimensional subspace;single linear integral operator;convolution;point spread function;low-dimensional linear subspace;refraction indexes;optical elements;spatial light modulator;serious limitation;inverse problems;computational imaging;Temperature;Microscopy;Signal processing algorithms;Fluorescence;Task analysis;Optical signal processing;Standards;fluorescence microscopy;PSF;calibration;product-convolution operators;blind deblurring},\n  doi = {10.23919/Eusipco47968.2020.9287603},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000765.pdf},\n}\n\n
\n
\n\n\n
\n    We propose a novel approach to calibrate a microscope. Instead of seeking a single linear integral operator (e.g. a convolution with a point spread function) that describes its action, we propose to describe it as a low-dimensional linear subspace of operators. By doing so, we are able to capture its variations with respect to multiple factors such as changes of temperature and refractive index, tilts of optical elements or different states of a spatial light modulator. While richer than usual, this description however suffers from a serious limitation: it cannot be used directly to solve the typical inverse problems arising in computational imaging. As a second contribution, we therefore design an original algorithm to identify the operator from the image of a few isolated spikes. This can be achieved experimentally by adding fluorescent micro-beads around the sample. We demonstrate the potential of the approach on a challenging deblurring problem. Important note: this paper is an abridged version of a preprint [3] by the same authors, submitted for journal publication.\n
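In the notation used below (ours, not necessarily the paper's), the operator-subspace model constrains the blur operator to the span of a few fixed elementary operators, so that calibration reduces to identifying a small coefficient vector from the bead images.

```latex
% Low-dimensional operator-subspace model of the microscope: the operator H
% is restricted to a K-dimensional subspace spanned by fixed elementary
% operators T_k, and calibration amounts to estimating the coefficients
% alpha_k from images of isolated fluorescent beads.
H \;\approx\; \sum_{k=1}^{K} \alpha_k\, T_k,
\qquad (\alpha_1,\dots,\alpha_K)\in\mathbb{R}^{K}.
```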
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Multi-Stage Parallel LMS Structure and its Stability Analysis Using Transfer Function Approximation.\n \n \n \n \n\n\n \n Akkad, G.; Mansour, A.; ElHassan, B.; and Inaty, E.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1851-1855, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287604,\n  author = {G. Akkad and A. Mansour and B. ElHassan and E. Inaty},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Multi-Stage Parallel LMS Structure and its Stability Analysis Using Transfer Function Approximation},\n  year = {2020},\n  pages = {1851-1855},\n  abstract = {Generally, the least mean square (LMS) adaptive algorithm is widely used in antenna array beamforming given its target tracking capability and its low computational requirements. However, the classical LMS implementation still suffers from a trade-off between convergence speed and residual error floor. Numerous variants to the classical LMS have been suggested as a solution for the previous problem at the cost of a considerable increase in the computational complexity and degraded performance in low signal to noise ratio (SNR). Thus, in this paper, we propose a multi-stage parallel LMS structure with an error feedback for accelerating the LMS convergence while maintaining a minimal steady state error and a computational complexity of order O(N), where N represents the number of antenna elements. In parallel LMS (pLMS), the second LMS stage (LMS2) error is delayed by one sample and fed-back to combine with that of the first LMS stage (LMS1) to form the total pLMS error. A transfer function approximation to the pLMS is derived in order to numerically assess the pLMS stability and to determine the approximate maximum parametric value of the step size for which the pLMS remains stable. Simulation result highlight the superior performance of the pLMS in demonstrating accelerated convergence and low steady state error compared to previous variants and for different SNR environment.},\n  keywords = {Transfer functions;Adaptive arrays;Stability analysis;Steady-state;Computational complexity;Signal to noise ratio;Convergence;LMS;Parallel LMS;Adaptive Beamforming;Transfer Function;Farrow Filter},\n  doi = {10.23919/Eusipco47968.2020.9287604},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001851.pdf},\n}\n\n
\n
\n\n\n
\n    The least mean square (LMS) adaptive algorithm is widely used in antenna array beamforming owing to its target tracking capability and low computational requirements. However, the classical LMS implementation suffers from a trade-off between convergence speed and residual error floor. Numerous variants of the classical LMS have been suggested as a solution to this problem, at the cost of a considerable increase in computational complexity and degraded performance at low signal to noise ratio (SNR). Thus, in this paper, we propose a multi-stage parallel LMS structure with error feedback for accelerating the LMS convergence while maintaining a minimal steady-state error and a computational complexity of order O(N), where N is the number of antenna elements. In the parallel LMS (pLMS), the second LMS stage (LMS2) error is delayed by one sample and fed back to combine with that of the first LMS stage (LMS1) to form the total pLMS error. A transfer function approximation of the pLMS is derived in order to numerically assess its stability and to determine the approximate maximum step size for which the pLMS remains stable. Simulation results highlight the superior performance of the pLMS, demonstrating accelerated convergence and low steady-state error compared to previous variants across different SNR environments.\n
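The sketch below follows only the structural description in the abstract (the stage-2 error is delayed by one sample and added to the stage-1 error); the specific update and combination rules are our assumptions and not the paper's derivation.

```python
# Heavily simplified two-stage "parallel LMS" sketch. The update rules and the
# way the stages are combined here are assumptions for illustration only.
import numpy as np

def parallel_lms(X, d, mu1=0.005, mu2=0.005):
    """X: (T, N) array snapshots, d: (T,) desired/reference signal."""
    T, N = X.shape
    w1 = np.zeros(N, dtype=complex)
    w2 = np.zeros(N, dtype=complex)
    e2_delayed = 0.0 + 0.0j
    e_total = np.zeros(T, dtype=complex)
    for t in range(T):
        x = X[t]
        e1 = d[t] - np.vdot(w1, x)              # stage-1 error
        e_total[t] = e1 + e2_delayed            # total pLMS error (per the abstract)
        w1 += mu1 * x * np.conj(e_total[t])     # stage-1 update (assumed rule)
        e2 = e1 - np.vdot(w2, x)                # stage 2 refines the stage-1 residual
        w2 += mu2 * x * np.conj(e2)             # stage-2 update (assumed rule)
        e2_delayed = e2                          # one-sample delay in the feedback path
    return w1, w2, e_total

rng = np.random.default_rng(0)
X = rng.standard_normal((500, 8)) + 1j * rng.standard_normal((500, 8))
w_true = rng.standard_normal(8) + 1j * rng.standard_normal(8)
d = X @ np.conj(w_true)                          # d[t] = w_true^H x_t
w1, w2, e = parallel_lms(X, d)
print(np.abs(e[:10]).mean(), np.abs(e[-10:]).mean())   # error decreases over time
```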
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Convolutional Neural Networks for Underwater Pipeline Segmentation using Imperfect Datasets.\n \n \n \n \n\n\n \n Medina, E.; Campos, R.; Gomes, J. G. R. C.; Petraglia, M. R.; and Petraglia, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1585-1589, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ConvolutionalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287605,\n  author = {E. Medina and R. Campos and J. G. R. C. Gomes and M. R. Petraglia and A. Petraglia},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Convolutional Neural Networks for Underwater Pipeline Segmentation using Imperfect Datasets},\n  year = {2020},\n  pages = {1585-1589},\n  abstract = {In this paper, we investigate a solution to the problem of underwater pipeline segmentation, based on an unbalanced dataset generated by a deterministic algorithm which employs computer vision techniques. We use manually selected masks to train two types of neural networks, U-Net and Deeplabv3+, to solve the same semantic segmentation task. We show that neural networks are able to learn from imperfect datasets, artificially generated by other algorithms. Deep convolutional architectures outperform the algorithm based on computer vision techniques. In order to find the best model, a comparison was made between the two architectures, thereby concluding that Deeplabv3+ achieves better results and features robust operation under adverse environmental conditions.},\n  keywords = {Training;Computer vision;Pipelines;Neural networks;Signal processing algorithms;Computer architecture;Convolutional neural networks;Deep Learning;Convolutional Neural Networks;Semantic Segmentation},\n  doi = {10.23919/Eusipco47968.2020.9287605},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001585.pdf},\n}\n\n
\n
\n\n\n
\n    In this paper, we investigate a solution to the problem of underwater pipeline segmentation, based on an unbalanced dataset generated by a deterministic algorithm which employs computer vision techniques. We use manually selected masks to train two types of neural networks, U-Net and Deeplabv3+, to solve the same semantic segmentation task. We show that neural networks are able to learn from imperfect datasets artificially generated by other algorithms, and that deep convolutional architectures outperform the algorithm based on computer vision techniques. To find the best model, we compared the two architectures and concluded that Deeplabv3+ achieves better results and operates robustly under adverse environmental conditions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n AeGAN: Time-Frequency Speech Denoising via Generative Adversarial Networks.\n \n \n \n \n\n\n \n Abdulatif, S.; Armanious, K.; Guirguis, K.; Sajeev, J. T.; and Yang, B.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 451-455, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AeGAN:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287606,\n  author = {S. Abdulatif and K. Armanious and K. Guirguis and J. T. Sajeev and B. Yang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {AeGAN: Time-Frequency Speech Denoising via Generative Adversarial Networks},\n  year = {2020},\n  pages = {451-455},\n  abstract = {Automatic speech recognition (ASR) systems are of vital importance nowadays in commonplace tasks such as speech-to-text processing and language translation. This created the need for an ASR system that can operate in realistic crowded environments. Thus, speech enhancement is a valuable building block in ASR systems and other applications such as hearing aids, smartphones and teleconferencing systems. In this paper, a generative adversarial network (GAN) based framework is investigated for the task of speech enhancement, more specifically speech denoising of audio tracks. A new architecture based on CasNet generator and an additional feature-based loss are incorporated to get realistically denoised speech phonetics. Finally, the proposed framework is shown to outperform other learning and traditional model-based speech enhancement approaches.},\n  keywords = {Training;Time-frequency analysis;Noise reduction;Speech enhancement;Generative adversarial networks;Generators;Task analysis;Speech enhancement;generative adversarial networks;automatic speech recognition;deep learning},\n  doi = {10.23919/Eusipco47968.2020.9287606},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000451.pdf},\n}\n\n
\n
\n\n\n
\n    Automatic speech recognition (ASR) systems are of vital importance nowadays in commonplace tasks such as speech-to-text processing and language translation. This has created the need for ASR systems that can operate in realistic crowded environments. Thus, speech enhancement is a valuable building block in ASR systems and in other applications such as hearing aids, smartphones and teleconferencing systems. In this paper, a generative adversarial network (GAN) based framework is investigated for the task of speech enhancement, more specifically speech denoising of audio tracks. A new architecture based on a CasNet generator and an additional feature-based loss are incorporated to obtain realistically denoised speech phonetics. Finally, the proposed framework is shown to outperform other learning-based and traditional model-based speech enhancement approaches.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A residual U-Net network with image prior for 3D image denoising.\n \n \n \n \n\n\n \n Abascal, J. F. P. J.; Bussod, S.; Ducros, N.; Si-Mohamed, S.; Douek, P.; Chappard, C.; and Peyrin, F.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1264-1268, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287607,\n  author = {J. F. P. J. Abascal and S. Bussod and N. Ducros and S. Si-Mohamed and P. Douek and C. Chappard and F. Peyrin},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A residual U-Net network with image prior for 3D image denoising},\n  year = {2020},\n  pages = {1264-1268},\n  abstract = {Denoising algorithms via sparse representation are among the state-of-the art for image restoration. On previous work, we proposed SPADE - a sparse- and prior-based method for 3D-image denoising. In this work, we extend this idea to learning approaches and propose a novel residual-U-Net prior-based (ResPrU-Net) method that exploits a prior image. The proposed ResPrU-Net architecture has two inputs, the noisy image and the prior image, and a residual connection that connects the prior image to the output of the network. We compare ResPrU-Net to U-Net and SPADE on human knee data acquired on a spectral computerized tomography scanner. The prior image is built from the noisy image by combining information from neighbor slices and it is the same for both SPADE and ResPrU-Net. For deep learning approaches, we use four knee samples and data augmentation for training, one knee for validation and two for test. Results show that for high noise, U-Net leads to worst results, with images that are excessively blurred. Prior-based methods, SPADE and ResPrU-Net, outperformed U-Net, leading to restored images that present similar image quality than the target. ResPrU-Net provides slightly better results than SPADE. For low noise, methods present similar results.},\n  keywords = {Training;Three-dimensional displays;Noise reduction;Signal processing algorithms;Signal processing;Image restoration;Noise measurement;Image denoising;deep learning;U-Net},\n  doi = {10.23919/Eusipco47968.2020.9287607},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001264.pdf},\n}\n\n
\n
\n\n\n
\n    Denoising algorithms based on sparse representation are among the state of the art for image restoration. In previous work, we proposed SPADE, a sparse- and prior-based method for 3D-image denoising. In this work, we extend this idea to learning approaches and propose a novel residual U-Net prior-based (ResPrU-Net) method that exploits a prior image. The proposed ResPrU-Net architecture has two inputs, the noisy image and the prior image, and a residual connection that connects the prior image to the output of the network. We compare ResPrU-Net to U-Net and SPADE on human knee data acquired on a spectral computerized tomography scanner. The prior image is built from the noisy image by combining information from neighboring slices and is the same for both SPADE and ResPrU-Net. For the deep learning approaches, we use four knee samples and data augmentation for training, one knee for validation and two for testing. Results show that for high noise, U-Net leads to the worst results, with images that are excessively blurred. The prior-based methods, SPADE and ResPrU-Net, outperform U-Net, leading to restored images with image quality similar to the target. ResPrU-Net provides slightly better results than SPADE. For low noise, all methods yield similar results.\n
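A toy sketch of the residual-prior idea only: a network that takes the noisy image and the prior image as inputs and adds the prior to its output through a residual connection. The tiny encoder here stands in for the paper's much larger U-Net and is not the authors' architecture.

```python
# Minimal PyTorch sketch of the "residual connection to a prior image" idea.
import torch
import torch.nn as nn

class TinyResPriorNet(nn.Module):
    def __init__(self, ch=16):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(2, ch, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(ch, ch, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(ch, 1, 3, padding=1),
        )

    def forward(self, noisy, prior):
        x = torch.cat([noisy, prior], dim=1)    # two inputs, stacked as channels
        return self.body(x) + prior             # residual connection to the prior

net = TinyResPriorNet()
noisy = torch.randn(1, 1, 64, 64)
prior = torch.randn(1, 1, 64, 64)
print(net(noisy, prior).shape)                  # torch.Size([1, 1, 64, 64])
```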
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Few-Shot Learning of Signal Modulation Recognition based on Attention Relation Network.\n \n \n \n \n\n\n \n Zhang, Z.; Li, Y.; and Gao, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1372-1376, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Few-ShotPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287608,\n  author = {Z. Zhang and Y. Li and M. Gao},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Few-Shot Learning of Signal Modulation Recognition based on Attention Relation Network},\n  year = {2020},\n  pages = {1372-1376},\n  abstract = {Most of existing signal modulation recognition methods attempt to establish a machine learning mechanism by training with a large number of annotated samples, which is hardly applied to the real-world electronic reconnaissance scenario where only a few samples can be intercepted in advance. Few-Shot Learning (FSL) aims to learn from training classes with a lot of samples and transform the knowledge to support classes with only a few samples, thus realizing model generalization. In this paper, a novel FSL framework called Attention Relation Network (ARN) is proposed, which introduces channel and spatial attention respectively to learn a more effective feature representation of support samples. The experimental results show that the proposed method can achieve excellent performance for fine-grained signal modulation recognition even with only one support sample and is robust to low signal-to-noise-ratio conditions.},\n  keywords = {Training;Modulation;Transforms;Reconnaissance;Feature extraction;Robustness;Signal to noise ratio;Signal Modulation Recognition;Few-Shot Learning;Attention},\n  doi = {10.23919/Eusipco47968.2020.9287608},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001372.pdf},\n}\n\n
\n
\n\n\n
\n    Most existing signal modulation recognition methods attempt to establish a machine learning mechanism by training with a large number of annotated samples, which is hardly applicable to real-world electronic reconnaissance scenarios where only a few samples can be intercepted in advance. Few-Shot Learning (FSL) aims to learn from training classes with many samples and transfer the knowledge to support classes with only a few samples, thus achieving model generalization. In this paper, a novel FSL framework called Attention Relation Network (ARN) is proposed, which introduces channel and spatial attention, respectively, to learn a more effective feature representation of the support samples. The experimental results show that the proposed method can achieve excellent performance for fine-grained signal modulation recognition even with only one support sample and is robust to low signal-to-noise-ratio conditions.\n
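Since the abstract does not specify the attention design, the following generic channel- and spatial-attention blocks (in the spirit of CBAM) are shown only to illustrate what "channel and spatial attention" typically means; they are not the ARN's modules.

```python
# Generic channel- and spatial-attention blocks for illustration only.
import torch
import torch.nn as nn

class ChannelAttention(nn.Module):
    def __init__(self, channels, reduction=8):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(channels, channels // reduction), nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
        )

    def forward(self, x):                            # x: (B, C, H, W)
        avg = self.mlp(x.mean(dim=(2, 3)))           # squeeze by average pooling
        mx = self.mlp(x.amax(dim=(2, 3)))            # squeeze by max pooling
        w = torch.sigmoid(avg + mx).unsqueeze(-1).unsqueeze(-1)
        return x * w                                 # re-weight channels

class SpatialAttention(nn.Module):
    def __init__(self, kernel_size=7):
        super().__init__()
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2)

    def forward(self, x):
        m = torch.cat([x.mean(dim=1, keepdim=True),
                       x.amax(dim=1, keepdim=True)], dim=1)
        return x * torch.sigmoid(self.conv(m))       # re-weight spatial positions

feat = torch.randn(4, 32, 16, 16)
print(SpatialAttention()(ChannelAttention(32)(feat)).shape)
```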
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Nesterov-type Acceleration with Adaptive Localized Cayley Parametrization for Optimization over the Stiefel Manifold.\n \n \n \n \n\n\n \n Kume, K.; and Yamada, I.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2105-2109, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287609,\n  author = {K. Kume and I. Yamada},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Nesterov-type Acceleration with Adaptive Localized Cayley Parametrization for Optimization over the Stiefel Manifold},\n  year = {2020},\n  pages = {2105-2109},\n  abstract = {Despite certain singular-point issues, the Cayley parametrization (CP) has great potential to serve as a key to import many powerful strategies, developed originally for optimization over a vector space, into the task for optimization over the Stiefel manifold. In this paper, we newly present (i) a computationally efficient CP that can circumvent the singularpoint issues and (ii) a Nesterov type accelerated gradient method, based on the proposed CP, with its convergence analysis. To guarantee the convergence, we also evaluate a Lipschitz constant of the gradient of the cost function in the CP domain. Numerical experiments show excellent performance of the proposed accelerated algorithm compared with the standard algorithms, e.g., the Barzilai-Borwein method and L-BFGS method, combined with a vector transport for optimization over the Stiefel manifold as a special instance of the Riemannian manifold.},\n  keywords = {Manifolds;Signal processing algorithms;Signal processing;Acceleration;Task analysis;Standards;Convergence;Stiefel manifold;orthogonal group;Cayley parametrization;non-convex optimization;Nesterov acceleration},\n  doi = {10.23919/Eusipco47968.2020.9287609},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002105.pdf},\n}\n\n
\n
\n\n\n
\n    Despite certain singular-point issues, the Cayley parametrization (CP) has great potential to serve as a key to importing many powerful strategies, developed originally for optimization over a vector space, into the task of optimization over the Stiefel manifold. In this paper, we newly present (i) a computationally efficient CP that can circumvent the singular-point issues and (ii) a Nesterov-type accelerated gradient method, based on the proposed CP, with its convergence analysis. To guarantee convergence, we also evaluate a Lipschitz constant of the gradient of the cost function in the CP domain. Numerical experiments show excellent performance of the proposed accelerated algorithm compared with standard algorithms, e.g., the Barzilai-Borwein method and the L-BFGS method, combined with a vector transport for optimization over the Stiefel manifold as a special instance of a Riemannian manifold.\n
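The classical Cayley transform underlying the parametrization maps a skew-symmetric matrix to an orthogonal one; the sketch below verifies this numerically. The paper's adaptive localized variant, which moves the expansion point to avoid the singular points, is not reproduced here.

```python
# Classical Cayley transform: a skew-symmetric A is mapped to the orthogonal
# matrix Q = (I + A)^{-1}(I - A) (equivalently (I - A)(I + A)^{-1}, since the
# two factors commute).
import numpy as np

def cayley(A):
    """Map a skew-symmetric matrix A (A.T == -A) to an orthogonal matrix."""
    n = A.shape[0]
    I = np.eye(n)
    return np.linalg.solve(I + A, I - A)

rng = np.random.default_rng(0)
B = rng.standard_normal((5, 5))
A = (B - B.T) / 2                          # skew-symmetric parameter
Q = cayley(A)
print(np.allclose(Q.T @ Q, np.eye(5)))     # True: Q lies on the orthogonal group
```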
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Towards Finite-Time Consensus with Graph Convolutional Neural Networks.\n \n \n \n \n\n\n \n Iancu, B.; and Isufi, E.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2145-2149, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"TowardsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287610,\n  author = {B. Iancu and E. Isufi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Towards Finite-Time Consensus with Graph Convolutional Neural Networks},\n  year = {2020},\n  pages = {2145-2149},\n  abstract = {This work proposes a learning framework for distributed finite-time consensus with graph convolutional neural networks (GCNNs). Consensus is a central problem in distributed and adaptive optimisation, signal processing, and control. We leverage the link between finite-time consensus and graph filters, and between graph filters and GCNNs to study the potential of a readily distributed architecture for reaching consensus. We have found GCNNs outperform classical graph filters for distributed consensus and generalize better to unseen topologies such as distributed networks affected by link losses.},\n  keywords = {Network topology;Convolution;Neural networks;Europe;Topology;Convolutional neural networks;Optimization;Finite-time consensus;graph convolutions;graph signal processing;graph neural networks},\n  doi = {10.23919/Eusipco47968.2020.9287610},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002145.pdf},\n}\n\n
\n
\n\n\n
\n This work proposes a learning framework for distributed finite-time consensus with graph convolutional neural networks (GCNNs). Consensus is a central problem in distributed and adaptive optimisation, signal processing, and control. We leverage the link between finite-time consensus and graph filters, and between graph filters and GCNNs to study the potential of a readily distributed architecture for reaching consensus. We have found GCNNs outperform classical graph filters for distributed consensus and generalize better to unseen topologies such as distributed networks affected by link losses.\n
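The finite-time-consensus/graph-filter link that this work builds on can be stated concretely: on a connected graph, a polynomial graph filter whose factors use the nonzero Laplacian eigenvalues maps any node signal to its exact average after finitely many one-hop exchanges. The small numerical check below illustrates this known result; the paper's GCNN is not reproduced.

```python
# Finite-time average consensus as a polynomial graph filter: applying the
# factors (I - L/lam) for every nonzero Laplacian eigenvalue lam annihilates
# all non-consensus components and leaves the average on every node.
import numpy as np

A = np.array([[0, 1, 1, 0],
              [1, 0, 1, 1],
              [1, 1, 0, 1],
              [0, 1, 1, 0]], dtype=float)       # small connected graph
L = np.diag(A.sum(axis=1)) - A                  # combinatorial Laplacian
nonzero = [lam for lam in np.linalg.eigvalsh(L) if lam > 1e-9]

x = np.array([3.0, -1.0, 4.0, 2.0])             # node signal
y = x.copy()
for lam in nonzero:                             # one local graph shift per factor
    y = y - (L @ y) / lam
print(y, x.mean())                              # every entry equals the mean
```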
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Decoupled Direction-of-Arrival Estimations Using Relative Harmonic Coefficients.\n \n \n \n \n\n\n \n Hu, Y.; Abhayapala, T. D.; Samarasinghe, P. N.; and Gannot, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 246-250, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DecoupledPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287611,\n  author = {Y. Hu and T. D. Abhayapala and P. N. Samarasinghe and S. Gannot},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Decoupled Direction-of-Arrival Estimations Using Relative Harmonic Coefficients},\n  year = {2020},\n  pages = {246-250},\n  abstract = {Traditional source direction-of-arrival (DOA) estimation algorithms generally localize the elevation and azimuth simultaneously, requiring an exhaustive search over the two-dimensional (2-D) space. By contrast, this paper presents two decoupled source DOA estimation algorithms using a recently introduced source feature called the relative harmonic coefficients. They are capable to recover the source's elevation and azimuth separately, since the elevation and azimuth components in the relative harmonic coefficients are decoupled. The proposed algorithms are highlighted by a large reduction of computational complexity, thus enable a direct application for sound source tracking. Simulation results, using both a static and moving sound source, confirm the proposed methods are computationally efficient while achieving competitive localization accuracy.},\n  keywords = {Direction-of-arrival estimation;Azimuth;Two dimensional displays;Signal processing algorithms;Estimation;Harmonic analysis;Computational complexity;Decoupled DOA estimation;relative harmonic coefficients;fast speed;DOA tracking},\n  doi = {10.23919/Eusipco47968.2020.9287611},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000246.pdf},\n}\n\n
\n
\n\n\n
\n    Traditional source direction-of-arrival (DOA) estimation algorithms generally localize the elevation and azimuth simultaneously, requiring an exhaustive search over the two-dimensional (2-D) space. By contrast, this paper presents two decoupled source DOA estimation algorithms using a recently introduced source feature called the relative harmonic coefficients. They are capable of recovering the source's elevation and azimuth separately, since the elevation and azimuth components in the relative harmonic coefficients are decoupled. The proposed algorithms achieve a large reduction in computational complexity, thus enabling direct application to sound source tracking. Simulation results, using both a static and a moving sound source, confirm that the proposed methods are computationally efficient while achieving competitive localization accuracy.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Audio-Visual Speech Classification based on Absent Class Detection.\n \n \n \n \n\n\n \n Sad, G. D.; and Carlos Gómez, J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 336-340, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Audio-VisualPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287615,\n  author = {G. D. Sad and J. {Carlos Gómez}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Audio-Visual Speech Classification based on Absent Class Detection},\n  year = {2020},\n  pages = {336-340},\n  abstract = {In the present paper, a novel method for Audio-Visual Speech Recognition is introduced, aiming to minimize the intra-class errors. Based on a novel training procedure, the Complementary Models are introduced. These models aim to detect the absence of a class, in contrast to traditional models that aim to detect the presence of a class. In the proposed method, traditional models are employed in the first stage of a cascade scheme, and then the proposed complementary models are used to make the final decision on the recognition results. Experimental results in all the scenarios evaluated (different inputs modalities, three databases, four classifiers, and acoustic noisy conditions), show that a good performance is achieved with the proposed scheme. Also, better results than other reported methods in the literature over two public databases are achieved.},\n  keywords = {Training;Performance evaluation;Databases;Speech recognition;Signal processing;Task analysis;Speech processing;Audio-Visual Speech Recognition;Complementary Models;Ensemble Models},\n  doi = {10.23919/Eusipco47968.2020.9287615},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000336.pdf},\n}\n\n
\n
\n\n\n
\n    In the present paper, a novel method for Audio-Visual Speech Recognition is introduced, aiming to minimize intra-class errors. Based on a novel training procedure, Complementary Models are introduced. These models aim to detect the absence of a class, in contrast to traditional models that aim to detect the presence of a class. In the proposed method, traditional models are employed in the first stage of a cascade scheme, and the proposed complementary models are then used to make the final decision on the recognition results. Experimental results in all the scenarios evaluated (different input modalities, three databases, four classifiers, and noisy acoustic conditions) show that good performance is achieved with the proposed scheme. The proposed scheme also achieves better results than other methods reported in the literature on two public databases.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sequential Learning and Regularization in Variational Recurrent Autoencoder.\n \n \n \n \n\n\n \n Chien, J. -.; and Tsai, C. -.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1613-1617, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"SequentialPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287616,\n  author = {J. -T. Chien and C. -J. Tsai},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Sequential Learning and Regularization in Variational Recurrent Autoencoder},\n  year = {2020},\n  pages = {1613-1617},\n  abstract = {Latent variable model based on variational autoen-coder (VAE) is influential in machine learning for signal processing. VAE basically suffers from the issue of posterior collapse in sequential learning procedure where the variational posterior easily collapses to a prior as standard Gaussian. Latent semantics are then neglected in optimization process. The recurrent decoder therefore generates noninformative or repeated sequence data. To capture sufficient latent semantics from sequence data, this study simultaneously fulfills an amortized regularization for encoder, extends a Gaussian mixture prior for latent variable, and runs a skip connection for decoder. The noise robust prior, learned from the amortized encoder, is likely aware of temporal features. A variational prior based on the amortized mixture density is formulated in implementation of variational recurrent autoencoder for sequence reconstruction and representation. Owing to skip connection, the sequence samples are continuously predicted in decoder with contextual precision at each time step. Experiments on language model and sentiment classification show that the proposed method mitigates the issue of posterior collapse and learns the meaningful latent features to improve the inference and generation for semantic representation.},\n  keywords = {Smoothing methods;Semantics;Signal processing;Decoding;Task analysis;Standards;Optimization;sequential learning;Bayesian learning;recurrent neural network;variational autoencoder;language model},\n  doi = {10.23919/Eusipco47968.2020.9287616},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001613.pdf},\n}\n\n
\n
\n\n\n
\n    Latent variable models based on the variational autoencoder (VAE) are influential in machine learning for signal processing. The VAE basically suffers from the issue of posterior collapse in the sequential learning procedure, where the variational posterior easily collapses to a standard Gaussian prior. Latent semantics are then neglected in the optimization process, and the recurrent decoder generates noninformative or repeated sequence data. To capture sufficient latent semantics from sequence data, this study simultaneously fulfills an amortized regularization for the encoder, extends the prior on the latent variable to a Gaussian mixture, and adds a skip connection to the decoder. The noise-robust prior, learned from the amortized encoder, is likely aware of temporal features. A variational prior based on the amortized mixture density is formulated in the implementation of the variational recurrent autoencoder for sequence reconstruction and representation. Owing to the skip connection, the sequence samples are continuously predicted in the decoder with contextual precision at each time step. Experiments on language modeling and sentiment classification show that the proposed method mitigates the issue of posterior collapse and learns meaningful latent features that improve inference and generation for semantic representation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Independent Vector Analysis for Molecular Data Fusion: Application to Property Prediction and Knowledge Discovery of Energetic Materials.\n \n \n \n \n\n\n \n Boukouvalas, Z.; Puerto, M.; Elton, D. C.; Chung, P. W.; and Fuge, M. D.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1030-1034, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"IndependentPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287617,\n  author = {Z. Boukouvalas and M. Puerto and D. C. Elton and P. W. Chung and M. D. Fuge},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Independent Vector Analysis for Molecular Data Fusion: Application to Property Prediction and Knowledge Discovery of Energetic Materials},\n  year = {2020},\n  pages = {1030-1034},\n  abstract = {Due to its high computational speed and accuracy compared to ab-initio quantum chemistry and forcefield modeling, the prediction of molecular properties using machine learning has received great attention in the fields of materials design and drug discovery. A main ingredient required for machine learning is a training dataset consisting of molecular features—for example fingerprint bits, chemical descriptors, etc. that adequately characterize the corresponding molecules. However, choosing features for any application is highly non-trivial, since no {"}universal{"} method for feature selection exists. In this work, we propose a data fusion framework that uses Independent Vector Analysis to uncover underlying complementary information contained in different molecular featurization methods. Our approach takes an arbitrary number of individual feature vectors and generates a low dimensional set of features—molecular signatures—that can be used for the prediction of molecular properties and for knowledge discovery. We demonstrate this on a small and diverse dataset consisting of energetic compounds for the prediction of several energetic properties as well as for demonstrating how to provide insights onto the relationships between molecular structures and properties.},\n  keywords = {Training;Data integration;Machine learning;Signal processing;Predictive models;Fingerprint recognition;Knowledge discovery;Data fusion;blind source separation;inter-pretability},\n  doi = {10.23919/Eusipco47968.2020.9287617},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001030.pdf},\n}\n\n
\n
\n\n\n
\n    Due to its high computational speed and accuracy compared to ab-initio quantum chemistry and force-field modeling, the prediction of molecular properties using machine learning has received great attention in the fields of materials design and drug discovery. A main ingredient required for machine learning is a training dataset consisting of molecular features (for example fingerprint bits, chemical descriptors, etc.) that adequately characterize the corresponding molecules. However, choosing features for any application is highly non-trivial, since no \"universal\" method for feature selection exists. In this work, we propose a data fusion framework that uses Independent Vector Analysis to uncover the underlying complementary information contained in different molecular featurization methods. Our approach takes an arbitrary number of individual feature vectors and generates a low-dimensional set of features, called molecular signatures, that can be used for the prediction of molecular properties and for knowledge discovery. We demonstrate this on a small and diverse dataset of energetic compounds by predicting several energetic properties and by showing how the approach provides insights into the relationships between molecular structures and properties.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-Scale Residual Convolutional Encoder Decoder with Bidirectional Long Short-Term Memory for Single Channel Speech Enhancement.\n \n \n \n \n\n\n \n Xian, Y.; Sun, Y.; Wang, W.; and Naqvi, S. M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 431-435, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-ScalePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287618,\n  author = {Y. Xian and Y. Sun and W. Wang and S. M. Naqvi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-Scale Residual Convolutional Encoder Decoder with Bidirectional Long Short-Term Memory for Single Channel Speech Enhancement},\n  year = {2020},\n  pages = {431-435},\n  abstract = {The existing convolutional neural network (CNN) based methods still have limitations in model accuracy, latency and computational cost for single channel speech enhancement. In order to address these limitations, we propose a multi-scale convolutional bidirectional long short-term memory (BLSTM) recurrent neural network, which is named as McbNet, a deep learning framework for end-to-end single channel speech enhancement. The proposed McbNet enlarges the receptive fields in two aspects. Firstly, every convolutional layer employs filters with varied dimensions to capture local and global information. Secondly, the BLSTM is applied to evaluate the interdependency of past, current and future temporal frames. The experimental results confirm the proposed McbNet offers consistent improvement over the state-of-the-art methods and public datasets.},\n  keywords = {Deep learning;Recurrent neural networks;Convolution;Europe;Speech enhancement;Information filters;Kernel;CNN;single channel;speech enhancement;BLSTM;McbNet;receptive field},\n  doi = {10.23919/Eusipco47968.2020.9287618},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000431.pdf},\n}\n\n
\n
\n\n\n
\n    Existing convolutional neural network (CNN) based methods still have limitations in model accuracy, latency and computational cost for single channel speech enhancement. To address these limitations, we propose a multi-scale convolutional bidirectional long short-term memory (BLSTM) recurrent neural network, named McbNet, a deep learning framework for end-to-end single channel speech enhancement. The proposed McbNet enlarges the receptive field in two ways. Firstly, every convolutional layer employs filters of varied dimensions to capture local and global information. Secondly, the BLSTM is applied to evaluate the interdependency of past, current and future temporal frames. The experimental results confirm that the proposed McbNet offers consistent improvement over state-of-the-art methods on public datasets.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient Dynamic Analysis of Low-similarity Proteins for Structural Class Prediction.\n \n \n \n \n\n\n \n Zervou, M. A.; Doutsi, E.; Pavlidis, P.; and Tsakalides, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1328-1332, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"EfficientPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287619,\n  author = {M. A. Zervou and E. Doutsi and P. Pavlidis and P. Tsakalides},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Efficient Dynamic Analysis of Low-similarity Proteins for Structural Class Prediction},\n  year = {2020},\n  pages = {1328-1332},\n  abstract = {Prediction of protein structural classes from amino acid sequences is a challenging problem as it is profitable for analyzing protein function, interactions, and regulation. The majority of existing prediction methods for low-homology sequences utilize numerous amount of features and require an exhausting search for optimal parameter tuning. To address this problem, this work proposes a novel self-tuned architecture for feature extraction by modeling directly the inherent dynamics of the data in higher-dimensional phase space via chaos game representation (CGR) and generalized multidimensional recurrence quantification analysis (GmdRQA). Experimental evaluation on a real benchmark dataset demonstrates the superiority of the herein proposed architecture when compared against the state-of-the-art unidimensional RQA taking under consideration that our method achieves similar performance in a data-driven manner with a smaller computational cost.},\n  keywords = {Proteins;Time series analysis;Games;Computer architecture;Signal processing;Regulation;Tuning;Protein structure prediction;chaos game representation;multidimensional recurrence quantification analysis;nonlinear time series analysis},\n  doi = {10.23919/Eusipco47968.2020.9287619},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001328.pdf},\n}\n\n
\n
\n\n\n
\n    Prediction of protein structural classes from amino acid sequences is a challenging problem, and solving it is valuable for analyzing protein function, interactions, and regulation. The majority of existing prediction methods for low-homology sequences utilize a large number of features and require an exhaustive search for optimal parameter tuning. To address this problem, this work proposes a novel self-tuned architecture for feature extraction that directly models the inherent dynamics of the data in a higher-dimensional phase space via chaos game representation (CGR) and generalized multidimensional recurrence quantification analysis (GmdRQA). Experimental evaluation on a real benchmark dataset demonstrates the superiority of the proposed architecture over the state-of-the-art unidimensional RQA, taking into consideration that our method achieves similar performance in a data-driven manner with a smaller computational cost.\n
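To illustrate the CGR idea only, the sketch below computes the classic four-vertex chaos game representation of a nucleotide sequence; the paper applies a generalized CGR to amino-acid sequences and follows it with GmdRQA, neither of which is reproduced here.

```python
# Classic four-vertex chaos game representation (CGR): each symbol pulls the
# current point halfway toward its assigned corner of the unit square.
import numpy as np

CORNERS = {'A': (0.0, 0.0), 'C': (0.0, 1.0), 'G': (1.0, 1.0), 'T': (1.0, 0.0)}

def cgr(sequence):
    point = np.array([0.5, 0.5])
    trajectory = []
    for base in sequence:
        point = (point + np.array(CORNERS[base])) / 2.0   # move halfway to corner
        trajectory.append(point.copy())
    return np.array(trajectory)

traj = cgr("ACGTTGCAACGT")
print(traj.shape, traj[-1])       # (12, 2): trajectory of points in the unit square
```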
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Analysis of Brain-Heart Couplings in Epilepsy: Dealing With the Highly Complex Structure of Resulting Interaction Pattern.\n \n \n \n \n\n\n \n Schiecke, K.; Benninger, F.; and Feucht, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 935-939, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AnalysisPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287620,\n  author = {K. Schiecke and F. Benninger and M. Feucht},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Analysis of Brain-Heart Couplings in Epilepsy: Dealing With the Highly Complex Structure of Resulting Interaction Pattern},\n  year = {2020},\n  pages = {935-939},\n  abstract = {Investigations into brain-heart interactions are gaining increasing importance in various fields of research including epilepsy. Convergent Cross Mapping (CCM) is one method to quantify such interactions and was adapted for the analysis of children with temporal lobe epilepsy (TLE) in the past. Increasing amount of data and data features available produce a high and still rising complexity of results of such interaction analyses. Therefore, aim of this study was the investigation of generalized presentation of those results using our benchmark data set of children with TLE. Tensor decomposition was adapted to take into account spatial, time, frequency, directional and focus side related modes of interactions results achieved by CCM analysis.},\n  keywords = {brain;electroencephalography;medical disorders;medical signal processing;neurophysiology;tensors;time series;brain-heart couplings;highly complex structure;interaction pattern;brain-heart interactions;convergent cross mapping;children;temporal lobe epilepsy;TLE;data features;interaction analyses;benchmark data;CCM analysis;Couplings;Time-frequency analysis;Temporal lobe;Tensors;Epilepsy;Europe;Signal processing;brain-heart interaction;epilepsy;convergent cross mapping;empirical mode decomposition;tensor decomposition},\n  doi = {10.23919/Eusipco47968.2020.9287620},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000935.pdf},\n}\n\n
\n
\n\n\n
\n Investigations into brain-heart interactions are gaining importance in various fields of research, including epilepsy. Convergent Cross Mapping (CCM) is one method to quantify such interactions and has previously been adapted for the analysis of children with temporal lobe epilepsy (TLE). The increasing amount of available data and data features produces a high and still rising complexity in the results of such interaction analyses. The aim of this study was therefore to investigate a generalized presentation of those results using our benchmark data set of children with TLE. Tensor decomposition was adapted to take into account the spatial, time, frequency, directional, and focus-side-related modes of the interaction results obtained by CCM analysis.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n CNN-based Note Onset Detection using Synthetic Data Augmentation.\n \n \n \n \n\n\n \n Mounir, M.; Karsmakers, P.; and v. Waterschoot, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 171-175, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"CNN-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287621,\n  author = {M. Mounir and P. Karsmakers and T. v. Waterschoot},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {CNN-based Note Onset Detection using Synthetic Data Augmentation},\n  year = {2020},\n  pages = {171-175},\n  abstract = {Detecting the onset of notes in music excerpts is a fundamental problem in many music signal processing tasks, including analysis, synthesis, and information retrieval. When addressing the note onset detection (NOD) problem using a data-driven methodology, a major challenge is the availability and quality of labeled datasets used for both model training/tuning and evaluation. As most of the available datasets are manually annotated, the amount of annotated music excerpts is limited and the annotation strategy and quality varies across data sets. To counter both problems, in this paper we propose to use semi-synthetic datasets where the music excerpts are mixes of isolated note recordings. The advantage resides in the annotations being automatically generated while mixing the notes, as isolated note onsets are straightforward to detect using a simple energy measure. A semi-synthetic dataset is used in this work for augmenting a real piano dataset when training a convolutional Neural Network (CNN) with three novel model training strategies. Training the CNN on a semi-synthetic dataset and retraining only the CNN classification layers on a real dataset results in higher average F1-score (F1) scores with lower variance.},\n  keywords = {Training;Annotations;Europe;Signal processing;Information retrieval;Multiple signal classification;Task analysis;CNN;data augmentation;note onset detection},\n  doi = {10.23919/Eusipco47968.2020.9287621},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000171.pdf},\n}\n\n
\n
\n\n\n
\n Detecting the onset of notes in music excerpts is a fundamental problem in many music signal processing tasks, including analysis, synthesis, and information retrieval. When addressing the note onset detection (NOD) problem using a data-driven methodology, a major challenge is the availability and quality of the labeled datasets used for both model training/tuning and evaluation. As most of the available datasets are manually annotated, the amount of annotated music excerpts is limited and the annotation strategy and quality vary across datasets. To counter both problems, in this paper we propose to use semi-synthetic datasets in which the music excerpts are mixes of isolated note recordings. The advantage is that the annotations are generated automatically while mixing the notes, as isolated note onsets are straightforward to detect using a simple energy measure. A semi-synthetic dataset is used in this work to augment a real piano dataset when training a convolutional neural network (CNN) with three novel model training strategies. Training the CNN on a semi-synthetic dataset and retraining only the CNN classification layers on a real dataset results in higher average F1-scores with lower variance.\n
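The abstract notes that isolated note onsets are straightforward to detect with a simple energy measure; the sketch below shows one plausible way to do that, using short-time frame energy and a relative threshold. The frame length, hop size, and threshold are illustrative assumptions rather than the paper's settings.

import numpy as np

def energy_onset(note, sr, frame=512, hop=256, rel_thresh=0.1):
    """Return the onset time (in seconds) of an isolated note as the first frame
    whose short-time energy exceeds a fraction of the maximum frame energy."""
    frames = [note[i:i + frame] for i in range(0, len(note) - frame, hop)]
    energy = np.array([np.sum(f ** 2) for f in frames])
    idx = int(np.argmax(energy >= rel_thresh * energy.max()))
    return idx * hop / sr

# Toy example: 0.2 s of silence followed by a decaying 440 Hz "note".
sr = 16000
t = np.arange(int(0.5 * sr)) / sr
note = np.concatenate([np.zeros(int(0.2 * sr)),
                       np.sin(2 * np.pi * 440 * t) * np.exp(-3 * t)])
print(round(energy_onset(note, sr), 3))              # close to 0.2 s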
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimal Microphone Placement for Localizing Tonal Sound Sources.\n \n \n \n \n\n\n \n Juhlin, M.; and Jakobsson, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 236-240, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"OptimalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287622,\n  author = {M. Juhlin and A. Jakobsson},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Optimal Microphone Placement for Localizing Tonal Sound Sources},\n  year = {2020},\n  pages = {236-240},\n  abstract = {This work is concerned with determining optimal microphone placements that allow for an accurate location estimate of the sound sources, taking into account the expected signal structure of voiced speech, as well as the expected location areas and the typical range of the fundamental frequencies of the speakers. To determine preferable microphone placements, we propose a scheme that minimizes a theoretical lower bound on the variance of the location estimates over the possible sensor placements, while taking into account the expected variability in the impinging signals. Numerical examples and real measurements illustrate the performance of the proposed scheme.},\n  keywords = {Sensor placement;Europe;Signal processing;Frequency estimation;Optimization;Microphones;Sensor placement;worst case Cramér-Rao lower bound;Convex optimization},\n  doi = {10.23919/Eusipco47968.2020.9287622},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000236.pdf},\n}\n\n
\n
\n\n\n
\n This work is concerned with determining optimal microphone placements that allow for an accurate location estimate of the sound sources, taking into account the expected signal structure of voiced speech, as well as the expected location areas and the typical range of the fundamental frequencies of the speakers. To determine preferable microphone placements, we propose a scheme that minimizes a theoretical lower bound on the variance of the location estimates over the possible sensor placements, while taking into account the expected variability in the impinging signals. Numerical examples and real measurements illustrate the performance of the proposed scheme.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Resource Allocation Algorithms for Multi-Operator Cognitive Communication Systems.\n \n \n \n \n\n\n \n Tohidi, E.; Gesbert, D.; and Ciblat, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1737-1741, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287623,\n  author = {E. Tohidi and D. Gesbert and P. Ciblat},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed Resource Allocation Algorithms for Multi-Operator Cognitive Communication Systems},\n  year = {2020},\n  pages = {1737-1741},\n  abstract = {We address the problem of resource allocation (RA) in a cognitive radio (CR) communication system with multiple secondary operators sharing spectrum with an incumbent primary operator. The key challenge of the RA problem is the inter-operator coordination arising in the optimization problem so that the aggregated interference at the primary users (PUs) does not exceed the target threshold. While this problem is easily solvable if a centralized unit could access information of all secondary operators, it becomes challenging in a realistic scenario. In this paper, considering a satellite setting, we alleviate this problem by proposing two approaches to reduce the information exchange level among the secondary operators. In the first approach, we formulate an RA scheme based on a partial information sharing method which enables distributed optimization across secondary operators. In the second approach, instead of exchanging secondary users (SUs) information, the operators only exchange their contributions of the interference-level and RA is performed locally across secondary operators. These two approaches, for the first time in this context, provide a trade-off between performance and level of inter-operator information exchange. Through the numerical simulations, we explain this trade-off and illustrate the penalty resulting from partial information exchange.},\n  keywords = {Communication systems;Satellite broadcasting;Signal processing algorithms;Signal processing;Resource management;Information exchange;Optimization},\n  doi = {10.23919/Eusipco47968.2020.9287623},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001737.pdf},\n}\n\n
\n
\n\n\n
\n We address the problem of resource allocation (RA) in a cognitive radio (CR) communication system with multiple secondary operators sharing spectrum with an incumbent primary operator. The key challenge of the RA problem is the inter-operator coordination arising in the optimization problem so that the aggregated interference at the primary users (PUs) does not exceed the target threshold. While this problem is easily solvable if a centralized unit could access information of all secondary operators, it becomes challenging in a realistic scenario. In this paper, considering a satellite setting, we alleviate this problem by proposing two approaches to reduce the information exchange level among the secondary operators. In the first approach, we formulate an RA scheme based on a partial information sharing method which enables distributed optimization across secondary operators. In the second approach, instead of exchanging secondary users (SUs) information, the operators only exchange their contributions of the interference-level and RA is performed locally across secondary operators. These two approaches, for the first time in this context, provide a trade-off between performance and level of inter-operator information exchange. Through the numerical simulations, we explain this trade-off and illustrate the penalty resulting from partial information exchange.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Online Kernel-Based Graph Topology Identification with Partial-Derivative-Imposed Sparsity.\n \n \n \n \n\n\n \n Moscu, M.; Borsoi, R.; and Richard, C.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2190-2194, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"OnlinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 10 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287624,\n  author = {M. Moscu and R. Borsoi and C. Richard},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Online Kernel-Based Graph Topology Identification with Partial-Derivative-Imposed Sparsity},\n  year = {2020},\n  pages = {2190-2194},\n  abstract = {In many applications, such as brain network connectivity or shopping recommendations, the underlying graph explaining the different interactions between participating agents is unknown. Moreover, many of these interactions may be based on nonlinear relationships, rendering the topology inference problem more complex. This paper presents a new topology inference method that estimates a possibly directed adjacency matrix in an online manner. In contrast to previous approaches which are based on additive models, the proposed model is able to explain general nonlinear interactions between the agents. Partial-derivative-imposed sparsity is implemented, while reproducing kernels are used to model nonlinearities. The impact of the increasing number of data points is alleviated by using dictionaries of kernel functions. A comparison with a previously developed method showcases the generality of the new model.},\n  keywords = {Dictionaries;Additives;Network topology;Signal processing algorithms;Topology;Sparse matrices;Kernel;topology inference;partial derivative sparsity;brain connectivity;nonlinear interactions;kernels},\n  doi = {10.23919/Eusipco47968.2020.9287624},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002190.pdf},\n}\n\n
\n
\n\n\n
\n In many applications, such as brain network connectivity or shopping recommendations, the underlying graph explaining the different interactions between participating agents is unknown. Moreover, many of these interactions may be based on nonlinear relationships, rendering the topology inference problem more complex. This paper presents a new topology inference method that estimates a possibly directed adjacency matrix in an online manner. In contrast to previous approaches which are based on additive models, the proposed model is able to explain general nonlinear interactions between the agents. Partial-derivative-imposed sparsity is implemented, while reproducing kernels are used to model nonlinearities. The impact of the increasing number of data points is alleviated by using dictionaries of kernel functions. A comparison with a previously developed method showcases the generality of the new model.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Frame Similarity using Siamese networks for Audio-to-Score Alignment.\n \n \n \n \n\n\n \n Agrawal, R.; and Dixon, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 141-145, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"LearningPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287625,\n  author = {R. Agrawal and S. Dixon},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Learning Frame Similarity using Siamese networks for Audio-to-Score Alignment},\n  year = {2020},\n  pages = {141-145},\n  abstract = {Audio-to-score alignment aims at generating an accurate mapping between a performance audio and the score of a given piece. Standard alignment methods are based on Dynamic Time Warping (DTW) and employ handcrafted features, which cannot be adapted to different acoustic conditions. We propose a method to overcome this limitation using learned frame similarity for audio-to-score alignment. We focus on offline audio-to-score alignment of piano music. Experiments on music data from different acoustic conditions demonstrate that our method achieves higher alignment accuracy than a standard DTW-based method that uses handcrafted features, and generates robust alignments whilst being adaptable to different domains at the same time.},\n  keywords = {Neural networks;Music;Europe;Signal processing;Multiple signal classification;Standards;Music Information Retrieval;Audio-to-Score Alignment;Siamese networks;Convolutional Neural Networks;Dynamic Time Warping},\n  doi = {10.23919/Eusipco47968.2020.9287625},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000141.pdf},\n}\n\n
\n
\n\n\n
\n Audio-to-score alignment aims at generating an accurate mapping between a performance audio recording and the score of a given piece. Standard alignment methods are based on Dynamic Time Warping (DTW) and employ handcrafted features, which cannot be adapted to different acoustic conditions. We propose a method to overcome this limitation using learned frame similarity for audio-to-score alignment. We focus on offline audio-to-score alignment of piano music. Experiments on music data from different acoustic conditions demonstrate that our method achieves higher alignment accuracy than a standard DTW-based method that uses handcrafted features, and generates robust alignments while remaining adaptable to different domains.\n
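For readers unfamiliar with the alignment backbone, the following is a minimal dynamic time warping sketch over a frame-wise cost matrix; in the paper's setting that cost would come from the learned Siamese frame similarity, whereas here a random matrix stands in for it. This is a generic DTW implementation, not the authors' code.

import numpy as np

def dtw_path(cost):
    """DTW over an (audio frames x score frames) cost matrix; returns the optimal
    alignment path and its accumulated cost."""
    n, m = cost.shape
    acc = np.full((n + 1, m + 1), np.inf)
    acc[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            acc[i, j] = cost[i - 1, j - 1] + min(acc[i - 1, j], acc[i, j - 1], acc[i - 1, j - 1])
    path, i, j = [], n, m                            # backtrack the cheapest predecessors
    while i > 0 and j > 0:
        path.append((i - 1, j - 1))
        step = int(np.argmin([acc[i - 1, j - 1], acc[i - 1, j], acc[i, j - 1]]))
        if step == 0:
            i, j = i - 1, j - 1
        elif step == 1:
            i -= 1
        else:
            j -= 1
    return path[::-1], acc[n, m]

# Random stand-in for learned frame dissimilarities between 8 audio and 10 score frames.
path, total = dtw_path(np.random.default_rng(0).random((8, 10)))
print(len(path), round(float(total), 3))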
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Genomic Signal Processing for Variant Detection in Diploid Parent-Child Trios.\n \n \n \n \n\n\n \n Spence, M.; Banuelos, M.; Marcia, R. F.; and Sindi, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1318-1322, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"GenomicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287626,\n  author = {M. Spence and M. Banuelos and R. F. Marcia and S. Sindi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Genomic Signal Processing for Variant Detection in Diploid Parent-Child Trios},\n  year = {2020},\n  pages = {1318-1322},\n  abstract = {Structural variants (SVs) are rearrangements in the DNA sequence of members within the same species. Detecting SVs is challenging because most approaches suffer from high-false positive rates. In this work, we improve the accuracy of SV detection by exploiting familial relationships and the rare occurence of these rearrangements. Mathematically, we pose SV detection as a constrained optimization problem regularized by a sparsity promoting term. Furthermore, we generalize our previous methods in two ways. First, we consider a biologically realistic scenario of a parent-child-trio, where each individual may carry zero, one, or two copies of any potential SV. Second, we employ a novel block-coordinate descent approach with orthogonal projection to efficiently minimize the objective and to enforce feasibility within the biological constraint space. Numerical results using both simulated and real trios demonstrate that our proposed approach improves our ability to separate true SVs from false positives.},\n  keywords = {Sequential analysis;Genomics;Optimization methods;Biology;Signal reconstruction;Noise measurement;Bioinformatics;Sparse signal recovery;convex optimization;next-generation sequencing data;structural variants;computational genomics},\n  doi = {10.23919/Eusipco47968.2020.9287626},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001318.pdf},\n}\n\n
\n
\n\n\n
\n Structural variants (SVs) are rearrangements in the DNA sequence of members within the same species. Detecting SVs is challenging because most approaches suffer from high false-positive rates. In this work, we improve the accuracy of SV detection by exploiting familial relationships and the rare occurrence of these rearrangements. Mathematically, we pose SV detection as a constrained optimization problem regularized by a sparsity-promoting term. Furthermore, we generalize our previous methods in two ways. First, we consider a biologically realistic scenario of a parent-child trio, where each individual may carry zero, one, or two copies of any potential SV. Second, we employ a novel block-coordinate descent approach with orthogonal projection to efficiently minimize the objective and to enforce feasibility within the biological constraint space. Numerical results using both simulated and real trios demonstrate that our proposed approach improves our ability to separate true SVs from false positives.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Speech Privacy Protection based on Optimal Controlling Estimated Speech Transmission Index in Noisy Reverberant Environments.\n \n \n \n \n\n\n \n Duangpummet, S.; Kraikhun, P.; Phunruangsakao, C.; Karnjana, J.; Unoki, M.; and Kongprawechnon, W.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 76-80, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"SpeechPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287627,\n  author = {S. Duangpummet and P. Kraikhun and C. Phunruangsakao and J. Karnjana and M. Unoki and W. Kongprawechnon},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Speech Privacy Protection based on Optimal Controlling Estimated Speech Transmission Index in Noisy Reverberant Environments},\n  year = {2020},\n  pages = {76-80},\n  abstract = {Protecting the privacy of conversations containing confidential and sensitive information in semi-open rooms, such as in banks and hospitals, is essential but challenging because their acoustical characteristics, such as room impulse response (RIR) and background noise, are unknown and prone to change. This study proposes a scheme for protecting the privacy of conversations on the basis of feedback control of an estimated speech transmission index (STI). The STI is an objective index related to listening difficulty and is a function of RIR. Without measuring the RIR of the environment where a supposedly private conversation occurs, an STI-estimation method and one RIR model are utilized. The scheme modifies speech signals in such a way that, for an unintended listener, the signals are as unintelligible as they would be in a room with a low STI. To control the late reverberant parameter of the RIR model, a proportional-integral-derivative controller is used whose controller gains are tuned by using a differential evolution optimizer. Simulations and subjective tests were conducted to evaluate the proposed method. The average error between the actual and target estimated STIs is 0.01. Furthermore, the subjective tests showed that the proposed method can effectively protect privacy under different conditions and is preferable to an open-loop control method.},\n  keywords = {Privacy;Real-time systems;Feedback control;Indexes;Noise measurement;PD control;Speech processing;Speech privacy protection;speech transmission index;room impulse response;PID control with differential evolution},\n  doi = {10.23919/Eusipco47968.2020.9287627},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000076.pdf},\n}\n\n
\n
\n\n\n
\n Protecting the privacy of conversations containing confidential and sensitive information in semi-open rooms, such as in banks and hospitals, is essential but challenging because their acoustical characteristics, such as room impulse response (RIR) and background noise, are unknown and prone to change. This study proposes a scheme for protecting the privacy of conversations on the basis of feedback control of an estimated speech transmission index (STI). The STI is an objective index related to listening difficulty and is a function of RIR. Without measuring the RIR of the environment where a supposedly private conversation occurs, an STI-estimation method and one RIR model are utilized. The scheme modifies speech signals in such a way that, for an unintended listener, the signals are as unintelligible as they would be in a room with a low STI. To control the late reverberant parameter of the RIR model, a proportional-integral-derivative controller is used whose controller gains are tuned by using a differential evolution optimizer. Simulations and subjective tests were conducted to evaluate the proposed method. The average error between the actual and target estimated STIs is 0.01. Furthermore, the subjective tests showed that the proposed method can effectively protect privacy under different conditions and is preferable to an open-loop control method.\n
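The control loop at the core of the scheme is a standard proportional-integral-derivative (PID) update. The sketch below shows a generic discrete PID step driving a toy first-order plant towards a target value; the plant, gains, and target are illustrative stand-ins rather than the paper's RIR/STI model, and in the paper the gains are tuned by differential evolution rather than fixed by hand.

def pid_step(error, state, kp, ki, kd, dt):
    """One update of a discrete PID controller; `state` carries the integral term
    and the previous error between calls."""
    integral, prev_error = state
    integral += error * dt
    derivative = (error - prev_error) / dt
    control = kp * error + ki * integral + kd * derivative
    return control, (integral, error)

# Toy usage: drive a first-order plant towards a target "STI-like" value of 0.30.
target, y, state = 0.30, 0.60, (0.0, 0.0)
for _ in range(200):
    u, state = pid_step(target - y, state, kp=0.8, ki=0.5, kd=0.05, dt=0.1)
    y += 0.1 * (u - 0.2 * y)                         # stand-in plant, not the RIR model
print(round(y, 3))                                   # settles near the 0.30 target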
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Managing Single or Multi-Users Channel Allocation for the Priority Cognitive Access.\n \n \n \n \n\n\n \n Almasri, M.; Mansour, A.; Moy, C.; Assoum, A.; Le Jeune, D.; and Osswald, C.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1722-1726, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ManagingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287628,\n  author = {M. Almasri and A. Mansour and C. Moy and A. Assoum and D. {Le Jeune} and C. Osswald},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Managing Single or Multi-Users Channel Allocation for the Priority Cognitive Access},\n  year = {2020},\n  pages = {1722-1726},\n  abstract = {This manuscript investigates the problem of the Multi-Armed Bandit (MAB) in the context of the Opportunistic Spectrum Access (OSA) case with priority management (e.g. military applications). The main aim of a Secondary User (SU) in OSA is to increase his transmission throughput by seeking the best channel with the highest vacancy probability. In this manuscript, we propose a novel MAB algorithm called ϵ -UCB in order to enhance the spectrum learning of a SU and decrease the regret, i.e. the loss of reward due to the selection of worst channels. We analytically prove, and corroborate with simulations, that the regret of the proposed algorithm has a logarithmic behavior. So, after a finite number of time slots, the SU can estimate the vacancy probability of channels in order to target the best one for transmitting. Hereinafter, we extend ϵ -UCB to consider multiple priority users, where a SU can selfishly estimate and access the channels according to his prior rank. The simulation results show the superiority of the proposed algorithm for a single or multi-user cases compared to the well-known MAB algorithms.},\n  keywords = {Upper bound;Simulation;Signal processing algorithms;Channel estimation;Signal processing;Throughput;Resource management;Cognitive Networks;Multi-Armed Bandit;Priority Access;Logarithmic Regret},\n  doi = {10.23919/Eusipco47968.2020.9287628},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001722.pdf},\n}\n\n
\n
\n\n\n
\n This manuscript investigates the Multi-Armed Bandit (MAB) problem in the context of Opportunistic Spectrum Access (OSA) with priority management (e.g. military applications). The main aim of a Secondary User (SU) in OSA is to increase its transmission throughput by seeking the best channel, i.e. the one with the highest vacancy probability. In this manuscript, we propose a novel MAB algorithm called ϵ-UCB in order to enhance the spectrum learning of an SU and decrease the regret, i.e. the loss of reward due to the selection of worse channels. We analytically prove, and corroborate with simulations, that the regret of the proposed algorithm has a logarithmic behavior. Hence, after a finite number of time slots, the SU can estimate the vacancy probability of the channels in order to target the best one for transmission. We then extend ϵ-UCB to consider multiple priority users, where an SU can selfishly estimate and access the channels according to its prior rank. The simulation results show the superiority of the proposed algorithm in both single- and multi-user cases compared to well-known MAB algorithms.\n
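To make the bandit mechanics concrete, here is a classic UCB index policy for channel selection, which the paper's ϵ-UCB refines: at each slot the SU picks the channel with the largest empirical mean plus exploration bonus. The vacancy probabilities, horizon, and exploration constant below are illustrative assumptions, and the exact ϵ-UCB index differs from this textbook form.

import numpy as np

def ucb_channel_selection(vacancy_prob, horizon=5000, c=2.0, seed=0):
    """Textbook UCB policy for opportunistic spectrum access: returns how often
    each channel was selected over the horizon."""
    rng = np.random.default_rng(seed)
    k = len(vacancy_prob)
    counts = np.zeros(k)
    means = np.zeros(k)
    for t in range(1, horizon + 1):
        if t <= k:                                   # initialise: sense each channel once
            arm = t - 1
        else:                                        # empirical mean + exploration bonus
            arm = int(np.argmax(means + np.sqrt(c * np.log(t) / counts)))
        reward = float(rng.random() < vacancy_prob[arm])   # 1 if the channel was free
        counts[arm] += 1
        means[arm] += (reward - means[arm]) / counts[arm]
    return counts

print(ucb_channel_selection([0.2, 0.5, 0.9]))        # the p=0.9 channel dominates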
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Signal Denoising Using a New Class of Robust Neural Networks.\n \n \n \n \n\n\n \n Neacsu, A.; Gupta, K.; Pesquet, J. -.; and Burileanu, C.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1492-1496, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"SignalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287630,\n  author = {A. Neacsu and K. Gupta and J. -C. Pesquet and C. Burileanu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Signal Denoising Using a New Class of Robust Neural Networks},\n  year = {2020},\n  pages = {1492-1496},\n  abstract = {In this work, we propose a novel neural network architecture, called Adaptive Convolutional Neural Network (ACNN), which can be viewed as an intermediate solution between a standard convolutional network and a fully connected one. A constrained training strategy is developed to learn the parameters of such a network. The proposed algorithm allows us to control the Lipschitz constant of our ACNN to secure its robustness to adversarial noise. The resulting learning approach is evaluated for signal denoising based on a database of music recordings. Both qualitative and quantitative results show that the designed network is successful in removing Gaussian noise with unknown variance.},\n  keywords = {Training;Adaptive systems;Neural networks;Signal processing algorithms;Robustness;Signal denoising;Standards;stability;fully connected networks;audio denoising;perturbations},\n  doi = {10.23919/Eusipco47968.2020.9287630},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001492.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we propose a novel neural network architecture, called Adaptive Convolutional Neural Network (ACNN), which can be viewed as an intermediate solution between a standard convolutional network and a fully connected one. A constrained training strategy is developed to learn the parameters of such a network. The proposed algorithm allows us to control the Lipschitz constant of our ACNN to secure its robustness to adversarial noise. The resulting learning approach is evaluated for signal denoising based on a database of music recordings. Both qualitative and quantitative results show that the designed network is successful in removing Gaussian noise with unknown variance.\n
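A common way to control a network's Lipschitz constant, which the abstract identifies as the key to robustness against adversarial noise, is to bound the spectral norm of each layer's weight matrix, since the product of per-layer norms upper-bounds the end-to-end constant for 1-Lipschitz activations. The sketch below is that generic projection step, not the authors' constrained training strategy for the ACNN.

import numpy as np

def project_lipschitz(weights, target=1.0):
    """Clip the spectral norm of each weight matrix so the product of per-layer
    norms (an upper bound on the network's Lipschitz constant) stays <= target."""
    per_layer = target ** (1.0 / len(weights))
    projected = []
    for w in weights:
        u, s, vt = np.linalg.svd(w, full_matrices=False)
        projected.append((u * np.minimum(s, per_layer)) @ vt)   # shrink singular values
    return projected

layers = [np.random.default_rng(i).standard_normal((16, 16)) for i in range(3)]
clipped = project_lipschitz(layers, target=1.0)
print([round(float(np.linalg.svd(w, compute_uv=False)[0]), 3) for w in clipped])  # all <= 1.0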
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Range-based Radar Model Structure Selection.\n \n \n \n \n\n\n \n Jansson, A.; Elvander, F.; Almers, P.; and Jakobsson, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2269-2273, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Range-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287631,\n  author = {A. Jansson and F. Elvander and P. Almers and A. Jakobsson},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Range-based Radar Model Structure Selection},\n  year = {2020},\n  pages = {2269-2273},\n  abstract = {In this work, we study under which circumstances it is appropriate to use simplified models for range determination using radar. Typically, pulsed radar systems result in the backscattered, demodulated, and matched signal having a chirp signal structure, with the frequency rate being related to the range to the reflecting target and the relative velocity of the transmitter and reflector. Far from the target, and at low relative velocities, one may achieve preferable location estimates by neglecting the frequency rate, treating the received signal as being purely sinusoidal, whereas at close range, neglecting the frequency rate notably reduces the achievable performance. Using misspecified estimation theory, we derive a lower bound of the achievable performance when neglecting the true signal structure, and show at which ranges one model is preferable to the other. Numerical results from a mm-wave radar system illustrate the results.},\n  keywords = {Transmitters;Radar;Estimation theory;Particle measurements;Frequency estimation;Sensor systems;Numerical models;Range estimation;Radar systems;Misspecified Cramer-Rao lower bound},\n  doi = {10.23919/Eusipco47968.2020.9287631},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002269.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we study under which circumstances it is appropriate to use simplified models for range determination using radar. Typically, pulsed radar systems result in the backscattered, demodulated, and matched signal having a chirp signal structure, with the frequency rate being related to the range to the reflecting target and the relative velocity of the transmitter and reflector. Far from the target, and at low relative velocities, one may achieve preferable location estimates by neglecting the frequency rate, treating the received signal as being purely sinusoidal, whereas at close range, neglecting the frequency rate notably reduces the achievable performance. Using misspecified estimation theory, we derive a lower bound of the achievable performance when neglecting the true signal structure, and show at which ranges one model is preferable to the other. Numerical results from a mm-wave radar system illustrate the results.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive Learning without Forgetting via Low-Complexity Convex Networks.\n \n \n \n \n\n\n \n Javid, A. M.; Liang, X.; Skoglund, M.; and Chatterjee, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1623-1627, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287632,\n  author = {A. M. Javid and X. Liang and M. Skoglund and S. Chatterjee},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive Learning without Forgetting via Low-Complexity Convex Networks},\n  year = {2020},\n  pages = {1623-1627},\n  abstract = {We study the problem of learning without forgetting (LwF) in which a deep learning model learns new tasks without a significant drop in the classification performance on the previously learned tasks. We propose an LwF algorithm for multilayer feedforward neural networks in which we can adapt the number of layers of the network from the old task to the new task. To this end, we limit ourselves to convex loss functions in order to train the network in a layer-wise manner. Layer-wise convex optimization leads to low-computational complexity and provides a more interpretable understanding of the network. We compare the effectiveness of the proposed adaptive LwF algorithm with the standard LwF over image classification datasets.},\n  keywords = {Training;Signal processing algorithms;Convex functions;Complexity theory;Classification algorithms;Task analysis;Standards;learning without forgetting;convex neural networks;size adaptive;low complexity},\n  doi = {10.23919/Eusipco47968.2020.9287632},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001623.pdf},\n}\n\n
\n
\n\n\n
\n We study the problem of learning without forgetting (LwF) in which a deep learning model learns new tasks without a significant drop in the classification performance on the previously learned tasks. We propose an LwF algorithm for multilayer feedforward neural networks in which we can adapt the number of layers of the network from the old task to the new task. To this end, we limit ourselves to convex loss functions in order to train the network in a layer-wise manner. Layer-wise convex optimization leads to low-computational complexity and provides a more interpretable understanding of the network. We compare the effectiveness of the proposed adaptive LwF algorithm with the standard LwF over image classification datasets.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Teager Energy Cepstral Coefficients for Classification of Normal vs. Whisper Speech.\n \n \n \n \n\n\n \n Khoria, K.; Kamble, M. R.; and Patil, H. A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1-5, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"TeagerPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287634,\n  author = {K. Khoria and M. R. Kamble and H. A. Patil},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Teager Energy Cepstral Coefficients for Classification of Normal vs. Whisper Speech},\n  year = {2020},\n  pages = {1-5},\n  abstract = {The whispered speech is quite different from natural speech in the context of nature, acoustic characteristics, and generation mechanism. In order to improve the robustness of Automatic Speech Recognition (ASR) system, it is very important to analyze the mismatched training and testing situations and propose a robust acoustic features to enhance the whisper recognition. In this paper we propose to use Teager Energy Cepstral Coefficients (TECC) which uses Teager Energy Operator (TEO) for estimating {"}true{"} total energy of the signal, i.e., the sum of kinetic and potential energies which is contradictory to the traditional signal energy approximation, which only takes kinetic energy into account, i.e., L2 norm of the signal. In this study, experiments are performed on wTIMIT and CHAINS corpus. For wTIMIT corpus, frame-level accuracy of 92.22 % is obtained and for CHAINS corpus, it is 95.61 %. We have also estimated the performance measure of the classifier by using Matthew Correlation Coefficient (MCC), F-measure, and J-statistics. Furthermore, experiments are performed by considering latency period from a practical deployment viewpoint, and the trade-off between latency period vs. accuracy is discussed for both the corpora.},\n  keywords = {Training;Potential energy;Cepstral analysis;Natural languages;Signal processing;Robustness;Speech processing;Whispered Speech Recognition (WSR);Teager Energy Operator;Equal Error Rate (EER);Latency},\n  doi = {10.23919/Eusipco47968.2020.9287634},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000371.pdf},\n}\n\n
\n
\n\n\n
\n Whispered speech is quite different from natural speech in terms of its nature, acoustic characteristics, and generation mechanism. In order to improve the robustness of an Automatic Speech Recognition (ASR) system, it is important to analyze mismatched training and testing conditions and to propose robust acoustic features that enhance whisper recognition. In this paper we propose to use Teager Energy Cepstral Coefficients (TECC), which use the Teager Energy Operator (TEO) to estimate the \"true\" total energy of the signal, i.e., the sum of kinetic and potential energies. This contrasts with the traditional signal energy approximation, which only takes kinetic energy into account, i.e., the L2 norm of the signal. In this study, experiments are performed on the wTIMIT and CHAINS corpora. For the wTIMIT corpus, a frame-level accuracy of 92.22% is obtained, and for the CHAINS corpus it is 95.61%. We also assess the performance of the classifier using the Matthews Correlation Coefficient (MCC), F-measure, and J-statistic. Furthermore, experiments are performed considering the latency period from a practical deployment viewpoint, and the trade-off between latency and accuracy is discussed for both corpora.\n
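The Teager Energy Operator referred to above has a simple discrete form, Ψ[x](n) = x(n)² − x(n−1)·x(n+1), which for a sinusoid tracks amplitude and frequency jointly rather than amplitude alone. The sketch below computes it and checks the sinusoidal identity; building the full TECC features (filterbank, log compression, DCT) around this operator is not shown, and the test tone is an illustrative assumption.

import numpy as np

def teager_energy(x):
    """Discrete Teager Energy Operator: Psi[x](n) = x(n)^2 - x(n-1) * x(n+1).
    For A*sin(w*n) this equals A^2 * sin(w)^2 at every sample, so it reflects both
    amplitude and frequency, unlike the plain squared-sample (L2) energy."""
    x = np.asarray(x, dtype=float)
    return x[1:-1] ** 2 - x[:-2] * x[2:]

n = np.arange(400)
tone = 0.7 * np.sin(2 * np.pi * 0.05 * n)
print(np.mean(teager_energy(tone)))                  # matches (0.7 * sin(2*pi*0.05))**2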
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust Jointly-Sparse Signal Recovery Based on Minimax Concave Loss Function.\n \n \n \n \n\n\n \n Suzuki, K.; and Masahiro, Y.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2070-2074, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287635,\n  author = {K. Suzuki and Y. Masahiro},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust Jointly-Sparse Signal Recovery Based on Minimax Concave Loss Function},\n  year = {2020},\n  pages = {2070-2074},\n  abstract = {We propose a robust approach to recovering the jointly-sparse signals in the presence of outliers. We formulate the recovering task as a minimization problem involving three terms: (i) the minimax concave (MC) loss function, (ii) the MC penalty function, and (iii) the squared Frobenius norm. The MC-based loss and penalty functions enhance robustness and group sparsity, respectively, while the squared Frobenius norm induces the convexity. The problem is solved, via reformulation, by the primal-dual splitting method, for which the convergence condition is derived. Numerical examples show that the proposed approach enjoys remarkable outlier robustness.},\n  keywords = {Radio frequency;Signal processing algorithms;Signal processing;Minimization;Robustness;Task analysis;Convergence;robustness;minimax concave function;jointly-sparse signals;multiple measurement vector problem;feature selection},\n  doi = {10.23919/Eusipco47968.2020.9287635},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002070.pdf},\n}\n\n
\n
\n\n\n
\n We propose a robust approach to recovering the jointly-sparse signals in the presence of outliers. We formulate the recovering task as a minimization problem involving three terms: (i) the minimax concave (MC) loss function, (ii) the MC penalty function, and (iii) the squared Frobenius norm. The MC-based loss and penalty functions enhance robustness and group sparsity, respectively, while the squared Frobenius norm induces the convexity. The problem is solved, via reformulation, by the primal-dual splitting method, for which the convergence condition is derived. Numerical examples show that the proposed approach enjoys remarkable outlier robustness.\n
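For concreteness, the snippet below evaluates the textbook minimax concave penalty, which behaves like an ℓ1 penalty near zero and then flattens to a constant, reducing the bias of plain ℓ1 regularization. The paper builds both its loss and its penalty from MC-type functions and adds a squared Frobenius term to keep the overall problem convex; the parameterisation shown here is the standard one and may differ in detail from the paper's.

import numpy as np

def mc_penalty(x, lam=1.0, gamma=3.0):
    """Standard minimax concave penalty: lam*|x| - x^2/(2*gamma) for |x| <= gamma*lam,
    and the constant gamma*lam^2/2 beyond that point."""
    x = np.abs(np.asarray(x, dtype=float))
    return np.where(x <= gamma * lam,
                    lam * x - x ** 2 / (2 * gamma),
                    gamma * lam ** 2 / 2)

print(mc_penalty([0.0, 0.5, 3.0, 10.0]))             # 0, ~0.458, 1.5, 1.5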
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-Channel Electronic Stethoscope for Enhanced Cardiac Auscultation using Beamforming and Equalisation Techniques.\n \n \n \n \n\n\n \n Pasha, S.; Lundgren, J.; and Ritz, C.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1289-1293, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-ChannelPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287636,\n  author = {S. Pasha and J. Lundgren and C. Ritz},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-Channel Electronic Stethoscope for Enhanced Cardiac Auscultation using Beamforming and Equalisation Techniques},\n  year = {2020},\n  pages = {1289-1293},\n  abstract = {This paper reports on the implementation of a multi-channel electronic stethoscope designed to isolate the heart sound from the interfering sounds of the lungs and blood vessels. The multi-channel stethoscope comprises four piezo contact microphones arranged in rectangular and linear arrays. Beamforming and channel equalisation techniques are applied to the multi-channel recordings made in the aortic, pulmonary, tricuspid, and mitral valve areas. The proposed channel equaliser cancels out the distorting effect of the chest and rib cage on the heart sound frequency spectrum. It is shown that the applied beamforming methods effectively suppress the interfering lung noise and improve the signal to interference and noise ratio by 16 dB. The results confirm the superior performance of the implemented multi-channel stethoscope compared with commercially available single-channel electronic stethoscopes.},\n  keywords = {Heart;Transducers;Array signal processing;Lung;Stethoscope;Valves;Microphone arrays;Beamforming;Channel equalisation;Chest acoustics;Microphone arrays;Multi-channel electronic stethoscope},\n  doi = {10.23919/Eusipco47968.2020.9287636},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001289.pdf},\n}\n\n
\n
\n\n\n
\n This paper reports on the implementation of a multi-channel electronic stethoscope designed to isolate the heart sound from the interfering sounds of the lungs and blood vessels. The multi-channel stethoscope comprises four piezo contact microphones arranged in rectangular and linear arrays. Beamforming and channel equalisation techniques are applied to the multi-channel recordings made in the aortic, pulmonary, tricuspid, and mitral valve areas. The proposed channel equaliser cancels out the distorting effect of the chest and rib cage on the heart sound frequency spectrum. It is shown that the applied beamforming methods effectively suppress the interfering lung noise and improve the signal to interference and noise ratio by 16 dB. The results confirm the superior performance of the implemented multi-channel stethoscope compared with commercially available single-channel electronic stethoscopes.\n
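The simplest beamformer of the kind described here is delay-and-sum: each microphone channel is shifted so the target (heart) source aligns across channels, and the channels are then averaged, which attenuates uncorrelated interference. The sketch below illustrates this with integer sample delays and synthetic signals; the actual array geometry, delay estimation, and chest equalisation filters of the paper are not reproduced.

import numpy as np

def delay_and_sum(channels, delays_samples):
    """Time-domain delay-and-sum beamformer: undo each channel's known delay
    (in samples) and average the aligned channels."""
    n = min(len(ch) for ch in channels)
    out = np.zeros(n)
    for ch, d in zip(channels, delays_samples):
        out += np.roll(ch[:n], -d)
    return out / len(channels)

# Toy usage: four delayed, noisy copies of a low-frequency "heart sound".
rng = np.random.default_rng(0)
s = np.sin(2 * np.pi * 30 * np.arange(2000) / 4000)  # 30 Hz tone at fs = 4 kHz
delays = [0, 3, 5, 8]
chans = [np.roll(s, d) + 0.5 * rng.standard_normal(len(s)) for d in delays]
enhanced = delay_and_sum(chans, delays)
print(np.std(enhanced - s) < np.std(chans[0] - s))   # True: noise is attenuated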
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n High-Resolution Speaker Counting in Reverberant Rooms Using CRNN with Ambisonics Features.\n \n \n \n \n\n\n \n Grumiaux, P. -.; Kitić, S.; Girin, L.; and Guérin, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 71-75, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"High-ResolutionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287637,\n  author = {P. -A. Grumiaux and S. Kitić and L. Girin and A. Guérin},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {High-Resolution Speaker Counting in Reverberant Rooms Using CRNN with Ambisonics Features},\n  year = {2020},\n  pages = {71-75},\n  abstract = {Speaker counting is the task of estimating the number of people that are simultaneously speaking in an audio recording. For several audio processing tasks such as speaker diarization, separation, localization and tracking, knowing the number of speakers at each timestep is a prerequisite, or at least it can be a strong advantage, in addition to enabling a low latency processing. For that purpose, we address the speaker counting problem with a multichannel convolutional recurrent neural network which produces an estimation at a short-term frame resolution. We trained the network to predict up to 5 concurrent speakers in a multichannel mixture, with simulated data including many different conditions in terms of source and microphone positions, reverberation, and noise. The network can predict the number of speakers with good accuracy at frame resolution.},\n  keywords = {Speech analysis;Recurrent neural networks;Europe;Estimation;Reverberation;Task analysis;Microphones;Speaker counting;CRNN;reverberation},\n  doi = {10.23919/Eusipco47968.2020.9287637},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000071.pdf},\n}\n\n
\n
\n\n\n
\n Speaker counting is the task of estimating the number of people that are simultaneously speaking in an audio recording. For several audio processing tasks such as speaker diarization, separation, localization and tracking, knowing the number of speakers at each timestep is a prerequisite, or at least a strong advantage, in addition to enabling low-latency processing. For that purpose, we address the speaker counting problem with a multichannel convolutional recurrent neural network which produces an estimate at short-term frame resolution. We trained the network to predict up to 5 concurrent speakers in a multichannel mixture, with simulated data covering many different conditions in terms of source and microphone positions, reverberation, and noise. The network predicts the number of speakers with good accuracy at frame resolution.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multipitch tracking in music signals using Echo State Networks.\n \n \n \n \n\n\n \n Steiner, P.; Stone, S.; Birkholz, P.; and Jalalvand, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 126-130, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"MultipitchPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287638,\n  author = {P. Steiner and S. Stone and P. Birkholz and A. Jalalvand},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Multipitch tracking in music signals using Echo State Networks},\n  year = {2020},\n  pages = {126-130},\n  abstract = {Currently, convolutional neural networks (CNNs) define the state of the art for multipitch tracking in music signals. Echo State Networks (ESNs), a recently introduced recurrent neural network architecture, achieved similar results as CNNs for various tasks, such as phoneme or digit recognition. However, they have not yet received much attention in the community of Music Information Retrieval. The core of ESNs is a group of unordered, randomly connected neurons, i.e., the reservoir, by which the low-dimensional input space is non-linearly transformed into a high-dimensional feature space. Because only the weights of the connections between the reservoir and the output are trained using linear regression, ESNs are easier to train than deep neural networks. This paper presents a first exploration of ESNs for the challenging task of multipitch tracking in music signals. The best results presented in this paper were achieved with a bidirectional two-layer ESN with 20 000 neurons in each layer. Although the final F-score of 0.7198 still falls below the state of the art (0.7370), the proposed ESN-based approach serves as a baseline for further investigations of ESNs in audio signal processing in the future.},\n  keywords = {Recurrent neural networks;Neurons;Signal processing;Reservoirs;Multiple signal classification;Task analysis;Music information retrieval;Reservoir Computing;Echo State Network;Multipitch;RNN;MIR},\n  doi = {10.23919/Eusipco47968.2020.9287638},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000126.pdf},\n}\n\n
\n
\n\n\n
\n Currently, convolutional neural networks (CNNs) define the state of the art for multipitch tracking in music signals. Echo State Networks (ESNs), a recently introduced recurrent neural network architecture, achieved similar results as CNNs for various tasks, such as phoneme or digit recognition. However, they have not yet received much attention in the community of Music Information Retrieval. The core of ESNs is a group of unordered, randomly connected neurons, i.e., the reservoir, by which the low-dimensional input space is non-linearly transformed into a high-dimensional feature space. Because only the weights of the connections between the reservoir and the output are trained using linear regression, ESNs are easier to train than deep neural networks. This paper presents a first exploration of ESNs for the challenging task of multipitch tracking in music signals. The best results presented in this paper were achieved with a bidirectional two-layer ESN with 20 000 neurons in each layer. Although the final F-score of 0.7198 still falls below the state of the art (0.7370), the proposed ESN-based approach serves as a baseline for further investigations of ESNs in audio signal processing in the future.\n
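The essential ESN recipe is short enough to sketch: drive a fixed random reservoir with the input, collect the states, and fit only a linear readout by ridge regression. The toy task below (reproducing a delayed copy of the input) and all sizes and scalings are illustrative assumptions; the paper's bidirectional two-layer ESN with 20 000 neurons per layer is far larger and operates on audio features rather than this synthetic signal.

import numpy as np

def run_reservoir(inputs, n_res=200, spectral_radius=0.9, seed=0):
    """Drive a fixed random reservoir and return the state sequence; only the
    readout (below) is trained, which keeps ESN training cheap."""
    rng = np.random.default_rng(seed)
    w_in = rng.uniform(-0.5, 0.5, size=(n_res, inputs.shape[1]))
    w = rng.standard_normal((n_res, n_res))
    w *= spectral_radius / np.max(np.abs(np.linalg.eigvals(w)))   # echo-state scaling
    x, states = np.zeros(n_res), []
    for u in inputs:
        x = np.tanh(w_in @ u + w @ x)
        states.append(x.copy())
    return np.array(states)

def train_readout(states, targets, ridge=1e-2):
    """Closed-form ridge regression from reservoir states to targets."""
    a = states.T @ states + ridge * np.eye(states.shape[1])
    return np.linalg.solve(a, states.T @ targets)

# Toy task: reproduce the input delayed by 5 steps.
rng = np.random.default_rng(1)
u = rng.standard_normal((1000, 1))
y = np.roll(u, 5, axis=0)
states = run_reservoir(u)
w_out = train_readout(states[100:], y[100:])         # skip the initial transient
print(np.mean((states[100:] @ w_out - y[100:]) ** 2))  # small relative to input variance of 1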
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Curriculum Learning for Face Recognition.\n \n \n \n \n\n\n \n Büyüktaş, B.; Erdem, Ç. E.; and Erdem, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 650-654, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"CurriculumPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287639,\n  author = {B. Büyüktaş and Ç. E. Erdem and T. Erdem},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Curriculum Learning for Face Recognition},\n  year = {2020},\n  pages = {650-654},\n  abstract = {We present a novel curriculum learning (CL) algorithm for face recognition using convolutional neural networks. Curriculum learning is inspired by the fact that humans learn better, when the presented information is organized in a way that covers the easy concepts first, followed by more complex ones. It has been shown in the literature that that CL is also beneficial for machine learning tasks by enabling convergence to a better local minimum. In the proposed CL algorithm for face recognition, we divide the training set of face images into subsets of increasing difficulty based on the head pose angle obtained from the absolute sum of yaw, pitch and roll angles. These subsets are introduced to the deep CNN in order of increasing difficulty. Experimental results on the large-scale CASIA-WebFace-Sub dataset show that the increase in face recognition accuracy is statistically significant when CL is used, as compared to organizing the training data in random batches.},\n  keywords = {Training;Head;Image recognition;Face recognition;Signal processing algorithms;Training data;Magnetic heads;face recognition;deep learning;curriculum learning},\n  doi = {10.23919/Eusipco47968.2020.9287639},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000650.pdf},\n}\n\n
\n
\n\n\n
\n We present a novel curriculum learning (CL) algorithm for face recognition using convolutional neural networks. Curriculum learning is inspired by the fact that humans learn better when the presented information is organized to cover the easy concepts first, followed by more complex ones. It has been shown in the literature that CL is also beneficial for machine learning tasks by enabling convergence to a better local minimum. In the proposed CL algorithm for face recognition, we divide the training set of face images into subsets of increasing difficulty based on the head pose angle, obtained as the sum of the absolute yaw, pitch and roll angles. These subsets are introduced to the deep CNN in order of increasing difficulty. Experimental results on the large-scale CASIA-WebFace-Sub dataset show that the increase in face recognition accuracy is statistically significant when CL is used, as compared to organizing the training data in random batches.\n
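The curriculum construction described above reduces to sorting the training samples by the difficulty score |yaw| + |pitch| + |roll| and splitting them into stages of increasing difficulty. The sketch below shows that ordering step on dummy data; the number of stages and the way stages are scheduled during training are illustrative assumptions.

import numpy as np

def curriculum_stages(samples, poses, n_stages=3):
    """Order samples from easy to hard by the head-pose difficulty score
    |yaw| + |pitch| + |roll| and split them into curriculum stages."""
    difficulty = np.abs(poses).sum(axis=1)           # poses: (N, 3) yaw/pitch/roll angles
    order = np.argsort(difficulty)
    return [samples[idx] for idx in np.array_split(order, n_stages)]

# Dummy "images" (here just indices) with random pose angles in degrees.
rng = np.random.default_rng(0)
samples = np.arange(12)
poses = rng.uniform(-60, 60, size=(12, 3))
for k, stage in enumerate(curriculum_stages(samples, poses)):
    print(f"stage {k}: {stage}")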
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n One and Two Dimensional Convolutional Neural Networks for Seizure Detection Using EEG Signals.\n \n \n \n \n\n\n \n Wang, X.; Ristaniemi, T.; and Cong, F.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1387-1391, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"OnePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287640,\n  author = {X. Wang and T. Ristaniemi and F. Cong},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {One and Two Dimensional Convolutional Neural Networks for Seizure Detection Using EEG Signals},\n  year = {2020},\n  pages = {1387-1391},\n  abstract = {Deep learning for the automated detection of epileptic seizures has received much attention during recent years. In this work, one dimensional convolutional neural network (1D-CNN) and two dimensional convolutional neural network (2D-CNN) are simultaneously used on electroencephalogram (EEG) data for seizure detection. Firstly, using sliding windows without overlap on raw EEG to obtain the definite one-dimension time EEG segments (1D-T), and continuous wavelet transform (CWT) for 1D-T signals to obtain the two-dimension time-frequency representations (2D-TF). Then, 1D-CNN and 2D-CNN model architectures are used on 1D-T and 2D-TF signals for automatic classification, respectively. Finally, the classification results from 1D-CNN and 2D-CNN are showed. In the two-classification and three-classification problems of seizure detection, the highest accuracy can reach 99.92% and 99.55%, respectively. It shows that the proposed method for a benchmark clinical dataset can achieve good performance in terms of seizure detection.},\n  keywords = {Time-frequency analysis;Continuous wavelet transforms;Neural networks;Europe;Signal processing;Electroencephalography;Convolutional neural networks;Electroencephalogram (EEG);seizure detection;convolutional neural networks (CNN);deep learning;time-frequency representation},\n  doi = {10.23919/Eusipco47968.2020.9287640},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001387.pdf},\n}\n\n
\n
\n\n\n
\n Deep learning for the automated detection of epileptic seizures has received much attention in recent years. In this work, a one-dimensional convolutional neural network (1D-CNN) and a two-dimensional convolutional neural network (2D-CNN) are used on electroencephalogram (EEG) data for seizure detection. First, sliding windows without overlap are applied to the raw EEG to obtain one-dimensional time-domain EEG segments (1D-T), and the continuous wavelet transform (CWT) is applied to the 1D-T signals to obtain two-dimensional time-frequency representations (2D-TF). Then, 1D-CNN and 2D-CNN model architectures are used on the 1D-T and 2D-TF signals for automatic classification, respectively. Finally, the classification results from the 1D-CNN and 2D-CNN are reported. In the two-class and three-class seizure detection problems, the highest accuracies reach 99.92% and 99.55%, respectively. This shows that the proposed method achieves good seizure detection performance on a benchmark clinical dataset.\n
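The preprocessing pipeline in the abstract has two simple stages: non-overlapping windowing of the raw EEG (the 1D-T inputs) and a continuous wavelet transform of each window (the 2D-TF inputs). The sketch below shows a minimal version, with a hand-rolled complex-Morlet scalogram standing in for a library CWT; the sampling rate, window length, frequency grid, and wavelet width are illustrative assumptions rather than the paper's settings.

import numpy as np

def segment(eeg, fs, win_sec=2.0):
    """Cut a single-channel EEG trace into non-overlapping windows (1D-T segments)."""
    step = int(win_sec * fs)
    n = (len(eeg) // step) * step
    return eeg[:n].reshape(-1, step)

def morlet_scalogram(x, fs, freqs):
    """Correlate a segment with complex Morlet atoms at the given centre frequencies
    and return magnitudes: a (len(freqs) x len(x)) time-frequency image (2D-TF)."""
    t = (np.arange(len(x)) - len(x) / 2) / fs
    rows = []
    for f in freqs:
        sigma = 5.0 / (2 * np.pi * f)                # roughly five cycles per atom
        atom = np.exp(2j * np.pi * f * t) * np.exp(-t ** 2 / (2 * sigma ** 2))
        rows.append(np.abs(np.convolve(x, atom, mode="same")))
    return np.array(rows)

# Toy usage: 10 s of synthetic "EEG" at 256 Hz, one scalogram per 2 s window.
fs = 256
eeg = np.random.default_rng(0).standard_normal(10 * fs)
windows = segment(eeg, fs)                           # (5, 512): inputs to the 1D-CNN
images = [morlet_scalogram(w, fs, np.arange(1, 41)) for w in windows]
print(windows.shape, images[0].shape)                # (5, 512) (40, 512): inputs to the 2D-CNN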
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Lie-group based modelling for centroid and shape estimation of a cluster of space debris.\n \n \n \n \n\n\n \n Labsir, S.; Giremus, A.; Yver, B.; and Benoudiba–Campanini, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 960-964, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287641,\n  author = {S. Labsir and A. Giremus and B. Yver and T. Benoudiba–Campanini},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Lie-group based modelling for centroid and shape estimation of a cluster of space debris},\n  year = {2020},\n  pages = {960-964},\n  abstract = {In the context of spatial surveillance, we are interested in estimating the outline and centroid position of a cluster of debris from a set of noisy sensor observations. The motion of the pieces of debris is completely driven by Kepler's law, therefore they scatter taking a specific curvature. This spreading resembles that of samples drawn on the Lie group SE(3). For this reason, we propose a reformulation of the cluster observation model on Lie groups to intrinsically capture its shape. Then, we derive an optimization algorithm on Lie group to solve the estimation problem. The presented approach is validated on simulated data and compared to a state-of-the-art method based on a Gaussian process modelling.},\n  keywords = {Gaussian processes;Lie groups;centroid position;noisy sensor observations;Lie group;cluster observation model;estimation problem;Gaussian process modelling;space debris;spatial surveillance;Shape;Space debris;Surveillance;Estimation;Signal processing algorithms;Clustering algorithms;Optimization;Space debris;extended target;Bayesian estimation;optimization on Lie group},\n  doi = {10.23919/Eusipco47968.2020.9287641},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000960.pdf},\n}\n\n
\n
\n\n\n
\n In the context of spatial surveillance, we are interested in estimating the outline and centroid position of a cluster of debris from a set of noisy sensor observations. The motion of the pieces of debris is completely driven by Kepler's law; therefore, they scatter along a specific curvature. This spreading resembles that of samples drawn on the Lie group SE(3). For this reason, we propose a reformulation of the cluster observation model on Lie groups to intrinsically capture its shape. Then, we derive an optimization algorithm on the Lie group to solve the estimation problem. The presented approach is validated on simulated data and compared to a state-of-the-art method based on Gaussian process modelling.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Inclusion of Instantaneous Influences in the Spectral Decomposition of Causality: Application to the Control Mechanisms of Heart Rate Variability.\n \n \n \n \n\n\n \n Nuzzi, D.; Faes, L.; Javorka, M.; Marinazzo, D.; and Stramaglia, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 930-934, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"InclusionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287642,\n  author = {D. Nuzzi and L. Faes and M. Javorka and D. Marinazzo and S. Stramaglia},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Inclusion of Instantaneous Influences in the Spectral Decomposition of Causality: Application to the Control Mechanisms of Heart Rate Variability},\n  year = {2020},\n  pages = {930-934},\n  abstract = {Heart rate variability is the result of several physiological regulation mechanisms, including cardiovascular and cardiorespiratory interactions. Since instantaneous influences occurring within the same cardiac beat are commonplace in this regulation, their inclusion is mandatory to get a realistic model of physiological causal interactions. Here we exploit a recently proposed framework for the spectral decomposition of causal influences between autoregressive processes [2] and generalize it by introducing instantaneous couplings in the vector autoregressive model (VAR). We show the effectiveness of the proposed approach on a toy model, and on real data consisting of heart period (RR), systolic pressure (SAP) and respiration (RESP) variability series measured in healthy subjects in baseline and head up tilt conditions. In particular, we show that our framework allows one to highlight patterns of frequency domain causality that are consistent with well-interpretable physiological interaction mechanisms like the weakening of respiratory sinus arrhythmia at high frequencies and the activation of the baroreflex control at lower frequencies, in response to postural stress.},\n  keywords = {autoregressive processes;biocontrol;cardiovascular system;electrocardiography;haemodynamics;medical signal processing;neurophysiology;pneumodynamics;time series;cardiorespiratory interactions;instantaneous influences;cardiac beat;physiological causal interactions;spectral decomposition;causal influences;autoregressive processes;instantaneous couplings;vector autoregressive model;toy model;frequency domain causality;well-interpretable physiological interaction mechanisms;heart rate variability;physiological regulation mechanisms;cardiovascular interactions;Time-frequency analysis;Physiology;Regulation;High frequency;Heart rate variability;Frequency control;Stress;Network physiology;Regression analysis;Spectral analysis;Stochastic processes},\n  doi = {10.23919/Eusipco47968.2020.9287642},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000930.pdf},\n}\n\n
\n
\n\n\n
\n Heart rate variability is the result of several physiological regulation mechanisms, including cardiovascular and cardiorespiratory interactions. Since instantaneous influences occurring within the same cardiac beat are commonplace in this regulation, their inclusion is mandatory to get a realistic model of physiological causal interactions. Here we exploit a recently proposed framework for the spectral decomposition of causal influences between autoregressive processes [2] and generalize it by introducing instantaneous couplings in the vector autoregressive model (VAR). We show the effectiveness of the proposed approach on a toy model, and on real data consisting of heart period (RR), systolic pressure (SAP) and respiration (RESP) variability series measured in healthy subjects in baseline and head up tilt conditions. In particular, we show that our framework allows one to highlight patterns of frequency domain causality that are consistent with well-interpretable physiological interaction mechanisms like the weakening of respiratory sinus arrhythmia at high frequencies and the activation of the baroreflex control at lower frequencies, in response to postural stress.\n
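A minimal way to include instantaneous couplings in a VAR is to regress each series on the lagged values of all series plus the current values of the series that precede it in a chosen ordering (a simple recursive scheme). The numpy sketch below shows only this extended least-squares fit, not the authors' spectral decomposition of causality; the function name and the ordering-based treatment of instantaneous effects are assumptions.

```python
import numpy as np

def extended_var_fit(X, p, order=None):
    """Least-squares fit of a VAR(p) extended with instantaneous terms.

    X: (T, M) array of series (e.g. RR, SAP, RESP).  Each series i is regressed
    on p lags of all series plus the *current* samples of the series preceding
    it in `order`; the paper's spectral decomposition itself is not reproduced.
    """
    T, M = X.shape
    order = list(range(M)) if order is None else order
    lagged = np.hstack([X[p - k:T - k] for k in range(1, p + 1)])   # (T-p, M*p)
    coefs = {}
    for pos, i in enumerate(order):
        inst = X[p:, order[:pos]]                     # instantaneous regressors
        A = np.hstack([lagged, inst]) if pos else lagged
        coefs[i], *_ = np.linalg.lstsq(A, X[p:, i], rcond=None)
    return coefs
```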
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n CycleGAN Voice Conversion of Spectral Envelopes using Adversarial Weights.\n \n \n \n \n\n\n \n Ferro, R.; Obin, N.; and Roebel, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 406-410, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"CycleGANPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287643,\n  author = {R. Ferro and N. Obin and A. Roebel},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {CycleGAN Voice Conversion of Spectral Envelopes using Adversarial Weights},\n  year = {2020},\n  pages = {406-410},\n  abstract = {This paper tackles GAN optimization and stability issues in the context of voice conversion. First, to simplify the conversion task, we propose to use spectral envelopes as inputs. Second we propose two adversarial weight training paradigms, the generalized weighted GAN and the generator impact GAN, both aim at reducing the impact of the generator on the discriminator, so both can learn more gradually and efficiently during training. Applying an energy constraint to the cycleGAN paradigm considerably improved conversion quality. A subjective experiment conducted on a voice conversion task on the voice conversion challenge 2018 dataset shows first that despite a significantly reduced network complexity, the proposed method achieves state-of-the-art results, and second that the proposed weighted GAN methods outperform a previously proposed one.},\n  keywords = {Training;Europe;Signal processing;Generative adversarial networks;Generators;Task analysis;Optimization;voice conversion;cycleGAN;GAN stability;adversarial weights},\n  doi = {10.23919/Eusipco47968.2020.9287643},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000406.pdf},\n}\n\n
\n
\n\n\n
\n This paper tackles GAN optimization and stability issues in the context of voice conversion. First, to simplify the conversion task, we propose to use spectral envelopes as inputs. Second, we propose two adversarial weight training paradigms, the generalized weighted GAN and the generator impact GAN, which both aim at reducing the impact of the generator on the discriminator so that both can learn more gradually and efficiently during training. Applying an energy constraint to the cycleGAN paradigm considerably improved conversion quality. A subjective experiment conducted on a voice conversion task on the voice conversion challenge 2018 dataset shows, first, that despite a significantly reduced network complexity the proposed method achieves state-of-the-art results, and, second, that the proposed weighted GAN methods outperform a previously proposed one.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multichannel Acoustic Echo Cancellation Applied to Microphone Leakage Reduction in Meetings.\n \n \n \n \n\n\n \n Meyer, P.; Elshamy, S.; Franzen, J.; and Fingscheidt, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 201-205, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"MultichannelPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287644,\n  author = {P. Meyer and S. Elshamy and J. Franzen and T. Fingscheidt},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Multichannel Acoustic Echo Cancellation Applied to Microphone Leakage Reduction in Meetings},\n  year = {2020},\n  pages = {201-205},\n  abstract = {Microphone leakage occurs in multichannel close-talk audio recordings of a meeting, when speech of an active speaker couples into both the dedicated target microphone and all other microphone channels. For an automatic transcription or analysis of a meeting, the interferer signals in the target microphone channels have to be eliminated. Therefore, we apply a frequency domain adaptive filtering-based multichannel acoustic echo cancellation (MAEC) method, which typically requires clean reference channels. We consider a wide range of different speech-to-interferer ratios and evaluate two cascading schemes for the MAEC, which leads to an improved speech component quality and interferer reduction by up to 0.1MOS points and 0.5dB, respectively. However, the purpose of this work is not to improve the MAEC method, but instead to show that it can be successfully applied to microphone leakage reduction, such as in meetings with headset-equipped participants. Therefore, we analyze and point out why the MAEC method is able to cancel the interferer signals in this scenario even though the reference signals are themselves disturbed by interfering speech portions.},\n  keywords = {Echo cancellers;Frequency-domain analysis;Europe;Signal processing;Audio recording;Acoustics;Microphones;speaker interference reduction;Kalman filter;meeting;social signal processing;crosstalk;microphone leakage},\n  doi = {10.23919/Eusipco47968.2020.9287644},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000201.pdf},\n}\n\n
\n
\n\n\n
\n Microphone leakage occurs in multichannel close-talk audio recordings of a meeting when the speech of an active speaker couples into both the dedicated target microphone and all other microphone channels. For an automatic transcription or analysis of a meeting, the interferer signals in the target microphone channels have to be eliminated. Therefore, we apply a frequency-domain adaptive filtering-based multichannel acoustic echo cancellation (MAEC) method, which typically requires clean reference channels. We consider a wide range of different speech-to-interferer ratios and evaluate two cascading schemes for the MAEC, which improve speech component quality and interferer reduction by up to 0.1 MOS points and 0.5 dB, respectively. However, the purpose of this work is not to improve the MAEC method, but instead to show that it can be successfully applied to microphone leakage reduction, such as in meetings with headset-equipped participants. Therefore, we analyze and point out why the MAEC method is able to cancel the interferer signals in this scenario even though the reference signals are themselves disturbed by interfering speech portions.\n
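The core idea of adaptive interference cancellation, stripped to its simplest form, is to subtract an adaptively filtered copy of the interferer's reference channel from the target microphone. The sketch below is a single-channel, time-domain NLMS canceller given for intuition only; the paper itself uses a frequency-domain adaptive filter across multiple channels (MAEC), which is not reproduced here, and the filter length and step size are placeholders.

```python
import numpy as np

def nlms_echo_canceller(reference, microphone, n_taps=256, mu=0.5, eps=1e-6):
    """Minimal single-channel NLMS canceller: subtract the filtered reference
    (the interfering speaker's channel) from the target microphone signal."""
    w = np.zeros(n_taps)
    buf = np.zeros(n_taps)
    out = np.zeros(len(microphone))
    for n in range(len(microphone)):
        buf = np.roll(buf, 1)
        buf[0] = reference[n]
        e = microphone[n] - w @ buf              # residual = target minus interferer estimate
        w += mu * e * buf / (buf @ buf + eps)    # normalized LMS update
        out[n] = e
    return out
```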
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimizing an Image Coding Framework with Deep Learning-based Pre- and Post-Processing.\n \n \n \n \n\n\n \n Eusébio, P.; Ascenso, J.; and Pereira, F.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 506-510, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"OptimizingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287645,\n  author = {P. Eusébio and J. Ascenso and F. Pereira},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Optimizing an Image Coding Framework with Deep Learning-based Pre- and Post-Processing},\n  year = {2020},\n  pages = {506-510},\n  abstract = {Convolutional neural networks (CNN) are a popular machine learning architecture used to address multiple image-based tasks from understanding to coding. This paper targets improving image compression efficiency by designing and optimizing an image coding framework where a standard image codec, e.g. JPEG, is combined with deep neural network based pre- and post-processing. While the pre-processing CNN targets simplifying the image to make it more amenable to compression, notably involving its down-sampling, the post-processing CNN targets enhancing the decoded image, also involving its up-sampling. To optimize the compression performance, the processing CNNs are trained involving a third CNN, so-called CNN-FakeCodec, which targets modeling the image codec output, since the encoder-decoder pair is not differentiable, thus not allowing any training. Since the available alternative coding solutions focus on minimizing the image distortion, this paper proposes a new loss function which also considers a rate component, thus allowing to jointly minimize the rate and distortion. The performance results show that the proposed coding solutions can outperform the selected benchmarks, both classical and CNN-based.},\n  keywords = {Training;Image coding;Codecs;Transform coding;Distortion;Task analysis;Standards;Image coding;deep neural networks;rate-distortion optimization;pre-processing;post-processing},\n  doi = {10.23919/Eusipco47968.2020.9287645},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000506.pdf},\n}\n\n
\n
\n\n\n
\n Convolutional neural networks (CNN) are a popular machine learning architecture used to address multiple image-based tasks from understanding to coding. This paper targets improving image compression efficiency by designing and optimizing an image coding framework where a standard image codec, e.g. JPEG, is combined with deep neural network based pre- and post-processing. While the pre-processing CNN targets simplifying the image to make it more amenable to compression, notably involving its down-sampling, the post-processing CNN targets enhancing the decoded image, also involving its up-sampling. To optimize the compression performance, the processing CNNs are trained with the help of a third CNN, the so-called CNN-FakeCodec, which models the image codec output, since the encoder-decoder pair is not differentiable and thus cannot be trained through directly. Since the available alternative coding solutions focus on minimizing the image distortion, this paper proposes a new loss function that also considers a rate component, thus allowing the rate and distortion to be minimized jointly. The performance results show that the proposed coding solutions can outperform the selected benchmarks, both classical and CNN-based.\n
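The proposed loss has the classical joint rate-distortion form D + λR. A minimal PyTorch sketch is given below, assuming a differentiable bits-per-pixel estimate is available from a codec-proxy network (the role the abstract assigns to CNN-FakeCodec); the function name and the value of λ are illustrative, not the paper's.

```python
import torch.nn.functional as F

def rate_distortion_loss(original, reconstructed, rate_bpp, lam=0.01):
    """Generic joint rate-distortion objective of the form D + lambda * R.

    `rate_bpp` is assumed to be a differentiable bits-per-pixel estimate
    produced by a codec-proxy network; `lam` trades distortion against rate.
    """
    distortion = F.mse_loss(reconstructed, original)
    return distortion + lam * rate_bpp.mean()
```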
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n MIRaGe: Multichannel Database of Room Impulse Responses Measured on High-Resolution Cube-Shaped Grid.\n \n \n \n \n\n\n \n Čmejla, J.; Kounovský, T.; Gannot, S.; Koldovský, Z.; and Tandeitnik, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 56-60, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"MIRaGe:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287646,\n  author = {J. Čmejla and T. Kounovský and S. Gannot and Z. Koldovský and P. Tandeitnik},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {MIRaGe: Multichannel Database of Room Impulse Responses Measured on High-Resolution Cube-Shaped Grid},\n  year = {2020},\n  pages = {56-60},\n  abstract = {We introduce a database of multi-channel recordings performed in an acoustic lab with adjustable reverberation time. The recordings provide detailed information about room acoustics for positions of a source within a confined area. In particular, the main positions correspond to 4104 vertices of a cube-shaped dense grid within a 46 × 36 × 32 cm volume. The database can serve for simulations of a real-world situations and as a tool for detailed analyses of beampatterns of spatial processing methods. It could be used also for training and testing of mathematical models of the acoustic field.},\n  keywords = {Training;Area measurement;Position measurement;Tools;Acoustic measurements;Spatial databases;Mathematical model;Room Impulse Response;Acoustic Transfer Function;Microphone Array;Database},\n  doi = {10.23919/Eusipco47968.2020.9287646},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000056.pdf},\n}\n\n
\n
\n\n\n
\n We introduce a database of multi-channel recordings performed in an acoustic lab with adjustable reverberation time. The recordings provide detailed information about room acoustics for source positions within a confined area. In particular, the main positions correspond to 4104 vertices of a cube-shaped dense grid within a 46 × 36 × 32 cm volume. The database can serve for simulations of real-world situations and as a tool for detailed analyses of the beampatterns of spatial processing methods. It can also be used for training and testing mathematical models of the acoustic field.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n CBL: A Clothing Brand Logo Dataset and a New Method for Clothing Brand Recognition.\n \n \n \n \n\n\n \n Liu, K. -.; Liu, T. -.; and Wang, F.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 655-659, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"CBL:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287647,\n  author = {K. -H. Liu and T. -J. Liu and F. Wang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {CBL: A Clothing Brand Logo Dataset and a New Method for Clothing Brand Recognition},\n  year = {2020},\n  pages = {655-659},\n  abstract = {In this work, we presented a novel clothing brand logo prediction method which is rooted on a dense-block based deep convolutional neural network for brand logo detection and recognition. To learn convolutional neural networks deeper and more accurately, we adopted dense blocks into deep convolutional networks to make connections between layers shorter. In our work, we propose several dense-block structure designs to improve detection and recognition accuracy on clothing brand logos. We also built a new large-scale clothing brand logo (CBL) dataset with the brand attribute and logo information to facilitate this task. To reduce complexity for the proposed framework, two pixel search steps for the bounding movement are implemented in the training procedure. In the experiment, we demonstrate our search reduced model can outperform some state-of-the-art methods and achieve very good results.},\n  keywords = {Training;Clothing;Neural networks;Prediction methods;Complexity theory;Convolutional neural networks;Task analysis;clothing brand logo dataset;detection and recognition;prediction;dense block;convolutional neural network},\n  doi = {10.23919/Eusipco47968.2020.9287647},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000655.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we present a novel clothing brand logo prediction method rooted in a dense-block-based deep convolutional neural network for brand logo detection and recognition. To make convolutional neural networks deeper and more accurate, we adopt dense blocks in the deep convolutional networks to shorten the connections between layers. We propose several dense-block structure designs to improve detection and recognition accuracy on clothing brand logos. We also built a new large-scale clothing brand logo (CBL) dataset with brand attribute and logo information to facilitate this task. To reduce the complexity of the proposed framework, two pixel search steps for the bounding movement are implemented in the training procedure. In the experiments, we demonstrate that our search-reduced model can outperform several state-of-the-art methods and achieve very good results.\n
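For readers unfamiliar with dense blocks, the sketch below shows the standard construction in PyTorch: each layer receives the concatenation of all preceding feature maps, which is the "shorter connections between layers" idea the abstract refers to. The growth rate, layer count and BN-ReLU-Conv ordering are generic choices, not the paper's specific designs.

```python
import torch
import torch.nn as nn

class DenseBlock(nn.Module):
    """Minimal dense block: every layer receives the concatenation of all
    preceding feature maps, which shortens the paths between layers."""

    def __init__(self, in_channels, growth_rate=32, n_layers=4):
        super().__init__()
        self.layers = nn.ModuleList()
        ch = in_channels
        for _ in range(n_layers):
            self.layers.append(nn.Sequential(
                nn.BatchNorm2d(ch),
                nn.ReLU(inplace=True),
                nn.Conv2d(ch, growth_rate, kernel_size=3, padding=1, bias=False),
            ))
            ch += growth_rate

    def forward(self, x):
        features = [x]
        for layer in self.layers:
            out = layer(torch.cat(features, dim=1))   # dense connectivity
            features.append(out)
        return torch.cat(features, dim=1)
```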
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Training Noise-Resilient Recurrent Photonic Networks for Financial Time Series Analysis.\n \n \n \n \n\n\n \n Passalis, N.; Kirtas, M.; Mourgias-Alexandris, G.; Dabos, G.; Pleros, N.; and Tefas, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1556-1560, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"TrainingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287649,\n  author = {N. Passalis and M. Kirtas and G. Mourgias-Alexandris and G. Dabos and N. Pleros and A. Tefas},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Training Noise-Resilient Recurrent Photonic Networks for Financial Time Series Analysis},\n  year = {2020},\n  pages = {1556-1560},\n  abstract = {Photonic-based neuromorphic hardware holds the credentials for providing fast and energy efficient implementations of computationally complex Deep Learning (DL) models. At the same time, the unique nature of neuromorphic photonics also imposes a number of limitations that hinders its application, including the need to re-train DL models in order to be compliant with the underlying hardware architecture, as well as the existence of various noise sources, which are prevalent in virtually all neuromorphic photonic architectures and negatively affect the accuracy of the deployed models. In this paper we propose a novel noise-aware approach for training neural networks realized on photonic hardware, which can alleviate some of these limitations. To this end we first provide an extensive characterization of the various noise sources that affect sigmoid-based recurrent photonic architectures, as well as provide an extensive study on the effect of various signal-to-noise-ratios (SNRs) levels on the performance of such DL models. The effectiveness of the proposed method is demonstrated on a challenging forecasting problem that involves high frequency financial time series using a state-of-the-art recurrent photonic architecture, which natu-rally fits the requirements of such latency-critical applications. Apart from providing more accurate models, the proposed method opens several interesting future research directions on co-designing neuromorphic photonics, including developing DL models that can work on lower SNRs, leading to more energy efficient solutions.},\n  keywords = {Training;Neuromorphics;Time series analysis;Computer architecture;Hardware;Energy efficiency;Photonics;Photonic Deep Learning;Neural Network Ini-tialization;Noise-aware Training},\n  doi = {10.23919/Eusipco47968.2020.9287649},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001556.pdf},\n}\n\n
\n
\n\n\n
\n Photonic-based neuromorphic hardware holds the credentials for providing fast and energy-efficient implementations of computationally complex Deep Learning (DL) models. At the same time, the unique nature of neuromorphic photonics also imposes a number of limitations that hinder its application, including the need to re-train DL models in order to be compliant with the underlying hardware architecture, as well as the existence of various noise sources, which are prevalent in virtually all neuromorphic photonic architectures and negatively affect the accuracy of the deployed models. In this paper we propose a novel noise-aware approach for training neural networks realized on photonic hardware, which can alleviate some of these limitations. To this end, we first provide an extensive characterization of the various noise sources that affect sigmoid-based recurrent photonic architectures, as well as an extensive study of the effect of various signal-to-noise ratio (SNR) levels on the performance of such DL models. The effectiveness of the proposed method is demonstrated on a challenging forecasting problem that involves high-frequency financial time series using a state-of-the-art recurrent photonic architecture, which naturally fits the requirements of such latency-critical applications. Apart from providing more accurate models, the proposed method opens several interesting future research directions on co-designing neuromorphic photonics, including developing DL models that can work at lower SNRs, leading to more energy-efficient solutions.\n
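One generic form of noise-aware training is to inject noise at intermediate activations during the forward pass so that the learned weights remain accurate at the hardware's expected SNR. The PyTorch module below sketches this idea only; the actual noise sources characterized in the paper are specific to sigmoid-based photonic neurons and are not modelled here, and the additive Gaussian model and target SNR are assumptions.

```python
import torch
import torch.nn as nn

class NoisyActivation(nn.Module):
    """Inject Gaussian noise after an activation during training so the model
    learns weights that stay accurate at the hardware's expected SNR."""

    def __init__(self, snr_db=20.0):
        super().__init__()
        self.snr_db = snr_db

    def forward(self, x):
        if not self.training:
            return x
        signal_power = x.pow(2).mean()
        noise_power = signal_power / (10 ** (self.snr_db / 10))
        return x + torch.randn_like(x) * noise_power.sqrt()
```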
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Random Illumination Microscopy from Variance Images.\n \n \n \n \n\n\n \n Labouesse, S.; Idier, J.; Sentenac, A.; Mangeat, T.; and Allain, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 785-789, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RandomPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287651,\n  author = {S. Labouesse and J. Idier and A. Sentenac and T. Mangeat and M. Allain},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Random Illumination Microscopy from Variance Images},\n  year = {2020},\n  pages = {785-789},\n  abstract = {We propose a reconstruction algorithm called algoRIM for super-resolution fluorescence microscopy, based on speckle illuminations and image variance matching. Superresolution with a factor two or close can be achieved under realistic conditions in terms of number of images and signal to noise ratio. Here, our key result is an approximation of the statistical variance equation, leading to a drastic reduction of the computational complexity. Moreover, we demonstrate that the unmodulated out-of-focus light does not contribute to the data variance, and that the statistical component due to noise can be estimated and removed in an unsupervised way, which is a crucial contribution to the practical robustness of algoRIM.},\n  keywords = {Microscopy;Lighting;Signal processing algorithms;Fluorescence;Speckle;Signal resolution;Signal to noise ratio;Fluorescence microscopy;structured illumination;super-resolution;optical sectioning;variance;estimation},\n  doi = {10.23919/Eusipco47968.2020.9287651},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000785.pdf},\n}\n\n
\n
\n\n\n
\n We propose a reconstruction algorithm called algoRIM for super-resolution fluorescence microscopy, based on speckle illuminations and image variance matching. Super-resolution by a factor of two, or close to it, can be achieved under realistic conditions in terms of the number of images and the signal-to-noise ratio. Here, our key result is an approximation of the statistical variance equation, leading to a drastic reduction of the computational complexity. Moreover, we demonstrate that the unmodulated out-of-focus light does not contribute to the data variance, and that the statistical component due to noise can be estimated and removed in an unsupervised way, which is a crucial contribution to the practical robustness of algoRIM.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Shape-Based Glioma Mutation Prediction Using Magnetic Resonance Imaging.\n \n \n \n \n\n\n \n Schielen, S. J. C.; Spoor, J. K. H.; Fleischeuer, R. E. M.; Verheul, H. B.; Leenstra, S.; and Zinger, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1125-1129, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Shape-BasedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287652,\n  author = {S. J. C. Schielen and J. K. H. Spoor and R. E. M. Fleischeuer and H. B. Verheul and S. Leenstra and S. Zinger},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Shape-Based Glioma Mutation Prediction Using Magnetic Resonance Imaging},\n  year = {2020},\n  pages = {1125-1129},\n  abstract = {Gliomas are the most frequently occurring primary brain tumors. Determination of the IDH-mutation (Isocitrate De-Hydrogenase) in these tumors improves classification and predicts survival. Currently, the only way of determining the mutation status is through a brain biopsy, which is an invasive procedure. This paper concerns the classification of a brain tumor's mutation status through medical imaging. This study proposes a method based on shape description and machine learning. Magnetic resonance images of brain tumors were manually segmented through contour drawing, then analyzed through mathematical shape description. The extracted features were classified using multiple algorithms of which Random Undersampling Boosted Trees gave the highest accuracy. An accuracy of 86.4% was found using leave-one-out cross-validation on a data set of 13 IDH-positive and 9 IDH-wild-type gliomas. The results indicate the feasibility of the proposed approach, but further research on a larger data set is required.},\n  keywords = {biomedical MRI;brain;feature extraction;image classification;image segmentation;learning (artificial intelligence);medical image processing;tumours;gliomas;primary brain tumors;IDH-mutation;brain biopsy;brain tumor;medical imaging;machine learning;magnetic resonance images;mathematical shape description;shape-based glioma mutation prediction;random undersampling boosted trees;Isocitrate De-hydrogenase;IDH-positive gliomas;IDH-wild-type gliomas;Shape;Magnetic resonance imaging;Biopsy;Machine learning;Feature extraction;Classification algorithms;Tumors},\n  doi = {10.23919/Eusipco47968.2020.9287652},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001125.pdf},\n}\n\n
\n
\n\n\n
\n Gliomas are the most frequently occurring primary brain tumors. Determination of the IDH-mutation (Isocitrate De-Hydrogenase) in these tumors improves classification and predicts survival. Currently, the only way of determining the mutation status is through a brain biopsy, which is an invasive procedure. This paper concerns the classification of a brain tumor's mutation status through medical imaging. This study proposes a method based on shape description and machine learning. Magnetic resonance images of brain tumors were manually segmented through contour drawing, then analyzed through mathematical shape description. The extracted features were classified using multiple algorithms of which Random Undersampling Boosted Trees gave the highest accuracy. An accuracy of 86.4% was found using leave-one-out cross-validation on a data set of 13 IDH-positive and 9 IDH-wild-type gliomas. The results indicate the feasibility of the proposed approach, but further research on a larger data set is required.\n
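The evaluation protocol described above (Random Undersampling Boosted Trees with leave-one-out cross-validation) can be reproduced in a few lines, for example with scikit-learn and the imbalanced-learn package, assuming the shape descriptors have already been extracted; the helper name and default hyperparameters below are illustrative only.

```python
import numpy as np
from imblearn.ensemble import RUSBoostClassifier   # imbalanced-learn, assumed available
from sklearn.model_selection import LeaveOneOut

def loocv_accuracy(shape_features, labels, random_state=0):
    """Leave-one-out evaluation of a Random Undersampling Boosted Trees
    classifier on extracted shape descriptors.

    shape_features: (n_tumors, n_features) array, labels: (n_tumors,) array
    of IDH mutation status."""
    correct = 0
    for train_idx, test_idx in LeaveOneOut().split(shape_features):
        clf = RUSBoostClassifier(random_state=random_state)
        clf.fit(shape_features[train_idx], labels[train_idx])
        correct += int(clf.predict(shape_features[test_idx])[0] == labels[test_idx][0])
    return correct / len(labels)
```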
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Blind Traffic Classification in Wireless Networks.\n \n \n \n \n\n\n \n Testi, E.; Pucci, L.; Favarelli, E.; and Giorgetti, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1747-1751, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"BlindPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287654,\n  author = {E. Testi and L. Pucci and E. Favarelli and A. Giorgetti},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Blind Traffic Classification in Wireless Networks},\n  year = {2020},\n  pages = {1747-1751},\n  abstract = {In this paper, we propose a non-collaborative radiofrequency (RF) sensor network that, observing the radio spectrum generated by the users of a wireless network, can separate and classify their activities. Numerical results demonstrate that using blind source separation (BSS) and some well-known classifiers, over-the-air user traffic identification is possible. Moreover, we demonstrate that the performance of the proposed methodology is remarkably good in the presence of multiple classes and rather robust when channel impairments (i.e., presence of shadowing) degrade BSS. For example, we show that using a neural network (NN) outstanding classification performance can be achieved even using a relatively low number of RF sensors with very short observation windows (i.e., 30ms).},\n  keywords = {Radio frequency;Wireless networks;Signal processing algorithms;Artificial neural networks;Signal processing;Blind source separation;Shadow mapping},\n  doi = {10.23919/Eusipco47968.2020.9287654},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001747.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a non-collaborative radio-frequency (RF) sensor network that, by observing the radio spectrum generated by the users of a wireless network, can separate and classify their activities. Numerical results demonstrate that, using blind source separation (BSS) and some well-known classifiers, over-the-air user traffic identification is possible. Moreover, we demonstrate that the performance of the proposed methodology is remarkably good in the presence of multiple classes and rather robust when channel impairments (i.e., the presence of shadowing) degrade BSS. For example, we show that, using a neural network (NN), outstanding classification performance can be achieved even with a relatively small number of RF sensors and very short observation windows (i.e., 30 ms).\n
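A toy version of the pipeline, separation followed by supervised classification, can be put together from standard components, e.g. FastICA for the blind source separation stage and a small neural network classifier. The scikit-learn sketch below leaves out the RF front-end and the window-level feature extraction; all names, sizes and hyperparameters are placeholders rather than the paper's setup.

```python
from sklearn.decomposition import FastICA
from sklearn.neural_network import MLPClassifier

def separate_sources(mixtures, n_sources):
    """Blind source separation of the multi-sensor RF observations.
    mixtures: (n_samples, n_sensors); returns (n_samples, n_sources)."""
    return FastICA(n_components=n_sources, random_state=0).fit_transform(mixtures)

def train_traffic_classifier(window_features, window_labels):
    """Supervised classification of short observation windows of the separated
    activity signals; the feature extraction step itself is not shown."""
    clf = MLPClassifier(hidden_layer_sizes=(32,), max_iter=1000, random_state=0)
    return clf.fit(window_features, window_labels)
```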
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-subject Resting-State fMRI Data Analysis via Generalized Canonical Correlation Analysis.\n \n \n \n \n\n\n \n Karakasis, P. A.; Liavas, A. P.; Sidiropoulos, N. D.; Simos, P. G.; and Papadaki, E.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1040-1044, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-subjectPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287655,\n  author = {P. A. Karakasis and A. P. Liavas and N. D. Sidiropoulos and P. G. Simos and E. Papadaki},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-subject Resting-State fMRI Data Analysis via Generalized Canonical Correlation Analysis},\n  year = {2020},\n  pages = {1040-1044},\n  abstract = {Functional magnetic resonance imaging (fMRI) is one of the most widespread methods for studying the functionality of the brain. Even at rest, the Blood Oxygen Level Dependent (BOLD) signal reflects systematic fluctuations in the regional brain activity that are attributed to the existence of resting-state brain networks. In many studies, it is assumed that these networks have a common spatially non-overlapping manifestation across subjects, defining a common brain parcellation. In this work, we propose an fMRI data generating model that captures the existence of the common brain parcellation and present a procedure for its estimation. At first, we employ generalized Canonical Correlation Analysis (gCCA) - a well-known statistical method, which can be used for the estimation of a common linear subspace - and recover the subspace that is associated with the common brain parcellation. Then, we obtain an estimate of the common whole-brain parcellation map by solving a semi-orthogonal nonnegative matrix factorization (s-ONMF) problem. We test our theoretical results using both synthetic and real-world fMRI data. Our experimental findings corroborate our theoretical results, rendering our approach a very competitive candidate for multi-subject resting-state whole-brain parcellation.},\n  keywords = {biomedical MRI;blood;brain;data analysis;matrix decomposition;medical image processing;neurophysiology;statistical analysis;multisubject resting-state fMRI data Analysis;generalized canonical correlation analysis;functional magnetic resonance imaging;multisubject resting-state whole-brain parcellation;real-world fMRI data;common whole-brain parcellation map;common linear subspace;common brain parcellation;common spatially nonoverlapping manifestation;resting-state brain networks;regional brain activity;blood oxygen level dependent signal;Correlation;Systematics;Statistical analysis;Estimation;Functional magnetic resonance imaging;Signal processing;Rendering (computer graphics);fMRI;Resting-State;gCCA;MAX-VAR},\n  doi = {10.23919/Eusipco47968.2020.9287655},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001040.pdf},\n}\n\n
\n
\n\n\n
\n Functional magnetic resonance imaging (fMRI) is one of the most widespread methods for studying the functionality of the brain. Even at rest, the Blood Oxygen Level Dependent (BOLD) signal reflects systematic fluctuations in the regional brain activity that are attributed to the existence of resting-state brain networks. In many studies, it is assumed that these networks have a common spatially non-overlapping manifestation across subjects, defining a common brain parcellation. In this work, we propose an fMRI data generating model that captures the existence of the common brain parcellation and present a procedure for its estimation. At first, we employ generalized Canonical Correlation Analysis (gCCA) - a well-known statistical method, which can be used for the estimation of a common linear subspace - and recover the subspace that is associated with the common brain parcellation. Then, we obtain an estimate of the common whole-brain parcellation map by solving a semi-orthogonal nonnegative matrix factorization (s-ONMF) problem. We test our theoretical results using both synthetic and real-world fMRI data. Our experimental findings corroborate our theoretical results, rendering our approach a very competitive candidate for multi-subject resting-state whole-brain parcellation.\n
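A minimal sketch of the MAX-VAR flavour of gCCA is given below: each subject's data matrix is reduced to an orthonormal basis of its column space, the shared components are taken as the leading left singular vectors of the concatenated bases, and per-subject maps follow by least squares. This is a generic textbook construction under the assumption that each view is a (time, voxels) array; the paper's s-ONMF parcellation step is not included.

```python
import numpy as np

def maxvar_gcca(views, n_components):
    """Minimal MAX-VAR gCCA: find a common temporal subspace G shared by all
    subjects' fMRI data matrices (each view assumed to be (time, voxels))."""
    bases = []
    for X in views:
        U, _, _ = np.linalg.svd(X - X.mean(axis=0), full_matrices=False)
        bases.append(U)                                  # orthonormal basis per view
    G, _, _ = np.linalg.svd(np.hstack(bases), full_matrices=False)
    G = G[:, :n_components]                              # common temporal components
    maps = [np.linalg.lstsq(X - X.mean(axis=0), G, rcond=None)[0] for X in views]
    return G, maps
```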
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Semi-Private Image Classification Based on Information-Bottleneck Principle.\n \n \n \n \n\n\n \n Rezaeifar, S.; Diephuis, M.; Razeghi, B.; Ullmann, D.; Taran, O.; and Voloshynovskiy, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 755-759, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287656,\n  author = {S. Rezaeifar and M. Diephuis and B. Razeghi and D. Ullmann and O. Taran and S. Voloshynovskiy},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed Semi-Private Image Classification Based on Information-Bottleneck Principle},\n  year = {2020},\n  pages = {755-759},\n  abstract = {In this paper, we propose a framework for semi-privacy-preserving image classification. It allows each user to train a model on her/his own particular data class, after which the output features are shared centrally. The model parameters are never shared. Individual users each use an auto-encoder to empirically ascertain their private data distribution. The resulting features are sufficiently discriminative between the private datasets. A central server aggregates all labeled output features together with a subset of the private data into a final classifier over all classes from all users. The latter forms a trade-off between privacy and classification performance. We demonstrate the viability of this scheme empirically and showcase the privacy performance compromise.},\n  keywords = {Privacy;Aggregates;Europe;Signal processing;Data models;Servers;Image classification;privacy;information bottleneck;image classification;semi-private model},\n  doi = {10.23919/Eusipco47968.2020.9287656},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000755.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a framework for semi-privacy-preserving image classification. It allows each user to train a model on her/his own particular data class, after which the output features are shared centrally. The model parameters are never shared. Individual users each use an auto-encoder to empirically ascertain their private data distribution. The resulting features are sufficiently discriminative between the private datasets. A central server aggregates all labeled output features together with a subset of the private data into a final classifier over all classes from all users. The latter forms a trade-off between privacy and classification performance. We demonstrate the viability of this scheme empirically and showcase the privacy performance compromise.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Sampling Algorithm for Diffusion Networks.\n \n \n \n \n\n\n \n Tiglea, D. G.; Candido, R.; and Silva, M. T. M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2175-2179, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287657,\n  author = {D. G. Tiglea and R. Candido and M. T. M. Silva},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Sampling Algorithm for Diffusion Networks},\n  year = {2020},\n  pages = {2175-2179},\n  abstract = {In this paper, we propose a sampling mechanism for adaptive diffusion networks that adaptively changes the amount of sampled nodes based on mean-squared error in the neighborhood of each node. It presents fast convergence during transient and a significant reduction in the number of sampled nodes in steady state. Besides reducing the computational cost, the proposed mechanism can also be used as a censoring technique, thus saving energy by reducing the amount of communication between nodes. We also present a theoretical analysis to obtain lower and upper bounds for the number of network nodes sampled in steady state.},\n  keywords = {Energy consumption;Computational modeling;Signal processing algorithms;Steady-state;Computational efficiency;Transient analysis;Convergence;Diffusion strategies;energy efficiency;adaptive networks;distributed estimation;convex combination},\n  doi = {10.23919/Eusipco47968.2020.9287657},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002175.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a sampling mechanism for adaptive diffusion networks that adaptively changes the number of sampled nodes based on the mean-squared error in the neighborhood of each node. It exhibits fast convergence during the transient phase and a significant reduction in the number of sampled nodes in steady state. Besides reducing the computational cost, the proposed mechanism can also be used as a censoring technique, thus saving energy by reducing the amount of communication between nodes. We also present a theoretical analysis to obtain lower and upper bounds on the number of network nodes sampled in steady state.\n
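To make the sampling idea concrete, the sketch below runs an adapt-then-combine diffusion LMS in which a node adapts with its own data only when its squared error exceeds a multiple of the error averaged over its neighborhood, and otherwise only combines neighbors' estimates. This threshold rule is a simplified stand-in for the paper's adaptive convex-combination mechanism, and the combination-matrix convention, step size and threshold are assumptions.

```python
import numpy as np

def diffusion_lms_with_sampling(U, d, A, mu=0.05, beta=1.5):
    """Simplified ATC diffusion LMS where a node is "sampled" (adapts) only
    when its squared error exceeds `beta` times its neighborhood average.

    U: (T, N, M) regressors, d: (T, N) desired signals, A: (N, N) combination
    matrix with rows summing to one; A[k, l] is the weight node k gives to l.
    """
    T, N, M = U.shape
    W = np.zeros((N, M))
    for t in range(T):
        err2 = np.array([(d[t, k] - U[t, k] @ W[k]) ** 2 for k in range(N)])
        nbhd = A @ err2                              # neighborhood-averaged squared error
        psi = W.copy()
        for k in range(N):
            if err2[k] > beta * nbhd[k]:             # sample: adapt with local data
                e = d[t, k] - U[t, k] @ W[k]
                psi[k] = W[k] + mu * e * U[t, k]
        W = A @ psi                                  # combine step
    return W
```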
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Acoustic Object Canceller Using Blind Compensation for Sampling Frequency Mismatch.\n \n \n \n \n\n\n \n Kawamura, T.; Ono, N.; Scheibler, R.; Wakabayashi, Y.; and Miyazaki, R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 880-884, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AcousticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287658,\n  author = {T. Kawamura and N. Ono and R. Scheibler and Y. Wakabayashi and R. Miyazaki},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Acoustic Object Canceller Using Blind Compensation for Sampling Frequency Mismatch},\n  year = {2020},\n  pages = {880-884},\n  abstract = {In this paper, we propose a method of removing a known interference from a monaural recording. Generally, the elimination of a nonstationary interference from a monaural recording is difficult. However, if it is a known sound, such as the ringtone of a cell phone, radio and TV broadcasts, and commercially available music provided by a CD or streaming, their signals can be easily obtained. In our proposed method, we define such interference as an acoustic object. Although the sampling frequencies of the recording and the available acoustic object might be mismatched, we compensate the mismatch and remove the acoustic object from the recording by maximum likelihood estimation using the auxiliary function technique. We confirm the effectiveness of our method by experimental evaluations.},\n  keywords = {TV;Interference;Signal processing;Acoustics;Frequency estimation;Multiple signal classification;Signal to noise ratio;noise suppression;noise canceller;acoustic object;sampling frequency mismatch;auxiliary function},\n  doi = {10.23919/Eusipco47968.2020.9287658},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000880.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a method of removing a known interference from a monaural recording. Generally, the elimination of a nonstationary interference from a monaural recording is difficult. However, if the interference is a known sound, such as the ringtone of a cell phone, a radio or TV broadcast, or commercially available music provided on CD or via streaming, its signal can easily be obtained. In our proposed method, we define such an interference as an acoustic object. Although the sampling frequencies of the recording and the available acoustic object might be mismatched, we compensate for the mismatch and remove the acoustic object from the recording by maximum likelihood estimation using the auxiliary function technique. We confirm the effectiveness of our method by experimental evaluations.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Universal System for Cough Detection in Domestic Acoustic Environments.\n \n \n \n \n\n\n \n Simou, N.; Stefanakis, N.; and Zervas, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 111-115, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287659,\n  author = {N. Simou and N. Stefanakis and P. Zervas},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Universal System for Cough Detection in Domestic Acoustic Environments},\n  year = {2020},\n  pages = {111-115},\n  abstract = {Automated cough detection may provide valuable clinical information for monitoring a patient’s health condition. In this paper, we present a cough detection system that utilises an acoustic onset detector as a pre-processing step, aiming to detect impulsive patterns in the audio stream. In a subsequent step, discrimination of coughing events from other impulsive sounds is handled as a binary classification task. In contrast to existing works, the proposed cough discrimination models are trained and tested with heterogeneous data uploaded by different users to online audio repositories. In that way, our system achieves robust performance to a wide range of audio recording devices and to varying noise and/or reverberation conditions. Our evaluation results showed that a sensitivity in the order of 90% and a specificity in the order of 99% can be achieved in a domestic environment with the utilization of Long-Short-Term-Memory deep neural network architecture.},\n  keywords = {Performance evaluation;Sensitivity;Neural networks;Signal processing;Audio recording;Reverberation;Task analysis},\n  doi = {10.23919/Eusipco47968.2020.9287659},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000111.pdf},\n}\n\n
\n
\n\n\n
\n Automated cough detection may provide valuable clinical information for monitoring a patient's health condition. In this paper, we present a cough detection system that utilises an acoustic onset detector as a pre-processing step, aiming to detect impulsive patterns in the audio stream. In a subsequent step, discrimination of coughing events from other impulsive sounds is handled as a binary classification task. In contrast to existing works, the proposed cough discrimination models are trained and tested with heterogeneous data uploaded by different users to online audio repositories. In that way, our system achieves robust performance across a wide range of audio recording devices and under varying noise and/or reverberation conditions. Our evaluation results showed that a sensitivity in the order of 90% and a specificity in the order of 99% can be achieved in a domestic environment with a Long Short-Term Memory (LSTM) deep neural network architecture.\n
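The final classification stage can be as small as a single LSTM layer followed by a sigmoid unit, deciding cough versus non-cough for each detected onset. The Keras sketch below assumes the onset detector and the frame-level feature extraction (e.g. log-mel frames) run upstream; the layer sizes and input shape are placeholders rather than the authors' configuration.

```python
import tensorflow as tf

def build_cough_classifier(n_frames, n_features):
    """Minimal LSTM-based binary classifier for the cough / non-cough decision
    made on each detected impulsive onset."""
    model = tf.keras.Sequential([
        tf.keras.layers.LSTM(64, input_shape=(n_frames, n_features)),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    return model
```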
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Almost-Zero Duality Gaps in Model-Free Resource Allocation for Wireless Systems.\n \n \n \n \n\n\n \n Kalogerias, D. S.; Eisen, M.; Pappas, G. J.; and Ribeiro, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1727-1731, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Almost-ZeroPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287660,\n  author = {D. S. Kalogerias and M. Eisen and G. J. Pappas and A. Ribeiro},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Almost-Zero Duality Gaps in Model-Free Resource Allocation for Wireless Systems},\n  year = {2020},\n  pages = {1727-1731},\n  abstract = {We investigate optimal resource management in wireless systems, directly in the model-free setting. Starting with a generic resource allocation task formulated as a variational program with nonconvex stochastic constraints, we leverage classical results on Gaussian smoothing to formulate a finite dimensional, smoothed problem surrogate, effectively solvable in a model-free fashion, without the need of a baseline system model. Further assuming a near-universal policy parameterization, we present explicit upper and lower bounds on the gap between the optimal value of the original variational problem, and the dual optimal value of the smoothed surrogate. In fact, we show that this duality gap depends linearly on smoothing and near-universality parameters, and therefore, it can be made arbitrarily small at will. Our results effectively quantify the effects of both policy parameterization and smoothing on approximating both the value and optimal solution of the original variational program via surrogate dualization, and provide explicit near-optimality guarantees in the model-free regime. We also provide empirical illustration via indicative numerical simulations.},\n  keywords = {Wireless communication;Training;Smoothing methods;Stochastic processes;Numerical models;Resource management;Task analysis;Wireless Systems;Resource Allocation;Zeroth-order Learning;Reinforcement Learning;Lagrangian Duality},\n  doi = {10.23919/Eusipco47968.2020.9287660},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001727.pdf},\n}\n\n
\n
\n\n\n
\n We investigate optimal resource management in wireless systems, directly in the model-free setting. Starting with a generic resource allocation task formulated as a variational program with nonconvex stochastic constraints, we leverage classical results on Gaussian smoothing to formulate a finite dimensional, smoothed problem surrogate, effectively solvable in a model-free fashion, without the need of a baseline system model. Further assuming a near-universal policy parameterization, we present explicit upper and lower bounds on the gap between the optimal value of the original variational problem, and the dual optimal value of the smoothed surrogate. In fact, we show that this duality gap depends linearly on smoothing and near-universality parameters, and therefore, it can be made arbitrarily small at will. Our results effectively quantify the effects of both policy parameterization and smoothing on approximating both the value and optimal solution of the original variational program via surrogate dualization, and provide explicit near-optimality guarantees in the model-free regime. We also provide empirical illustration via indicative numerical simulations.\n
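The model-free ingredient behind the smoothed surrogate is the standard Gaussian-smoothing (zeroth-order) gradient estimator, which needs only function evaluations of the system's performance. A minimal numpy sketch is given below; the smoothing parameter and sample count are illustrative, and `f` stands for any black-box utility (e.g. the measured network reward of a policy with parameters `theta`).

```python
import numpy as np

def smoothed_gradient_estimate(f, theta, mu=0.1, n_samples=32, rng=None):
    """Two-point zeroth-order estimate of the gradient of the Gaussian-smoothed
    surrogate f_mu(theta) = E_u[f(theta + mu * u)], with u ~ N(0, I).

    Only evaluations of f are required, so no system model is needed.
    """
    rng = np.random.default_rng() if rng is None else rng
    grad = np.zeros_like(theta)
    for _ in range(n_samples):
        u = rng.standard_normal(theta.shape)
        grad += (f(theta + mu * u) - f(theta)) / mu * u
    return grad / n_samples
```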
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dynamic K-Graphs: an Algorithm for Dynamic Graph Learning and Temporal Graph Signal Clustering.\n \n \n \n \n\n\n \n Araghi, H.; Babaie-Zadeh, M.; and Achard, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2195-2199, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DynamicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287661,\n  author = {H. Araghi and M. Babaie-Zadeh and S. Achard},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Dynamic K-Graphs: an Algorithm for Dynamic Graph Learning and Temporal Graph Signal Clustering},\n  year = {2020},\n  pages = {2195-2199},\n  abstract = {Graph signal processing (GSP) have found many applications in different domains. The underlying graph may not be available in all applications, and it should be learned from the data. There exist complicated data, where the graph changes over time. Hence, it is necessary to estimate the dynamic graph. In this paper, a new dynamic graph learning algorithm, called dynamic K -graphs, is proposed. This algorithm is capable of both estimating the time-varying graph and clustering the temporal graph signals. Numerical experiments demonstrate the high performance of this algorithm compared with other algorithms.},\n  keywords = {Laplace equations;Heuristic algorithms;Signal processing algorithms;Clustering algorithms;Signal processing;Programming;Dynamic programming;Dynamic K-graphs;dynamic graph learning;graph Laplacian matrix;temporal graph signal clustering;dynamic programming},\n  doi = {10.23919/Eusipco47968.2020.9287661},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002195.pdf},\n}\n\n
\n
\n\n\n
\n Graph signal processing (GSP) has found many applications in different domains. The underlying graph may not be available in all applications, and it must then be learned from the data. For some complex data, the graph also changes over time, so a dynamic graph has to be estimated. In this paper, a new dynamic graph learning algorithm, called dynamic K-graphs, is proposed. This algorithm is capable of both estimating the time-varying graph and clustering the temporal graph signals. Numerical experiments demonstrate the high performance of this algorithm compared with other algorithms.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n IDrISS: Intrusion Detection for IT Systems Security : Toward a semantic modelling of side-channel signals.\n \n \n \n\n\n \n Mboula, N.; and Nogues, E.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 735-739, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287662,\n  author = {N. Mboula and E. Nogues},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {IDrISS: Intrusion Detection for IT Systems Security : Toward a semantic modelling of side-channel signals},\n  year = {2020},\n  pages = {735-739},\n  abstract = {This paper proposes a novel approach called IDrISS that exploits electromagnetic (EM) side-channel signals to design non-protocol based Intrusion Detection System (IDS). EM emanations side-channels are captured on power lines of an infrastructure. They are used to identify the presence of any type of electronic devices onto a physical network. IDrISS can learn the structure of the EM unintentional emanations of the legit devices composing the infrastructure as a reference profile. In a second step, it records and analyses the current emanations to compare and detect any kind of unwanted emanations. IDrISS is used as a Intrusion Detection System (IDS) that can trig an alarm as soon as a intrusion is detected. The results show that intrusion can be detected in various scenarios whatever the activity of the legit computers of the network. Furthermore, the capture device used is based on inexpensive off-the-shelf components that makes the deployment onto real network easy.},\n  keywords = {Vocabulary;Recurrent neural networks;Intrusion detection;Syntactics;Security;Object recognition;Signal analysis;Side-channel signals;intrusion detection;dictionary learning;sparsity;recurrent neural networks},\n  doi = {10.23919/Eusipco47968.2020.9287662},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n This paper proposes a novel approach, called IDrISS, that exploits electromagnetic (EM) side-channel signals to design a non-protocol-based Intrusion Detection System (IDS). The EM side-channel emanations are captured on the power lines of an infrastructure and are used to identify the presence of any type of electronic device on a physical network. IDrISS learns the structure of the unintentional EM emanations of the legitimate devices composing the infrastructure as a reference profile. In a second step, it records and analyses the current emanations in order to detect any kind of unwanted emanations. IDrISS is thus used as an IDS that can trigger an alarm as soon as an intrusion is detected. The results show that intrusions can be detected in various scenarios, whatever the activity of the legitimate computers on the network. Furthermore, the capture device is based on inexpensive off-the-shelf components, which makes deployment on a real network easy.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generalized ANOVA Test for GNSS Spoofing Detection with a Dual-Polarized Antenna.\n \n \n \n \n\n\n \n Egea-Roca, D.; López-Salcedo, J. A.; Seco-Granados, G.; and de Wilde , W.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2428-2432, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287663,\n  author = {D. Egea-Roca and J. A. López-Salcedo and G. Seco-Granados and W. {de Wilde}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Generalized ANOVA Test for GNSS Spoofing Detection with a Dual-Polarized Antenna},\n  year = {2020},\n  pages = {2428-2432},\n  abstract = {The analysis of variance (ANOVA) is used in many applications to decide whether the outcomes of different experiments are the same or not. Nevertheless, one of the most restrictive assumptions made by ANOVA is to suppose that all the experiments have the same variance. This may not be common in many applications as it is the case of spoofing detection in global navigation satellite systems (GNSS). Moreover, there may be cases in which the experimental data is complex with different variances for the real and imaginary components. For these reasons, in this paper, we provide a general formulation of ANOVA and we show its application to spoofing detection in GNSS. Numerical results will show the superiority of the proposed generalized ANOVA solution with respect to traditional ANOVA and state-of-the-art techniques for spoofing detection in GNSS.},\n  keywords = {Global navigation satellite system;Europe;Signal processing;Robustness;Complexity theory;Numerical models;Analysis of variance;ANOVA;spoofing;GNSS;polarization;complex data},\n  doi = {10.23919/Eusipco47968.2020.9287663},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002428.pdf},\n}\n\n
\n
\n\n\n
\n The analysis of variance (ANOVA) is used in many applications to decide whether the outcomes of different experiments are the same or not. Nevertheless, one of the most restrictive assumptions made by ANOVA is that all the experiments have the same variance. This assumption may not hold in many applications, as is the case for spoofing detection in global navigation satellite systems (GNSS). Moreover, there may be cases in which the experimental data are complex-valued, with different variances for the real and imaginary components. For these reasons, in this paper we provide a general formulation of ANOVA and show its application to spoofing detection in GNSS. Numerical results show the superiority of the proposed generalized ANOVA solution with respect to traditional ANOVA and state-of-the-art techniques for spoofing detection in GNSS.\n
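For readers unfamiliar with the baseline that the paper generalizes, a minimal one-way ANOVA sketch is given below; the synthetic groups and the common-variance assumption are exactly what the generalized test relaxes, and nothing here reproduces the paper's GNSS-specific statistic.

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
# Three "experiments"; classical ANOVA assumes they share a common variance,
# which is the restrictive assumption the paper removes.
groups = [rng.normal(loc=m, scale=1.0, size=50) for m in (0.0, 0.0, 0.4)]

F, p = stats.f_oneway(*groups)          # classical one-way ANOVA
print(f"F = {F:.2f}, p-value = {p:.3f}")

# Equivalent manual computation of the F statistic
k = len(groups)
n = sum(len(g) for g in groups)
grand = np.mean(np.concatenate(groups))
ss_between = sum(len(g) * (np.mean(g) - grand) ** 2 for g in groups)
ss_within = sum(np.sum((g - np.mean(g)) ** 2) for g in groups)
print("manual F =", round((ss_between / (k - 1)) / (ss_within / (n - k)), 2))
```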
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Phase-coherent multichannel SDR - Sparse array beamforming.\n \n \n \n \n\n\n \n Laakso, M.; Rajamäki, R.; Wichman, R.; and Koivunen, V.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1856-1860, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287664,\n  author = {M. Laakso and R. Rajamäki and R. Wichman and V. Koivunen},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Phase-coherent multichannel SDR - Sparse array beamforming},\n  year = {2020},\n  pages = {1856-1860},\n  abstract = {We introduce a modular and affordable coherent multichannel software-defined radio (SDR) receiver and demonstrate its performance by direction-of-arrival (DOA) estimation on signals collected from a 7 X 3 element uniform rectangular array antenna, comparing the results between the full and sparse arrays. Sparse sensor arrays can reach the resolution of a fully populated array with reduced number of elements, which relaxes the required structural complexity of e.g. antenna arrays. Moreover, sparse arrays facilitate significant cost reduction since fewer expensive RF-IF front ends are needed. Results from the collected data set are analyzed with Multiple Signal Classification (MUSIC) DOA estimator. Generally, the sparse array estimates agree with the full array.},\n  keywords = {Phased arrays;Direction-of-arrival estimation;Array signal processing;Estimation;Receiving antennas;Directive antennas;Sensor arrays;sparse array;coherent receiver;DOA estimation},\n  doi = {10.23919/Eusipco47968.2020.9287664},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001856.pdf},\n}\n\n
\n
\n\n\n
\n We introduce a modular and affordable coherent multichannel software-defined radio (SDR) receiver and demonstrate its performance by direction-of-arrival (DOA) estimation on signals collected from a 7 × 3 element uniform rectangular array antenna, comparing the results between the full and sparse arrays. Sparse sensor arrays can reach the resolution of a fully populated array with a reduced number of elements, which relaxes the required structural complexity of e.g. antenna arrays. Moreover, sparse arrays facilitate significant cost reduction since fewer expensive RF-IF front ends are needed. Results from the collected data set are analyzed with the Multiple Signal Classification (MUSIC) DOA estimator. Generally, the sparse array estimates agree with the full array.\n
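A minimal MUSIC sketch on a simulated linear array may help illustrate the full-versus-sparse comparison made in the abstract; the element positions, SNR and source angles below are invented for illustration and do not correspond to the 7 × 3 array or the data set used in the paper.

```python
import numpy as np

def music_spectrum(X, n_sources, positions, angles_deg):
    """MUSIC pseudo-spectrum for a linear array whose element positions are
    given in half-wavelength units; X is (n_elements, n_snapshots)."""
    R = X @ X.conj().T / X.shape[1]                       # sample covariance
    _, eigvec = np.linalg.eigh(R)                         # ascending eigenvalues
    En = eigvec[:, : X.shape[0] - n_sources]              # noise subspace
    A = np.exp(1j * np.pi * np.outer(positions, np.sin(np.deg2rad(angles_deg))))
    return 1.0 / np.linalg.norm(En.conj().T @ A, axis=0) ** 2

rng = np.random.default_rng(1)
doas, snapshots = np.array([-20.0, 25.0]), 200
grid = np.arange(-90.0, 90.0, 0.5)
S = rng.standard_normal((2, snapshots)) + 1j * rng.standard_normal((2, snapshots))

for pos in (np.arange(7), np.array([0, 1, 4, 6])):        # full ULA vs. a sparse subset
    A = np.exp(1j * np.pi * np.outer(pos, np.sin(np.deg2rad(doas))))
    noise = rng.standard_normal((len(pos), snapshots)) + 1j * rng.standard_normal((len(pos), snapshots))
    X = A @ S + 0.1 * noise
    P = music_spectrum(X, 2, pos, grid)
    is_peak = (P[1:-1] > P[:-2]) & (P[1:-1] > P[2:])      # crude local-maxima picking
    peaks, vals = grid[1:-1][is_peak], P[1:-1][is_peak]
    print(f"{len(pos)} elements -> estimated DOAs:", np.sort(peaks[np.argsort(vals)[-2:]]))
```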
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Step Size Determination for Finding Low-Rank Solutions Via Non-Convex Bi-Factored Matrix Factorization.\n \n \n \n \n\n\n \n Panhuber, R.; and Prünte, L.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2100-2104, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287665,\n  author = {R. Panhuber and L. Prünte},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Step Size Determination for Finding Low-Rank Solutions Via Non-Convex Bi-Factored Matrix Factorization},\n  year = {2020},\n  pages = {2100-2104},\n  abstract = {In this paper we present an exact line search approach in order to find a suitable step size for the problem of recovering a low-rank matrix from linear measurements via non-convex bi-factored matrix factorization approaches as used in the bi-factored gradient descent (BFGD) algorithm. For the specific case of using the squared Frobenius norm as convex regularizer we prove that unique solutions for the step sizes exist. The computational complexity of the proposed method has the same order of magnitude than common inexact line search approaches, however, it needs only one execution of the sensing operator whereas inexact line search methods need at least two. As such our method requires less memory space and CPU time. We illustrate the functionality of the proposed method by use of simulations.},\n  keywords = {Memory management;Signal processing algorithms;Signal processing;Size measurement;Search problems;Sensors;Computational complexity;Low-rank matrix recovery;signal processing;BFGD algorithm},\n  doi = {10.23919/Eusipco47968.2020.9287665},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002100.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we present an exact line search approach for finding a suitable step size when recovering a low-rank matrix from linear measurements via non-convex bi-factored matrix factorization, as used in the bi-factored gradient descent (BFGD) algorithm. For the specific case of using the squared Frobenius norm as convex regularizer, we prove that unique solutions for the step sizes exist. The computational complexity of the proposed method is of the same order of magnitude as that of common inexact line search approaches; however, it needs only one execution of the sensing operator, whereas inexact line search methods need at least two. As such, our method requires less memory space and CPU time. We illustrate the functionality of the proposed method by means of simulations.\n
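The flavor of the underlying bi-factored recovery problem can be sketched in a few lines; the snippet below uses matrix completion as a stand-in sensing operator and a fixed step size, whereas the paper's contribution is precisely an exact line-search rule for that step, so all constants here are illustrative only.

```python
import numpy as np

rng = np.random.default_rng(0)
n, m, r = 20, 15, 2
M = rng.standard_normal((n, r)) @ rng.standard_normal((r, m))   # ground-truth low-rank matrix
mask = rng.random((n, m)) < 0.5                                  # observed entries ("sensing operator")
Y = mask * M

# Spectral initialization of the two factors U (n x r) and V (m x r)
Uf, s, Vt = np.linalg.svd(Y / mask.mean(), full_matrices=False)
U = Uf[:, :r] * np.sqrt(s[:r])
V = Vt[:r, :].T * np.sqrt(s[:r])

lam, step = 1e-3, 0.02          # squared-Frobenius regularizer weight and a fixed step size;
for _ in range(1000):           # the paper instead derives an exact line-search step.
    R = mask * (U @ V.T - Y)    # residual on the observed entries
    gU = R @ V + lam * U        # gradients of the bi-factored objective
    gV = R.T @ U + lam * V
    U -= step * gU
    V -= step * gV

print("relative recovery error:",
      round(np.linalg.norm(U @ V.T - M) / np.linalg.norm(M), 4))
```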
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Persymmetric Detection of Subspace Signals Embedded in Subspace Interference and Gaussian Noise.\n \n \n \n \n\n\n \n Liu, J.; Li, J.; and Liu, W.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1926-1930, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287666,\n  author = {J. Liu and J. Li and W. Liu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Persymmetric Detection of Subspace Signals Embedded in Subspace Interference and Gaussian Noise},\n  year = {2020},\n  pages = {1926-1930},\n  abstract = {We consider the problem of detecting subspace signals embedded in subspace interference and Gaussian noise with unknown covariance matrix. According to the criterion of generalized likelihood ratio test, we exploit persymmetry to propose an adaptive detector. Moreover, the statistical characterization of the proposed detector in the absence of target signals is obtained, which exhibits a constant false alarm rate property against the noise covariance matrix. Numerical examples illustrate that the proposed detector outperforms its counterparts, especially when the number of training data is small.},\n  keywords = {Gaussian noise;Training data;Detectors;Interference;Signal processing;Covariance matrices;Signal to noise ratio;Adaptive detection;persymmetry;generalized likelihood ratio test;subspace signal;interference;constant false alarm rate},\n  doi = {10.23919/Eusipco47968.2020.9287666},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001926.pdf},\n}\n\n
\n
\n\n\n
\n We consider the problem of detecting subspace signals embedded in subspace interference and Gaussian noise with unknown covariance matrix. Based on the generalized likelihood ratio test criterion, we exploit persymmetry to propose an adaptive detector. Moreover, the statistical characterization of the proposed detector in the absence of target signals is obtained, which shows that it exhibits a constant false alarm rate property with respect to the noise covariance matrix. Numerical examples illustrate that the proposed detector outperforms its counterparts, especially when the number of training data is small.\n
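The structural idea being exploited, persymmetry of the covariance matrix, can be illustrated independently of the GLRT derivation: averaging the sample covariance with its exchange-conjugated version enforces persymmetric structure and typically improves estimation when training snapshots are scarce. The Toeplitz covariance model and dimensions below are assumptions for illustration and do not reproduce the paper's detector.

```python
import numpy as np

def persymmetric_scm(X):
    """Persymmetric sample covariance: average the SCM with its
    exchange-conjugated version J R* J, where J is the exchange matrix."""
    N = X.shape[0]
    R = X @ X.conj().T / X.shape[1]          # ordinary sample covariance (N x N)
    J = np.eye(N)[::-1]                      # exchange (anti-identity) matrix
    return 0.5 * (R + J @ R.conj() @ J)

rng = np.random.default_rng(0)
N, K = 8, 12                                 # array size and (small) number of training snapshots
rho = 0.9 * np.exp(1j * 0.5)                 # Hermitian Toeplitz (hence persymmetric) AR(1) model
R_true = np.array([[rho ** (i - j) if i >= j else np.conj(rho ** (j - i))
                    for j in range(N)] for i in range(N)])
L = np.linalg.cholesky(R_true)
X = L @ (rng.standard_normal((N, K)) + 1j * rng.standard_normal((N, K))) / np.sqrt(2)

for name, R in (("SCM", X @ X.conj().T / K), ("persymmetric SCM", persymmetric_scm(X))):
    err = np.linalg.norm(R - R_true) / np.linalg.norm(R_true)
    print(f"{name}: relative estimation error = {err:.3f}")
```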
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adieu recurrence? End-to-end speech emotion recognition using a context stacking dilated convolutional network.\n \n \n \n \n\n\n \n Tang, D.; Kuppens, P.; Geurts, L.; and van Waterschoot , T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1-5, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287667,\n  author = {D. Tang and P. Kuppens and L. Geurts and T. {van Waterschoot}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Adieu recurrence? End-to-end speech emotion recognition using a context stacking dilated convolutional network},\n  year = {2020},\n  pages = {1-5},\n  abstract = {In state-of-the-art end-to-end Speech Emotion Recognition (SER) systems, Convolutional Neural Network (CNN) layers are typically used to extract affective features while Long Short-Term Memory (LSTM) layers model long-term temporal dependencies. However, these systems suffer from several problems: 1) the model largely ignores temporal structure in speech due to the limited receptive field of the CNN layers, 2) the model inherits the drawbacks of Recurrent Neural Network (RNN)s, e.g. the gradient exploding/vanishing problem, the polynomial growth of computation time with the input sequence length and the lack of parallelizability. In this work, we propose a novel end-to-end SER structure that does not contain any recurrent or fully connected layers. By levering the power of the dilated causal convolution, the receptive field of the proposed model largely increases with reasonably low computational cost. By also using context stacking, the proposed model is capable of exploiting long-term temporal dependencies and can be an alternative to RNN. Experiments on the RECOLA database publicly available partition show improved results compare to a state-of-the-art system. We also verify that both the proposed model and the state-of-the-art model learned from short sequences (i.e.20s) can make accurate predictions for very long sequences (e.g. ≥ 75s).},\n  keywords = {Emotion recognition;Convolution;Biological system modeling;Computational modeling;Stacking;Speech recognition;Predictive models;End-to-end learning;Speech Emotion Recognition;Dilated Causal Convolution;Context Stacking},\n  doi = {10.23919/Eusipco47968.2020.9287667},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000356.pdf},\n}\n\n
\n
\n\n\n
\n In state-of-the-art end-to-end Speech Emotion Recognition (SER) systems, Convolutional Neural Network (CNN) layers are typically used to extract affective features while Long Short-Term Memory (LSTM) layers model long-term temporal dependencies. However, these systems suffer from several problems: 1) the model largely ignores temporal structure in speech due to the limited receptive field of the CNN layers, 2) the model inherits the drawbacks of Recurrent Neural Networks (RNNs), e.g. the gradient exploding/vanishing problem, the polynomial growth of computation time with the input sequence length, and the lack of parallelizability. In this work, we propose a novel end-to-end SER structure that does not contain any recurrent or fully connected layers. By leveraging the power of dilated causal convolution, the receptive field of the proposed model increases greatly at reasonably low computational cost. By also using context stacking, the proposed model is capable of exploiting long-term temporal dependencies and can be an alternative to RNNs. Experiments on the publicly available partition of the RECOLA database show improved results compared to a state-of-the-art system. We also verify that both the proposed model and the state-of-the-art model, learned from short sequences (i.e. 20 s), can make accurate predictions for very long sequences (e.g. ≥ 75 s).\n
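A minimal PyTorch sketch of a dilated causal convolution stack is given below to make the "large receptive field without recurrence" argument concrete; the layer count, channel widths and two-output head are illustrative guesses, not the paper's architecture or its context-stacking details.

```python
import torch
import torch.nn as nn

class CausalConv1d(nn.Module):
    """1-D convolution made causal by left-padding, so the output at time t
    never depends on inputs later than t."""
    def __init__(self, in_ch, out_ch, kernel_size, dilation):
        super().__init__()
        self.pad = (kernel_size - 1) * dilation
        self.conv = nn.Conv1d(in_ch, out_ch, kernel_size, dilation=dilation)

    def forward(self, x):                        # x: (batch, channels, time)
        return self.conv(nn.functional.pad(x, (self.pad, 0)))

class DilatedEmotionRegressor(nn.Module):
    """Stack of dilated causal convolutions (receptive field grows
    exponentially with depth) followed by a 1x1 output head."""
    def __init__(self, in_ch=1, hidden=32, n_layers=6):
        super().__init__()
        layers = []
        for i in range(n_layers):
            layers += [CausalConv1d(in_ch if i == 0 else hidden,
                                    hidden, kernel_size=2, dilation=2 ** i),
                       nn.ReLU()]
        self.backbone = nn.Sequential(*layers)
        self.head = nn.Conv1d(hidden, 2, kernel_size=1)   # e.g. arousal/valence per frame

    def forward(self, x):
        return self.head(self.backbone(x))

model = DilatedEmotionRegressor()
wave = torch.randn(4, 1, 16000)                 # 4 one-second raw-audio excerpts at 16 kHz
print(model(wave).shape)                        # -> torch.Size([4, 2, 16000])
```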
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Design of a Non-negative Neural Network to Improve on NMF.\n \n \n \n \n\n\n \n Wen-Fwu Tsai, F.; Javid, A. M.; and Chatterjee, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 461-465, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287668,\n  author = {F. {Wen-Fwu Tsai} and A. M. Javid and S. Chatterjee},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Design of a Non-negative Neural Network to Improve on NMF},\n  year = {2020},\n  pages = {461-465},\n  abstract = {For prediction of a non-negative target signal using a non-negative input, we design a feed-forward neural network to achieve a better performance than a non-negative matrix factorization (NMF) algorithm. We provide a mathematical relation between the neural network and NMF. The architecture of the neural network is built on a property of rectified-linear-unit (ReLU) activation function and a convex optimization layer-wise training approach. For an illustrative example, we choose a speech enhancement application where a clean speech spectrum is estimated from a noisy spectrum.},\n  keywords = {Training;Neural networks;Training data;Signal processing algorithms;Speech enhancement;Signal processing;Testing;Neural networks;non-negative matrix factorization;speech enhancement},\n  doi = {10.23919/Eusipco47968.2020.9287668},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000461.pdf},\n}\n\n
\n
\n\n\n
\n For prediction of a non-negative target signal using a non-negative input, we design a feed-forward neural network to achieve a better performance than a non-negative matrix factorization (NMF) algorithm. We provide a mathematical relation between the neural network and NMF. The architecture of the neural network is built on a property of the rectified-linear-unit (ReLU) activation function and a layer-wise training approach based on convex optimization. As an illustrative example, we choose a speech enhancement application where a clean speech spectrum is estimated from a noisy spectrum.\n
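As background for the comparison, a minimal sketch of classical NMF with multiplicative updates is shown below, together with the simple observation that links non-negativity to ReLU; the random "spectrogram" is a stand-in, and the snippet does not implement the paper's layer-wise trained network.

```python
import numpy as np

def nmf(V, r, n_iter=200, eps=1e-9, seed=0):
    """Classical NMF with multiplicative updates minimizing ||V - W H||_F^2.
    V (non-negative, e.g. a magnitude spectrogram) ~= W (F x r) @ H (r x T)."""
    rng = np.random.default_rng(seed)
    F, T = V.shape
    W = rng.random((F, r)) + eps
    H = rng.random((r, T)) + eps
    for _ in range(n_iter):
        H *= (W.T @ V) / (W.T @ W @ H + eps)
        W *= (V @ H.T) / (W @ H @ H.T + eps)
    return W, H

rng = np.random.default_rng(1)
V = rng.random((64, 100))                 # stand-in for a noisy magnitude spectrogram
W, H = nmf(V, r=8)
print("reconstruction error:", round(np.linalg.norm(V - W @ H) / np.linalg.norm(V), 3))

# With non-negative inputs and non-negative weights, a ReLU layer max(0, W x)
# reduces to the linear map W x -- the structural link between the two models.
x = H[:, 0]
print(np.allclose(np.maximum(W @ x, 0), W @ x))   # True
```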
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimized 3D Scene Rendering on Projection-Based 3D Displays.\n \n \n \n \n\n\n \n Doronin, O.; Bregovic, R.; and Gotchev, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 580-584, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287669,\n  author = {O. Doronin and R. Bregovic and A. Gotchev},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Optimized 3D Scene Rendering on Projection-Based 3D Displays},\n  year = {2020},\n  pages = {580-584},\n  abstract = {We address the problem of 3D scene rendering on projection-based light field displays and optimizing the input display images to obtain the best possible visual output. We discuss a display model comprising a set of projectors, an anisotropic diffuser and a viewing manifold. Based on this model, we render an initial set of projector images to be further optimized for the best perception at a specified set of viewing positions. We propose a least squares method, which minimizes the channel-wise color difference between the generated images for different viewer positions, and their ground-true counterparts. We formulate a constrained optimization problem and solve it iteratively by the descent method.},\n  keywords = {Visualization;Three-dimensional displays;Signal processing;Rendering (computer graphics);Probability distribution;Optimization;Software development management;light field;optimization;3d display},\n  doi = {10.23919/Eusipco47968.2020.9287669},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000580.pdf},\n}\n\n
\n
\n\n\n
\n We address the problem of 3D scene rendering on projection-based light field displays and of optimizing the input display images to obtain the best possible visual output. We discuss a display model comprising a set of projectors, an anisotropic diffuser and a viewing manifold. Based on this model, we render an initial set of projector images to be further optimized for the best perception at a specified set of viewing positions. We propose a least squares method which minimizes the channel-wise color difference between the generated images for different viewer positions and their ground-truth counterparts. We formulate a constrained optimization problem and solve it iteratively by the descent method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Omnipolar EGM Voltage Mapping for Atrial Fibrosis Identification Evaluated with an Electrophysiological Model.\n \n \n \n \n\n\n \n Riccio, J.; Alcaine, A.; Rocher, S.; Laguna, P.; Saiz, J.; and Martínez, J. P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 920-924, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287670,\n  author = {J. Riccio and A. Alcaine and S. Rocher and P. Laguna and J. Saiz and J. P. Martínez},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Omnipolar EGM Voltage Mapping for Atrial Fibrosis Identification Evaluated with an Electrophysiological Model},\n  year = {2020},\n  pages = {920-924},\n  abstract = {Atrial fibrillation (AF) is the most spread heart arrhythmia, whose mechanisms are not completely clear yet. Catheter ablation is a standard treatment, which isolates the area involved in the arrhythmia. Intracardiac electrograms (EGMs) are used to better understand the AF mechanisms and to find appropriate ablation sites. Bipolar EGMs (b-EGMs) are often employed, but their amplitude and shape depend on catheter orientation, lim-iting reliability. To avoid this uncertainty, an approach insensitive to catheter orientation, referred as Omnipolar EGM (OP-EGM) method, has been introduced, which uses an estimation of the electric field within a group of electrodes, referred as clique. In this work, we compare different mapping approaches based on b-EGMs and OP-EGM signals in simulation including fibrosis, so to evaluate their ability to detect fibrosis and reproduce the spatial distribution of the voltage. Maps have been computed using two clique configurations (square and triangular), introducing or not a previous time alignment of the b-EGMs. OP-EGM signals have been obtained by projecting the electric field along directions of its maximal excursion and its principal components. Results show that the proposed alignment of b-EGMs improves maps based on OP-EGM signals. Both cliques configurations present good performance, in terms of fibrosis detection and correlation with the reference voltage maps.},\n  keywords = {catheters;electrocardiography;medical disorders;medical signal processing;catheter ablation;AF mechanisms;appropriate ablation sites;bipolar EGMs;b-EGMs;catheter orientation;electric field;mapping approaches;OP-EGM signals;fibrosis;introducing;reference voltage maps;Omnipolar EGM voltage mapping;atrial fibrosis identification;atrial fibrillation;spread heart arrhythmia;Heart;Uncertainty;Shape;Reliability;Electric fields;Catheters;Standards;atrial fibrillation;atrial fibrosis;electro-grams;electroanatomical mapping;sinus rhythm;multi-electrode array;omnipolar;clique},\n  doi = {10.23919/Eusipco47968.2020.9287670},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000920.pdf},\n}\n\n
\n
\n\n\n
\n Atrial fibrillation (AF) is the most widespread heart arrhythmia, whose mechanisms are not yet completely clear. Catheter ablation is a standard treatment, which isolates the area involved in the arrhythmia. Intracardiac electrograms (EGMs) are used to better understand the AF mechanisms and to find appropriate ablation sites. Bipolar EGMs (b-EGMs) are often employed, but their amplitude and shape depend on catheter orientation, limiting reliability. To avoid this uncertainty, an approach insensitive to catheter orientation, referred to as the Omnipolar EGM (OP-EGM) method, has been introduced; it uses an estimate of the electric field within a group of electrodes, referred to as a clique. In this work, we compare different mapping approaches based on b-EGMs and OP-EGM signals in simulations including fibrosis, so as to evaluate their ability to detect fibrosis and reproduce the spatial distribution of the voltage. Maps have been computed using two clique configurations (square and triangular), with and without a prior time alignment of the b-EGMs. OP-EGM signals have been obtained by projecting the electric field along the directions of its maximal excursion and its principal components. Results show that the proposed alignment of b-EGMs improves maps based on OP-EGM signals. Both clique configurations present good performance in terms of fibrosis detection and correlation with the reference voltage maps.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Content-based Image Retrieval Scheme Using Compressible Encrypted Images.\n \n \n \n \n\n\n \n Iida, K.; and Kiya, H.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 730-734, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287671,\n  author = {K. Iida and H. Kiya},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Content-based Image Retrieval Scheme Using Compressible Encrypted Images},\n  year = {2020},\n  pages = {730-734},\n  abstract = {In this paper, we propose a novel content based-image retrieval scheme using compressible encrypted images, called encryption-then-compression (EtC) images. The proposed scheme allows us not only to directly retrieve images from visually protected images, but also to make the sensitive management of secret keys unnecessary. In addition, encrypted images can be compressed by using JPEG compression. Weighted SIMPLE image descriptors, which are generated from global descriptors of localized regions, are extended, and then the extended descriptors are applied to EtC images. In an experiment, the proposed scheme is demonstrated to have almost the same accuracy as conventional retrieval methods with plain images.},\n  keywords = {Image coding;Image retrieval;Transform coding;Europe;Signal processing;Encryption;Content-based image retrieval;encryption-then-compression system;SIMPLE image descriptors},\n  doi = {10.23919/Eusipco47968.2020.9287671},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000730.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a novel content-based image retrieval scheme using compressible encrypted images, called encryption-then-compression (EtC) images. The proposed scheme allows us not only to retrieve images directly from visually protected images, but also to make the sensitive management of secret keys unnecessary. In addition, the encrypted images can be compressed using JPEG compression. Weighted SIMPLE image descriptors, which are generated from global descriptors of localized regions, are extended, and the extended descriptors are then applied to EtC images. In an experiment, the proposed scheme is demonstrated to have almost the same accuracy as conventional retrieval methods with plain images.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Weighted Generalization of Dark Channel Prior with Adaptive Color Correction for Defogging.\n \n \n \n \n\n\n \n Ueki, Y.; and Ikehara, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 685-689, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287672,\n  author = {Y. Ueki and M. Ikehara},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Weighted Generalization of Dark Channel Prior with Adaptive Color Correction for Defogging},\n  year = {2020},\n  pages = {685-689},\n  abstract = {Images and video captured in water or fog suffer from low contrast and color distortion due to light scattering and absorption. An image formation model for hazy images is commonly used to restore both underwater images and hazy images because of the similarity between the two types of images. However, red light is attenuated faster than blue and green light in underwater, and underwater images are distorted by changes of color tone. Therefore, most current methods are specialized for either hazy images or underwater images. In this paper, we propose a novel defogging method which is efficient for both hazy images and underwater images. Our method is composed of adaptive color correction and weighted generalization of dark channel prior (WGDCP). Experimental results show that our algorithm can recover both underwater images and hazy images.},\n  keywords = {Image color analysis;Signal processing algorithms;Light scattering;Europe;Estimation;Distortion;Image restoration;Image processing;image enhancement;image restoration;underwater image;dehazing;defogging},\n  doi = {10.23919/Eusipco47968.2020.9287672},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000685.pdf},\n}\n\n
\n
\n\n\n
\n Images and video captured in water or fog suffer from low contrast and color distortion due to light scattering and absorption. An image formation model for hazy images is commonly used to restore both underwater images and hazy images because of the similarity between the two types of images. However, red light is attenuated faster than blue and green light underwater, and underwater images are distorted by changes in color tone. Therefore, most current methods are specialized for either hazy images or underwater images. In this paper, we propose a novel defogging method which is effective for both hazy images and underwater images. Our method is composed of adaptive color correction and weighted generalization of dark channel prior (WGDCP). Experimental results show that our algorithm can recover both underwater images and hazy images.\n
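The baseline that the paper generalizes, the classical (unweighted) dark channel prior, is easy to sketch; the patch size, omega and transmission floor below are the usual illustrative defaults, and the snippet deliberately omits the weighting and adaptive color correction that constitute the paper's contribution.

```python
import numpy as np
from scipy.ndimage import minimum_filter

def dark_channel(img, patch=15):
    """Per-pixel minimum over color channels, then a local minimum filter."""
    return minimum_filter(img.min(axis=2), size=patch)

def dehaze(img, patch=15, omega=0.95, t0=0.1):
    """Classical dark-channel-prior defogging, without the weighting or
    adaptive color correction proposed in the paper."""
    dark = dark_channel(img, patch)
    # Atmospheric light: mean color of the brightest 0.1% dark-channel pixels
    idx = np.argsort(dark.ravel())[-max(1, dark.size // 1000):]
    A = img.reshape(-1, 3)[idx].mean(axis=0)
    # Transmission estimate and scene radiance recovery
    t = 1.0 - omega * dark_channel(img / A, patch)
    t = np.clip(t, t0, 1.0)[..., None]
    return np.clip((img - A) / t + A, 0.0, 1.0)

# Synthetic low-contrast input; feed a real hazy RGB image (values in [0, 1]) in practice.
foggy = np.clip(np.random.default_rng(0).random((120, 160, 3)) * 0.3 + 0.6, 0, 1)
print(dehaze(foggy).shape)       # (120, 160, 3)
```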
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Super-Resolution Time-of-Arrival Estimation using Neural Networks.\n \n \n \n \n\n\n \n Hsiao, Y. -.; Yang, M.; and Kim, H. -.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1692-1696, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287673,\n  author = {Y. -S. Hsiao and M. Yang and H. -S. Kim},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Super-Resolution Time-of-Arrival Estimation using Neural Networks},\n  year = {2020},\n  pages = {1692-1696},\n  abstract = {This paper presents a learning-based algorithm that estimates the time of arrival (ToA) of radio frequency (RF) signals from channel frequency response (CFR) measurements for wireless localization applications. A generator neural network is proposed to enhance the effective bandwidth of the narrowband CFR measurement and to produce a high-resolution estimation of channel impulse response (CIR). In addition, two regressor neural networks are introduced to perform a two-step coarsefine ToA estimation based on the enhanced CIR. For simulated channels, the proposed method achieves 9% – 58% improved root mean squared error (RMSE) for distance ranging and up to 22% improved false detection rate compared with conventional super-resolution algorithms. For real-world measured channels, the proposed method exhibits an improvement of 1.3m in distance error at 90 percentile.},\n  keywords = {Wireless communication;Time of arrival estimation;Signal processing algorithms;Channel estimation;Estimation;Training data;Generators;time-of-arrival (ToA) estimation;superresolution;neural networks (NN);deep learning},\n  doi = {10.23919/Eusipco47968.2020.9287673},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001692.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a learning-based algorithm that estimates the time of arrival (ToA) of radio frequency (RF) signals from channel frequency response (CFR) measurements for wireless localization applications. A generator neural network is proposed to enhance the effective bandwidth of the narrowband CFR measurement and to produce a high-resolution estimate of the channel impulse response (CIR). In addition, two regressor neural networks are introduced to perform a two-step coarse-fine ToA estimation based on the enhanced CIR. For simulated channels, the proposed method achieves 9%–58% improved root mean squared error (RMSE) for distance ranging and up to 22% improved false detection rate compared with conventional super-resolution algorithms. For real-world measured channels, the proposed method exhibits an improvement of 1.3 m in distance error at the 90th percentile.\n
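Setting the learning components aside, the conventional front end implied by the abstract, recovering a coarse CIR from the CFR by an inverse FFT and reading off the dominant early tap, can be sketched as follows; the bandwidth, subcarrier count and two-path channel are made-up numbers for illustration, and the paper's networks refine exactly this kind of coarse estimate.

```python
import numpy as np

def toa_from_cfr(cfr, bandwidth_hz, upsample=16):
    """Coarse ToA estimate: a zero-padded IFFT of the CFR yields an upsampled
    channel impulse response (CIR); the strongest tap in the causal half is
    taken as the ToA."""
    n = cfr.size
    cir = np.fft.ifft(cfr, n * upsample)           # zero padding -> finer time grid
    mag = np.abs(cir[: n * upsample // 2])         # keep the causal half
    dt = 1.0 / (bandwidth_hz * upsample)           # time resolution of the upsampled CIR
    return np.argmax(mag) * dt

# Two-path channel measured over 64 subcarriers spanning 20 MHz (illustrative numbers)
bw, n_sc = 20e6, 64
freqs = np.arange(n_sc) * bw / n_sc
true_toa, echo_toa = 120e-9, 300e-9
cfr = np.exp(-2j * np.pi * freqs * true_toa) + 0.5 * np.exp(-2j * np.pi * freqs * echo_toa)
est = toa_from_cfr(cfr, bw)
print(f"true ToA = {true_toa * 1e9:.0f} ns, estimated = {est * 1e9:.1f} ns")
```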
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic Differentiating Wave Digital Filters with Multiple Nonlinearities.\n \n \n \n \n\n\n \n Kolonko, L.; Velten, J.; and Kummert, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 146-150, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287674,\n  author = {L. Kolonko and J. Velten and A. Kummert},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Automatic Differentiating Wave Digital Filters with Multiple Nonlinearities},\n  year = {2020},\n  pages = {146-150},\n  abstract = {In recent years, there has been an increasing interest in real-time Virtual Analog modeling algorithms for musical audio signal processing as Wave Digital (WD) realizations, where nonlinear circuit components are identified as the source of desirable sonic characteristics. The implementation of nonlinear elements in Wave Digital Filters (WDFs) is usually restricted to just one nonlinear one-port per structure. However, recent advances led to a strictly modular realization of multiple nonlinearities by identification of contractivity properties of WDFs, allowing an assured converging iterative procedure, which depends on the presence of artificial port resistances. In this paper, an enhanced semi-modular approach based on Multi-dimensional WDFs (MDWDFs) is presented for the realization of multiple nonlinearities. Therefore, the concept of Automatic Differentiating WDFs (ADWDFs) is extended for nonlinearities as a novel approach to overcome convergence speed limitations induced by said port resistances, making the proposed method virtually independent of the same.},\n  keywords = {Signal processing algorithms;Music;Signal processing;Real-time systems;Nonlinear circuits;Digital filters;Convergence;Wave Digital Filter;Automatic Differentiation;Multi-Dimensional;Multiple Nonlinearities;Diode Clipper},\n  doi = {10.23919/Eusipco47968.2020.9287674},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000146.pdf},\n}\n\n
\n
\n\n\n
\n In recent years, there has been an increasing interest in real-time Virtual Analog modeling algorithms for musical audio signal processing as Wave Digital (WD) realizations, where nonlinear circuit components are identified as the source of desirable sonic characteristics. The implementation of nonlinear elements in Wave Digital Filters (WDFs) is usually restricted to just one nonlinear one-port per structure. However, recent advances led to a strictly modular realization of multiple nonlinearities by identification of contractivity properties of WDFs, allowing an assuredly converging iterative procedure, which depends on the presence of artificial port resistances. In this paper, an enhanced semi-modular approach based on Multi-dimensional WDFs (MDWDFs) is presented for the realization of multiple nonlinearities. To this end, the concept of Automatic Differentiating WDFs (ADWDFs) is extended to nonlinearities as a novel approach to overcome the convergence speed limitations induced by these port resistances, making the proposed method virtually independent of them.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n GAN-based Hyperspectral Anomaly Detection.\n \n \n \n \n\n\n \n Arisoy, S.; Nasrabadi, N. M.; and Kayabol, K.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1891-1895, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287675,\n  author = {S. Arisoy and N. M. Nasrabadi and K. Kayabol},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {GAN-based Hyperspectral Anomaly Detection},\n  year = {2020},\n  pages = {1891-1895},\n  abstract = {In this paper, we propose a generative adversarial network (GAN)-based hyperspectral anomaly detection algorithm. In the proposed algorithm, we train a GAN model to generate a synthetic background image which is close to the original background image as much as possible. By subtracting the synthetic image from the original one, we are able to remove the background from the hyperspectral image. Anomaly detection is performed by applying Reed-Xiaoli (RX) anomaly detector (AD) on the spectral difference image. In the experimental part, we compare our proposed method with the classical RX, Weighted-RX (WRX) and support vector data description (SVDD)-based anomaly detectors and deep autoencoder anomaly detection (DAEAD) method on synthetic and real hyperspectral images. The detection results show that our proposed algorithm outperforms the other methods in the benchmark.},\n  keywords = {Support vector machines;Signal processing algorithms;Detectors;Generative adversarial networks;Gallium nitride;Anomaly detection;Hyperspectral imaging;anomaly detection;hyperspectral imagery (HSI);generative adversarial networks (GANs);Reed-Xiaoli (RX)},\n  doi = {10.23919/Eusipco47968.2020.9287675},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001891.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a generative adversarial network (GAN)-based hyperspectral anomaly detection algorithm. In the proposed algorithm, we train a GAN model to generate a synthetic background image that is as close as possible to the original background image. By subtracting the synthetic image from the original one, we are able to remove the background from the hyperspectral image. Anomaly detection is then performed by applying the Reed-Xiaoli (RX) anomaly detector (AD) on the spectral difference image. In the experimental part, we compare our proposed method with the classical RX, Weighted-RX (WRX) and support vector data description (SVDD)-based anomaly detectors and the deep autoencoder anomaly detection (DAEAD) method on synthetic and real hyperspectral images. The detection results show that our proposed algorithm outperforms the other methods in the benchmark.\n
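The detection stage mentioned in the abstract, the Reed-Xiaoli detector, reduces to a per-pixel Mahalanobis distance; a minimal global-RX sketch on a synthetic cube is shown below (in the paper it would be applied to the GAN-residual image rather than to raw data, and the cube dimensions here are invented).

```python
import numpy as np

def rx_detector(cube):
    """Global Reed-Xiaoli detector: Mahalanobis distance of each pixel's
    spectrum from the scene mean; cube shape = (rows, cols, bands)."""
    H, W, B = cube.shape
    X = cube.reshape(-1, B)
    mu = X.mean(axis=0)
    Cinv = np.linalg.inv(np.cov(X, rowvar=False) + 1e-6 * np.eye(B))
    d = X - mu
    scores = np.einsum('ij,jk,ik->i', d, Cinv, d)          # per-pixel Mahalanobis distance
    return scores.reshape(H, W)

rng = np.random.default_rng(0)
H, W, B = 64, 64, 20
cube = rng.normal(0.0, 1.0, (H, W, B))                     # synthetic background
cube[30:33, 40:43] += rng.normal(3.0, 0.5, B)              # small anomalous patch
scores = rx_detector(cube)
print("anomaly location (row, col):", np.unravel_index(scores.argmax(), scores.shape))
```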
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Diagnosis of attention deficit and hyperactivity disorder (ADHD) using Hidden Markov Models.\n \n \n \n \n\n\n \n Maya-Piedrahita, M. C.; Cárdenas-Peña, D.; and Orozco-Gutierrez, A. A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1205-1209, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287676,\n  author = {M. C. Maya-Piedrahita and D. Cárdenas-Peña and A. A. Orozco-Gutierrez},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Diagnosis of attention deficit and hyperactivity disorder (ADHD) using Hidden Markov Models},\n  year = {2020},\n  pages = {1205-1209},\n  abstract = {Attention deficit hyperactivity disorder (ADHD), most often present in childhood, may persist in adult life, hampering personal development. However, ADHD diagnosis is a real challenge since it highly depends on the clinical observation of the patient, the parental and scholar information, and the specialist expertise. Despite demanded objective diagnosis aids from biosignals, the physiological biomarkers lack robustness and significance under the non-stationary and non-linear electroencephalographic dynamics. Therefore, this work presents a supported diagnosis methodology for ADHD from the dynamic characterization of EEG based on hidden Markov models (HMM) and probability product kernels (PPK). Relying on the impulsivity symptom, the proposed approach trains an HMM for each subject from EEG signals at failing rewarded inhibition tasks. Then, PPK measures the similarity between subjects through the inner product between their trained HMMs. Therefore, a support vector machine supports ADHD diagnosis as a classification task using PPK as the inner product operator. Results in a real EEG dataset evidence that the proposed approach achieves an 90.0% accuracy rate, outperforming log-likelihood features as baseline HMM-based features. Besides, achieving such an accuracy at the highest reward level supports that ADHD patients seem to be particularly sensitive to the reward presence when they execute specific tasks.},\n  keywords = {electroencephalography;feature extraction;hidden Markov models;learning (artificial intelligence);medical disorders;medical signal processing;neurophysiology;probability;signal classification;support vector machines;hidden Markov models;hyperactivity disorder;adult life;ADHD diagnosis;clinical observation;nonlinear electroencephalographic dynamics;diagnosis methodology;dynamic characterization;probability product kernels;PPK;support vector machine;inner product operator;baseline HMM-based features;ADHD patients;EEG dataset;Support vector machines;Hidden Markov models;Signal processing;Electroencephalography;Robustness;Physiology;Task analysis;Attention deficit and hyperactiviy disorder;Hidden Markov models;Electroencephalography;Time-Series similarity},\n  doi = {10.23919/Eusipco47968.2020.9287676},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001205.pdf},\n}\n\n
\n
\n\n\n
\n Attention deficit hyperactivity disorder (ADHD), most often present in childhood, may persist in adult life, hampering personal development. However, ADHD diagnosis is a real challenge since it highly depends on clinical observation of the patient, information from parents and school, and the specialist's expertise. Although objective diagnostic aids based on biosignals are in demand, physiological biomarkers lack robustness and significance under the non-stationary and non-linear electroencephalographic dynamics. Therefore, this work presents a supported diagnosis methodology for ADHD based on the dynamic characterization of EEG using hidden Markov models (HMMs) and probability product kernels (PPKs). Relying on the impulsivity symptom, the proposed approach trains an HMM for each subject from EEG signals recorded during failed rewarded-inhibition tasks. PPK then measures the similarity between subjects through the inner product between their trained HMMs. A support vector machine thus supports ADHD diagnosis as a classification task, using PPK as the inner product operator. Results on a real EEG dataset show that the proposed approach achieves a 90.0% accuracy rate, outperforming log-likelihood features as baseline HMM-based features. Moreover, achieving such an accuracy at the highest reward level supports that ADHD patients seem to be particularly sensitive to the presence of a reward when they execute specific tasks.\n
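A rough sketch of the per-subject HMM modelling step is given below using hmmlearn; note that the similarity computed here is a simple symmetrized cross log-likelihood, used only to illustrate the "similarity between trained models" idea, whereas the paper uses a probability product kernel, and the toy sequences merely stand in for real EEG features.

```python
import numpy as np
from hmmlearn import hmm

rng = np.random.default_rng(0)

def simulate_subject(shift, n=400):
    """Toy stand-in for a 1-D EEG feature sequence with two alternating regimes."""
    states = (np.arange(n) // 50) % 2
    return rng.standard_normal((n, 1)) + shift * states[:, None]

subjects = [simulate_subject(s) for s in (0.5, 0.6, 3.0, 3.2)]   # two loose "classes"

# One HMM per subject, as in the paper
models = [hmm.GaussianHMM(n_components=2, covariance_type="diag",
                          n_iter=50, random_state=0).fit(X) for X in subjects]

# Pairwise similarity between subjects via symmetrized cross log-likelihood
# (a cheap surrogate for the probability product kernel used in the paper).
num = len(subjects)
K = np.zeros((num, num))
for i in range(num):
    for j in range(num):
        K[i, j] = 0.5 * (models[i].score(subjects[j]) + models[j].score(subjects[i]))
print(np.round(K, 1))     # this matrix would feed an SVM in the paper's pipeline
```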
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 3D Audiovisual Speaker Tracking with Distributed Sensors Configuration.\n \n \n \n \n\n\n \n Sanabria-Macias, F.; Marron-Romera, M.; and Macias-Guarasa, J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 256-260, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287677,\n  author = {F. Sanabria-Macias and M. Marron-Romera and J. Macias-Guarasa},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {3D Audiovisual Speaker Tracking with Distributed Sensors Configuration},\n  year = {2020},\n  pages = {256-260},\n  abstract = {Smart spaces are environments equipped with a set of sensors with the main objective of understanding humans’ behavior within them, their interactions and to improve human-machine interfaces. Audiovisual tracking is used to know people’s position in the environment and if they are talking, through the use of cameras and microphones. In this work we present an audiovisual tracking solution with a single camera and microphone array in a distributed configuration. Our idea is to exploit the estimation of azimuth and elevation from audio information to be fused with the position estimation obtained from a Viola and Jones based observation model. The fact that the microphone array is not co-located with the camera will allow to reduce the distance estimation uncertainty from the video model and improve tracking accuracy. The system was evaluated on the AV16.3 database on single speaker sequences, outperforming results of state-of-the-art, under these conditions.},\n  keywords = {Uncertainty;Three-dimensional displays;Estimation;Cameras;Microphone arrays;Intelligent sensors;Smart spaces;Smart Space;Audiovisual Tracking;Particle Filter},\n  doi = {10.23919/Eusipco47968.2020.9287677},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000256.pdf},\n}\n\n
\n
\n\n\n
\n Smart spaces are environments equipped with a set of sensors whose main objective is to understand human behavior and interactions within them, and to improve human-machine interfaces. Audiovisual tracking is used to determine people's position in the environment and whether they are talking, through the use of cameras and microphones. In this work we present an audiovisual tracking solution with a single camera and a microphone array in a distributed configuration. Our idea is to estimate azimuth and elevation from the audio information and fuse them with the position estimate obtained from a Viola-Jones-based observation model. The fact that the microphone array is not co-located with the camera allows reducing the distance estimation uncertainty of the video model and improving tracking accuracy. The system was evaluated on the AV16.3 database on single-speaker sequences, outperforming state-of-the-art results under these conditions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Studying the Gaze Patterns of Expert Radiologists in Screening Mammography: A Case Study with Breast Test Wales.\n \n \n \n \n\n\n \n Lévêque, L.; Young, P.; and Liu, H.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1249-1253, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287678,\n  author = {L. Lévêque and P. Young and H. Liu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Studying the Gaze Patterns of Expert Radiologists in Screening Mammography: A Case Study with Breast Test Wales},\n  year = {2020},\n  pages = {1249-1253},\n  abstract = {Eye-tracking technology has become a widely used means to understand how radiologists perceive and interpret medical images, providing useful information that can help improve diagnostic accuracy. However, existing eye-tracking studies in medical imaging remain limited due to the small number of stimuli and/or of subjects involved, and the lack of quantitative metrics to fully reveal readers' gaze behaviour. In this paper, we present the conduct of a larger scale eye-tracking study, where seven expert radiologists were asked to read 196 mammogram images. Furthermore, we carry out an analyse various gaze metrics including fixation duration, saccade amplitude, as well as gaze deployment, which quantify radiologists' gaze behaviour.},\n  keywords = {biological organs;gaze tracking;mammography;medical image processing;medical imaging;quantitative metrics;mammogram images;gaze patterns;screening mammography;breast test wales;eye-tracking technology;scale eye-tracking;saccade amplitude;Measurement;Statistical analysis;Signal processing algorithms;Breast;Signal processing;Mammography;Medical diagnostic imaging;medical imaging;screening mammography;visual attention;eye-tracking;saliency},\n  doi = {10.23919/Eusipco47968.2020.9287678},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001249.pdf},\n}\n\n
\n
\n\n\n
\n Eye-tracking technology has become a widely used means to understand how radiologists perceive and interpret medical images, providing useful information that can help improve diagnostic accuracy. However, existing eye-tracking studies in medical imaging remain limited due to the small number of stimuli and/or of subjects involved, and the lack of quantitative metrics to fully reveal readers' gaze behaviour. In this paper, we present a larger-scale eye-tracking study in which seven expert radiologists were asked to read 196 mammogram images. Furthermore, we analyse various gaze metrics, including fixation duration, saccade amplitude and gaze deployment, which quantify radiologists' gaze behaviour.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive Measurement Matrix Design in Compressed Sensing Based Direction of Arrival Estimation.\n \n \n \n \n\n\n \n Kılıç, B.; Güngör, A.; Kalfa, M.; and Arıkan, O.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1881-1885, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287679,\n  author = {B. Kılıç and A. Güngör and M. Kalfa and O. Arıkan},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive Measurement Matrix Design in Compressed Sensing Based Direction of Arrival Estimation},\n  year = {2020},\n  pages = {1881-1885},\n  abstract = {Design of measurement matrices is an important aspect of compressed sensing (CS) based direction of arrival (DoA) applications that enables reduction in the analog channels to be processed in sparse target environments. Here, a novel measurement matrix design methodology for CS based DoA estimation is proposed and its superior performance over alternative measurement matrix design methodologies is demonstrated. The proposed method uses prior probability distribution of the targets to improve performance. Compared to the state-of-the-art techniques, it is quantitatively demonstrated that the proposed measurement matrix design approach enables significant reduction in the number of analog channels to be processed and adapts to a priori information on the target scene.},\n  keywords = {Direction-of-arrival estimation;Target tracking;Design methodology;Estimation;Directive antennas;Hardware;Sparse matrices;Direction of arrival estimation;compressed sensing;measurement matrix design},\n  doi = {10.23919/Eusipco47968.2020.9287679},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001881.pdf},\n}\n\n
\n
\n\n\n
\n Design of measurement matrices is an important aspect of compressed sensing (CS) based direction of arrival (DoA) applications that enables reduction in the analog channels to be processed in sparse target environments. Here, a novel measurement matrix design methodology for CS based DoA estimation is proposed and its superior performance over alternative measurement matrix design methodologies is demonstrated. The proposed method uses prior probability distribution of the targets to improve performance. Compared to the state-of-the-art techniques, it is quantitatively demonstrated that the proposed measurement matrix design approach enables significant reduction in the number of analog channels to be processed and adapts to a priori information on the target scene.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Low-Complexity Gridless 2D Harmonic Retrieval via Decoupled-ANM Covariance Reconstruction.\n \n \n \n \n\n\n \n Zhang, Y.; Wang, Y.; Tian, Z.; Leus, G.; and Zhang, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1876-1880, Aug 2020. \n \n\n\n\n
\n
@InProceedings{9287680,\n  author = {Y. Zhang and Y. Wang and Z. Tian and G. Leus and G. Zhang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Low-Complexity Gridless 2D Harmonic Retrieval via Decoupled-ANM Covariance Reconstruction},\n  year = {2020},\n  pages = {1876-1880},\n  abstract = {This paper aims at developing low-complexity solutions for super-resolution two-dimensional (2D) harmonic retrieval via covariance reconstruction. Given the collected sample covariance, a novel gridless compressed sensing approach is designed based on the atomic norm minimization (ANM) technique. The key is to perform a redundancy reduction (RR) transformation that effectively reduces the large problem size at hand, without loss of useful frequency information. For uncorrelated sources, the transformed 2D covariance matrices in the RR domain retain a salient structure, which permits a sparse representation over a matrix-form atom set with decoupled 1D frequency components. Accordingly, the decoupled ANM (DANM) framework can be applied for super-resolution 2D frequency estimation, at low computational complexity on the same order of the 1D case. An analysis of the complexity reduction of the proposed RR-D-ANM compared with benchmark methods is provided as well, which is verified by our simulation results.},\n  keywords = {Simulation;Two dimensional displays;Harmonic analysis;Sparse matrices;Covariance matrices;Computational complexity;Compressed sensing;Low complexity;2D harmonic retrieval;covariance reconstruction;D-ANM;RR transformation},\n  doi = {10.23919/Eusipco47968.2020.9287680},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001876.pdf},\n}\n\n
@InProceedings{9287681,\n  author = {A. Ghrissi and F. Squara and J. Montagnat and V. Zarzoso},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Identification of Spatiotemporal Dispersion Electrograms in Persistent Atrial Fibrillation Ablation Using Maximal Voltage Absolute Values},\n  year = {2020},\n  pages = {1239-1243},\n  abstract = {Atrial fibrillation (AF) is a sustained arrhythmia whose mechanisms are still largely unknown. A recent patient-tailored AF ablation therapy is based on the use of a multipolar mapping catheter called PentaRay. This new protocol targets areas of spatiotemporal dispersion (STD) in the atria as potential AF drivers. However, interventional cardiologists localize STD sites visually through the observation of intracardiac electrograms (EGMs). The present work aims to automatically characterize ablation sites in STD-based ablation. Recent research suggests that the distribution of the time series of maximal voltage absolute values at any of the PentaRay bipoles (VAVp) is affected by the STD pattern. Motivated by this finding, we consider VAVp as a key feature for STD identification. To our knowledge, this work applies for the first time statistical analysis and machine learning (ML) tools to automatically identify STD areas based on VAVp time series. Experiments are first conducted on synthetic data to quantify the effect of STD pattern characteristics (number of delayed leads, fractionation degree and number of fractionated leads) on engineered features of the VAVp time series like kurtosis, showing promising results. Then these features are tested on a real dataset of 23082 multichannel EGM signals from 16 different persistent AF patients. Statistical features like kurtosis and distribution (histogram) of VAVp values are extracted and fed to supervised ML classifiers, but no significant dissimilarity is obtained between the two categories. The classification of raw VAVp time series is finally conducted using ML tools like a shallow convolutional neural network combined with cross validation and data augmentation, reaching AUC values of 96%.},\n  keywords = {blood vessels;catheters;convolutional neural nets;electrocardiography;learning (artificial intelligence);medical disorders;medical signal processing;spatiotemporal phenomena;statistical analysis;time series;spatiotemporal dispersion electrograms;persistent atrial fibrillation ablation;sustained arrhythmia;multipolar mapping catheter;potential AF drivers;interventional cardiologists;STD sites;intracardiac electrograms;ablation sites;STD-based ablation;PentaRay bipoles;STD identification;time statistical analysis;machine learning;STD pattern characteristics;fractionated leads;engineered features;persistent AF patients;statistical features;VAVp values;raw VAVp time series;multichannel EGM signals;patient-tailored AF;shallow convolutional neural network;Statistical analysis;Time series analysis;Atrial fibrillation;Tools;Feature extraction;Spatiotemporal phenomena;Dispersion;persistent atrial fibrillation;spatiotemporal dispersion;ablation;PentaRay multipolar catheter;maximal voltage absolute values;machine learning;classification;clustering},\n  doi = {10.23919/Eusipco47968.2020.9287681},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001239.pdf},\n}\n\n
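As a side note on the feature-extraction step described in the abstract above (kurtosis and histograms of the VAVp time series), a minimal NumPy/SciPy sketch follows; the channel count, record length and histogram settings are illustrative placeholders rather than the paper's protocol, and synthetic data stands in for real PentaRay recordings.

    # Minimal sketch: per-channel summary features (kurtosis, histogram) of a
    # voltage-magnitude time series; synthetic data replaces real EGM recordings.
    import numpy as np
    from scipy.stats import kurtosis

    rng = np.random.default_rng(0)
    vavp = np.abs(rng.standard_normal((10, 2000)))    # 10 hypothetical channels x 2000 samples

    feat_kurt = kurtosis(vavp, axis=1)                # Fisher kurtosis of each channel
    feat_hist = np.stack([np.histogram(ch, bins=16, range=(0.0, 4.0), density=True)[0]
                          for ch in vavp])            # normalized 16-bin histogram per channel

    features = np.hstack([feat_kurt[:, None], feat_hist])   # (10, 17) feature matrix for a classifier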
@InProceedings{9287682,\n  author = {K. Zhang and S. Cong and Y. Tang and N. M. Freris},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {An Efficient Online Estimation Algorithm for Evolving Quantum States},\n  year = {2020},\n  pages = {2249-2253},\n  abstract = {In this paper, we propose an online optimization algorithm for estimating state density in free evolution quantum systems from noisy continuous weak measurements. The problem is formulated via sparsity-promoting semidefinite programming, and an online quantum state estimation algorithm is developed based on online proximal gradient and alternating direction multiplier method. The proposed algorithm is computationally efficient and further features high robustness to measurement noise. The merits of the approach are illustrated by numerical experiments in 1-, 2-, 3-, and 4-qubit systems.},\n  keywords = {estimation theory;gradient methods;quantum computing;quantum entanglement;state estimation;online estimation algorithm;evolving quantum states;online optimization algorithm;state density;free evolution quantum systems;noisy continuous weak measurements;sparsity-promoting semidefinite programming;online quantum state estimation algorithm;online proximal gradient;alternating direction multiplier method;4-qubit systems;3-qubit systems;2-qubit systems;1-qubit systems;measurement noise;Signal processing algorithms;Signal processing;Robustness;Real-time systems;Noise measurement;State estimation;Optimization},\n  doi = {10.23919/Eusipco47968.2020.9287682},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002249.pdf},\n}\n\n
@InProceedings{9287683,\n  author = {N. Debs and T. Jourdan and A. Moukadem and A. Boutet and C. Frindel},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Motion sensor data anonymization by time-frequency filtering},\n  year = {2020},\n  pages = {1707-1711},\n  abstract = {Recent advances in wireless actimetry sensors allow recognizing human real-time activities with mobile devices. Although the analysis of data generated by these devices can have many benefits for healthcare, these data also contain private information about users without their awareness and may even cause their re-identification. In this paper, we propose a privacy-preserving framework for activity recognition. The method consists of a two-step process. First, acceleration signals are encoded in the time-frequency domain by three different linear transforms. Second, we propose a method to anonymize the acceleration signals by filtering in the time-frequency domain. Finally, we evaluate our approach for the three different linear transforms with a neural network classifier by comparing the performances for activity versus identity recognition. We extensively study the validity of our framework with a reference dataset: results show an accurate activity recognition (85%) while limiting the re-identification rate (32%). This represents a large utility improvement (19%) against a slight privacy decrease (10%) compared to the state-of-the-art baseline.},\n  keywords = {Time-frequency analysis;Wireless sensor networks;Filtering;Neural networks;Activity recognition;Motion detection;Acceleration;Activity Recognition;Privacy;Time-Frequency;Classification;Convolutional Neural Networks},\n  doi = {10.23919/Eusipco47968.2020.9287683},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001707.pdf},\n}\n\n
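To make the two-step idea above concrete, here is a rough SciPy sketch of filtering an acceleration signal in the time-frequency domain (a plain STFT with a hard frequency cutoff; the paper's three linear transforms and its anonymization filter design are not reproduced, and the sampling rate and cutoff are assumptions).

    # Sketch: STFT an accelerometer channel, zero the content above a chosen
    # cutoff frequency, and resynthesize. Purely illustrative filtering.
    import numpy as np
    from scipy.signal import stft, istft

    fs = 50.0                                     # assumed sampling rate in Hz
    t = np.arange(0, 10, 1 / fs)
    acc = np.sin(2 * np.pi * 1.5 * t) + 0.3 * np.random.randn(t.size)   # toy acceleration trace

    f, frames, Z = stft(acc, fs=fs, nperseg=64)
    Z[f > 5.0, :] = 0.0                           # suppress components above an assumed 5 Hz cutoff
    _, acc_filtered = istft(Z, fs=fs, nperseg=64)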
@InProceedings{9287684,\n  author = {J. Silva and M. Oliveira and A. Ferreira},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Flexible parametric implantation of voicing in whispered speech under scarce training data},\n  year = {2020},\n  pages = {416-420},\n  abstract = {Whispered-voice to normal-voice conversion is typically achieved using codec-based analysis and re-synthesis, using statistical conversion of important spectral and prosodic features, or using data-driven end-to-end signal conversion. These approaches are however highly constrained by the architecture of the codec, the statistical projection, or the size and quality of the training data. In this paper, we presume direct implantation of voiced phonemes in whispered speech and we focus on fully flexible parametric models that i) can be independently controlled, ii) synthesize natural and linguistically correct voiced phonemes, iii) preserve idiosyncratic characteristics of a given speaker, and iv) are amenable to co-articulation effects through simple model interpolation. We use natural spoken and sung vowels to illustrate these capabilities in a signal modeling and re-synthesis process where spectral magnitude, phase structure, F0 contour and sound morphing can be independently controlled in arbitrary ways.},\n  keywords = {Training data;Process control;Signal processing;Linguistics;Real-time systems;Parametric statistics;Speech processing;whispers;voice conversion;parametric models},\n  doi = {10.23919/Eusipco47968.2020.9287684},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000416.pdf},\n}\n\n
@InProceedings{9287685,\n  author = {H. -M. Chiu and C. C. Fung and A. Ortega},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Graph Learning and Augmentation Based Interpolation of Signal Strength for Location-Aware Communications},\n  year = {2020},\n  pages = {2150-2154},\n  abstract = {A graph learning and augmentation (GLA) technique is proposed herein to solve the received signal power interpolation problem, which is important for preemptive resource allocation in location-aware communications. A graph parameterization results in the proposed GLA interpolator having superior mean-squared error performance and lower computational complexity than the traditional Gaussian process method. Simulation results and analytical complexity analysis are used to prove the efficacy of the GLA interpolator.},\n  keywords = {Interpolation;Simulation;Signal processing algorithms;Europe;Gaussian processes;Signal processing;Resource management;Graph learning;graph augmentation;interpolation;location-aware;resource allocation},\n  doi = {10.23919/Eusipco47968.2020.9287685},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002150.pdf},\n}\n\n
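For readers who want a concrete baseline for graph-based interpolation of received signal power, the classical Laplacian (harmonic) interpolator takes only a few lines; this is a generic textbook construction on a made-up graph, not the GLA parameterization proposed in the entry above.

    # Sketch: fill in missing graph-signal values by minimizing the Laplacian
    # quadratic form x^T L x with the observed entries held fixed.
    import numpy as np

    # Hypothetical 4-node path graph 0 - 1 - 2 - 3
    A = np.array([[0, 1, 0, 0],
                  [1, 0, 1, 0],
                  [0, 1, 0, 1],
                  [0, 0, 1, 0]], dtype=float)
    L = np.diag(A.sum(axis=1)) - A                # combinatorial graph Laplacian

    obs = np.array([0, 3])                        # nodes with measured received power (dB)
    mis = np.array([1, 2])                        # nodes to interpolate
    x_obs = np.array([-60.0, -75.0])              # made-up measurements

    # First-order optimality of the smoothness objective: L_mm x_mis = -L_mo x_obs
    x_mis = np.linalg.solve(L[np.ix_(mis, mis)], -L[np.ix_(mis, obs)] @ x_obs)
    print(x_mis)                                  # [-65., -70.]: smooth values between the measurements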
@InProceedings{9287686,\n  author = {D. -C. C. Koutsiou and M. Savelonas and D. K. Iakovidis},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {H-V Shadow Detection Based on Electromagnetism-Like Optimization},\n  year = {2020},\n  pages = {635-639},\n  abstract = {Shadow detection is useful in a variety of image analysis applications, as it can improve scene understanding. Most of the recent shadow detection approaches use near-infrared (NIR) cameras and deep learning to provide enhanced segmentation of the shadow areas in images. In this paper a novel shadow detection method is proposed, exploiting the perceptual color representation of the HSV color space and a physics-inspired optimization algorithm for image segmentation. The comparative advantage of this method over the state-of-the-art ones is that its performance is comparable without requiring any special equipment, such as NIR cameras, while it is simpler. Quantitative and qualitative experiments on publicly available datasets in comparison with three state-of-the-art methods, validate its effectiveness.},\n  keywords = {Deep learning;Image segmentation;Image color analysis;Signal processing algorithms;Signal processing;Cameras;Optimization;Shadow Detection;Segmentation;Electromagnetism-Like Optimization;HSV},\n  doi = {10.23919/Eusipco47968.2020.9287686},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000635.pdf},\n}\n\n
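As minimal context for why the HSV value channel is a natural starting point for shadow masks, the toy thresholding below marks dark pixels as shadow candidates; the electromagnetism-like optimization of the paper is far more elaborate, and the threshold here is an arbitrary assumption.

    # Sketch: crude shadow-candidate mask from the HSV value channel of an RGB image.
    import numpy as np

    def shadow_candidates(rgb, v_thresh=0.35):
        # rgb: float array in [0, 1] with shape (H, W, 3); returns a boolean mask
        v = rgb.max(axis=2)                       # HSV value channel is max(R, G, B)
        return v < v_thresh                       # dark pixels are shadow candidates

    img = np.random.rand(8, 8, 3)                 # stand-in for a real photograph
    mask = shadow_candidates(img)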
@InProceedings{9287687,\n  author = {F. Bous and L. Ardaillon and A. Roebel},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Semi-supervised learning of glottal pulse positions in a neural analysis-synthesis framework},\n  year = {2020},\n  pages = {401-405},\n  abstract = {This article investigates into recently emerging approaches that use deep neural networks for the estimation of glottal closure instants (GCI). We build upon our previous approach that used synthetic speech exclusively to create perfectly annotated training data and that had been shown to compare favourably with other training approaches using electroglottograph (EGG) signals. Here we introduce a semi-supervised training strategy that allows refining the estimator by means of an analysis-synthesis setup using real speech signals, for which GCI ground truth does not exist. Evaluation of the analyser is performed by means of comparing the GCI extracted from the glottal flow signal generated by the analyser with the GCI extracted from EGG on the CMU arctic dataset, where EGG signals were recorded in addition to speech. We observe that (1.) the artificial increase of the diversity of pulse shapes that has been used in our previous construction of the synthetic database is beneficial, (2.) training the GCI network in the analysis-synthesis setup allows achieving a very significant improvement of the GCI analyser, (3.) additional regularisation strategies allow improving the final analysis network when trained in the analysis-synthesis setup.},\n  keywords = {Training;Databases;Neural networks;Training data;Signal processing;Speech synthesis;Task analysis;Glottal closure instance detection;speech analysis},\n  doi = {10.23919/Eusipco47968.2020.9287687},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000401.pdf},\n}\n\n
@InProceedings{9287688,\n  author = {S. Koike},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Least Mean Square Nonlinear Regressor Algorithm},\n  year = {2020},\n  pages = {2334-2337},\n  abstract = {This paper proposes a new adaptation algorithm named Least Mean Square Nonlinear Regressor Algorithm (LMS-NRA) that makes adaptive filters highly robust against impulse noise at the filter input, for which a stochastic model is presented. The proposed algorithm uses a simple nonlinear function of the regressor. A statistical analysis of the LMS-NRA is developed to calculate theoretical filter convergence. Through numerical experiments, we demonstrate that the proposed algorithm is effective in realizing a robust adaptive filter which converges as fast as the LMS algorithm. Good agreement between simulated and theoretical filter convergence curves shows the validity and accuracy of the analysis.},\n  keywords = {Statistical analysis;Simulation;Adaptive filters;Signal processing algorithms;Stochastic processes;Filtering algorithms;Convergence;adaptive filter;LMS algorithm;impulse noise;nonlinear regressor;robust filtering},\n  doi = {10.23919/Eusipco47968.2020.9287688},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002334.pdf},\n}\n\n
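To fix ideas, an LMS-type update that passes the regressor through a simple nonlinearity can be sketched as follows; the clipping function, step size and impulse model are illustrative assumptions, not the specific LMS-NRA nonlinearity or the stochastic model analyzed in the paper.

    # Sketch: system identification with an LMS update applied to a clipped regressor,
    # which limits the influence of impulsive samples at the filter input.
    import numpy as np

    rng = np.random.default_rng(1)
    N, M, mu, clip_level = 5000, 8, 0.01, 3.0
    w_true = rng.standard_normal(M)

    x = rng.standard_normal(N)
    impulses = rng.random(N) < 0.01
    x[impulses] += 20.0 * rng.standard_normal(impulses.sum())   # occasional input impulses

    w = np.zeros(M)
    for n in range(M, N):
        u = x[n - M:n][::-1]                      # regressor (most recent sample first)
        d = w_true @ u + 0.01 * rng.standard_normal()
        e = d - w @ u
        w += mu * e * np.clip(u, -clip_level, clip_level)   # LMS step with a nonlinear regressor
    print(np.linalg.norm(w - w_true))             # misalignment should be small after adaptation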
@InProceedings{9287690,\n  author = {M. {Mohammad Amini} and D. Matrouf},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Data augmentation versus noise compensation for x-vector speaker recognition systems in noisy environments},\n  year = {2020},\n  pages = {1-5},\n  abstract = {The explosion of available speech data and new speaker modeling methods based on deep neural networks (DNN) have given the ability to develop more robust speaker recognition systems. Among DNN speaker modelling techniques, the x-vector system has shown a degree of robustness in noisy environments. Previous studies suggest that by increasing the number of speakers in the training data and using data augmentation, more robust speaker recognition systems are achievable in noisy environments. In this work, we want to know if explicit noise compensation techniques continue to be effective despite the general noise robustness of these systems. For this study, we will use two different x-vector networks: the first one is trained on Voxceleb1 (Protocol1), and the second one is trained on Voxceleb1+Voxceleb2 (Protocol2). We propose to add a denoising x-vector subsystem before scoring. Experimental results show that the x-vector system used in Protocol2 is more robust than the one used in Protocol1. Despite this observation, we will show that explicit noise compensation gives almost the same EER relative gain in both protocols. For example, in Protocol2 we have a 21% to 66% improvement of EER with denoising techniques.},\n  keywords = {System performance;Training data;Speech recognition;Signal processing;Speaker recognition;Noise measurement;Standards;speaker recognition;x-vector;data augmentation;noise compensation;denoising autoencoder;deep stacked denoising autoencoder},\n  doi = {10.23919/Eusipco47968.2020.9287690},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000376.pdf},\n}\n\n
@InProceedings{9287691,\n  author = {A. Nicolussi and S. Tanner and R. Wattenhofer},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Aircraft Fingerprinting Using Deep Learning},\n  year = {2020},\n  pages = {740-744},\n  abstract = {Aircraft periodically broadcast their position, identity and other information using the ADS-B protocol. This allows safe air traffic flow as ground stations and other aircraft can depend on the sent information. However, these messages are not authenticated or encrypted. Therefore, this system is vulnerable to attacks from Software Defined Radios (SDRs) and other transmitters. We propose a deep learning-based approach for fingerprinting of aircraft messages based on physical characteristics. This helps to verify the origin of an observed message.},\n  keywords = {Deep learning;Time-frequency analysis;Fingerprint recognition;Air traffic control;Aircraft manufacture;Aircraft;Software radio;Aircraft fingerprinting;deep learning;radio frequency fingerprinting},\n  doi = {10.23919/Eusipco47968.2020.9287691},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000740.pdf},\n}\n\n
@InProceedings{9287692,\n  author = {M. Coutino and E. Isufi and T. Maehara and G. Leus},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {State-Space Based Network Topology Identification},\n  year = {2020},\n  pages = {1055-1059},\n  abstract = {In this work, we explore the state-space formulation of network processes to recover the underlying network structure (local connections). To do so, we employ subspace techniques borrowed from system identification literature and extend them to the network topology inference problem. This approach provides a unified view of the traditional network control theory and signal processing on networks. In addition, it provides theoretical guarantees for the recovery of the topological structure of a deterministic linear dynamical system from input-output observations even though the input and state evolution networks can differ.},\n  keywords = {Network topology;Europe;Signal processing;Topology;System identification;Numerical models;Dynamical systems;state-space models;topology identification;graph signal processing;signal processing over networks},\n  doi = {10.23919/Eusipco47968.2020.9287692},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001055.pdf},\n}\n\n
@InProceedings{9287694,\n  author = {S. Weiss and J. Selva and M. D. Macleod},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Measuring Smoothness of Trigonometric Interpolation Through Incomplete Sample Points},\n  year = {2020},\n  pages = {2319-2323},\n  abstract = {In this paper we present a metric to assess the smoothness of a trigonometric interpolation through an incomplete set of sample points. We measure smoothness as the power of a particular derivative of a 2π-periodic Dirichlet interpolant through some sample points. We show that we do not need to explicitly complete the sample set or perform the interpolation, but can simply work with the available sample points, under the assumption that any missing points are chosen to minimise the metric, and present a simple and robust approach to the computation of this metric. We assess the accuracy and computational complexity of this approach, and compare it to benchmarks.},\n  keywords = {Measurement;Interpolation;Power measurement;Europe;Signal processing;Matrix decomposition;Standards},\n  doi = {10.23919/Eusipco47968.2020.9287694},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002319.pdf},\n}\n\n
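For the complete-data case, the quantity in question (the power of a derivative of the 2π-periodic Dirichlet interpolant) follows directly from the DFT via Parseval; a small sketch of that baseline computation is given below. The paper's actual contribution, working with incomplete sample sets without explicitly completing them, is not reproduced here.

    # Sketch: mean power of the p-th derivative of the trigonometric (Dirichlet)
    # interpolant through N equispaced samples, evaluated from DFT coefficients.
    import numpy as np

    def derivative_power(x, p=1):
        N = len(x)
        c = np.fft.fft(x) / N                     # Fourier-series coefficients c_k
        k = np.fft.fftfreq(N, d=1.0 / N)          # signed integer frequencies
        return np.sum(np.abs(k) ** (2 * p) * np.abs(c) ** 2)   # Parseval: sum |k|^(2p) |c_k|^2

    t = 2 * np.pi * np.arange(32) / 32
    print(derivative_power(np.cos(3 * t), p=1))   # 4.5, the mean power of -3*sin(3t)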
@InProceedings{9287695,\n  author = {L. Ruiz and L. F. O. Chamon and A. Ribeiro},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Graphon Filters: Signal Processing in Very Large Graphs},\n  year = {2020},\n  pages = {1050-1054},\n  abstract = {Graph filters are at the core of network information processing architectures, with applications in machine learning and distributed collaborative systems. Yet, designing filters for very large graphs is challenging because filter design techniques do not always scale with graph size. We overcome this by using graphons, which are infinite-dimensional representations of graphs that are at once random graph models and limit objects of sequences of graphs. Explicitly, we define graphon filters and leverage convergence properties of graph sequences and spectral properties of both graphs and graphons to show that graph filters converge to graphon filters. Filters designed on a graphon can therefore be applied to finite graphs sampled from it with guaranteed convergence properties. We illustrate our findings in two experiments, which corroborate filter response convergence and illustrate transferability of graph filters even in graphs that are not related to one another through a graphon, but that are built from the same type of data.},\n  keywords = {Europe;Collaboration;Machine learning;Information processing;Signal processing;Information filters;Convergence;graphons;convergent graph sequences;graph filters;graph signal processing},\n  doi = {10.23919/Eusipco47968.2020.9287695},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001050.pdf},\n}\n\n
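As a reminder of the finite-graph object whose limit is studied above, a polynomial graph filter y = sum_k h_k S^k x takes only a few lines; the random graph, the normalization of the shift operator and the taps below are all illustrative choices, not the paper's graphon construction.

    # Sketch: apply a polynomial graph filter y = sum_k h_k * S^k x on a random graph.
    import numpy as np

    rng = np.random.default_rng(0)
    n = 20
    A = (rng.random((n, n)) < 0.2).astype(float)
    A = np.triu(A, 1)
    A = A + A.T                                   # random undirected adjacency matrix
    S = A / max(A.sum(axis=1).max(), 1.0)         # shift operator scaled so its spectral radius <= 1

    h = [1.0, 0.5, 0.25]                          # filter taps (illustrative)
    x = rng.standard_normal(n)                    # graph signal

    y = np.zeros(n)
    Skx = x.copy()
    for hk in h:
        y += hk * Skx                             # accumulate h_k * S^k x
        Skx = S @ Skx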
@InProceedings{9287697,\n  author = {S. Dey and E. Sharma and R. Budhiraja},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Dynamic Resolution ADC/DAC Massive MIMO FD Relaying system over Correlated Rician Channel},\n  year = {2020},\n  pages = {1653-1657},\n  abstract = {We consider a multi-pair two-way full-duplex (FD) massive multiple-input multiple-output hardware-impaired relay over spatially correlated Rician fading channels exchanging information with hardware-impaired FD users. The relay employs a dynamic resolution ADC/DAC architecture wherein each antenna is equipped with a different resolution ADC/DAC. We derive a novel linear minimum mean square error (LMMSE) channel estimator with correlated Rician fading channel, incorporating dynamic resolution ADCs/DACs and RF impairments. We examine the impact of relay and user hardware impairments on the normalized mean squared error (NMSE) and show that the NMSE floors to a non-zero error floor even as pilot power goes to infinity.},\n  doi = {10.23919/Eusipco47968.2020.9287697},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001653.pdf},\n}\n\n
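The LMMSE step referenced above has the standard closed form h_hat = R_h A^H (A R_h A^H + R_n)^{-1} y; a toy real-valued sketch follows, without the spatially correlated Rician prior, the dynamic-resolution quantization or the RF impairments that the paper actually models.

    # Sketch: linear MMSE estimation of a channel vector h from pilot observations
    # y = A h + n, given a prior covariance R_h and noise covariance R_n.
    import numpy as np

    rng = np.random.default_rng(2)
    m, p = 16, 8                                  # observations and channel dimension (illustrative)
    A = rng.standard_normal((m, p))               # known pilot/measurement matrix
    R_h = np.eye(p)                               # assumed channel prior covariance
    R_n = 0.1 * np.eye(m)                         # assumed noise covariance

    h = rng.multivariate_normal(np.zeros(p), R_h)
    y = A @ h + rng.multivariate_normal(np.zeros(m), R_n)

    h_hat = R_h @ A.T @ np.linalg.solve(A @ R_h @ A.T + R_n, y)    # LMMSE estimate
    print(np.linalg.norm(h - h_hat) / np.linalg.norm(h))           # normalized estimation error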
@InProceedings{9287698,\n  author = {M. Perlmutter and N. Sissouno and A. Viswantathan and M. Iwen},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Provably Accurate Algorithm for Recovering Compactly Supported Smooth Functions from Spectrogram Measurements},\n  year = {2020},\n  pages = {970-974},\n  abstract = {We present an algorithm which is closely related to direct phase retrieval methods that have been shown to work well empirically [1], [2] and prove that it is guaranteed to recover (up to a global phase) a large class of compactly supported smooth functions from their spectrogram measurements. As a result, we take a first step toward developing a new class of practical phaseless imaging algorithms capable of producing provably accurate images of a given sample after it is masked by just a few shifts of a fixed periodic grating.},\n  keywords = {Phase measurement;Fourier transforms;Signal processing algorithms;Imaging;Time measurement;Gratings;Spectrogram;phase retrieval;phaseless imaging;spectrogram inversion;coded diffraction patterns;Short Time Fourier Transform (STFT) magnitude measurements},\n  doi = {10.23919/Eusipco47968.2020.9287698},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000970.pdf},\n}\n\n
@InProceedings{9287699,\n  author = {M. Mahadi and T. Ballal and M. Moinuddin and T. Y. Al-Naffouri and U. Al-Saggaf},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Low-Complexity Robust Beamforming for a Moving Source},\n  year = {2020},\n  pages = {1846-1850},\n  abstract = {This paper addresses the problem of robust beamforming for a moving source. To cope with the source movement, the regularization (or diagonal loading) parameter needs to be adjusted regularly. The need for repeatedly finding an adequate value of the regularization parameter increases the overall computational complexity. This paper proposes a simple and efficient initialization of Newton’s method to find the regularization parameter. The regularization parameter is chosen to minimize the mean-squared-error (MSE) of a regularized least-squares (RLS) problem by solving a certain function. We exploit some properties of this function to develop the method. Simulation results demonstrate that the proposed initialization method can reduce the runtime by 75%.},\n  keywords = {Runtime;Array signal processing;Simulation;Perturbation methods;Loading;Europe;Load modeling;Robust adaptive beamforming;MVDR;generalized sidelobe canceller;diagonal loading},\n  doi = {10.23919/Eusipco47968.2020.9287699},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001846.pdf},\n}\n\n
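For context, the beamformer whose loading factor is tuned above is the diagonally loaded MVDR solution w = (R + gamma*I)^{-1} a / (a^H (R + gamma*I)^{-1} a); the sketch below simply fixes gamma to an arbitrary value instead of the Newton-based selection proposed in the paper, and uses noise-only snapshots for a half-wavelength ULA.

    # Sketch: diagonally loaded MVDR weights for a uniform linear array.
    import numpy as np

    rng = np.random.default_rng(3)
    M = 8                                          # number of sensors (illustrative)
    theta = np.deg2rad(20.0)                       # assumed look direction
    a = np.exp(1j * np.pi * np.arange(M) * np.sin(theta))   # half-wavelength ULA steering vector

    X = (rng.standard_normal((M, 200)) + 1j * rng.standard_normal((M, 200))) / np.sqrt(2)
    R = X @ X.conj().T / 200                       # sample covariance from noise-only snapshots

    gamma = 0.1                                    # diagonal loading level (arbitrary here)
    Ra = np.linalg.solve(R + gamma * np.eye(M), a)
    w = Ra / (a.conj() @ Ra)                       # loaded MVDR weights
    print(np.abs(w.conj() @ a))                    # distortionless response in the look direction: 1.0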
@InProceedings{9287700,\n  author = {Z. Shi and L. Liu and R. Liu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Hodge and Podge: Hybrid Supervised Sound Event Detection with Multi-Hot MixMatch and Composition Consistence Training},\n  year = {2020},\n  pages = {1-5},\n  abstract = {In this paper, we propose a method called Hodge and Podge for sound event detection. We demonstrate Hodge and Podge on the dataset of Detection and Classification of Acoustic Scenes and Events (DCASE) 2019 Challenge Task 4. This task aims to predict the presence or absence and the onset and offset times of sound events in home environments. Sound event detection is challenging due to the lack of large scale real strongly labeled data. Recently deep semi-supervised learning (SSL) has proven to be effective in modeling with weakly labeled and unlabeled data. This work explores how to extend deep SSL to result in a new, state-of-the-art sound event detection method called Hodge and Podge. With convolutional recurrent neural networks (CRNN) as the backbone network, first, a multi-scale squeeze-excitation mechanism is introduced and added to generate a pyramid squeeze-excitation CRNN. The pyramid squeeze-excitation layer can pay attention to the issue that different sound events have different durations, and to adaptively recalibrate channel-wise spectrogram responses. Further, in order to remedy the lack of real strongly labeled data problem, we propose multi-hot MixMatch and composition consistency training with temporal-frequency augmentation. Our experiments with the public DCASE2019 challenge task 4 validation data resulted in an event-based F-score of 43.4%, and is about absolutely 1.6% better than state-of-the-art methods in the challenge. While the F-score of the official baseline is 25.8%.},\n  keywords = {Training;Recurrent neural networks;Event detection;Europe;Semisupervised learning;Task analysis;Spectrogram;sound event detection;semi-supervised learning;multi-hot MixMatch;composition consistence training},\n  doi = {10.23919/Eusipco47968.2020.9287700},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000001.pdf},\n}\n\n
@InProceedings{9287701,\n  author = {J. Vater and M. Pollach and C. Lenz and D. Winkle and A. Knoll},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Quality Control and Fault Classification of Laser Welded Hairpins in Electrical Motors},\n  year = {2020},\n  pages = {1377-1381},\n  abstract = {We present the development, evaluation, and comparison of different neural network architectures using different input data to detect and classify quality deviations in the welding of hairpins. Hairpins are copper rods that are located in the stator of electric motors in electric cars. We use both 3D data and grayscale images as input. The primary challenges are that only a small dataset is available and that high network accuracy is essential to prevent defects in the usage of an electrical engine and to enable a focused rework process. We were able to achieve a 99% accuracy using either 3D data or grayscale images.},\n  keywords = {Three-dimensional displays;Welding;Neural networks;Quality control;Gray-scale;Stators;Signal processing;machine learning;convolutional neural networks;electric motors;hairpin;quality control;production},\n  doi = {10.23919/Eusipco47968.2020.9287701},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001377.pdf},\n}\n\n
@InProceedings{9287703,\n  author = {Z. Fabian and J. Haldar and R. Leahy and M. Soltanolkotabi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {3D Phase Retrieval at Nano-Scale via Accelerated Wirtinger Flow},\n  year = {2020},\n  pages = {2080-2084},\n  abstract = {Imaging 3D nano-structures at very high resolution is crucial in a variety of scientific fields. However, due to fundamental limitations of light propagation we can only measure the object indirectly via 2D intensity measurements of the 3D specimen through highly nonlinear projection mappings where a variety of information (including phase) is lost. Reconstruction therefore involves inverting highly non-linear and seemingly noninvertible mappings. In this paper, we introduce a novel technique where the 3D object is directly reconstructed from an accurate non-linear propagation model. Furthermore, we characterize the ambiguities of this model and leverage a priori knowledge to mitigate their effect and also significantly reduce the required number of measurements and hence the acquisition time. We demonstrate the performance of our algorithm via numerical experiments aimed at nano-scale reconstruction of 3D integrated circuits. Moreover, we provide rigorous theoretical guarantees for convergence to stationarity.},\n  keywords = {Three-dimensional displays;Phase measurement;Two dimensional displays;Loss measurement;Numerical models;Integrated circuit modeling;Image reconstruction;ptychography;inverse problems;non-convex optimization;computational imaging;phase retrieval},\n  doi = {10.23919/Eusipco47968.2020.9287703},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002080.pdf},\n}\n\n
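As background for the accelerated scheme above, the plain Wirtinger-flow iteration for the generic intensity-only least-squares problem is sketched below with random Gaussian measurements and a spectral initialization; neither the nonlinear 3D propagation model nor the acceleration and priors of the paper are included, and the step size is a heuristic choice.

    # Sketch: plain Wirtinger flow for f(z) = (1/2m) * sum_i (|a_i^H z|^2 - y_i)^2.
    import numpy as np

    rng = np.random.default_rng(4)
    n, m = 32, 256
    A = (rng.standard_normal((m, n)) + 1j * rng.standard_normal((m, n))) / np.sqrt(2)
    x_true = rng.standard_normal(n) + 1j * rng.standard_normal(n)
    y = np.abs(A @ x_true) ** 2                    # intensity-only measurements

    # Spectral initialization: leading eigenvector of (1/m) A^H diag(y) A, rescaled
    Y = (A.conj().T * y) @ A / m
    _, V = np.linalg.eigh(Y)
    z = V[:, -1] * np.sqrt(np.mean(y))

    mu = 0.1 / np.mean(y)                          # heuristic step size
    for _ in range(2000):
        Az = A @ z
        z = z - mu * (A.conj().T @ ((np.abs(Az) ** 2 - y) * Az) / m)   # Wirtinger gradient step

    c = np.vdot(x_true, z)                         # remove the global-phase ambiguity
    print(np.linalg.norm(x_true - z * np.conj(c) / np.abs(c)) / np.linalg.norm(x_true))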
@InProceedings{9287705,\n  author = {K. Wilkinghoff},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {On Open-Set Classification with L3-Net Embeddings for Machine Listening Applications},\n  year = {2020},\n  pages = {800-804},\n  abstract = {Obtaining labeled data for machine listening applications is expensive because labeling audio data requires humans listening to recordings. However, state-of-the-art deep learning based systems usually require large amounts of labeled data to be trained with. A solution for this problem is to train a neural network with a large collection of unlabeled data to extract embeddings and then use these embeddings to train a shallow classifier on a small but labeled dataset suitable for the application. One example is Look, Listen, and Learn (L3-Net) embeddings, which are trained self-supervised to capture audio-visual correspondence in videos. Since shallow classifiers are trained discriminatively and thus tacitly assume a closed-set classification task, they do not perform well in open-set classification tasks. In this paper, a neural network that combines all L3-Net embeddings belonging to one recording into a single vector by using an x-vector mechanism, as well as an open-set classification system based on it, are presented. In experiments conducted on the open-set acoustic scene classification task belonging to the DCASE challenge 2019, the proposed system significantly outperforms a shallow discriminative classifier and all other previously published systems, while at the same time performing equally well as a shallow classifier on multiple closed-set machine listening datasets.},\n  keywords = {Image analysis;Neural networks;Europe;Signal processing;Labeling;Task analysis;Videos;open-set classification;deep audio embeddings;machine listening;acoustic scene classification;acoustic event classification},\n  doi = {10.23919/Eusipco47968.2020.9287705},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000800.pdf},\n}\n\n
@InProceedings{9287706,\n  author = {P. M. Baggenstoss},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {The Projected Belief Network Classifier: both Generative and Discriminative},\n  year = {2020},\n  pages = {795-799},\n  abstract = {The projected belief network (PBN) is a layered generative network with tractable likelihood function, and is based on a feed-forward neural network (FF-NN). It can therefore share an embodiment with a discriminative classifier and can inherit the best qualities of both types of network. In this paper, a convolutional PBN is constructed that is both fully discriminative and fully generative and is tested on spectrograms of spoken commands. It is shown that the network displays excellent qualities from either the discriminative or generative viewpoint. Random data synthesis and visible data reconstruction from low-dimensional hidden variables are shown, while classifier performance approaches that of a regularized discriminative network. Combination with a conventional discriminative CNN is also demonstrated.},\n  keywords = {Convolution;Neural networks;Europe;Spectrogram},\n  doi = {10.23919/Eusipco47968.2020.9287706},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000795.pdf},\n}\n\n
@InProceedings{9287708,\n  author = {C. {Kurisummoottil Thomas} and D. Slock},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Generalized Swept Approximate Message Passing based Kalman Filtering for Dynamic Sparse Bayesian Learning},\n  year = {2020},\n  pages = {2065-2069},\n  abstract = {Sparse Bayesian Learning (SBL), initially proposed in the Machine Learning (ML) literature, is an efficient and well-studied framework for sparse signal recovery. SBL uses hierarchical Bayes with a decorrelated Gaussian prior in which the variance profile is also to be estimated. This is more sparsity inducing than e.g. a Laplacian prior. However, SBL does not scale with problem dimensions due to the computational complexity associated with the matrix inversion in Linear Minimum Mean Squared Error (LMMSE) estimation. To address this issue, various low complexity approximate Bayesian inference techniques have been introduced for the LMMSE component, including Variational Bayesian (VB) inference, Space Alternating Variational Estimation (SAVE) or Message Passing (MP) algorithms such as Belief Propagation (BP) or Expectation Propagation (EP) or Approximate MP (AMP). These algorithms may converge to the correct LMMSE estimate. However, in ML we are often also interested in having posterior variance information. We observed that SBL via SAVE provides (largely) underestimated variance estimates. AMP style algorithms may provide more accurate variance information (per component) as we have shown recently. However, one practical issue associated with most AMP versions is that they may diverge even for a slight deviation from i.i.d. Gaussian or right orthogonally invariant measurement matrices. To this end we extend here the more robust Swept AMP (SwAMP) algorithm to Generalized SwAMP (GSwAMP), which handles independent but non-i.i.d. priors and to the case of dynamic SBL. The simulations illustrate the desirable convergence behavior of the proposed GSwAMP-SBL under different scenarios on the measurement matrix.},\n  keywords = {Heuristic algorithms;Message passing;Signal processing algorithms;Approximation algorithms;Inference algorithms;Bayes methods;Sparse matrices},\n  doi = {10.23919/Eusipco47968.2020.9287708},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002065.pdf},\n}\n\n
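The GSwAMP updates themselves are not reproduced here; the snippet below is only a minimal NumPy sketch of the classical EM-based SBL baseline that the abstract starts from: a Gaussian prior with a per-component variance profile that is re-estimated, with the LMMSE step done by an explicit matrix inversion (exactly the operation whose cost motivates AMP-style alternatives). Problem sizes, noise level and iteration count are illustrative assumptions.

```python
import numpy as np

rng = np.random.default_rng(1)
m, n, k = 80, 200, 10                 # measurements, dimension, sparsity (assumptions)
A = rng.normal(size=(m, n)) / np.sqrt(m)
x_true = np.zeros(n)
x_true[rng.choice(n, k, replace=False)] = rng.normal(size=k)
sigma2 = 1e-3                         # assumed known noise variance
y = A @ x_true + np.sqrt(sigma2) * rng.normal(size=m)

# Classical SBL / EM: alternate the LMMSE posterior with hyperparameter (variance) updates.
gamma = np.ones(n)                    # per-component prior variances
for _ in range(100):
    # E-step: Gaussian posterior of x given the current variance profile (the costly inversion).
    Sigma = np.linalg.inv(A.T @ A / sigma2 + np.diag(1.0 / gamma))
    mu = Sigma @ A.T @ y / sigma2
    # M-step: update each prior variance from the posterior mean and variance (small floor for stability).
    gamma = np.maximum(mu ** 2 + np.diag(Sigma), 1e-10)

x_hat = mu
est_support = set(np.argsort(np.abs(x_hat))[-k:])
print("support recovered:", est_support == set(np.flatnonzero(x_true)))
print("relative error   :", np.linalg.norm(x_hat - x_true) / np.linalg.norm(x_true))
```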
@InProceedings{9287709,\n  author = {M. Courcoux-Caro and C. Vanwynsberghe and C. Herzet and A. Baussard},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Sequential Sensor Placement using Bayesian Compressed Sensing for Source Localization},\n  year = {2020},\n  pages = {241-245},\n  abstract = {This paper deals with the sensor placement problem for an array designed for source localization. When it involves the identification of a few sources, the compressed sensing framework is known to find directions effectively thanks to sparse approximation. The present contribution intends to provide an answer to the following question: given a set of observations, how should we make the next measurement to minimize (some form of) uncertainty on the localization of the sources? More specifically, we propose a methodology for sequential sensor placement inspired from the {"}Bayesian compressive sensing{"} framework introduced by Ji et al. Our method alternates between a step of sparse source localization estimation, and a step to choose the sensor position that minimizes the covariance of the estimation error. Numerical results show that an array designed by the proposed procedure leads to better performance than sensors positioned at random.},\n  keywords = {Sensor placement;Uncertainty;Transmission line matrix methods;Sparse representation;Bayes methods;Compressed sensing;Sensor arrays;Acoustic;source localization;sequential sensor placement;compressed sensing},\n  doi = {10.23919/Eusipco47968.2020.9287709},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000241.pdf},\n}\n\n
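The paper's criterion is built on Bayesian compressive sensing; the short sketch below only illustrates the generic loop the abstract describes, picking the next measurement that most reduces the posterior covariance, under a plain Gaussian (non-sparse) prior and using the Sherman-Morrison identity to score each candidate cheaply. The candidate observation rows, prior precision and noise level are illustrative assumptions, not the authors' array model.

```python
import numpy as np

rng = np.random.default_rng(2)
n_grid, n_candidates, n_select = 50, 40, 8       # source grid size, candidate sensors, sensors to place
sigma2, alpha = 1e-2, 1.0                        # noise variance and Gaussian prior precision (assumed)

# Each candidate sensor position contributes one row of the observation model y = A x + noise.
candidate_rows = rng.normal(size=(n_candidates, n_grid)) / np.sqrt(n_grid)

Sigma = np.eye(n_grid) / alpha                   # prior covariance of the source vector
chosen = []
for _ in range(n_select):
    best, best_gain = None, -np.inf
    for i in range(n_candidates):
        if i in chosen:
            continue
        a = candidate_rows[i]
        # Sherman-Morrison: adding row a reduces tr(Sigma) by (a^T Sigma^2 a) / (sigma2 + a^T Sigma a).
        Sa = Sigma @ a
        gain = (Sa @ Sa) / (sigma2 + a @ Sa)
        if gain > best_gain:
            best, best_gain = i, gain
    chosen.append(best)
    a = candidate_rows[best]
    Sa = Sigma @ a
    Sigma = Sigma - np.outer(Sa, Sa) / (sigma2 + a @ Sa)   # posterior covariance update

print("selected sensor indices:", chosen)
print("remaining uncertainty tr(Sigma):", np.trace(Sigma))
```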
@InProceedings{9287710,\n  author = {B. Jo and P. Calamia},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust Blind Multichannel Identification based on a Phase Constraint and Different ℓp-norm Constraints},\n  year = {2020},\n  pages = {1966-1970},\n  abstract = {Blind multichannel identification has played a crucial role as a prerequisite for channel equalization, speech de-reverberation, and time-delay estimation for decades. Algorithms based on cross-relation (CR) errors have been widely studied, however, these algorithms have been reported to be fundamentally vulnerable to additive noise. Consequently, there have been many studies to improve robustness. Among them, the ℓp-robust normalized multichannel frequency-domain least-mean-square (ℓp-RNMCFLMS) algorithm was developed recently to blindly identify a single-input-multiple-output system in a reverberant environment. However, the additional penalty functions of conventional algorithms including ℓp-RNMCFLMS are based only on the magnitude spectra of the microphone channels. In this work, we propose a new penalty function applied to the phase spectra of the system. Furthermore, we develop an extended ℓp-RNMCFLMS to improve the steady-state performance by discriminating p values on impulse responses separated into significant and insignificant components. Numerical simulation results show that the proposed algorithm outperforms conventional algorithms even in low-SNR conditions.},\n  keywords = {Additive noise;Signal processing algorithms;Signal processing;Numerical simulation;Robustness;Steady-state;Microphones;Blind multichannel identification;adaptive algorithms;phase spectrum},\n  doi = {10.23919/Eusipco47968.2020.9287710},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
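The phase-constrained algorithm itself is not reproduced here, but the cross-relation (CR) identity that such methods start from is easy to verify numerically: for a single source observed through two channels, x1 * h2 = x2 * h1, so the CR residual vanishes for the true impulse responses and grows once additive noise is introduced. Filter lengths, the source signal and the noise level below are illustrative assumptions.

```python
import numpy as np

rng = np.random.default_rng(3)
L, N = 16, 2000                          # impulse-response length and signal length (assumptions)
s = rng.normal(size=N)                   # unknown source
h1, h2 = rng.normal(size=L), rng.normal(size=L)

x1 = np.convolve(s, h1)                  # microphone signals
x2 = np.convolve(s, h2)

def cr_residual(x1, x2, g1, g2):
    """Normalized cross-relation error x1 * g2 - x2 * g1 (zero for the true channel pair, noiseless)."""
    e = np.convolve(x1, g2) - np.convolve(x2, g1)
    return np.linalg.norm(e) / np.linalg.norm(np.convolve(x1, g2))

print("CR residual, true filters, no noise  :", cr_residual(x1, x2, h1, h2))

# Additive noise breaks the identity, which is why plain CR-based estimators lose robustness.
x1_noisy = x1 + 0.05 * rng.normal(size=x1.shape)
x2_noisy = x2 + 0.05 * rng.normal(size=x2.shape)
print("CR residual, true filters, with noise:", cr_residual(x1_noisy, x2_noisy, h1, h2))
```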
@InProceedings{9287711,\n  author = {K. A. Voulgaris and M. E. Davies and M. Yaghoobi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {DeepMP for Non–Negative Sparse Decomposition},\n  year = {2020},\n  pages = {2035-2039},\n  abstract = {Non–negative signals form an important class of sparse signals. Many algorithms have already been proposed to recover such non-negative representations, where greedy and convex relaxed algorithms are among the most popular methods. The greedy techniques are low computational cost algorithms, which have also been modified to incorporate the non-negativity of the representations. One such modification has been proposed for Matching Pursuit (MP) based algorithms, which first chooses positive coefficients and uses a non-negative optimisation technique that guarantees the non–negativity of the coefficients. The performance of greedy algorithms, like all non–exhaustive search methods, suffers from high coherence with the linear generative model, called the dictionary. We here first reformulate the non–negative matching pursuit algorithm in the form of a deep neural network. We then show that the proposed model after training yields a significant improvement in terms of exact recovery performance, compared to other non–trained greedy algorithms, while keeping the complexity low.},\n  keywords = {Greedy algorithms;Training;Search methods;Neural networks;Matching pursuit algorithms;Signal processing algorithms;Signal processing;Matching Pursuit;Non-negative Sparse Approximations;Multilabel Classification;Deep Neural Networks},\n  doi = {10.23919/Eusipco47968.2020.9287711},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
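The trained DeepMP network is not reproduced here; the snippet below is only a compact NumPy/SciPy sketch of the untrained non-negative matching pursuit baseline the abstract describes: at each iteration the atom with the largest positive correlation joins the support, and the coefficients on that support are refit with a non-negative least-squares solve (an OMP-style refit). Dictionary size, sparsity level and the stopping rule are illustrative assumptions.

```python
import numpy as np
from scipy.optimize import nnls

rng = np.random.default_rng(4)
m, n, k = 64, 256, 6                       # signal length, dictionary size, sparsity (assumptions)
D = rng.normal(size=(m, n))
D /= np.linalg.norm(D, axis=0)             # unit-norm atoms
x_true = np.zeros(n)
x_true[rng.choice(n, k, replace=False)] = rng.uniform(0.5, 2.0, size=k)   # non-negative coefficients
y = D @ x_true

def nn_matching_pursuit(y, D, n_iter):
    """Greedy non-negative sparse decomposition with an NNLS refit on the current support."""
    support, residual = [], y.copy()
    x = np.zeros(D.shape[1])
    for _ in range(n_iter):
        corr = D.T @ residual
        atom = int(np.argmax(corr))        # only positively correlated atoms are useful here
        if corr[atom] <= 0:
            break
        if atom not in support:
            support.append(atom)
        coeffs, _ = nnls(D[:, support], y)  # non-negative refit keeps all coefficients >= 0
        x[:] = 0.0
        x[support] = coeffs
        residual = y - D @ x
    return x

x_hat = nn_matching_pursuit(y, D, n_iter=k)
print("recovered support:", sorted(np.flatnonzero(x_hat > 1e-8)))
print("true support     :", sorted(np.flatnonzero(x_true)))
```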
@InProceedings{9287712,\n  author = {T. Vlašić and D. Seršić},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Sub-Nyquist Sampling in Shift-Invariant Spaces},\n  year = {2020},\n  pages = {2284-2288},\n  abstract = {We introduce a novel framework for acquisition of analog signals by combining compressive sensing (CS) and the shift-invariant (SI) reconstruction procedure. We reinterpret the random demodulator as a system that acquires a linear combination of the samples in the conventional SI setting with the box function as the sampling kernel. The SI samples are recovered by solving the CS optimization problem and subsequently filtered by a correction filter in order to obtain expansion coefficients of the signal. The underlying model is inherently infinite dimensional, but the SI property allows for formulation of the problem within finite-dimensional CS. We provide experimental results of the proposed system at the end of the paper.},\n  keywords = {Tools;Noise measurement;Splines (mathematics);Image reconstruction;Standards;Optimization;Compressed sensing;B-splines;inverse problems;sampling;sparsity},\n  doi = {10.23919/Eusipco47968.2020.9287712},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002284.pdf},\n}\n\n
@InProceedings{9287713,\n  author = {S. López-Tapia and A. Lucas and R. Molina and A. K. Katsaggelos},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Gated Recurrent Networks for Video Super Resolution},\n  year = {2020},\n  pages = {700-704},\n  abstract = {Despite the success of Recurrent Neural Networks in tasks involving temporal video processing, few works in Video Super-Resolution (VSR) have employed them. In this work we propose a new Gated Recurrent Convolutional Neural Network for VSR adapting some of the key components of a Gated Recurrent Unit. Our model employs a deformable attention module to align the features calculated at the previous time step with the ones in the current step and then uses a gated operation to combine them. This allows our model to effectively reuse previously calculated features and exploit longer temporal relationships between frames without the need of explicit motion compensation. The experimental validation shows that our approach outperforms current VSR learning based models in terms of perceptual quality and temporal consistency.},\n  keywords = {Deformable models;Training;Adaptation models;Recurrent neural networks;Logic gates;Signal processing;Task analysis;Video;Super-resolution;Convolutional Neuronal Networks;Recurrent Neural Networks},\n  doi = {10.23919/Eusipco47968.2020.9287713},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000700.pdf},\n}\n\n
@InProceedings{9287714,\n  author = {D. Dash and P. Ferrari and J. Wang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Role of Brainwaves in Neural Speech Decoding},\n  year = {2020},\n  pages = {1357-1361},\n  abstract = {Neural speech decoding aims at direct decoding of speech from the brain to restore speech communication in patients with locked-in syndrome (fully paralyzed but aware). Despite the recent progress, exactly which aspects of neural activities are characterizing the decoding process is still unclear. Neural oscillations have been associated with playing a key functional role in neural information processing and thus might provide significant insight into the decoding process. Previous research has investigated a limited range of neural frequencies for decoding, usually the high-gamma oscillations (70-200Hz) in electrocorticography (ECoG) and lower-frequency waves (1-70Hz) in electroencephalography (EEG). Hence, the exact contribution of specific frequency bands is still unclear. Magnetoencephalography (MEG) is a non-invasive method for directly measuring underlying brain activity and has the temporal resolution needed to investigate the role of cortical oscillations in speech decoding, which we attempted in this study. We used three machine learning classifiers (linear discriminant analysis (LDA), support vector machine (SVM), and artificial neural network (ANN) to classify different imagined and spoken phrases for finding the role of brainwaves in speech decoding. The experimental results showed a significant contribution of low-frequency Delta oscillations (0.1-4 Hz) in decoding and the best performance was achieved when all the brainwaves were combined.},\n  keywords = {brain;learning (artificial intelligence);magnetoencephalography;medical disorders;medical signal processing;neural nets;neurophysiology;speech coding;support vector machines;artificial neural network;brainwaves;low-frequency Delta oscillations;neural speech decoding;direct decoding;speech communication;neural activities;neural oscillations;neural frequencies;high-gamma oscillations;lower-frequency waves;specific frequency bands;brain activity;cortical oscillations;magnetoencephalography;MEG;support vector machine;linear discriminant analysis;SVM;ANN;LDA;frequency 0.1 Hz to 4.0 Hz;Support vector machines;Neural activity;Artificial neural networks;Electroencephalography;Decoding;Speech processing;Oscillators;brainwave;magnetoencephalography;neural speech decoding;LDA;SVM;ANN},\n  doi = {10.23919/Eusipco47968.2020.9287714},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001357.pdf},\n}\n\n
@InProceedings{9287715,\n  author = {A. Simonetto},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Smooth Strongly Convex Regression},\n  year = {2020},\n  pages = {2130-2134},\n  abstract = {Convex regression (CR) is the problem of fitting a convex function to a finite number of noisy observations of an underlying convex function. CR is important in many domains and one of its workhorses is the non-parametric least square estimator (LSE). Currently, LSE delivers only non-smooth non-strongly convex function estimates. In this paper, leveraging recent results in convex interpolation, we generalize LSE to smooth strongly convex regression problems. The resulting algorithm relies on a convex quadratically constrained quadratic program. We also propose a parallel implementation, which leverages ADMM, that lessens the overall computational complexity to a tight O(n2) for n observations. Numerical results support our findings.},\n  keywords = {Interpolation;Fitting;Signal processing algorithms;Signal processing;Convex functions;Noise measurement;Computational complexity},\n  doi = {10.23919/Eusipco47968.2020.9287715},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002130.pdf},\n}\n\n
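The smooth strongly convex generalization and its ADMM implementation are the paper's contribution and are not reproduced here; the snippet below only sketches the standard non-parametric convex LSE that the abstract starts from, written as a quadratic program with CVXPY (the package choice, problem size and noise level are assumptions). The fitted values f_i and subgradients g_i must satisfy the convex-interpolation constraints f_j >= f_i + g_i^T (x_j - x_i).

```python
import numpy as np
import cvxpy as cp

rng = np.random.default_rng(5)
n, d = 30, 2                                   # observations and input dimension (assumptions)
X = rng.uniform(-1, 1, size=(n, d))
y = np.sum(X ** 2, axis=1) + 0.05 * rng.normal(size=n)   # noisy samples of a convex function

f = cp.Variable(n)                             # fitted function values at the sample points
G = cp.Variable((n, d))                        # subgradients at the sample points
constraints = [f[j] >= f[i] + G[i] @ (X[j] - X[i])
               for i in range(n) for j in range(n) if i != j]
problem = cp.Problem(cp.Minimize(cp.sum_squares(y - f)), constraints)
problem.solve()

print("status:", problem.status)
print("training RMSE:", np.sqrt(np.mean((f.value - y) ** 2)))
```

The number of interpolation constraints grows quadratically with n, which is one reason the parallel ADMM treatment discussed in the abstract matters for larger datasets.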
@InProceedings{9287716,\n  author = {K. Guirguis and C. Schorn and A. Guntoro and S. Abdulatif and B. Yang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {SELD-TCN: Sound Event Localization Detection via Temporal Convolutional Networks},\n  year = {2020},\n  pages = {16-20},\n  abstract = {The understanding of the surrounding environment plays a critical role in autonomous robotic systems, such as self-driving cars. Extensive research has been carried out concerning visual perception. Yet, to obtain a more complete perception of the environment, autonomous systems of the future should also take acoustic information into account. Recent sound event localization and detection (SELD) frameworks utilize convolutional recurrent neural networks (CRNNs). However, considering the recurrent nature of CRNNs, it becomes challenging to implement them efficiently on embedded hardware. Not only are their computations strenuous to parallelize, but they also require high memory bandwidth and large memory buffers. In this work, we develop a more robust and hardware-friendly novel architecture based on a temporal convolutional network (TCN). The proposed framework (SELD-TCN) outperforms the state-of-the-art SELDnet performance on four different datasets. Moreover, SELD-TCN achieves 4x faster training time per epoch and 40x faster inference time on an ordinary graphics processing unit (GPU).},\n  keywords = {Training;Recurrent neural networks;Convolution;Memory management;Graphics processing units;Task analysis;Visual perception;Sound event localization and detection;deep learning;convolutional recurrent neural network;temporal convolutional networks},\n  doi = {10.23919/Eusipco47968.2020.9287716},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
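The full SELD-TCN model is not reproduced here; the snippet below only sketches the kind of building block the abstract argues for, a causal dilated 1-D convolution with a residual connection (PyTorch, with channel counts, kernel size and dilations as illustrative assumptions). Unlike a recurrent layer, every output frame is produced by the same feed-forward convolution, which is what makes the computation easy to parallelize on embedded hardware or a GPU.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class CausalTCNBlock(nn.Module):
    """Dilated causal Conv1d with a residual connection (one building block of a TCN)."""
    def __init__(self, channels, kernel_size=3, dilation=1):
        super().__init__()
        self.pad = (kernel_size - 1) * dilation          # left-padding keeps the block causal
        self.conv = nn.Conv1d(channels, channels, kernel_size, dilation=dilation)

    def forward(self, x):                                 # x: (batch, channels, time)
        out = self.conv(F.pad(x, (self.pad, 0)))          # pad only on the left (the past)
        return F.relu(out) + x                            # residual connection

# Stack blocks with exponentially growing dilation to cover a long temporal context.
tcn = nn.Sequential(*[CausalTCNBlock(64, dilation=2 ** i) for i in range(4)])
frames = torch.randn(8, 64, 128)                          # (batch, features, time)
print(tcn(frames).shape)                                  # torch.Size([8, 64, 128])
```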
@InProceedings{9287717,\n  author = {V. Pathuri-Bhuvana and S. Schuster and A. Och},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Joint Calibration and Tomography based on Separable Least Squares Approach with Constraints on Linear and Non-Linear Parameters},\n  year = {2020},\n  pages = {1931-1935},\n  abstract = {Most of the existing tomography techniques rely on accurate calibration to reconstruct the features of interest. In several industrial applications, the calibration is typically performed off-line and has to be repeated frequently to counter time varying perturbation caused by aging, operating conditions, and so on. In this paper, a novel online joint calibration and tomography method based on a variable projection based separable least squares approach with constraints on linear and non-linear parameters is proposed. The constraints on the linear parameters improve the estimation accuracy of the ill-posed and underdetermined tomography problem. The constraints on the non-linear parameters keep the proposed method from departing far from the initial guess, especially when a good initial guess is available. The proposed method is used to reconstruct the temperature distribution inside a blast furnace and simultaneously to calibrate the positions of acoustic transducers based on simulated acoustic time of flight measurements.},\n  keywords = {Temperature measurement;Jacobian matrices;Temperature distribution;Tomography;Position measurement;Time measurement;Calibration;Tomography;online sensor calibration;constrained optimization},\n  doi = {10.23919/Eusipco47968.2020.9287717},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001931.pdf},\n}\n\n
@InProceedings{9287718,\n  author = {H. Moghaddasi and A. -J. {van der Veen} and N. M. S. {de Groot} and B. Hunyadi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Tensor-based Detection of Paroxysmal and Persistent Atrial Fibrillation from Multi-channel ECG},\n  year = {2020},\n  pages = {1155-1159},\n  abstract = {Atrial fibrillation (AF) is the most common arrhythmia in the heart. Two main types of AF are defined as paroxysmal and persistent. In this paper, we present a method to discriminate between the characteristics of paroxysmal and persistent using tensor decompositions of a multi-channel electrocardiogram (ECG) signal. For this purpose, ECG signals are segmented by applying a Hilbert transform on the thresholded signal. Dynamic time warping is used to align the separated segments of each channel and then a tensor is constructed with three dimensions as time, heartbeats and channels. A Canonical polyadic decomposition with rank 2 is computed from this tensor and the resulting loading vectors describe the characteristics of paroxysmal and persistent AF in these three dimensions. The time loading vector reveals the pattern of a single P wave or abnormal AF patterns. The heartbeat loading vector shows whether the pattern is present or absent in a specific beat. The results can be used to distinguish between the patterns in paroxysmal AF and persistent AF.},\n  keywords = {electrocardiography;Hilbert transforms;medical signal detection;medical signal processing;tensors;tensor-based detection;Hilbert transform;paroxysmal AF;heartbeat loading vector;persistent AF;Canonical polyadic decomposition;heartbeats;separated segments;dynamic time warping;thresholded signal;multichannel electrocardiogram signal;multichannel ECG signals;persistent atrial fibrillation;Tensors;Heart beat;Loading;Atrial fibrillation;Transforms;Electrocardiography;Signal processing;Atrial fibrillation;Tensor decomposition;Electrocardiogram and Canonical polyadic decomposition},\n  doi = {10.23919/Eusipco47968.2020.9287718},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001155.pdf},\n}\n\n
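The full pipeline (Hilbert-transform segmentation and dynamic-time-warping alignment) is not reproduced here; the snippet below only sketches the final step, a rank-2 canonical polyadic decomposition of a beat-aligned time x heartbeat x channel tensor, using TensorLy (the package choice, the synthetic tensor and its dimensions are assumptions). The three factor matrices play the roles the abstract describes: a temporal waveform, a per-beat activation and a per-channel weighting.

```python
import numpy as np
import tensorly as tl
from tensorly.decomposition import parafac

rng = np.random.default_rng(6)
n_time, n_beats, n_channels = 120, 40, 8        # illustrative dimensions

# Build a synthetic rank-2 tensor: a transient "P-wave-like" pattern plus an oscillatory pattern.
t = np.linspace(0, 1, n_time)
time_factors = np.stack([np.exp(-((t - 0.3) ** 2) / 0.002),
                         np.sin(2 * np.pi * 6 * t)], axis=1)
beat_factors = rng.uniform(0.5, 1.0, size=(n_beats, 2))
chan_factors = rng.uniform(0.2, 1.0, size=(n_channels, 2))
T = np.einsum('ir,jr,kr->ijk', time_factors, beat_factors, chan_factors)
T += 0.01 * rng.normal(size=T.shape)

# Rank-2 CPD: each component is an outer product of a time, a heartbeat and a channel vector.
weights, factors = parafac(tl.tensor(T), rank=2, normalize_factors=True)
time_f, beat_f, chan_f = factors
print("factor shapes:", time_f.shape, beat_f.shape, chan_f.shape)   # (120, 2) (40, 2) (8, 2)
```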
@InProceedings{9287719,\n  author = {O. {Karabiber Cura} and M. {Akif Ozdemir} and S. Pehlivan and A. Akan},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Dynamic Mode Decomposition Based Approach for Epileptic EEG Classification},\n  year = {2020},\n  pages = {1070-1074},\n  abstract = {Epilepsy is a neurological disorder that affects many people all around the world, and its early detection is a topic of research widely studied in signal processing community. In this paper, a new technique that was introduced to solve problems of fluid dynamics called Dynamic Mode Decomposition (DMD), is used to classify seizure and non-seizure epileptic EEG signals. The DMD decomposes a given signal into the intrinsic oscillations called modes which are used to define a DMD spectrum. In the proposed approach, the DMD spectrum is obtained by applying either multi-channel or single-channel based DMD technique. Then, subband and total power features extracted from the DMD spectrum and various classifiers are utilized to classify seizure and non-seizure epileptic EEG segments. Outstanding classification results are achieved by both the single-channel based (96.7%), and the multi-channel based (96%) DMD approaches.},\n  keywords = {electroencephalography;feature extraction;medical disorders;medical signal processing;neurophysiology;signal classification;DMD spectrum;single-channel based DMD technique;nonseizure;signal classification;multichannel based DMD approaches;dynamic mode decomposition;epileptic EEG classification;neurological disorder;fluid dynamics;intrinsic oscillations;Neurological diseases;Fluid dynamics;Epilepsy;Signal processing;Feature extraction;Electroencephalography;Oscillators;Dynamic mode decomposition (DMD);epileptic EEG classification;DMD spectrum},\n  doi = {10.23919/Eusipco47968.2020.9287719},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001070.pdf},\n}\n\n
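The classifier pipeline is not reproduced here; the snippet below only sketches the exact-DMD step that yields the modes and the "DMD spectrum" the abstract refers to, on a synthetic multi-channel signal (NumPy only; the channel count, the two test oscillations, the truncation rank and the sampling rate are illustrative assumptions, not EEG data).

```python
import numpy as np

rng = np.random.default_rng(7)
fs, n_channels, n_samples = 256.0, 8, 512
t = np.arange(n_samples) / fs
# Synthetic multi-channel recording: a 3 Hz and a 10 Hz oscillation with random spatial mixing.
sources = np.stack([np.sin(2 * np.pi * 3 * t), np.sin(2 * np.pi * 10 * t)])
X = rng.normal(size=(n_channels, 2)) @ sources + 0.05 * rng.normal(size=(n_channels, n_samples))

# Exact DMD on the snapshot pair (X1, X2), with X2 ~ A X1.
X1, X2 = X[:, :-1], X[:, 1:]
U, s, Vh = np.linalg.svd(X1, full_matrices=False)
r = 6                                                   # truncation rank (assumption)
U, s, Vh = U[:, :r], s[:r], Vh[:r, :]
A_tilde = U.conj().T @ X2 @ Vh.conj().T @ np.diag(1.0 / s)
eigvals, W = np.linalg.eig(A_tilde)
modes = X2 @ Vh.conj().T @ np.diag(1.0 / s) @ W         # DMD modes

freqs = np.abs(np.angle(eigvals)) * fs / (2 * np.pi)    # mode frequencies in Hz
power = np.linalg.norm(modes, axis=0) ** 2              # simple per-mode "DMD spectrum"
for f, p in sorted(zip(freqs, power)):
    print(f"mode at {f:5.2f} Hz, power {p:8.2f}")
```

With this toy signal the dominant modes appear near 3 Hz and 10 Hz; band-wise power features like these are what the abstract then feeds to the classifiers.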
@InProceedings{9287720,\n  author = {M. Calis and R. Heusdens and R. C. Hendriks},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Privacy-Preserving Asynchronous Averaging Algorithm based on State Decomposition},\n  year = {2020},\n  pages = {2115-2119},\n  abstract = {Average consensus algorithms are used in many distributed systems such as distributed optimization, sensor fusion and the control of dynamic systems. Consensus algorithms converge through an explicit exchange of state variables. In some cases, however, the state variables are confidential. In this paper, a privacy-preserving asynchronous distributed average consensus method is proposed, which decomposes the initial values into two states; alpha states and beta states. These states are initialized such that their sum is twice the initial value. The alpha states are used to communicate with the other nodes, while the beta states are used internally. Although beta states are not shared, they are used in the update of the alpha states. Unlike differential privacy based methods, the proposed algorithm achieves the exact average consensus, while providing privacy to the initial values. Compared to the synchronous state decomposition algorithm, the convergence rate is improved without any privacy compromise. As the variances of coupling weights become infinitely large, the semi-honest adversary does not have any range to estimate the initial value of the nodes given that there is at least one coupling weight hidden from the adversary.},\n  keywords = {Couplings;Privacy;Signal processing algorithms;Consensus algorithm;Signal processing;Standards;Convergence;Privacy-preserving averaging;Distributed averaging;State decomposition;Asynchronous averaging},\n  doi = {10.23919/Eusipco47968.2020.9287720},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002115.pdf},\n}\n\n
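The asynchronous algorithm and its privacy analysis are the paper's contribution and are not reproduced here; the snippet below only simulates the underlying state-decomposition idea in a simplified synchronous form: each node splits its initial value into an alpha and a beta state summing to twice that value, only the alpha states are exchanged, and both states still converge to the exact network average because the total sum is preserved. The graph, the coupling weights and the step size are illustrative assumptions.

```python
import numpy as np

rng = np.random.default_rng(8)
n = 6                                            # ring network of 6 nodes (illustrative graph)
neighbors = {i: [(i - 1) % n, (i + 1) % n] for i in range(n)}

x0 = rng.uniform(0, 10, size=n)                  # confidential initial values
target = x0.mean()

# State decomposition: alpha + beta = 2 * x0, with an arbitrary (private) split.
alpha = x0 + rng.uniform(-5, 5, size=n)
beta = 2 * x0 - alpha
coupling = rng.uniform(0.5, 1.5, size=n)         # private alpha-beta coupling weights
eps = 0.15                                       # step size small enough for stability

for _ in range(400):
    new_alpha = alpha.copy()
    for i in range(n):
        # Only alpha states are shared with neighbours; beta stays internal to node i.
        new_alpha[i] += eps * (sum(alpha[j] - alpha[i] for j in neighbors[i])
                               + coupling[i] * (beta[i] - alpha[i]))
    beta = beta + eps * coupling * (alpha - beta)    # beta only interacts with its own alpha
    alpha = new_alpha

print("true average      :", target)
print("alpha states reach:", alpha)
```

Because the symmetric neighbour exchanges and the pairwise alpha-beta couplings both conserve the total sum, the fixed point is the true average of the original values, while a neighbour only ever observes the alpha trajectory.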
@InProceedings{9287721,\n  author = {K. Kobayashi and T. Toda},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Implementation of low-latency electrolaryngeal speech enhancement based on multi-task CLDNN},\n  year = {2020},\n  pages = {396-400},\n  abstract = {In this paper, we propose a low-latency speech enhancement technique for electrolaryngeal (EL) speech based on multi-task CLDNN. Although the EL speech can generate relatively intelligible speech, laryngectomees always suffer quality degradation of speech naturalness due to the mechanical excitation signals. To solve this problem, an EL speech enhancement technique based on CLDNN consisting of convolution, recurrent, and fully connected layers has been proposed. In this technique, an input feature vector of the EL speech is converted into several vocoder parameters such as excitation parameters and spectral parameters based on expert CLDNNs optimized for each feature. However, it is difficult to utilize speech communication because its bi-directional recurrent layers cause a large delay to wait for the end of the utterance. To address this issue, in this paper, we propose multi-task CLDNN with uni-directional recurrent layers for the low-latency EL speech enhancement. Moreover, to achieve comparable performance to the bi-directional CLDNN, we also propose the following techniques: 1) knowledge distillation, 2) data augmentation, and 3) phonetic regularization. The experimental results demonstrate that the proposed method makes it possible to achieve comparable objective results to the bi-directional CLDNN and outperform naturalness and speech intelligibility in the noisy condition.},\n  keywords = {Convolution;Vocoders;Bidirectional control;Speech enhancement;Phonetics;Planning;Noise measurement;electrolaryngeal speech;low-latency speech enhancement;voice conversion;deep neural network},\n  doi = {10.23919/Eusipco47968.2020.9287721},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000396.pdf},\n}\n\n
@InProceedings{9287722,\n  author = {P. Drozdowski and B. Prommegger and G. Wimmer and R. Schraml and C. Rathgeb and A. Uhl and C. Busch},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Demographic Bias: A Challenge for Fingervein Recognition Systems?},\n  year = {2020},\n  pages = {825-829},\n  abstract = {Recently, concerns regarding potential biases in the underlying algorithms of many automated systems (including biometrics) have been raised. In this context, a biased algorithm produces statistically different outcomes for different groups of individuals based on certain (often protected by antidiscrimination legislation) attributes such as sex and age. While several preliminary studies investigating this matter for facial recognition algorithms do exist, said topic has not yet been addressed for vascular biometric characteristics. Accordingly, in this paper, several popular types of recognition algorithms are benchmarked to ascertain the matter for fingervein recognition. The experimental evaluation suggests lack of bias for the tested algorithms, although future works with larger datasets are needed to validate and confirm those preliminary results.},\n  keywords = {Face recognition;Scalability;Signal processing algorithms;Legislation;Europe;Signal processing;Metadata;Biometrics;Fingervein Recognition;Bias},\n  doi = {10.23919/Eusipco47968.2020.9287722},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000825.pdf},\n}\n\n
\n
\n\n\n
\n Recently, concerns regarding potential biases in the underlying algorithms of many automated systems (including biometrics) have been raised. In this context, a biased algorithm produces statistically different outcomes for different groups of individuals based on certain attributes (often protected by antidiscrimination legislation) such as sex and age. While several preliminary studies investigating this matter for facial recognition algorithms do exist, the topic has not yet been addressed for vascular biometric characteristics. Accordingly, in this paper, several popular types of recognition algorithms are benchmarked to ascertain the matter for fingervein recognition. The experimental evaluation suggests a lack of bias for the tested algorithms, although future work with larger datasets is needed to validate and confirm these preliminary results.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A general framework for directional intra prediction with varying angle for video coding.\n \n \n \n \n\n\n \n Rath, G.; Racape, F.; Urban, F.; Leleannec, F.; Galpin, F.; and Naser, K.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1-4, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287723,\n  author = {G. Rath and F. Racape and F. Urban and F. Leleannec and F. Galpin and K. Naser},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A general framework for directional intra prediction with varying angle for video coding},\n  year = {2020},\n  pages = {1-4},\n  abstract = {Video coding standards such as H.264/AVC, HEVC, VVC, etc., all employ intra prediction where a directional prediction uses a single prediction direction inside a coding block. The underlying idea is that the object directionalities remain unchanged over the entire coding block. This is a simplistic model as the object directionalities over a block can change, especially if the block size is relatively large. In such a case, using a single prediction direction over a coding block will lead to smaller coding blocks after rate-distortion (R-D) optimization with side information for partitioning into smaller blocks. A better idea would be to allow the prediction direction to change gradually over the same coding block such that objects with slow changing directionalities can be modelled in larger blocks without splitting them. This would demand additional complexity and the side information required for model parameters. This paper proposes a general framework which supports this change of prediction direction inside a block and presents practical solutions for low complexity implementations. Simulation results with VVC test model (VTM 4) software show Luma BD-rate gain of 0.11% in All Intra (AI) configuration.},\n  keywords = {Simulation;Rate-distortion;Signal processing;Encoding;Software;Complexity theory;Standards;Intra prediction;coding unit;prediction mode;directional modes;prediction direction;VVC},\n  doi = {10.23919/Eusipco47968.2020.9287723},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000531.pdf},\n}\n\n
\n
\n\n\n
\n Video coding standards such as H.264/AVC, HEVC, and VVC all employ intra prediction, in which a directional prediction uses a single prediction direction inside a coding block. The underlying idea is that the object directionalities remain unchanged over the entire coding block. This is a simplistic model, as the object directionalities over a block can change, especially if the block size is relatively large. In such a case, using a single prediction direction over a coding block will lead to smaller coding blocks after rate-distortion (R-D) optimization, together with the side information for partitioning into smaller blocks. A better idea would be to allow the prediction direction to change gradually over the same coding block so that objects with slowly changing directionalities can be modelled in larger blocks without splitting them. This comes at the cost of additional complexity and the side information required for the model parameters. This paper proposes a general framework which supports this change of prediction direction inside a block and presents practical solutions for low-complexity implementations. Simulation results with the VVC test model (VTM 4) software show a luma BD-rate gain of 0.11% in the All Intra (AI) configuration.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n OFDM Receiver Using Deep Learning: Redundancy Issues.\n \n \n \n \n\n\n \n Mendonça, M. O. K.; and Diniz, P. S. R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1687-1691, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"OFDMPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287725,\n  author = {M. O. K. Mendonça and P. S. R. Diniz},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {OFDM Receiver Using Deep Learning: Redundancy Issues},\n  year = {2020},\n  pages = {1687-1691},\n  abstract = {To combat the inter-symbol interference (ISI) and the inter-block interference (IBI) caused by multi-path fading in orthogonal frequency-division multiplexing (OFDM) systems, it is usually recommended employing a cyclic prefix (CP) with length equal to the channel order. In some practical cases, however, the channel order is not exactly known. Looking for a balance between a full-sized CP and its absence, we investigate the redundancy issues and propose a minimum redundancy OFDM receiver using deep-learning (DL) tools. In this way, we can benefit from an improved reception performance, when compared with CP-free case, and also a better spectrum utilization when compared with the CP-OFDM case. Moreover, compared with the CP-free case, improved performance can be obtained even when the channel order is not available. Simulation results indicate that a good BER level can be achieved and the proposed technique can also be applied in other DL-based receivers.},\n  keywords = {OFDM;Simulation;Redundancy;Receivers;Interference;Tools;Frequency division multiplexing;deep-learning;channel-estimation;symboldetection;OFDM;minimum-redundancy},\n  doi = {10.23919/Eusipco47968.2020.9287725},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001687.pdf},\n}\n\n
\n
\n\n\n
\n To combat the inter-symbol interference (ISI) and the inter-block interference (IBI) caused by multi-path fading in orthogonal frequency-division multiplexing (OFDM) systems, it is usually recommended to employ a cyclic prefix (CP) whose length equals the channel order. In some practical cases, however, the channel order is not exactly known. Looking for a balance between a full-sized CP and its absence, we investigate the redundancy issues and propose a minimum-redundancy OFDM receiver using deep-learning (DL) tools. In this way, we can benefit from improved reception performance compared with the CP-free case, and also better spectrum utilization compared with the CP-OFDM case. Moreover, compared with the CP-free case, improved performance can be obtained even when the channel order is not available. Simulation results indicate that a good BER level can be achieved and that the proposed technique can also be applied to other DL-based receivers.\n
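The abstract hinges on the standard role of the cyclic prefix: if the CP is at least as long as the channel order, the linear channel acts circularly on each block and a one-tap frequency-domain equalizer suffices. The NumPy sketch below demonstrates that textbook behaviour with toy sizes; it is not the proposed deep-learning receiver.

```python
# Minimal CP-OFDM sketch over a short multipath channel; all sizes are illustrative.
import numpy as np

N, L_cp = 64, 8                                     # subcarriers, CP length
h = np.array([1.0, 0.5, 0.2])                       # channel impulse response (order 2 < L_cp)
bits = np.random.randint(0, 2, (2, N))
X = (2 * bits[0] - 1) + 1j * (2 * bits[1] - 1)      # QPSK symbols on the subcarriers
x = np.fft.ifft(X)                                  # time-domain OFDM symbol
tx = np.concatenate([x[-L_cp:], x])                 # prepend cyclic prefix
rx = np.convolve(tx, h)[:N + L_cp]                  # linear convolution with the channel
y = rx[L_cp:L_cp + N]                               # drop CP -> effectively circular convolution
X_hat = np.fft.fft(y) / np.fft.fft(h, N)            # one-tap frequency-domain equalizer
print(np.allclose(X_hat, X))                        # True: ISI/IBI removed when CP >= channel order
```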
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SURE Based Truncated Tensor Nuclear Norm Regularization for Low Rank Tensor Completion.\n \n \n \n \n\n\n \n Morison, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2001-2005, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"SUREPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287726,\n  author = {G. Morison},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {SURE Based Truncated Tensor Nuclear Norm Regularization for Low Rank Tensor Completion},\n  year = {2020},\n  pages = {2001-2005},\n  abstract = {Low rank tensor completion aims to recover the underlying low rank tensor obtained from its partial observations, this has a wide range of applications in Signal Processing and Machine Learning. A number of recent low rank tensor methods have successfully utilised the tensor singular value decomposition method with tensor nuclear norm minimisation via tensor singular value thresholding. This approach while proving to be effective has the potential issue that it may over or under shrink the singular values which will effect the overall performance. A truncated nuclear norm based method has been introduced which explicitly exploits the low rank assumption within the optimization in combination with tensor singular value thresholding. In this work the truncated nuclear norm approach is extended to incorporate a data driven approach based on Stein’s unbiased risk estimation method which efficiently thresholds the singular values. Experimental results in a colour image denoising problem demonstrate the efficiency and accuracy of the method.},\n  keywords = {Tensors;Image color analysis;Signal processing algorithms;Signal processing;Minimization;Optimization;Singular value decomposition;Truncated Tensor Nuclear Norm;Singular Value Shrinkage;SURE},\n  doi = {10.23919/Eusipco47968.2020.9287726},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002001.pdf},\n}\n\n
\n
\n\n\n
\n Low rank tensor completion aims to recover the underlying low rank tensor from its partial observations; this has a wide range of applications in signal processing and machine learning. A number of recent low rank tensor methods have successfully utilised the tensor singular value decomposition with tensor nuclear norm minimisation via tensor singular value thresholding. This approach, while effective, has the potential issue that it may over- or under-shrink the singular values, which will affect the overall performance. A truncated nuclear norm based method has been introduced which explicitly exploits the low rank assumption within the optimization, in combination with tensor singular value thresholding. In this work, the truncated nuclear norm approach is extended to incorporate a data-driven approach based on Stein's unbiased risk estimation, which efficiently thresholds the singular values. Experimental results on a colour image denoising problem demonstrate the efficiency and accuracy of the method.\n
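For readers unfamiliar with singular value thresholding, the matrix-case sketch below shows the basic shrinkage step that the tensor nuclear norm methods above generalise via the tensor SVD; the fixed threshold tau stands in for the SURE-driven, data-dependent choice made in the paper.

```python
# Matrix singular value soft-thresholding (proximal operator of the nuclear norm).
import numpy as np

def svt(Y, tau):
    """Shrink the singular values of Y by tau and reconstruct."""
    U, s, Vt = np.linalg.svd(Y, full_matrices=False)
    return U @ np.diag(np.maximum(s - tau, 0.0)) @ Vt

Y = np.random.randn(50, 40)
print(np.linalg.matrix_rank(svt(Y, tau=5.0)))   # shrinkage typically lowers the rank
```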
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Wing 3D Reconstruction by Constraining the Bundle Adjustment with Mechanical Limitations.\n \n \n \n \n\n\n \n Demoulin, Q.; Lefebvre-Albaret, F.; Basarab, A.; Kouamé, D.; and Tourneret, J. -.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 570-574, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"WingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287727,\n  author = {Q. Demoulin and F. Lefebvre-Albaret and A. Basarab and D. Kouamé and J. -Y. Tourneret},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Wing 3D Reconstruction by Constraining the Bundle Adjustment with Mechanical Limitations},\n  year = {2020},\n  pages = {570-574},\n  abstract = {The estimation of wing deformation is part of the certification of an aircraft. Wing deformation can be obtained from 3D reconstructions based on conventional multi-view photogrammetry. However, 3D reconstructions are generally degraded by the variable flight environments that degrade the quality of 2D images. This paper addresses this issue by taking benefit from a priori knowledge of the wing mechanical behaviour. Specifically, mechanical limits are considered to regularize the bundle adjustment within the photogrammetry reconstruction. The performance of the proposed approach is evaluated on a real case, using data acquired on an aircraft A350-900.},\n  keywords = {Bundle adjustment;Two dimensional displays;Estimation;Bending;Aircraft;Image reconstruction;Strain;Bundle adjustment;optimization under constraints;wing deformations;mechanical limits},\n  doi = {10.23919/Eusipco47968.2020.9287727},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000570.pdf},\n}\n\n
\n
\n\n\n
\n The estimation of wing deformation is part of the certification of an aircraft. Wing deformation can be obtained from 3D reconstructions based on conventional multi-view photogrammetry. However, 3D reconstructions generally suffer from the variable flight environments that degrade the quality of the 2D images. This paper addresses this issue by taking advantage of a priori knowledge of the wing's mechanical behaviour. Specifically, mechanical limits are used to regularize the bundle adjustment within the photogrammetric reconstruction. The performance of the proposed approach is evaluated on a real case, using data acquired on an A350-900 aircraft.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Breaking the Limits of Gamma-Ray Spectrometry by Exploiting Sparsity of Photon Arrivals.\n \n \n \n \n\n\n \n Sánchez El Ryfaie, S. C.; and Heredia Conde, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2075-2079, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"BreakingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287728,\n  author = {S. C. {Sánchez El Ryfaie} and M. {Heredia Conde}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Breaking the Limits of Gamma-Ray Spectrometry by Exploiting Sparsity of Photon Arrivals},\n  year = {2020},\n  pages = {2075-2079},\n  abstract = {State-of-the-art gamma-ray spectrometry suffers from physical limitations of the sensing system. Pulse pile-up and dead time losses lead to wrong measurements of both the number of pulses and their amplitude. Current techniques based on maxima detection, be in analog domain or after fine digitalization, offer limited resolution, bounded by the instrument response function (IRF). In this work we show how spectral sampling can help breaking the current limits of gamma-ray spectrometry. By means of a sparsity-based sensing model and using a fast and robust parametric spectral estimation method, we show that the unknown parameters, namely time of arrival and energy of the γ-photons, can be accurately estimated from few m ≥ 2K + 1 frequency samples. In the noiseless case parameter estimation is exact to machine precision and the method has virtually no resolution limit. A thorough experimental evaluation using an empirical IRF unveiled excellent performance, even in overpessimistic conditions.},\n  keywords = {Spectroscopy;Energy resolution;Gamma-rays;Sensors;Image reconstruction;Signal to noise ratio;Photonics;Gamma-ray spectrometry;sparsity;spectral analysis;superresolution;SPAD},\n  doi = {10.23919/Eusipco47968.2020.9287728},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002075.pdf},\n}\n\n
\n
\n\n\n
\n State-of-the-art gamma-ray spectrometry suffers from physical limitations of the sensing system. Pulse pile-up and dead-time losses lead to wrong measurements of both the number of pulses and their amplitudes. Current techniques based on maxima detection, be it in the analog domain or after fine digitization, offer limited resolution, bounded by the instrument response function (IRF). In this work we show how spectral sampling can help break the current limits of gamma-ray spectrometry. By means of a sparsity-based sensing model and using a fast and robust parametric spectral estimation method, we show that the unknown parameters, namely the time of arrival and energy of the γ-photons, can be accurately estimated from as few as m ≥ 2K + 1 frequency samples. In the noiseless case, parameter estimation is exact to machine precision and the method has virtually no resolution limit. A thorough experimental evaluation using an empirical IRF revealed excellent performance, even under overly pessimistic conditions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Extrapolation of Bandlimited Multidimensional Signals from Continuous Measurements.\n \n \n \n \n\n\n \n Frankenbach, C.; Martínez-Nuevo, P.; Møller, M.; and Kellermann, W.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2309-2313, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ExtrapolationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287729,\n  author = {C. Frankenbach and P. Martínez-Nuevo and M. Møller and W. Kellermann},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Extrapolation of Bandlimited Multidimensional Signals from Continuous Measurements},\n  year = {2020},\n  pages = {2309-2313},\n  abstract = {Conventional sampling and interpolation commonly rely on discrete measurements. In this paper, we develop a theoretical framework for extrapolation of signals in higher dimensions from knowledge of the continuous waveform on bounded high-dimensional regions. In particular, we propose an iterative method to reconstruct bandlimited multidimensional signals based on truncated versions of the original signal to bounded regions—herein referred to as continuous measurements. In the proposed method, the reconstruction is performed by iterating on a convex combination of region-limiting and bandlimiting operations. We show that this iteration consists of a firmly nonexpansive operator and prove strong convergence for multidimensional bandlimited signals. In order to improve numerical stability, we introduce a regularized iteration and show its connection to Tikhonov regularization. The method is illustrated numerically for two-dimensional signals.},\n  keywords = {Weight measurement;Extrapolation;Atmospheric measurements;Particle measurements;Iterative methods;Numerical stability;Convergence;Signal extrapolation;Papoulis’algorithm;signal reconstruction;Tikhonov regularization},\n  doi = {10.23919/Eusipco47968.2020.9287729},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002309.pdf},\n}\n\n
\n
\n\n\n
\n Conventional sampling and interpolation commonly rely on discrete measurements. In this paper, we develop a theoretical framework for extrapolation of signals in higher dimensions from knowledge of the continuous waveform on bounded high-dimensional regions. In particular, we propose an iterative method to reconstruct bandlimited multidimensional signals based on truncated versions of the original signal to bounded regions—herein referred to as continuous measurements. In the proposed method, the reconstruction is performed by iterating on a convex combination of region-limiting and bandlimiting operations. We show that this iteration consists of a firmly nonexpansive operator and prove strong convergence for multidimensional bandlimited signals. In order to improve numerical stability, we introduce a regularized iteration and show its connection to Tikhonov regularization. The method is illustrated numerically for two-dimensional signals.\n
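The iteration described above alternates between re-imposing the measured region and projecting onto the bandlimited subspace, in the spirit of the Papoulis-Gerchberg algorithm. The one-dimensional NumPy sketch below illustrates that alternation with a toy signal; the bandwidth, observed region, and iteration count are arbitrary choices, not the paper's setting.

```python
# 1-D region-limit / band-limit alternation (Papoulis-Gerchberg style); toy setup.
import numpy as np

N, band, known = 256, 10, slice(60, 196)            # length, bandwidth bins, observed region
t = np.arange(N)
x = np.cos(2 * np.pi * 3 * t / N) + 0.5 * np.sin(2 * np.pi * 7 * t / N)  # bandlimited signal

def bandlimit(y):
    Y = np.fft.fft(y)
    Y[band:N - band + 1] = 0                        # keep only the low-frequency bins
    return np.real(np.fft.ifft(Y))

y = np.zeros(N)
for _ in range(200):
    y[known] = x[known]                             # re-impose the measured region
    y = bandlimit(y)                                # project onto the bandlimited subspace

print(np.max(np.abs(y - x)))                        # extrapolation error shrinks with iterations
```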
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Grad-LAM: Visualization of Deep Neural Networks for Unsupervised Learning.\n \n \n \n \n\n\n \n Bartler, A.; Hinderer, D.; and Yang, B.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1407-1411, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Grad-LAM:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287730,\n  author = {A. Bartler and D. Hinderer and B. Yang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Grad-LAM: Visualization of Deep Neural Networks for Unsupervised Learning},\n  year = {2020},\n  pages = {1407-1411},\n  abstract = {Nowadays, the explainability of deep neural networks is an essential part of machine learning. In the last years, many methods were developed to visualize important regions of an input image for the decision of the deep neural network. Since almost all methods are designed for supervised trained models, we propose in this work a visualization technique for unsupervised trained autoencoders called Gradient-weighted Latent Activation Mapping (Grad-LAM). We adapt the idea of Grad-CAM and propose a novel weighting based on the knowledge of the autoencoder’s decoder. Our method will help to get insights into the highly nonlinear mapping of an input image to a latent space. We show that the visualization maps of Grad-LAM are meaningful on simple datasets like MNIST and the method is even applicable to real-world datasets like ImageNet.},\n  keywords = {Visualization;Neural networks;Europe;Machine learning;Signal processing;Decoding;Unsupervised learning;deep visualization;transparency;unsupervised learning;explainable artificial intelligence;Grad-LAM},\n  doi = {10.23919/Eusipco47968.2020.9287730},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001407.pdf},\n}\n\n
\n
\n\n\n
\n Nowadays, the explainability of deep neural networks is an essential part of machine learning. In recent years, many methods have been developed to visualize the regions of an input image that are important for the decision of a deep neural network. Since almost all of these methods are designed for models trained with supervision, we propose in this work a visualization technique for autoencoders trained without supervision, called Gradient-weighted Latent Activation Mapping (Grad-LAM). We adapt the idea of Grad-CAM and propose a novel weighting based on knowledge of the autoencoder's decoder. Our method helps to gain insight into the highly nonlinear mapping from an input image to the latent space. We show that the visualization maps of Grad-LAM are meaningful on simple datasets like MNIST and that the method is applicable even to real-world datasets like ImageNet.\n
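For context, the sketch below reproduces the Grad-CAM mechanism that Grad-LAM adapts: gradients of a scalar score are average-pooled into channel weights that combine the convolutional feature maps. The tiny network, the choice of score, and all shapes are placeholders; in Grad-LAM the weighting would instead come from the autoencoder's latent code and decoder.

```python
# Grad-CAM-style map from a toy conv layer; shapes and the score are illustrative.
import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(1, 8, 3, padding=1)
head = nn.Linear(8, 10)

x = torch.randn(1, 1, 28, 28)
feats = conv(x)                                    # (1, 8, 28, 28) feature maps
score = head(feats.mean(dim=(2, 3)))[0, 3]         # scalar score (e.g. one output unit)
grads, = torch.autograd.grad(score, feats)         # d score / d feature maps
weights = grads.mean(dim=(2, 3), keepdim=True)     # global-average-pool the gradients
cam = F.relu((weights * feats).sum(dim=1))         # weighted sum of maps, rectified
print(cam.shape)                                   # torch.Size([1, 28, 28])
```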
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Augmented Sigma-Point Lagrangian Splitting Method for Sparse Nonlinear State Estimation.\n \n \n \n \n\n\n \n Gao, R.; and Särkkä, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2090-2094, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AugmentedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287731,\n  author = {R. Gao and S. Särkkä},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Augmented Sigma-Point Lagrangian Splitting Method for Sparse Nonlinear State Estimation},\n  year = {2020},\n  pages = {2090-2094},\n  abstract = {Nonlinear state estimation using Bayesian filtering and smoothing is still an active area of research, especially when sparsity-inducing regularization is used. However, even the latest filtering and smoothing methods, such as unscented Kalman filters and smoothers and other sigma-point methods, lack a mechanism to promote sparsity in estimation process. Here, we formulate a sparse nonlinear state estimation problem as a generalized L1 -regularized minimization problem. Then, we develop an augmented sigma-point Lagrangian splitting method, which leads to iterated unscented, cubature, and Gauss-Hermite Kalman smoothers for computation in the primal space. The resulting method is demonstrated to outperform conventional methods in numerical experimentals.},\n  keywords = {Smoothing methods;Filtering;Europe;Signal processing;Minimization;Kalman filters;State estimation;Nonlinear state estimation;sparsity;sigma-point;Kalman filter;variable splitting},\n  doi = {10.23919/Eusipco47968.2020.9287731},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002090.pdf},\n}\n\n
\n
\n\n\n
\n Nonlinear state estimation using Bayesian filtering and smoothing is still an active area of research, especially when sparsity-inducing regularization is used. However, even the latest filtering and smoothing methods, such as unscented Kalman filters and smoothers and other sigma-point methods, lack a mechanism to promote sparsity in the estimation process. Here, we formulate a sparse nonlinear state estimation problem as a generalized L1-regularized minimization problem. Then, we develop an augmented sigma-point Lagrangian splitting method, which leads to iterated unscented, cubature, and Gauss-Hermite Kalman smoothers for computation in the primal space. The resulting method is demonstrated to outperform conventional methods in numerical experiments.\n
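As background for the sigma-point machinery mentioned above, the sketch below generates unscented-transform sigma points and propagates them through a toy nonlinearity; the state dimension, scaling parameter, and transition function are illustrative assumptions, and the paper's Lagrangian splitting is not shown.

```python
# Unscented-transform sigma points for a 2-D state; toy nonlinearity for illustration.
import numpy as np

def sigma_points(m, P, kappa=1.0):
    n = len(m)
    L = np.linalg.cholesky((n + kappa) * P)           # columns of L are the scaled sqrt directions
    pts = np.vstack([m, m + L.T, m - L.T])            # 2n + 1 sigma points
    w = np.full(2 * n + 1, 1.0 / (2 * (n + kappa)))
    w[0] = kappa / (n + kappa)
    return pts, w

f = lambda x: np.array([np.sin(x[0]), x[0] * x[1]])   # toy nonlinear transition
m, P = np.array([0.5, 1.0]), np.eye(2) * 0.1
pts, w = sigma_points(m, P)
ym = sum(wi * f(p) for wi, p in zip(w, pts))          # transformed mean estimate
print(ym)
```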
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n L0-Sparse DOA Estimation of Close Sources with Modeling Errors.\n \n \n \n \n\n\n \n Delmer, A.; Ferréol, A.; and Larzabal, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1861-1865, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"L0-SparsePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287732,\n  author = {A. Delmer and A. Ferréol and P. Larzabal},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {L0-Sparse DOA Estimation of Close Sources with Modeling Errors},\n  year = {2020},\n  pages = {1861-1865},\n  abstract = {In the field of array processing, Direction-Of-Arrival (DOA) estimation of close sources in the presence of modeling errors is a challenging problem. Indeed, the degradation of high-resolution methods on such scenario is well known and documented in the literature. This paper proposes an operational sparse L0-regularized method as an alternative. In sparse DOA estimation methods, the determination of the regularization parameter is a critical point, and it is generally empirically tuned. We first provide, in the presence of modeling errors, a theoretical statistical study to estimate the admissible range for this parameter in the presence of two incoming sources. For close sources, we therefore show that the admissible range is shortened. For an operational system, an off-line predetermination of the regularization parameter is required. We show that its selection is closely connected to the resolution limit for a given modeling error. Numerical simulations are presented to demonstrate the efficiency of the proposed implementation and its superiority in comparison with high-resolution methods.},\n  keywords = {Direction-of-arrival estimation;Estimation;Europe;Signal processing;Numerical simulation;Numerical models;Multiple signal classification;DOA estimation;sparse estimation;modeling errors;close sources},\n  doi = {10.23919/Eusipco47968.2020.9287732},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001861.pdf},\n}\n\n
\n
\n\n\n
\n In the field of array processing, Direction-Of-Arrival (DOA) estimation of close sources in the presence of modeling errors is a challenging problem. Indeed, the degradation of high-resolution methods in such scenarios is well known and documented in the literature. This paper proposes an operational sparse L0-regularized method as an alternative. In sparse DOA estimation methods, the determination of the regularization parameter is a critical point, and it is generally tuned empirically. We first provide a theoretical statistical study to estimate the admissible range for this parameter in the presence of modeling errors and two incoming sources. We show that, for close sources, the admissible range is shortened. For an operational system, an off-line predetermination of the regularization parameter is required. We show that its selection is closely connected to the resolution limit for a given modeling error. Numerical simulations are presented to demonstrate the efficiency of the proposed implementation and its superiority over high-resolution methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Informed Source Extraction based on Independent Vector Analysis using Eigenvalue Decomposition.\n \n \n \n \n\n\n \n Brendel, A.; and Kellermann, W.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 875-879, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"InformedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287733,\n  author = {A. Brendel and W. Kellermann},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Informed Source Extraction based on Independent Vector Analysis using Eigenvalue Decomposition},\n  year = {2020},\n  pages = {875-879},\n  abstract = {A desired acoustic source can very often only be observed in a mixture together with interfering sources in a real-life scenario. Hence, extracting the desired signal with a minimum amount of information about the geometric and acoustic setup is a problem of great interest. Recently, methods for blind source extraction based on Independent Vector Analysis (IVA) have been proposed. These algorithms are entirely blind, which prevents them from focussing on a specific source in the mixture. In this contribution, we guide the convergence of the extraction filter by a free-field prior within a Bayesian model towards the desired solution and use recently proposed update rules relying on the Eigenvalue Decomposition (EVD) for its optimization. The superiority of the presented update rules over a recently proposed state-of-the-art method is shown in experiments using measured Room Impulse Responses (RIRs).},\n  keywords = {Runtime;Signal processing algorithms;Signal processing;Eigenvalues and eigenfunctions;Acoustics;Optimization;Convergence;Independent Vector Analysis;Signal Extraction;Eigenvalue Decomposition;MM Algorithm},\n  doi = {10.23919/Eusipco47968.2020.9287733},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000875.pdf},\n}\n\n
\n
\n\n\n
\n A desired acoustic source can very often only be observed in a mixture together with interfering sources in a real-life scenario. Hence, extracting the desired signal with a minimum amount of information about the geometric and acoustic setup is a problem of great interest. Recently, methods for blind source extraction based on Independent Vector Analysis (IVA) have been proposed. These algorithms are entirely blind, which prevents them from focussing on a specific source in the mixture. In this contribution, we guide the convergence of the extraction filter by a free-field prior within a Bayesian model towards the desired solution and use recently proposed update rules relying on the Eigenvalue Decomposition (EVD) for its optimization. The superiority of the presented update rules over a recently proposed state-of-the-art method is shown in experiments using measured Room Impulse Responses (RIRs).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust Acoustic Scene Classification to Multiple Devices Using Maximum Classifier Discrepancy and Knowledge Distillation.\n \n \n \n \n\n\n \n Takeyama, S.; Komatsu, T.; Miyazaki, K.; Togami, M.; and Ono, S.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 36-40, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287734,\n  author = {S. Takeyama and T. Komatsu and K. Miyazaki and M. Togami and S. Ono},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust Acoustic Scene Classification to Multiple Devices Using Maximum Classifier Discrepancy and Knowledge Distillation},\n  year = {2020},\n  pages = {36-40},\n  abstract = {This paper proposes robust acoustic scene classification (ASC) to multiple devices using maximum classifier discrepancy (MCD) and knowledge distillation (KD). The proposed method employs domain adaptation to train multiple ASC models dedicated to each device and combines these multiple device-specific models using a KD technique into a multi-domain ASC model. For domain adaptation, the proposed method utilizes MCD to align class distributions that conventional DA for ASC methods have ignored. The multi-device robust ASC model is obtained by KD, combining the multiple device-specific ASC models by MCD that may have a lower performance for non-target devices. Our experiments show that the proposed MCD-based device-specific model improved ASC accuracy by at most 12.22% for target samples, and the proposed KD-based device-general model improved ASC accuracy by 2.13% on average for all devices.},\n  keywords = {Performance evaluation;Adaptation models;Image analysis;Europe;Signal processing;Acoustics;Faces;acoustic scene classification;domain adaptation;maximum classifier discrepancy;convolutional neural network;knowledge distillation},\n  doi = {10.23919/Eusipco47968.2020.9287734},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000036.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes acoustic scene classification (ASC) that is robust to multiple recording devices, using maximum classifier discrepancy (MCD) and knowledge distillation (KD). The proposed method employs domain adaptation to train multiple ASC models dedicated to each device and combines these multiple device-specific models into a multi-domain ASC model using a KD technique. For domain adaptation, the proposed method utilizes MCD to align class distributions, which conventional domain-adaptation methods for ASC have ignored. The multi-device robust ASC model is obtained by KD, which combines the multiple MCD-based device-specific ASC models, each of which may perform poorly on non-target devices. Our experiments show that the proposed MCD-based device-specific model improved ASC accuracy by at most 12.22% for target samples, and the proposed KD-based device-general model improved ASC accuracy by 2.13% on average for all devices.\n
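The sketch below shows the generic knowledge-distillation loss (temperature-softened teacher targets combined with hard-label cross-entropy) that underlies the KD step described above; the temperature and weighting are illustrative, and the multi-teacher, device-specific setup of the paper is not reproduced.

```python
# Generic knowledge-distillation loss; temperature and alpha are illustrative.
import torch
import torch.nn.functional as F

def kd_loss(student_logits, teacher_logits, labels, T=4.0, alpha=0.5):
    soft = F.kl_div(F.log_softmax(student_logits / T, dim=1),
                    F.softmax(teacher_logits / T, dim=1),
                    reduction="batchmean") * T * T        # soft-target term
    hard = F.cross_entropy(student_logits, labels)        # hard-label term
    return alpha * soft + (1 - alpha) * hard

loss = kd_loss(torch.randn(8, 10), torch.randn(8, 10), torch.randint(0, 10, (8,)))
print(loss.item())
```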
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Graphon Pooling in Graph Neural Networks.\n \n \n \n \n\n\n \n Parada-Mayorga, A.; Ruiz, L.; and Ribeiro, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 860-864, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"GraphonPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287735,\n  author = {A. Parada-Mayorga and L. Ruiz and A. Ribeiro},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Graphon Pooling in Graph Neural Networks},\n  year = {2020},\n  pages = {860-864},\n  abstract = {Graph neural networks (GNNs) have been used effectively in different applications involving the processing of signals on irregular structures modeled by graphs. Relying on the use of shift-invariant graph filters, GNNs extend the operation of convolution to graphs. However, the operations of pooling and sampling are still not clearly defined and the approaches proposed in the literature either modify the graph structure in a way that does not preserve its spectral properties, or require defining a policy for selecting which nodes to keep. In this work, we propose a new strategy for pooling and sampling on GNNs using graphons which preserves the spectral properties of the graph. To do so, we consider the graph layers in a GNN as elements of a sequence of graphs that converge to a graphon. In this way we have no ambiguity in the node labeling when mapping signals from one layer to the other and a spectral representation that is consistent throughout the layers. We evaluate this strategy in a synthetic and a real-world numerical experiment where we show that graphon pooling GNNs are less prone to overfitting and improve upon other pooling techniques, especially when the dimensionality reduction ratios between layers is large.},\n  keywords = {Dimensionality reduction;Uncertainty;Filtering;Convolution;Europe;Tools;Labeling;graph neural networks;pooling;graphons},\n  doi = {10.23919/Eusipco47968.2020.9287735},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000860.pdf},\n}\n\n
\n
\n\n\n
\n Graph neural networks (GNNs) have been used effectively in different applications involving the processing of signals on irregular structures modeled by graphs. Relying on the use of shift-invariant graph filters, GNNs extend the operation of convolution to graphs. However, the operations of pooling and sampling are still not clearly defined, and the approaches proposed in the literature either modify the graph structure in a way that does not preserve its spectral properties, or require defining a policy for selecting which nodes to keep. In this work, we propose a new strategy for pooling and sampling on GNNs using graphons which preserves the spectral properties of the graph. To do so, we consider the graph layers in a GNN as elements of a sequence of graphs that converge to a graphon. In this way, there is no ambiguity in the node labeling when mapping signals from one layer to the next, and the spectral representation is consistent throughout the layers. We evaluate this strategy in a synthetic and a real-world numerical experiment, where we show that graphon pooling GNNs are less prone to overfitting and improve upon other pooling techniques, especially when the dimensionality reduction ratios between layers are large.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Fixed Point Framework for Recovering Signals from Nonlinear Transformations.\n \n \n \n \n\n\n \n Combettes, P. L.; and Woodstock, Z. C.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2120-2124, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287736,\n  author = {P. L. Combettes and Z. C. Woodstock},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Fixed Point Framework for Recovering Signals from Nonlinear Transformations},\n  year = {2020},\n  pages = {2120-2124},\n  abstract = {We consider the problem of recovering a signal from nonlinear transformations, under convex constraints modeling a priori information. Standard feasibility and optimization methods are ill-suited to tackle this problem due to the nonlinearities. We show that, in many common applications, the transformation model can be associated with fixed point equations involving firmly nonexpansive operators. In turn, the recovery problem is reduced to a tractable common fixed point formulation, which is solved efficiently by a provably convergent, block-iterative algorithm. Applications to signal and image recovery are demonstrated. Inconsistent problems are also addressed.},\n  keywords = {Signal processing algorithms;Optimization methods;Europe;Signal processing;Mathematical model;Standards;firmly nonexpansive operator;fixed point model;nonlinear transformation;signal recovery},\n  doi = {10.23919/Eusipco47968.2020.9287736},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002120.pdf},\n}\n\n
\n
\n\n\n
\n We consider the problem of recovering a signal from nonlinear transformations, under convex constraints modeling a priori information. Standard feasibility and optimization methods are ill-suited to tackle this problem due to the nonlinearities. We show that, in many common applications, the transformation model can be associated with fixed-point equations involving firmly nonexpansive operators. In turn, the recovery problem is reduced to a tractable common fixed-point formulation, which is solved efficiently by a provably convergent, block-iterative algorithm. Applications to signal and image recovery are demonstrated. Inconsistent problems are also addressed.\n
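To make the fixed-point idea concrete, the toy sketch below iterates an averaged operator built from projections onto two convex sets (a box and a hyperplane); the sets and the relaxation factor are arbitrary stand-ins, and the paper's nonlinear measurement operators are not modelled.

```python
# Averaged fixed-point iteration over projections onto two convex sets; toy example.
import numpy as np

proj_box = lambda x: np.clip(x, -1.0, 1.0)                  # projection onto the box [-1, 1]^n
a, b = np.array([1.0, 2.0, -1.0]), 0.5
proj_plane = lambda x: x - (a @ x - b) / (a @ a) * a         # projection onto {x : a^T x = b}

x = np.array([3.0, -4.0, 2.0])
for _ in range(100):
    x = 0.5 * x + 0.5 * proj_plane(proj_box(x))              # relaxed (averaged) update

print(x, a @ x)                                              # point in the box with a^T x close to b
```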
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic Object Classification with Active Sonar using Unsupervised Anomaly Detection.\n \n \n \n \n\n\n \n Stinco, P.; De Magistris, G.; Tesei, A.; and LePage, K. D.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 46-50, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287737,\n  author = {P. Stinco and G. {De Magistris} and A. Tesei and K. D. LePage},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Automatic Object Classification with Active Sonar using Unsupervised Anomaly Detection},\n  year = {2020},\n  pages = {46-50},\n  abstract = {This work describes an unsupervised anomaly detection method for automatic contacts classification of an active sonar system. The proposed method refers to littoral, shallow water environments where there is a significant amount of clutter contacts from the seafloor and coastal reverberation. This huge amount of undesired contacts can be exploited to learn the {"}finger-print{"} of the clutter and then to identify the object related contacts as anomalies. The paper describes the proposed classification method and shows its performance with real data collected at sea using an echo-repeater as an artificial object.},\n  keywords = {geophysical signal processing;object detection;oceanographic techniques;sonar;target tracking;underwater sound;classification method;artificial object;automatic object classification;unsupervised anomaly detection method;automatic contacts classification;active sonar system;littoral water environments;shallow water environments;clutter contacts;seafloor reverberation;coastal reverberation;undesired contacts;object related contacts;clutter finger-print;Machine learning algorithms;Signal processing algorithms;Sonar;Classification algorithms;Clutter;Anomaly detection;Sonar detection;Unsupervised Learning;Anomaly Detection;Active Sonar;Sonar Contacts Classification},\n  doi = {10.23919/Eusipco47968.2020.9287737},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000046.pdf},\n}\n\n
\n
\n\n\n
\n This work describes an unsupervised anomaly detection method for automatic contact classification in an active sonar system. The proposed method targets littoral, shallow-water environments where there is a significant amount of clutter contacts from seafloor and coastal reverberation. This huge amount of undesired contacts can be exploited to learn the \"finger-print\" of the clutter and then to identify the object-related contacts as anomalies. The paper describes the proposed classification method and shows its performance with real data collected at sea using an echo-repeater as an artificial object.\n
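A generic way to realise the "learn the clutter, flag objects as anomalies" idea is a one-class detector fitted on clutter features only; the scikit-learn sketch below uses an isolation forest with synthetic features purely for illustration and is not the detector used in the paper.

```python
# Generic unsupervised anomaly detection on synthetic "contact" features.
import numpy as np
from sklearn.ensemble import IsolationForest

clutter = np.random.randn(500, 6)                           # many clutter-contact feature vectors
objects = np.random.randn(5, 6) + 4.0                       # a few object-like outliers
model = IsolationForest(contamination=0.01).fit(clutter)    # learn the clutter distribution only
print(model.predict(objects))                               # -1 marks anomalous (object) contacts
```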
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adversarial Signal Denoising with Encoder-Decoder Networks.\n \n \n \n \n\n\n \n Casas, L.; Klimmek, A.; Navab, N.; and Belagiannis, V.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1467-1471, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AdversarialPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287738,\n  author = {L. Casas and A. Klimmek and N. Navab and V. Belagiannis},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Adversarial Signal Denoising with Encoder-Decoder Networks},\n  year = {2020},\n  pages = {1467-1471},\n  abstract = {The presence of noise is common in signal processing regardless the signal type. Deep neural networks have shown good performance in noise removal, especially on the image domain. In this work, we consider deep neural networks as a denoising tool where our focus is on one dimensional signals. We introduce an encoder-decoder architecture to denoise signals, represented by a sequence of measurements. Instead of relying only on the standard reconstruction error to train the encoder-decoder network, we treat the task of denoising as distribution alignment between the clean and noisy signals. Then, we propose an adversarial learning formulation where the goal is to align the clean and noisy signal latent representation given that both signals pass through the encoder. In our approach, the discriminator has the role of detecting whether the latent representation comes from clean or noisy signals. We evaluate on electrocardiogram and motion signal denoising; and show better performance than learning-based and non-learning approaches.},\n  keywords = {decoding;electrocardiography;encoding;image reconstruction;learning (artificial intelligence);medical signal processing;neural nets;signal denoising;one dimensional signals;noisy signal latent representation;clean signal latent representation;adversarial learning formulation;standard reconstruction error;denoise signals;encoder-decoder architecture;image domain;noise removal;deep neural networks;signal type;signal processing;encoder-decoder network;adversarial signal denoising;motion signal;electrocardiogram;noisy signals;clean signals;Noise reduction;Neural networks;Tools;Signal processing;Signal denoising;Noise measurement;Task analysis;signal denoising;adversarial learning;electrocardiogram signal;motion signal},\n  doi = {10.23919/Eusipco47968.2020.9287738},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001467.pdf},\n}\n\n
\n
\n\n\n
\n The presence of noise is common in signal processing regardless of the signal type. Deep neural networks have shown good performance in noise removal, especially in the image domain. In this work, we consider deep neural networks as a denoising tool, with a focus on one-dimensional signals. We introduce an encoder-decoder architecture to denoise signals represented by a sequence of measurements. Instead of relying only on the standard reconstruction error to train the encoder-decoder network, we treat the task of denoising as distribution alignment between the clean and noisy signals. We then propose an adversarial learning formulation whose goal is to align the latent representations of clean and noisy signals, given that both signals pass through the encoder. In our approach, the discriminator has the role of detecting whether the latent representation comes from clean or noisy signals. We evaluate on electrocardiogram and motion signal denoising, and show better performance than both learning-based and non-learning approaches.\n
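The sketch below shows one possible reading of the training objective described above: a reconstruction loss on the decoder output plus an adversarial term that pushes noisy-signal latents to look "clean" to a latent-space discriminator. All dimensions, loss weights, and module definitions are placeholder assumptions.

```python
# Latent-alignment sketch: reconstruction loss plus a generator-side adversarial term.
import torch
import torch.nn as nn
import torch.nn.functional as F

enc = nn.Sequential(nn.Linear(256, 64), nn.ReLU())          # shared encoder
dec = nn.Sequential(nn.Linear(64, 256))                     # decoder
disc = nn.Sequential(nn.Linear(64, 1))                      # latent-space discriminator

clean, noisy = torch.randn(8, 256), torch.randn(8, 256)     # toy signal batches
z_noisy = enc(noisy)
recon = F.mse_loss(dec(z_noisy), clean)                     # reconstruct the clean target
adv = F.binary_cross_entropy_with_logits(
    disc(z_noisy), torch.ones(8, 1))                        # encourage "clean-looking" latents
total = recon + 0.1 * adv
print(total.item())
```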
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Memory Requirement Reduction of Deep Neural Networks for Field Programmable Gate Arrays Using Low-Bit Quantization of Parameters.\n \n \n \n \n\n\n \n Nicodemo, N.; Naithani, G.; Drossos, K.; Virtanen, T.; and Saletti, R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 466-470, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"MemoryPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287739,\n  author = {N. Nicodemo and G. Naithani and K. Drossos and T. Virtanen and R. Saletti},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Memory Requirement Reduction of Deep Neural Networks for Field Programmable Gate Arrays Using Low-Bit Quantization of Parameters},\n  year = {2020},\n  pages = {466-470},\n  abstract = {Effective employment of deep neural networks (DNNs) in mobile devices and embedded systems, like field programmable gate arrays, is hampered by requirements for memory and computational power. In this paper we propose a method that employs a non-uniform fixed-point quantization and a virtual bit shift (VBS) to improve the accuracy of the quantization of the DNN weights. We evaluate our method in a speech enhancement application, where a fully connected DNN is used to predict the clean speech spectrum from the input noisy speech spectrum. A DNN is optimized, its memory requirement is calculated, and its performance is evaluated using the short-time objective intelligibility (STOI) metric. The application of the low-bit quantization leads to a 50% reduction of the DNN memory requirement while the STOI performance drops only by 2.7%.},\n  keywords = {Quantization (signal);Neural networks;Memory management;Speech enhancement;Logic gates;Table lookup;Field programmable gate arrays;neural network quantization;memory footprint reduction;FPGA;hardware accelerators},\n  doi = {10.23919/Eusipco47968.2020.9287739},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000466.pdf},\n}\n\n
\n
\n\n\n
\n Effective employment of deep neural networks (DNNs) in mobile devices and embedded systems, like field programmable gate arrays, is hampered by requirements for memory and computational power. In this paper we propose a method that employs a non-uniform fixed-point quantization and a virtual bit shift (VBS) to improve the accuracy of the quantization of the DNN weights. We evaluate our method in a speech enhancement application, where a fully connected DNN is used to predict the clean speech spectrum from the input noisy speech spectrum. A DNN is optimized, its memory requirement is calculated, and its performance is evaluated using the short-time objective intelligibility (STOI) metric. The application of the low-bit quantization leads to a 50% reduction of the DNN memory requirement while the STOI performance drops only by 2.7%.\n
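As a baseline for the memory/accuracy trade-off discussed above, the sketch below applies plain uniform fixed-point quantization to a weight vector; the paper's non-uniform scheme with a virtual bit shift is more elaborate, so this is only the simplest reference point, with an arbitrary bit width.

```python
# Toy uniform fixed-point quantization of a weight vector; bit width is illustrative.
import numpy as np

def quantize(w, n_bits=4):
    scale = np.max(np.abs(w)) / (2 ** (n_bits - 1) - 1)     # symmetric quantization range
    q = np.round(w / scale).astype(np.int8)                 # stored low-bit integers
    return q, scale

w = np.random.randn(1000).astype(np.float32)
q, s = quantize(w, n_bits=4)
print(np.mean((w - q * s) ** 2))                            # quantization error (MSE)
```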
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Multiple-Input Multiple-Output Extension of the Mueller and Müller Timing Error Detector.\n \n \n \n\n\n \n Sadeghian, E. B.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1633-1637, Aug 2020. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287740,\n  author = {E. B. Sadeghian},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Multiple-Input Multiple-Output Extension of the Mueller and Müller Timing Error Detector},\n  year = {2020},\n  pages = {1633-1637},\n  abstract = {We present a multiple-input multiple-output (MIMO) extension of the timing error detector that was originally developed by Mueller and Müller for synchronization on pulse amplitude modulation over 1-D channels. This MIMO extension applies to joint detection of multiple signals with different timing offsets received over MIMO channels. We provide accurate theoretical expressions of the performance of the proposed scheme, and verify its applicability on a modern Two-Dimensional Magnetic Recording channel. Performance results show that the proposed timing error detector outperforms the conventional 1-D timing error detector that completely ignores the presence of crosstalk interference in MIMO channels.},\n  keywords = {Timing recovery;multiple-input multiple-output (MIMO) channel;joint detection;crosstalk interference;two-dimensional magnetic recording (TDMR)},\n  doi = {10.23919/Eusipco47968.2020.9287740},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n We present a multiple-input multiple-output (MIMO) extension of the timing error detector that was originally developed by Mueller and Müller for synchronization on pulse amplitude modulation over 1-D channels. This MIMO extension applies to joint detection of multiple signals with different timing offsets received over MIMO channels. We provide accurate theoretical expressions of the performance of the proposed scheme, and verify its applicability on a modern Two-Dimensional Magnetic Recording channel. Performance results show that the proposed timing error detector outperforms the conventional 1-D timing error detector that completely ignores the presence of crosstalk interference in MIMO channels.\n
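For reference, the classic single-channel Mueller and Müller detector that the paper extends computes a timing error from symbol-rate samples and symbol decisions, e[k] = a[k-1] y[k] - a[k] y[k-1]. The sketch below evaluates that expression for a few BPSK decisions; the MIMO extension itself is not reproduced.

```python
# Classic 1-D Mueller-and-Mueller timing error detector; toy BPSK samples.
import numpy as np

def mm_ted(y_prev, y_curr, a_prev, a_curr):
    """e[k] = a[k-1]*y[k] - a[k]*y[k-1] (decision-directed, one sample per symbol)."""
    return a_prev * y_curr - a_curr * y_prev

y = np.array([0.9, -1.1, 1.05, -0.95])           # received symbol-rate samples
a = np.sign(y)                                    # hard BPSK decisions
errors = [mm_ted(y[k - 1], y[k], a[k - 1], a[k]) for k in range(1, len(y))]
print(errors)                                     # error signal that drives the timing loop
```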
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automated Dysarthria Severity Classification Using Deep Learning Frameworks.\n \n \n \n \n\n\n \n Joshy, A. A.; and Rajan, R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 116-120, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AutomatedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287741,\n  author = {A. A. Joshy and R. Rajan},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Automated Dysarthria Severity Classification Using Deep Learning Frameworks},\n  year = {2020},\n  pages = {116-120},\n  abstract = {Dysarthria is a neuro-motor speech disorder that renders speech unintelligible, in proportional to its severity. Assessing the severity level of dysarthria, apart from being a diagnostic step to evaluate the patient's improvement, is also capable of aiding automatic dysarthric speech recognition systems. In this paper, a detailed study on dysarthia severity classification using various deep learning architectural choices, namely deep neural network (DNN), convolutional neural network (CNN) and long short-term memory network (LSTM) is carried out. Mel frequency cepstral coefficients (MFCCs) and its derivatives are used as features. Performance of these models are compared with a baseline support vector machine (SVM) classifier using the UA-Speech corpus and the TORGO database. The highest classification accuracy of 96.18% and 93.24% are reported for TORGO and UA-Speech respectively. Detailed analysis on performance of these models shows that a proper choice of a deep learning architecture can ensure better performance than the conventionally used SVM classifier.},\n  keywords = {Deep learning;Support vector machines;Neural networks;Speech recognition;Signal processing;Reliability;Mel frequency cepstral coefficient;dysarthria;intelligibility;automatic assessment;deep learning},\n  doi = {10.23919/Eusipco47968.2020.9287741},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000116.pdf},\n}\n\n
\n
\n\n\n
\n Dysarthria is a neuro-motor speech disorder that renders speech unintelligible in proportion to its severity. Assessing the severity level of dysarthria, apart from being a diagnostic step to evaluate the patient's improvement, is also capable of aiding automatic dysarthric speech recognition systems. In this paper, a detailed study on dysarthria severity classification using various deep learning architectural choices, namely deep neural network (DNN), convolutional neural network (CNN), and long short-term memory network (LSTM), is carried out. Mel frequency cepstral coefficients (MFCCs) and their derivatives are used as features. The performance of these models is compared with a baseline support vector machine (SVM) classifier using the UA-Speech corpus and the TORGO database. The highest classification accuracies of 96.18% and 93.24% are reported for TORGO and UA-Speech, respectively. Detailed analysis of the performance of these models shows that a proper choice of deep learning architecture can ensure better performance than the conventionally used SVM classifier.\n
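A minimal version of the SVM baseline mentioned above could pool MFCC statistics per utterance and feed them to an RBF-kernel SVM, as sketched below; the file list, labels, sampling rate, and pooling choice are placeholders rather than the authors' exact pipeline.

```python
# Sketch of an MFCC + SVM severity baseline; paths, labels and pooling are placeholders.
import numpy as np
import librosa
from sklearn.svm import SVC

def mfcc_features(path, n_mfcc=13):
    y, sr = librosa.load(path, sr=16000)
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)          # (n_mfcc, frames)
    return np.concatenate([mfcc.mean(axis=1), mfcc.std(axis=1)])    # utterance-level vector

# X = np.vstack([mfcc_features(f) for f in wav_files])              # wav_files: list of audio paths
# clf = SVC(kernel="rbf").fit(X, severity_labels)                   # severity_labels: class per file
# print(clf.score(X_test, y_test))                                  # held-out accuracy
```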
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Gaussian Processes Regression with Joint Learning of Precision Matrix.\n \n \n \n \n\n\n \n Miao, X.; Jiang, A.; and Xu, N.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1437-1441, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"GaussianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287742,\n  author = {X. Miao and A. Jiang and N. Xu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Gaussian Processes Regression with Joint Learning of Precision Matrix},\n  year = {2020},\n  pages = {1437-1441},\n  abstract = {In the traditional Gaussian process regression (GPR), covariance matrix is modeled by a kernel function, which is dominated by a set of hyper-parameters. However, the estimation of such hyper-parameters are generally a highly nonconvex optimization problem, which imposes computational difficulties and undermines the practical performance. To improve the prediction accuracy, we propose in this paper a novel GPR algorithm that introduces the estimate of precision matrix of target values. Covariance and precision matrices are coupled by a regularized approximation error term. In practice, the precision matrix and hyper-parameters are trained by the alternating optimization. Experimental results demonstrate that the performance of the joint-learning formulation is superior to traditional GPR.},\n  keywords = {Signal processing algorithms;Gaussian processes;Approximation algorithms;Approximation error;Covariance matrices;Task analysis;Optimization;Alternating optimization;Bayesian;Gaussian process regression (GPR);joint learning;kernel;precision matrix},\n  doi = {10.23919/Eusipco47968.2020.9287742},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001437.pdf},\n}\n\n
\n
\n\n\n
\n In traditional Gaussian process regression (GPR), the covariance matrix is modeled by a kernel function governed by a set of hyper-parameters. However, estimating such hyper-parameters is generally a highly nonconvex optimization problem, which imposes computational difficulties and undermines practical performance. To improve prediction accuracy, we propose in this paper a novel GPR algorithm that introduces an estimate of the precision matrix of the target values. The covariance and precision matrices are coupled by a regularized approximation-error term. In practice, the precision matrix and hyper-parameters are trained by alternating optimization. Experimental results demonstrate that the joint-learning formulation outperforms traditional GPR.\n
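A hedged sketch of the conventional GPR baseline that the abstract contrasts against (the proposed joint precision-matrix learning is not reproduced here), using scikit-learn; the toy data and kernel choice are assumptions.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

rng = np.random.default_rng(0)
X = rng.uniform(-3, 3, size=(60, 1))
y = np.sin(X).ravel() + 0.1 * rng.standard_normal(60)

# Kernel hyper-parameters (length scale, noise level) are fitted by maximizing
# the log marginal likelihood -- the nonconvex optimization step the abstract refers to.
kernel = 1.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.01)
gpr = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=5).fit(X, y)
mean, std = gpr.predict([[0.5]], return_std=True)
print(mean, std, gpr.kernel_)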
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Analyzing the Potential of Pre-Trained Embeddings for Audio Classification Tasks.\n \n \n \n \n\n\n \n Grollmisch, S.; Cano, E.; Kehling, C.; and Taenzer, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 790-794, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AnalyzingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287743,\n  author = {S. Grollmisch and E. Cano and C. Kehling and M. Taenzer},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Analyzing the Potential of Pre-Trained Embeddings for Audio Classification Tasks},\n  year = {2020},\n  pages = {790-794},\n  abstract = {In the context of deep learning, the availability of large amounts of training data can play a critical role in a model’s performance. Recently, several models for audio classification have been pre-trained in a supervised or self-supervised fashion on large datasets to learn complex feature representations, socalled embeddings. These embeddings can then be extracted from smaller datasets and used to train subsequent classifiers. In the field of audio event detection (AED) for example, classifiers using these features have achieved high accuracy without the need of additional domain knowledge. This paper evaluates three state-of-the-art embeddings on six audio classification tasks from the fields of music information retrieval and industrial sound analysis. The embeddings are systematically evaluated by analyzing the influence on classification accuracy of classifier architecture, fusion methods for file-wise predictions, amount of training data, and initial training domain of the embeddings. To better understand the impact of the pre-training step, results are also compared with those acquired with models trained from scratch. On average, the OpenL3 embeddings performed best with a linear SVM classifier. For a reduced amount of training examples, OpenL3 outperforms the initial baseline.},\n  keywords = {Training;Support vector machines;Training data;Signal processing;Feature extraction;Task analysis;Music information retrieval;audio classification;transfer learning;audio embeddings;industrial sound analysis;music information retrieval},\n  doi = {10.23919/Eusipco47968.2020.9287743},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000790.pdf},\n}\n\n
\n
\n\n\n
\n In the context of deep learning, the availability of large amounts of training data can play a critical role in a model’s performance. Recently, several models for audio classification have been pre-trained in a supervised or self-supervised fashion on large datasets to learn complex feature representations, so-called embeddings. These embeddings can then be extracted from smaller datasets and used to train subsequent classifiers. In the field of audio event detection (AED), for example, classifiers using these features have achieved high accuracy without the need for additional domain knowledge. This paper evaluates three state-of-the-art embeddings on six audio classification tasks from the fields of music information retrieval and industrial sound analysis. The embeddings are systematically evaluated by analyzing the influence on classification accuracy of the classifier architecture, fusion methods for file-wise predictions, the amount of training data, and the initial training domain of the embeddings. To better understand the impact of the pre-training step, results are also compared with those acquired with models trained from scratch. On average, the OpenL3 embeddings performed best with a linear SVM classifier. For a reduced amount of training examples, OpenL3 outperforms the initial baseline.\n
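An illustrative sketch of the best-performing combination named in the abstract (OpenL3 embeddings, mean-pooled per file, fed to a linear SVM), assuming the openl3 Python package; the synthetic audio and class labels are placeholders for real recordings.

import numpy as np
import openl3
from sklearn.svm import LinearSVC

def file_embedding(audio, sr):
    # Frame-wise OpenL3 embeddings, mean-pooled into one vector per file (file-wise fusion).
    emb, _ = openl3.get_audio_embedding(audio, sr, content_type="music",
                                        embedding_size=512)
    return emb.mean(axis=0)

# Toy stand-in for real recordings; in practice load each file with soundfile or librosa.
rng = np.random.default_rng(0)
X = np.stack([file_embedding(rng.standard_normal(48000), 48000) for _ in range(6)])
y = [0, 1, 0, 1, 0, 1]                              # hypothetical class labels
clf = LinearSVC(C=1.0, max_iter=5000).fit(X, y)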
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Augmentation Methods on Monophonic Audio for Instrument Classification in Polyphonic Music.\n \n \n \n \n\n\n \n Kratimenos, A.; Avramidis, K.; Garoufis, C.; Zlatintsi, A.; and Maragos, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 156-160, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AugmentationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287745,\n  author = {A. Kratimenos and K. Avramidis and C. Garoufis and A. Zlatintsi and P. Maragos},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Augmentation Methods on Monophonic Audio for Instrument Classification in Polyphonic Music},\n  year = {2020},\n  pages = {156-160},\n  abstract = {Instrument classification is one of the fields in Music Information Retrieval (MIR) that has attracted a lot of research interest. However, the majority of that is dealing with monophonic music, while efforts on polyphonic material mainly focus on predominant instrument recognition. In this paper, we propose an approach for instrument classification in polyphonic music from predominantly monophonic data that involves performing data augmentation by mixing different audio segments. A variety of data augmentation techniques focusing on different sonic aspects, such as overlaying audio segments of the same genre, as well as pitch and tempo-based synchronization, are explored. We utilize Convolutional Neural Networks for the classification task, comparing shallow to deep network architectures. We further investigate the usage of a combination of the above classifiers, each trained on a single augmented dataset. An ensemble of VGG-like classifiers, trained on non-augmented, pitch-synchronized, tempo-synchronized and genre-similar excerpts, respectively, yields the best results, achieving slightly above 80% in terms of label ranking average precision (LRAP) in the IRMAS test set.},\n  keywords = {Instruments;Focusing;Signal processing;Network architecture;Synchronization;Task analysis;Music information retrieval;instrument classification;audio mixing;data augmentation;deep learning;ensemble learning},\n  doi = {10.23919/Eusipco47968.2020.9287745},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000156.pdf},\n}\n\n
\n
\n\n\n
\n Instrument classification is one of the fields in Music Information Retrieval (MIR) that has attracted a lot of research interest. However, the majority of that is dealing with monophonic music, while efforts on polyphonic material mainly focus on predominant instrument recognition. In this paper, we propose an approach for instrument classification in polyphonic music from predominantly monophonic data that involves performing data augmentation by mixing different audio segments. A variety of data augmentation techniques focusing on different sonic aspects, such as overlaying audio segments of the same genre, as well as pitch and tempo-based synchronization, are explored. We utilize Convolutional Neural Networks for the classification task, comparing shallow to deep network architectures. We further investigate the usage of a combination of the above classifiers, each trained on a single augmented dataset. An ensemble of VGG-like classifiers, trained on non-augmented, pitch-synchronized, tempo-synchronized and genre-similar excerpts, respectively, yields the best results, achieving slightly above 80% in terms of label ranking average precision (LRAP) in the IRMAS test set.\n
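A toy sketch of the core augmentation idea described here (overlaying monophonic excerpts so the label set becomes the union of their instruments); gain handling and normalization are assumptions, not the paper's exact procedure.

import numpy as np

def mix_excerpts(x1, labels1, x2, labels2, level_db=0.0):
    """Overlay two mono excerpts at a given level difference and merge their labels."""
    n = min(len(x1), len(x2))
    gain = 10.0 ** (-level_db / 20.0)
    mix = x1[:n] + gain * x2[:n]
    mix /= max(np.max(np.abs(mix)), 1e-9)       # peak-normalize to avoid clipping
    return mix, sorted(set(labels1) | set(labels2))

mix, labels = mix_excerpts(np.random.randn(16000), ["guitar"],
                           np.random.randn(16000), ["piano"], level_db=3.0)
print(labels)   # ['guitar', 'piano']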
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Spectral Efficiency for Massive MIMO Multi-Relay NOMA Systems with CSI errors.\n \n \n \n \n\n\n \n Mandawaria, V.; Sharma, E.; and Budhiraja, R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1648-1652, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"SpectralPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287746,\n  author = {V. Mandawaria and E. Sharma and R. Budhiraja},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Spectral Efficiency for Massive MIMO Multi-Relay NOMA Systems with CSI errors},\n  year = {2020},\n  pages = {1648-1652},\n  abstract = {We consider a multiple-relay-aided massive multi-input multi-output (MIMO) non-orthogonal multiple access (NOMA) system. We practically model this system by considering channel estimation error, and the consequent imperfect successive interference cancellation, which the existing literature has ignored. We derive a novel lower bound for the ergodic spectral efficiency of this multi-relay NOMA system, and characterize its degradation with respect to orthogonal multiple access (OMA), due to the aforementioned artifacts. We crucially show that a multi-relay massive MIMO NOMA system requires accurate channel information to outperform OMA.},\n  keywords = {Degradation;NOMA;Interference cancellation;Spectral efficiency;Silicon carbide;Massive MIMO;Signal processing;Massive antennas;relays;spectral efficiency},\n  doi = {10.23919/Eusipco47968.2020.9287746},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001648.pdf},\n}\n\n
\n
\n\n\n
\n We consider a multiple-relay-aided massive multi-input multi-output (MIMO) non-orthogonal multiple access (NOMA) system. We practically model this system by considering channel estimation error, and the consequent imperfect successive interference cancellation, which the existing literature has ignored. We derive a novel lower bound for the ergodic spectral efficiency of this multi-relay NOMA system, and characterize its degradation with respect to orthogonal multiple access (OMA), due to the aforementioned artifacts. We crucially show that a multi-relay massive MIMO NOMA system requires accurate channel information to outperform OMA.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Online Graph-Based Change Point Detection in Multiband Image Sequences.\n \n \n \n \n\n\n \n Borsoi, R. A.; Richard, C.; Ferrari, A.; Chen, J.; and Bermudez, J. C. M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 850-854, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"OnlinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287747,\n  author = {R. A. Borsoi and C. Richard and A. Ferrari and J. Chen and J. C. M. Bermudez},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Online Graph-Based Change Point Detection in Multiband Image Sequences},\n  year = {2020},\n  pages = {850-854},\n  abstract = {The automatic detection of changes or anomalies between multispectral and hyperspectral images collected at different time instants is an active and challenging research topic. To effectively perform change-point detection in multitemporal images, it is important to devise techniques that are computationally efficient for processing large datasets, and that do not require knowledge about the nature of the changes. In this paper, we introduce a novel online framework for detecting changes in multitemporal remote sensing images. Acting on neighboring spectra as adjacent vertices in a graph, this algorithm focuses on anomalies concurrently activating groups of vertices corresponding to compact, well-connected and spectrally homogeneous image regions. It fully benefits from recent advances in graph signal processing to exploit the characteristics of the data that lie on irregular supports. Moreover, the graph is estimated directly from the images using superpixel decomposition algorithms. The learning algorithm is scalable in the sense that it is efficient and spatially distributed. Experiments illustrate the detection and localization performance of the method.},\n  keywords = {Change detection algorithms;Signal processing algorithms;Europe;Signal processing;Computational efficiency;Image sequences;Hyperspectral imaging;Hyperspectral images;change detection;graphs;multitemporal;superpixels},\n  doi = {10.23919/Eusipco47968.2020.9287747},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000850.pdf},\n}\n\n
\n
\n\n\n
\n The automatic detection of changes or anomalies between multispectral and hyperspectral images collected at different time instants is an active and challenging research topic. To effectively perform change-point detection in multitemporal images, it is important to devise techniques that are computationally efficient for processing large datasets, and that do not require knowledge about the nature of the changes. In this paper, we introduce a novel online framework for detecting changes in multitemporal remote sensing images. Acting on neighboring spectra as adjacent vertices in a graph, this algorithm focuses on anomalies concurrently activating groups of vertices corresponding to compact, well-connected and spectrally homogeneous image regions. It fully benefits from recent advances in graph signal processing to exploit the characteristics of the data that lie on irregular supports. Moreover, the graph is estimated directly from the images using superpixel decomposition algorithms. The learning algorithm is scalable in the sense that it is efficient and spatially distributed. Experiments illustrate the detection and localization performance of the method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Overparametrized Deep Encoder-Decoder Schemes for Inputs and Outputs Defined over Graphs.\n \n \n \n \n\n\n \n Rey, S.; Tenorio, V.; Rozada, S.; Martino, L.; and Marques, A. G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 855-859, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"OverparametrizedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287748,\n  author = {S. Rey and V. Tenorio and S. Rozada and L. Martino and A. G. Marques},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Overparametrized Deep Encoder-Decoder Schemes for Inputs and Outputs Defined over Graphs},\n  year = {2020},\n  pages = {855-859},\n  abstract = {There is a growing interest in the joint application of graph signal processing and neural networks (NNs) for learning problems involving complex, non-linear and/or non-Euclidean datasets. This paper proposes an overparametrized graph-aware NN architecture able to represent a non-linear mapping between two graph signals, each defined on a different graph. The considered architecture is based on two NNs and a common latent space. Specifically, we consider an overparametrized graph-aware NN encoder which maps the input graph signal to a latent space, followed by an overparametrized graph-aware NN decoder that transforms the latent representation to the output graph signal. The parameters of the two NNs are jointly tuned by applying the back-propagation algorithm with an early stopping procedure to prevent overfitting. The overall architecture can be interpreted as an overparametrized graph-aware encoder/decoder NN operating over two different graphs. A key element in the encoder (decoding) scheme is the consideration of a nested collection of parametric graph-aware (down-) up-sampling operators, whose design will be studied in detail. We show by numerical simulations that the proposed scheme outperforms the corresponding benchmark NN architectures, previously introduced in the literature.},\n  keywords = {Signal processing algorithms;Europe;Artificial neural networks;Transforms;Signal processing;Numerical simulation;Decoding;Graph Neural Networks;Graph Autoencoders;Non-Euclidean Data;Geometric Deep Learning},\n  doi = {10.23919/Eusipco47968.2020.9287748},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000855.pdf},\n}\n\n
\n
\n\n\n
\n There is a growing interest in the joint application of graph signal processing and neural networks (NNs) for learning problems involving complex, non-linear and/or non-Euclidean datasets. This paper proposes an overparametrized graph-aware NN architecture able to represent a non-linear mapping between two graph signals, each defined on a different graph. The considered architecture is based on two NNs and a common latent space. Specifically, we consider an overparametrized graph-aware NN encoder which maps the input graph signal to a latent space, followed by an overparametrized graph-aware NN decoder that transforms the latent representation to the output graph signal. The parameters of the two NNs are jointly tuned by applying the back-propagation algorithm with an early stopping procedure to prevent overfitting. The overall architecture can be interpreted as an overparametrized graph-aware encoder/decoder NN operating over two different graphs. A key element in the encoder (decoding) scheme is the consideration of a nested collection of parametric graph-aware (down-) up-sampling operators, whose design will be studied in detail. We show by numerical simulations that the proposed scheme outperforms the corresponding benchmark NN architectures, previously introduced in the literature.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Accelerated 3D Image Reconstruction for Resource Constrained Systems.\n \n \n \n \n\n\n \n Aßmann, A.; Wu, Y.; Stewart, B.; and Wallace, A. M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 565-569, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AcceleratedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287749,\n  author = {A. Aßmann and Y. Wu and B. Stewart and A. M. Wallace},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Accelerated 3D Image Reconstruction for Resource Constrained Systems},\n  year = {2020},\n  pages = {565-569},\n  abstract = {We demonstrate an efficient and accelerated implementation of a parallel sparse depth reconstruction framework using compressed sensing (CS) techniques. Recent work suggests that CS can be split up into smaller sub problems. This allows us to efficiently pre-compute important components of the LU decomposition and subsequent linear algebra to solve a set of linear equations found in algorithms such as the alternating direction method of multipliers (ADMM). For comparison, a fully discrete least square reconstruction method is also presented.We also investigate how reduced precision is leveraged to reduce the number of logic units in field-programmable gate array (FPGA) implementations for such sparse imaging systems. We show that the amount of logic units, memory requirements and power consumption is reduced significantly by over 70% with minimal impact on the quality of reconstruction. This demonstrates the feasibility of novel high resolution, low power and high frame rate light detection and ranging (LiDAR) depth imagers based on sparse illumination.},\n  keywords = {Image resolution;Imaging;Lighting;Bandwidth;Image reconstruction;Compressed sensing;Field programmable gate arrays;Approximate Computing;FPGA;LiDAR;Compressed Sensing;Parallelization},\n  doi = {10.23919/Eusipco47968.2020.9287749},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000565.pdf},\n}\n\n
\n
\n\n\n
\n We demonstrate an efficient and accelerated implementation of a parallel sparse depth reconstruction framework using compressed sensing (CS) techniques. Recent work suggests that CS can be split up into smaller subproblems. This allows us to efficiently pre-compute important components of the LU decomposition and the subsequent linear algebra to solve a set of linear equations found in algorithms such as the alternating direction method of multipliers (ADMM). For comparison, a fully discrete least-squares reconstruction method is also presented. We also investigate how reduced precision can be leveraged to reduce the number of logic units in field-programmable gate array (FPGA) implementations for such sparse imaging systems. We show that the number of logic units, memory requirements and power consumption are reduced significantly, by over 70%, with minimal impact on the quality of reconstruction. This demonstrates the feasibility of novel high-resolution, low-power and high-frame-rate light detection and ranging (LiDAR) depth imagers based on sparse illumination.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Arbitrary Length Reducible and Irreducible Perfect Gaussian Integer Sequences with A Pre-Given Gaussian Integer.\n \n \n \n \n\n\n \n Pei, S. -.; and Chang, K. -.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2274-2278, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ArbitraryPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287751,\n  author = {S. -C. Pei and K. -W. Chang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Arbitrary Length Reducible and Irreducible Perfect Gaussian Integer Sequences with A Pre-Given Gaussian Integer},\n  year = {2020},\n  pages = {2274-2278},\n  abstract = {In this paper we will discuss two construction schemes on arbitrary length perfect Gaussian integer sequence (PGIS) with a pre-given constant. The first scheme uses geometric series, which brings reducible PGIS. We will also discuss the irreducible case and find an easy way to obtain PGIS for even length. Moreover, the same concept can be applied for some odd length N = 3p by using Ramanujan’s Sum. Concrete examples are provided.},\n  keywords = {Europe;Signal processing;Discrete Fourier transform;perfect Gaussian integer sequences;zero autocorrelation;Ramanujan’s Sum},\n  doi = {10.23919/Eusipco47968.2020.9287751},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002274.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we discuss two construction schemes for arbitrary-length perfect Gaussian integer sequences (PGISs) with a pre-given constant. The first scheme uses geometric series, which yields reducible PGISs. We also discuss the irreducible case and find an easy way to obtain PGISs of even length. Moreover, the same concept can be applied for some odd lengths N = 3p by using Ramanujan’s Sum. Concrete examples are provided.\n
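A small numerical check, not part of the paper's construction: a sequence is perfect (zero out-of-phase periodic autocorrelation) exactly when the magnitude of its DFT is constant, which can be verified directly.

import numpy as np

def is_perfect(s, tol=1e-9):
    s = np.asarray(s, dtype=complex)
    # Periodic autocorrelation computed via the DFT of |S(k)|^2.
    r = np.fft.ifft(np.abs(np.fft.fft(s)) ** 2)
    return bool(np.all(np.abs(r[1:]) < tol))     # zero everywhere except zero lag

print(is_perfect([1, 1, 1, -1]))   # True: a simple perfect integer sequence of length 4
print(is_perfect([1, 2, 3, 4]))    # False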
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust IIR Digital Filter Sharpening.\n \n \n \n \n\n\n \n Cain, G. D.; Yardim, A.; and Harris, F. J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2329-2333, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287752,\n  author = {G. D. Cain and A. Yardim and F. J. Harris},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust IIR Digital Filter Sharpening},\n  year = {2020},\n  pages = {2329-2333},\n  abstract = {Digital filter sharpening aims to improve the performance of a prototype filter by cascading it with a {"}partial compensator{"} incorporating multiple uses of that same coefficient set. Although the Kaiser & Hamming FIR sharpening structure has long enjoyed popularity, there has been a widespread mistaken impression that no corresponding structure exists for IIR sharpening. In this paper we review the features of three available methods and also introduce a new sharpener that operates with no restrictions on phase conditions for prototypes and without requiring root-finding. The use of conjugate-reversal of numerator coefficient vectors, along with prototype pairing and allpass filtering, combine to facilitate robust internal sharpener delay alignment. Performance of the new structure is highlighted here in one complex and three real filter sharpening examples.},\n  keywords = {Matched filters;Finite impulse response filters;Prototypes;IIR filters;Signal processing;Passband;Poles and zeros;IIR filter sharpening;complex filters;frequency response;sharpening polynomials},\n  doi = {10.23919/Eusipco47968.2020.9287752},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002329.pdf},\n}\n\n
\n
\n\n\n
\n Digital filter sharpening aims to improve the performance of a prototype filter by cascading it with a \"partial compensator\" incorporating multiple uses of that same coefficient set. Although the Kaiser & Hamming FIR sharpening structure has long enjoyed popularity, there has been a widespread mistaken impression that no corresponding structure exists for IIR sharpening. In this paper we review the features of three available methods and also introduce a new sharpener that operates with no restrictions on phase conditions for prototypes and without requiring root-finding. The use of conjugate-reversal of numerator coefficient vectors, along with prototype pairing and allpass filtering, combine to facilitate robust internal sharpener delay alignment. Performance of the new structure is highlighted here in one complex and three real filter sharpening examples.\n
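For context only, a sketch of the classical Kaiser-Hamming FIR sharpening polynomial H_s = 3H^2 - 2H^3 applied to a linear-phase prototype; the paper's new IIR sharpener is not reproduced here, and the filter parameters below are arbitrary.

import numpy as np
from scipy import signal

h = signal.firwin(31, 0.3)                    # linear-phase low-pass FIR prototype
w, H = signal.freqz(h, worN=1024)
H0 = H * np.exp(1j * w * 15)                  # remove the group delay of (31-1)/2 = 15 samples
Hs = 3 * H0**2 - 2 * H0**3                    # Kaiser-Hamming sharpened (zero-phase) response

# Passband ripple before and after sharpening (passband roughly covers the first ~300 bins).
print("prototype ripple :", np.max(np.abs(np.abs(H0[:300]) - 1)))
print("sharpened ripple :", np.max(np.abs(np.abs(Hs[:300]) - 1)))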
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Investigation of Network Architecture for Single-Channel End-to-End Denoising.\n \n \n \n \n\n\n \n Hasumi, T.; Kobayashi, T.; and Ogawa, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 441-445, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"InvestigationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287753,\n  author = {T. Hasumi and T. Kobayashi and T. Ogawa},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Investigation of Network Architecture for Single-Channel End-to-End Denoising},\n  year = {2020},\n  pages = {441-445},\n  abstract = {This paper examines the effectiveness of a fully convolutional time-domain audio separation network (Conv-TasNet) on single-channel denoising. Conv-TasNet, which has a structure to explicitly estimate a mask for encoded features, has shown to be effective in single-channel sound source separation in noise-free environments, but it has not been applied to denoising. Therefore, the present study investigates a method of learning Conv-TasNet for denoising and clarifies the optimal structure for single-channel end-to-end modeling. Experimental comparisons conducted using the CHiME-3 dataset demonstrate that Conv-TasNet performs well in denoising and yields improvements in single-channel end-to-end denoising over existing denoising autoencoder-based modeling.},\n  keywords = {Convolution;Noise reduction;Speech recognition;Data models;Decoding;Time-domain analysis;Periodic structures;fully convolutional time-domain audio separation network;time-domain convolutional denoising autoencoders;end-to-end modeling;single-channel denoising;speech recognition},\n  doi = {10.23919/Eusipco47968.2020.9287753},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000441.pdf},\n}\n\n
\n
\n\n\n
\n This paper examines the effectiveness of a fully convolutional time-domain audio separation network (Conv-TasNet) on single-channel denoising. Conv-TasNet, which has a structure that explicitly estimates a mask for encoded features, has been shown to be effective in single-channel sound source separation in noise-free environments, but it has not been applied to denoising. Therefore, the present study investigates a method of learning Conv-TasNet for denoising and clarifies the optimal structure for single-channel end-to-end modeling. Experimental comparisons conducted using the CHiME-3 dataset demonstrate that Conv-TasNet performs well in denoising and yields improvements in single-channel end-to-end denoising over existing denoising autoencoder-based modeling.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An ADMM-Net for Data Recovery in Wireless Sensor Networks.\n \n \n \n \n\n\n \n Yang, L.; Eldar, Y. C.; Wang, H.; Kang, K.; and Qian, H.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1712-1716, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287754,\n  author = {L. Yang and Y. C. Eldar and H. Wang and K. Kang and H. Qian},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {An ADMM-Net for Data Recovery in Wireless Sensor Networks},\n  year = {2020},\n  pages = {1712-1716},\n  abstract = {Data collection plays an important role in wireless sensor networks. Recovery of spatio-temporal data from incomplete sensing data is vital to the network lifetime. Many works have utilized the spatial and temporal correlations to achieve satisfactory data recovery results. However, these methods introduce large computational overhead at the fusion center. In this paper, we develop an ADMM-Net framework for correlated spatio-temporal data recovery. Both the spatial correlation and temporal correlation of sensing data are considered in a convex optimization problem, which is solved by the alternating direction method of multipliers (ADMM) algorithm. We then unfold the ADMM algorithm into a fixed-length neural network that reduces the iterations dramatically and does not require additional location information of nodes. Experimental results on a realworld dataset demonstrate that the proposed method can achieve faster convergence speed than the baseline ADMM algorithm with slight accuracy loss.},\n  keywords = {Wireless sensor networks;Correlation;Neural networks;Signal processing algorithms;Convex functions;Sensors;Convergence;Data recovery;ADMM;unfolding;wireless sensor networks},\n  doi = {10.23919/Eusipco47968.2020.9287754},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001712.pdf},\n}\n\n
\n
\n\n\n
\n Data collection plays an important role in wireless sensor networks. Recovery of spatio-temporal data from incomplete sensing data is vital to the network lifetime. Many works have utilized spatial and temporal correlations to achieve satisfactory data recovery results. However, these methods introduce a large computational overhead at the fusion center. In this paper, we develop an ADMM-Net framework for correlated spatio-temporal data recovery. Both the spatial and temporal correlation of the sensing data are considered in a convex optimization problem, which is solved by the alternating direction method of multipliers (ADMM) algorithm. We then unfold the ADMM algorithm into a fixed-length neural network that reduces the number of iterations dramatically and does not require additional location information of the nodes. Experimental results on a real-world dataset demonstrate that the proposed method achieves faster convergence than the baseline ADMM algorithm with only a slight loss in accuracy.\n
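A generic sketch of what "unfolding ADMM into a fixed-length network" means, shown for a simple LASSO-type recovery problem rather than the paper's spatio-temporal model; the problem sizes and penalty values are arbitrary assumptions.

import numpy as np

def soft(v, t):
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def unrolled_admm(A, b, lam=0.1, rho=1.0, n_layers=10):
    """ADMM for 0.5||Ax-b||^2 + lam||x||_1, run for a fixed number of 'layers'."""
    m, n = A.shape
    x = z = u = np.zeros(n)
    AtA_inv = np.linalg.inv(A.T @ A + rho * np.eye(n))   # precomputed, shared by all layers
    Atb = A.T @ b
    for _ in range(n_layers):                            # fixed depth instead of convergence test
        x = AtA_inv @ (Atb + rho * (z - u))
        z = soft(x + u, lam / rho)
        u = u + x - z
    return z

A = np.random.randn(40, 100)
x_true = np.zeros(100); x_true[:5] = 1.0
print(np.linalg.norm(unrolled_admm(A, A @ x_true) - x_true))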
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Blind calibration for arrays with an aberration layer in ultrasound imaging.\n \n \n \n \n\n\n \n van der Meulen , P.; Coutino, M.; Kruizinga, P.; Bosch, J. G.; and Leus, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1269-1273, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"BlindPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287755,\n  author = {P. {van der Meulen} and M. Coutino and P. Kruizinga and J. G. Bosch and G. Leus},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Blind calibration for arrays with an aberration layer in ultrasound imaging},\n  year = {2020},\n  pages = {1269-1273},\n  abstract = {We consider the scenario of finding the transfer function of an aberrating layer in front of an ultrasound array. We are interested in blindly estimating this transfer function without prior knowledge of the unknown ultrasound sources or ultrasound contrast image. The algorithm gives an exact solution if the matrix representing the aberration layer's transfer function is full rank, up to a scaling and reordering of its columns, which has to be resolved using some prior knowledge of the matrix structure. We provide conditions for the robustness of blind calibration in noise. Numerical simulations show that the method becomes more robust for shorter wavelengths, as the transfer function matrices then tend to be less ill-conditioned. Image reconstruction from simulated data using the k-Wave toolbox show that a well calibrated model removes some of the distortions introduced by an uncalibrated model, and improves the resolution for some of the sources.},\n  keywords = {aberrations;biomedical ultrasonics;calibration;image reconstruction;matrix algebra;transfer function matrices;aberration layer;matrix structure;blind calibration;transfer function matrices;image reconstruction;calibrated model;ultrasound imaging;aberrating layer;ultrasound array;ultrasound sources;ultrasound contrast image;numerical simulation;k-Wave toolbox;Ultrasonic imaging;Image resolution;Transfer functions;Imaging;Distortion;Calibration;Numerical models},\n  doi = {10.23919/Eusipco47968.2020.9287755},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001269.pdf},\n}\n\n
\n
\n\n\n
\n We consider the scenario of finding the transfer function of an aberrating layer in front of an ultrasound array. We are interested in blindly estimating this transfer function without prior knowledge of the unknown ultrasound sources or ultrasound contrast image. The algorithm gives an exact solution if the matrix representing the aberration layer's transfer function is full rank, up to a scaling and reordering of its columns, which has to be resolved using some prior knowledge of the matrix structure. We provide conditions for the robustness of blind calibration in noise. Numerical simulations show that the method becomes more robust for shorter wavelengths, as the transfer function matrices then tend to be less ill-conditioned. Image reconstruction from simulated data using the k-Wave toolbox shows that a well-calibrated model removes some of the distortions introduced by an uncalibrated model, and improves the resolution for some of the sources.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Group Nonnegative Matrix Factorization with Sparse Regularization in Multi-set Data.\n \n \n \n \n\n\n \n Wang, X.; Liu, W.; Cong, F.; and Ristaniemi, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2125-2129, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"GroupPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287756,\n  author = {X. Wang and W. Liu and F. Cong and T. Ristaniemi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Group Nonnegative Matrix Factorization with Sparse Regularization in Multi-set Data},\n  year = {2020},\n  pages = {2125-2129},\n  abstract = {Constrained joint analysis of data from multiple sources has received widespread attention for that it allows us to explore potential connections and extract meaningful hidden components. In this paper, we formulate a flexible joint source separation model termed as group nonnegative matrix factorization with sparse regularization (GNMF-SR), which aims to jointly analyze the partially coupled multi-set data. In the GNMF-SR model, common and individual patterns of particular underlying factors can be extracted simultaneously with imposing nonnegative constraint and sparse penalty. Alternating optimization and alternating direction method of multipliers (ADMM) are combined to solve the GNMF-SR model. Using the experiment of simulated fMRI-like data, we demonstrate the ADMM-based GNMF-SR algorithm can achieve the better performance.},\n  keywords = {biomedical MRI;blind source separation;matrix decomposition;medical image processing;optimisation;source separation;alternating direction method;GNMF-SR model;simulated fMRI-like data;ADMM-based GNMF-SR algorithm;group nonnegative matrix factorization;sparse regularization;hidden components;flexible joint source separation model;partially coupled multiset data;nonnegative constraint;sparse penalty;Analytical models;Source separation;Signal processing algorithms;Data models;Convex functions;Sparse matrices;Optimization;Alternating direction method of multipliers;coupled;group nonnegative matrix factorization;joint analysis;sparse representation},\n  doi = {10.23919/Eusipco47968.2020.9287756},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002125.pdf},\n}\n\n
\n
\n\n\n
\n Constrained joint analysis of data from multiple sources has received widespread attention because it allows us to explore potential connections and extract meaningful hidden components. In this paper, we formulate a flexible joint source separation model termed group nonnegative matrix factorization with sparse regularization (GNMF-SR), which aims to jointly analyze partially coupled multi-set data. In the GNMF-SR model, common and individual patterns of particular underlying factors can be extracted simultaneously by imposing a nonnegativity constraint and a sparsity penalty. Alternating optimization and the alternating direction method of multipliers (ADMM) are combined to solve the GNMF-SR model. In an experiment on simulated fMRI-like data, we demonstrate that the ADMM-based GNMF-SR algorithm achieves better performance.\n
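A single-set analogue only, for orientation: sparse nonnegative matrix factorization with scikit-learn (version 1.0 or later is assumed for the alpha_W parameter); the paper's group/coupled formulation and ADMM solver are not reproduced here.

import numpy as np
from sklearn.decomposition import NMF

X = np.abs(np.random.randn(100, 50))          # toy nonnegative data matrix
model = NMF(n_components=5, l1_ratio=1.0, alpha_W=0.1,
            init="nndsvda", max_iter=500)
W = model.fit_transform(X)                    # nonnegative factor with L1 (sparsity) penalty
H = model.components_
print(W.shape, H.shape)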
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the computation of marginal likelihood via MCMC for model selection and hypothesis testing.\n \n \n \n \n\n\n \n Llorente, F.; Martino, L.; Delgado, D.; and López-Santiago, J.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2373-2377, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287757,\n  author = {F. Llorente and L. Martino and D. Delgado and J. López-Santiago},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {On the computation of marginal likelihood via MCMC for model selection and hypothesis testing},\n  year = {2020},\n  pages = {2373-2377},\n  abstract = {In the Bayesian setting, the marginal likelihood is the key quantity for model selection purposes. Several computational methods have been proposed in the literature for the computation of the marginal likelihood. In this paper, we briefly review different estimators based on MCMC simulations. We also suggest the use of a kernel density estimation procedure, based on a clustering scheme, within some of them. Numerical comparisons are also provided.},\n  keywords = {Bayes methods;Markov processes;maximum likelihood estimation;Monte Carlo methods;marginal likelihood;MCMC simulations;hypothesis testing;Bayesian setting;computational methods;Monte Carlo methods;Computational modeling;Signal processing;Numerical models;Bayes methods;Kernel;Testing;Bayesian evidence;marginal likelihood;Markov Chain Monte Carlo (MCMC);importance sampling},\n  doi = {10.23919/Eusipco47968.2020.9287757},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002373.pdf},\n}\n\n
\n
\n\n\n
\n In the Bayesian setting, the marginal likelihood is the key quantity for model selection purposes. Several computational methods have been proposed in the literature for the computation of the marginal likelihood. In this paper, we briefly review different estimators based on MCMC simulations. We also suggest the use of a kernel density estimation procedure, based on a clustering scheme, within some of them. Numerical comparisons are also provided.\n
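For concreteness, a minimal sketch of the simplest marginal-likelihood estimator (averaging the likelihood over prior draws) on a toy Gaussian model; the paper reviews more elaborate MCMC-based estimators than this one, and the model below is purely illustrative.

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
data = rng.normal(loc=1.0, scale=1.0, size=20)            # toy observations

def log_likelihood(theta):
    return stats.norm.logpdf(data, loc=theta, scale=1.0).sum()

# Model: theta ~ N(0, 2^2); marginal likelihood Z = E_prior[ p(data | theta) ].
theta_samples = rng.normal(0.0, 2.0, size=20000)
logls = np.array([log_likelihood(t) for t in theta_samples])
log_Z = np.logaddexp.reduce(logls) - np.log(len(logls))   # log-mean-exp for stability
print("estimated log marginal likelihood:", log_Z)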
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Comparison of Light-Weight Multi-Scale CNNs for Texture Regression in Agricultural Context.\n \n \n \n \n\n\n \n Strutz, T.; and Leipnitz, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 645-649, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ComparisonPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287758,\n  author = {T. Strutz and A. Leipnitz},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Comparison of Light-Weight Multi-Scale CNNs for Texture Regression in Agricultural Context},\n  year = {2020},\n  pages = {645-649},\n  abstract = {While texture classification has a long history in image preprocessing tasks, its application in agriculture has only recently gained attention in the context of digital farming. The usage of camera drones allows the inspection of fields with a view from above. This paper proposes a method for the patch-based classification of different and basic ground regions using texture regression with convolutional neural networks. Two shallow multi-scale architectures are compared, which differ in the re-use of feature maps. It can be shown that a light-weight network is able to classify the textures with high accuracy. The performance is checked using the standard data set KTH-TIPS2b. The classification information can be effectively utilised for semantic image segmentation.},\n  keywords = {Image segmentation;Semantics;Signal processing;Inspection;History;Task analysis;Standards;texture classification;convolutional neural network;multi-scale;aerial images;image segmentation},\n  doi = {10.23919/Eusipco47968.2020.9287758},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000645.pdf},\n}\n\n
\n
\n\n\n
\n While texture classification has a long history in image preprocessing tasks, its application in agriculture has only recently gained attention in the context of digital farming. The usage of camera drones allows the inspection of fields with a view from above. This paper proposes a method for the patch-based classification of different and basic ground regions using texture regression with convolutional neural networks. Two shallow multi-scale architectures are compared, which differ in the re-use of feature maps. It can be shown that a light-weight network is able to classify the textures with high accuracy. The performance is checked using the standard data set KTH-TIPS2b. The classification information can be effectively utilised for semantic image segmentation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Combining Deep and Manifold Learning For Nonlinear Feature Extraction in Texture Images.\n \n \n \n \n\n\n \n Nsimba, C. B.; and Levada, A. L. M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1552-1555, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"CombiningPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287759,\n  author = {C. B. Nsimba and A. L. M. Levada},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Combining Deep and Manifold Learning For Nonlinear Feature Extraction in Texture Images},\n  year = {2020},\n  pages = {1552-1555},\n  abstract = {This paper applies a two-step approach for texture classification by combining Manifold learning with Deep CNN feature extractors. The first step is to use CNN architecture to compute the feature vector of a given image. The second step is to apply Manifold Learning algorithms on the features computed in the first step to making a refined feature vector. Eventually, this final representation is used to train SVM classifier. In the first step, we adopted VGG-19 network trained from scratch in order to extract texture features. In the next step, we used the DIMAL (Deep Isometric Manifold Learning Using Sparse Geodesic Sampling) configuration to train a neural network to reduce the dimensionality of the feature space in a nonlinear manner for generating the refined feature vector of the input image. Our concept is that the combination of a deep-learning framework with manifold learning techniques has the potential to select discriminative texture features from a high dimensional space. Based on this idea, we adopted this combination to perform nonlinear feature extraction in texture images. The resulting learned features were then used to train SVM classifier. The experiments demonstrated that our approach achieved better accuracy in texture classification than existing models if trained from scratch.},\n  keywords = {Support vector machines;Neural networks;Signal processing algorithms;Signal processing;Feature extraction;Classification algorithms;Task analysis;Texture classification;Manifold learning;Deep learning;Feature extraction;Nonlinear dimensionality reduction},\n  doi = {10.23919/Eusipco47968.2020.9287759},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001552.pdf},\n}\n\n
\n
\n\n\n
\n This paper applies a two-step approach for texture classification by combining manifold learning with deep CNN feature extractors. The first step is to use a CNN architecture to compute the feature vector of a given image. The second step is to apply manifold learning algorithms to the features computed in the first step to obtain a refined feature vector. Finally, this representation is used to train an SVM classifier. In the first step, we adopted a VGG-19 network trained from scratch in order to extract texture features. In the next step, we used the DIMAL (Deep Isometric Manifold Learning Using Sparse Geodesic Sampling) configuration to train a neural network that reduces the dimensionality of the feature space in a nonlinear manner, generating the refined feature vector of the input image. Our concept is that the combination of a deep-learning framework with manifold learning techniques has the potential to select discriminative texture features from a high-dimensional space. Based on this idea, we adopted this combination to perform nonlinear feature extraction in texture images. The resulting learned features were then used to train an SVM classifier. The experiments demonstrated that our approach achieves better accuracy in texture classification than existing models when trained from scratch.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Analysis of Baseband IQ Data Compression Methods for Centralized RAN.\n \n \n \n \n\n\n \n Shehata, A.; Crussière, M.; and Mary, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1762-1766, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"AnalysisPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287760,\n  author = {A. Shehata and M. Crussière and P. Mary},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Analysis of Baseband IQ Data Compression Methods for Centralized RAN},\n  year = {2020},\n  pages = {1762-1766},\n  abstract = {Through recent wireless technologies, such as Centralized Radio Access Network, baseband unit aid remote radio heads are physically separated and connected using fronthaul links. Compressing complex baseband signal samples prior transmission over fronthaul link is an effective Way to satisfy the pressing need to decrease the huge required transported data rates. In this paper, we analyze the easting IQ data compression schemes exploiting time and spectral signal characteristics. We consider compression system evaluation parameters to have a smooth trade-off between required signal quality and complexity performance while achieving an acceptable compression gain. We propose an optimized uniform quantization technique combined with entropy coding achieving non-uniform quantization performance by exploiting signal temporal statistical characteristics with much less computational complexity. We also present a comparison between simulation results analyzing the trade-off between the removal of the signal spectral redundancies and vector quantization in terms of performance and complexity.},\n  keywords = {Wireless communication;Baseband;Vector quantization;Data compression;Entropy coding;Time-domain analysis;Gain;CPRI;Uniform and Non-Uniform Quantization;Decimation;Vector Quantization;Compression Ratio},\n  doi = {10.23919/Eusipco47968.2020.9287760},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001762.pdf},\n}\n\n
\n
\n\n\n
\n In recent wireless architectures such as the Centralized Radio Access Network, the baseband unit and remote radio heads are physically separated and connected using fronthaul links. Compressing complex baseband signal samples prior to transmission over the fronthaul link is an effective way to satisfy the pressing need to decrease the huge transported data rates required. In this paper, we analyze existing IQ data compression schemes exploiting temporal and spectral signal characteristics. We consider compression system evaluation parameters to obtain a smooth trade-off between the required signal quality and complexity while achieving an acceptable compression gain. We propose an optimized uniform quantization technique combined with entropy coding that achieves non-uniform quantization performance by exploiting the temporal statistical characteristics of the signal with much lower computational complexity. We also present a comparison of simulation results analyzing the trade-off between the removal of spectral redundancies in the signal and vector quantization in terms of performance and complexity.\n
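A rough illustration of the uniform-quantization-plus-entropy-coding idea on toy IQ samples: quantize with a uniform mid-tread quantizer and estimate the achievable rate from the empirical entropy of the indices. The bit width, clipping range and Gaussian test signal are assumptions, not the paper's settings.

import numpy as np

rng = np.random.default_rng(0)
iq = rng.normal(size=4096) + 1j * rng.normal(size=4096)     # toy baseband samples

def quantize_uniform(x, n_bits, x_max):
    """Uniform mid-tread quantizer; returns integer indices and reconstruction."""
    step = 2 * x_max / (2 ** n_bits)
    idx = np.clip(np.round(x / step), -(2 ** (n_bits - 1)), 2 ** (n_bits - 1) - 1)
    return idx.astype(int), idx * step

def empirical_entropy(idx):
    _, counts = np.unique(idx, return_counts=True)
    p = counts / counts.sum()
    return -(p * np.log2(p)).sum()

idx_i, _ = quantize_uniform(iq.real, n_bits=6, x_max=4.0)
print("bits/sample after entropy coding ~", empirical_entropy(idx_i), "vs fixed 6")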
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust Texture Features For Emphysema Classification In CT Images.\n \n \n \n \n\n\n \n Li, H.; and Mukundan, R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1220-1224, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287761,\n  author = {H. Li and R. Mukundan},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust Texture Features For Emphysema Classification In CT Images},\n  year = {2020},\n  pages = {1220-1224},\n  abstract = {In this paper, we propose a novel feature extraction method based on local quinary patterns (LQP), multifractal features and intensity histograms for classifying emphysema into three subtypes in computed tomography images. Compared to local binary patterns, LQP method computes more image local patterns to represent texture features. Multifractal features which enhancing local textures are combined with other features to constitute a feature vector for this classification task. An autoencoder network and principal components analysis are used to reduce the dimensionality of the feature vector before using an SVM classifier. The proposed method is tested on an emphysema database containing 168 annotated regions of interest of three different subtypes. The experimental results demonstrate that our method outperforms most other state-of-the-art approaches with the best classification accuracy of 92.3% using the least dimensionality (15) of the feature vector.},\n  keywords = {Support vector machines;Histograms;Computed tomography;Feature extraction;Fractals;Task analysis;Principal component analysis;local quinary patterns;multifractal analysis;emphysema classification;autoencoder network},\n  doi = {10.23919/Eusipco47968.2020.9287761},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001220.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a novel feature extraction method based on local quinary patterns (LQP), multifractal features and intensity histograms for classifying emphysema into three subtypes in computed tomography images. Compared to local binary patterns, the LQP method computes more local image patterns to represent texture features. Multifractal features, which enhance local textures, are combined with the other features to constitute a feature vector for this classification task. An autoencoder network and principal component analysis are used to reduce the dimensionality of the feature vector before using an SVM classifier. The proposed method is tested on an emphysema database containing 168 annotated regions of interest of three different subtypes. The experimental results demonstrate that our method outperforms most other state-of-the-art approaches, with a best classification accuracy of 92.3% using the lowest feature-vector dimensionality (15).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficiency of TV-regularized algorithms in computed tomography with Poisson-Gaussian noise.\n \n \n \n \n\n\n \n Leuliet, T.; Friot–Giroux, L.; Baaziz, W.; Bretin, É.; Ersen, O.; Peyrin, F.; Sixou, B.; and Maxim, V.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1294-1298, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"EfficiencyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287762,\n  author = {T. Leuliet and L. Friot--Giroux and W. Baaziz and É. Bretin and O. Ersen and F. Peyrin and B. Sixou and V. Maxim},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Efficiency of TV-regularized algorithms in computed tomography with Poisson-Gaussian noise},\n  year = {2020},\n  pages = {1294-1298},\n  abstract = {Regularized algorithms are the state-of-the-art in computed tomography, but they are also very demanding in computer resources. In this work we test two data-fidelity formulations and some associated algorithms for the resolution of the Total-Variation regularized tomographic problem. We compare their computational cost for a mixture of Poisson and Gaussian noises. We show that a recently proposed MAP-EM algorithm outperforms the TV-regularized SIRT and the Chambolle-Pock algorithms on synthetic data for the considered noise. We illustrate this result on experimental data from transmission electron microscopy.},\n  keywords = {computerised tomography;Gaussian noise;image denoising;image reconstruction;medical image processing;Poisson equation;transmission electron microscopy;MAP-EM algorithm;TV-regularized SIRT;Chambolle-Pock algorithms;TV-regularized algorithms;computed tomography;Poisson-Gaussian noise;computer resources;data-fidelity formulations;associated algorithms;total-variation regularized tomographic problem;transmission electron microscopy;TV;Transmission electron microscopy;Microscopy;Gaussian noise;Computed tomography;Signal processing algorithms;Signal processing;Tomographic reconstruction;Poisson noise;Gaussian noise;electron microscopy;total variation;FISTA},\n  doi = {10.23919/Eusipco47968.2020.9287762},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001294.pdf},\n}\n\n
Sensor commissioning detection in single-pixel thermopile sensing systems. Hagenaars, E.; Pandharipande, A.; Frimout, E.; and Leus, G. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1807-1811, Aug 2020.
@InProceedings{9287763,\n  author = {E. Hagenaars and A. Pandharipande and E. Frimout and G. Leus},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Sensor commissioning detection in single-pixel thermopile sensing systems},\n  year = {2020},\n  pages = {1807-1811},\n  abstract = {We consider the problem of detecting sensor commissioning in the form of determining the sensor layout. We address this problem for single-pixel thermopile sensors, located at the ceiling, that provide remote temperature measurements for people counting applications and HVAC controls. We employ a random forest classifier to determine the deployed layout in an area. For this classifier, we propose spatio-temporal distance features using two-sided cumulative sum recursive least squares (CUSUM RLS) filtering of the thermopile temperature sensor signals. Using sensor data generated with simulated occupancy patterns and a thermopile signal model, we show that the proposed method achieves a true positive rate (determining the correct layout) of 90.2% and false positive rate of 1.3%.},\n  keywords = {Temperature sensors;Temperature measurement;HVAC;Layout;Signal processing;Topology;Sensors},\n  doi = {10.23919/Eusipco47968.2020.9287763},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001807.pdf},\n}\n\n
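The entry above builds its layout-detection features on two-sided CUSUM filtering of thermopile signals. As a rough illustration only, the sketch below implements a generic two-sided CUSUM change detector in Python; the RLS residual filtering, the spatio-temporal distance features and the random-forest classifier from the paper are not reproduced, and the drift and threshold values are illustrative assumptions.

```python
import numpy as np

def two_sided_cusum(x, nu=0.5, h=5.0):
    """Return sample indices where the upper or lower CUSUM statistic exceeds h."""
    g_pos = g_neg = 0.0
    baseline = x[0]                    # very crude running baseline
    alarms = []
    for k, sample in enumerate(x):
        g_pos = max(0.0, g_pos + (sample - baseline) - nu)
        g_neg = max(0.0, g_neg - (sample - baseline) - nu)
        if g_pos > h or g_neg > h:
            alarms.append(k)
            g_pos = g_neg = 0.0
            baseline = sample          # re-baseline after an alarm
    return alarms

rng = np.random.default_rng(0)
temperature = np.concatenate([rng.normal(20.0, 0.2, 200),   # empty zone
                              rng.normal(23.0, 0.2, 200)])  # person enters
print(two_sided_cusum(temperature))    # expect an alarm shortly after sample 200
```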
Variational Auto-encoder-based Detection of Electricity Stealth Cyber-attacks in AMI Networks. Takiddin, A.; Ismail, M.; Zafar, U.; and Serpedin, E. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1590-1594, Aug 2020.
@InProceedings{9287764,\n  author = {A. Takiddin and M. Ismail and U. Zafar and E. Serpedin},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Variational Auto-encoder-based Detection of Electricity Stealth Cyber-attacks in AMI Networks},\n  year = {2020},\n  pages = {1590-1594},\n  abstract = {Current efforts to detect electricity theft cyber-attacks in advanced metering infrastructures (AMIs) are hindered by the lack of malicious electricity theft datasets. Therefore, anomaly detectors trained with the energy consumption profiles of honest customers appear as a plausible solution to overcome the lack of malicious datasets. Taking into account this constraint, this paper examines the performance of two structures of variational auto-encoders (VAEs); fully-connected (FC) VAE and long-short-term-memory (LSTM) VAE in detecting electricity thefts. The proposed structures are promising and exhibit an improvement of 11 − 15% in detection rate, 9 − 22% in false alarm rate, and 27 − 37% in the highest difference compared to existing state-of-the-art anomaly detectors that are shallow and static, such as single-class support vector machine (SVM) and auto-regressive integrated moving average (ARIMA) models.},\n  keywords = {Support vector machines;Energy consumption;Europe;Detectors;Signal processing;Feeds;Periodic structures;electricity theft;auto-encoders;deep learning},\n  doi = {10.23919/Eusipco47968.2020.9287764},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001590.pdf},\n}\n\n
Canonical polyadic and block term decompositions to fuse EEG, phenotypic scores, and structural MRI of children with early-onset epilepsy. Dron, N.; Chin, R. F. M.; and Escudero, J. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1145-1149, Aug 2020.
@InProceedings{9287765,\n  author = {N. Dron and R. F. M. Chin and J. Escudero},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Canonical polyadic and block term decompositions to fuse EEG, phenotypic scores, and structural MRI of children with early-onset epilepsy},\n  year = {2020},\n  pages = {1145-1149},\n  abstract = {We investigated two popular tensor decomposition models, canonical polyadic decomposition (CPD) and block term decomposition (BTD), to test their ability to fuse datasets from three different modalities related to neuroscience. We fused electroencephalogram (EEG) spectral power, regional brain volume from magnetic resonance imaging (MRI) and phenotypic scores from 29 preschool children aged < 5 y.o. who have a diagnosis of epilepsy. We used CPD and BTD in a coupled matrix-matrix-tensor factorisation setting to find shared components across data modalities. In addition, we imposed a hard constraint on the model to extract factors directly interpretable in terms of childhood development. We evaluated the model performance to extract components in agreement with prior clinical knowledge. We found that both models revealed similar patterns of relationships between regional brain volumes and developmental scores following prior clinical knowledge but BTD was slightly more sensitive than CPD.},\n  keywords = {biomedical MRI;brain;diseases;electroencephalography;matrix decomposition;medical image processing;medical signal processing;neurophysiology;tensors;preschool children;CPD;BTD;coupled matrix-matrix-tensor factorisation;data modalities;prior clinical knowledge;regional brain volume;developmental scores;block term decompositions;phenotypic scores;early-onset epilepsy;popular tensor decomposition models;canonical polyadic decomposition;term decomposition;electroencephalogram spectral power;magnetic resonance imaging;Tensors;Fuses;Magnetic resonance imaging;Epilepsy;Brain modeling;Electroencephalography;Matrix decomposition;Tensor factorisation;Data fusion;Joint decomposition;Block term decomposition;Canonical polyadic decomposition},\n  doi = {10.23919/Eusipco47968.2020.9287765},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001145.pdf},\n}\n\n
Direct Position Estimation of a Mobile Receiver in Multipath Environments via Adaptive Beamforming. Fascista, A.; Coluccia, A.; and Ricci, G. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1782-1786, Aug 2020.
@InProceedings{9287766,\n  author = {A. Fascista and A. Coluccia and G. Ricci},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Direct Position Estimation of a Mobile Receiver in Multipath Environments via Adaptive Beamforming},\n  year = {2020},\n  pages = {1782-1786},\n  abstract = {The paper presents a novel direct position estimation approach to localize a mobile receiver in multipath environments, where the different paths are coherent hence standard AOA estimation methods fail. The proposed algorithm combines the angular information from signals transmitted by a set of static transmitters with the velocity information obtained from the onboard kinematic sensors. To this aim, the received signals are first decorrelated through spatial smoothing, then an adaptive beamforming strategy is applied to mitigate the detrimental effects of multipath propagation. Simulation results in a realistic multipath environment demonstrate that the proposed algorithm can achieve satisfactory localization performance, outperforming existing AOA-based (indirect) position estimation approaches.},\n  keywords = {Array signal processing;Transmitters;Signal processing algorithms;Estimation;Receivers;Trajectory;Standards;direct position estimation;array processing;direction of arrival;beamforming},\n  doi = {10.23919/Eusipco47968.2020.9287766},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001782.pdf},\n}\n\n
Dictionary Learning with Statistical Sparsity in the Presence of Noise. Aziznejad, S.; Soubies, E.; and Unser, M. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2026-2029, Aug 2020.
@InProceedings{9287767,\n  author = {S. Aziznejad and E. Soubies and M. Unser},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Dictionary Learning with Statistical Sparsity in the Presence of Noise},\n  year = {2020},\n  pages = {2026-2029},\n  abstract = {We consider a new stochastic formulation of sparse representations that is based on the family of symmetric α-stable (SαS) distributions. Within this framework, we develop a novel dictionary-learning algorithm that involves a new estimation technique based on the empirical characteristic function. It finds the unknown parameters of an SαS law from a set of its noisy samples. We assess the robustness of our algorithm with numerical examples.},\n  keywords = {Signal processing algorithms;Estimation;Stochastic processes;Machine learning;Robustness;Numerical models;Noise measurement;Dictionary learning;sparse coding;sparse representation;stable distribution;empirical characteristic function},\n  doi = {10.23919/Eusipco47968.2020.9287767},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002026.pdf},\n}\n\n
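The abstract above mentions estimating the parameters of a symmetric α-stable (SαS) law from the empirical characteristic function. The sketch below illustrates only that estimation idea under simple assumptions (noiseless i.i.d. samples and a fixed grid of evaluation points); the dictionary-learning loop itself is not reproduced.

```python
import numpy as np
from scipy.stats import levy_stable

alpha_true, scale_true = 1.5, 2.0
x = levy_stable.rvs(alpha_true, 0.0, loc=0.0, scale=scale_true,
                    size=20000, random_state=6)

# For an SaS law, |phi(t)| = exp(-(scale*|t|)^alpha), so
# log(-log|phi(t)|) = alpha*log|t| + alpha*log(scale): a line in log|t|.
t = np.array([0.05, 0.1, 0.2, 0.4])
ecf = np.array([np.mean(np.exp(1j * ti * x)) for ti in t])   # empirical characteristic function
slope, intercept = np.polyfit(np.log(t), np.log(-np.log(np.abs(ecf))), 1)
alpha_hat, scale_hat = slope, np.exp(intercept / slope)
print(round(alpha_hat, 2), round(scale_hat, 2))   # roughly 1.5 and 2.0
```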
How Low Can You Go? Reducing Frequency and Time Resolution in Current CNN Architectures for Music Auto-tagging. Ferraro, A.; Bogdanov, D.; Jay, X. S.; Jeon, H.; and Yoon, J. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 131-135, Aug 2020.
@InProceedings{9287769,\n  author = {A. Ferraro and D. Bogdanov and X. S. Jay and H. Jeon and J. Yoon},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {How Low Can You Go? Reducing Frequency and Time Resolution in Current CNN Architectures for Music Auto-tagging},\n  year = {2020},\n  pages = {131-135},\n  abstract = {Automatic tagging of music is an important research topic in Music Information Retrieval and audio analysis algorithms proposed for this task have achieved improvements with advances in deep learning. In particular, many state-of-the-art systems use Convolutional Neural Networks and operate on mel-spectrogram representations of the audio. In this paper, we compare commonly used mel-spectrogram representations and evaluate model performances that can be achieved by reducing the input size in terms of both lesser amount of frequency bands and larger frame rates. We use the MagnaTagaTune dataset for comprehensive performance comparisons and then compare selected configurations on the larger Million Song Dataset. The results of this study can serve researchers and practitioners in their trade-off decision between accuracy of the models, data storage size and training and inference times.},\n  keywords = {Training;Time-frequency analysis;Memory;Computer architecture;Tagging;Data models;Task analysis;music auto-tagging;audio classification;convolutional neural networks},\n  doi = {10.23919/Eusipco47968.2020.9287769},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000131.pdf},\n}\n\n
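To make the input-size trade-off discussed above concrete, the hedged sketch below computes mel-spectrograms at two resolutions with librosa; the band counts and hop lengths are illustrative choices, not the configurations benchmarked in the paper.

```python
import numpy as np
import librosa

# Any mono audio file works; librosa's bundled example is used for convenience
# (it is downloaded on first use).
y, sr = librosa.load(librosa.example('trumpet'))

def mel_input(y, sr, n_mels, hop_length):
    S = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=2048,
                                       hop_length=hop_length, n_mels=n_mels)
    return librosa.power_to_db(S, ref=np.max)

full = mel_input(y, sr, n_mels=128, hop_length=512)    # "high-resolution" input
small = mel_input(y, sr, n_mels=48, hop_length=1024)   # fewer bands, larger hop
print(full.shape, small.shape)   # the reduced input has roughly 5x fewer values
```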
Two Stages Parallel LMS Structure: A Pipelined Hardware Architecture. Akkad, G.; Mansour, A.; ElHassan, B.; Inaty, E.; and Ayoubi, R. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2363-2367, Aug 2020.
@InProceedings{9287770,\n  author = {G. Akkad and A. Mansour and B. ElHassan and E. Inaty and R. Ayoubi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Two Stages Parallel LMS Structure: A Pipelined Hardware Architecture},\n  year = {2020},\n  pages = {2363-2367},\n  abstract = {Modern wireless communication systems have tightened the requirements of adaptive beamformers when implemented on Field Programmable Gate Array (FPGA). The set requirements imposed additional constraints such as designing a high throughput, low complexity system with fast convergence and low steady state error. Recently, a parallel multi-stage least mean square (pLMS) structure was proposed to mitigate the listed constraints. pLMS consists of two least mean square (LMS) stages operating in parallel and connected by an error feedback. To form the total pLMS error, the second LMS stage (LMS2) error is delayed by one sample and fed-back to combine with that of the first LMS stage (LMS1). pLMS provides accelerated convergence while maintaining minimal steady state error and a computational complexity of order O(N), where N represents the number of antenna elements. However, pipelining the pLMS structure is still difficult due to the LMS coefficient update loop. Thus, in this paper, we propose the application of the delay and sum relaxed look ahead technique to design a high throughput pipelined hardware architecture for the pLMS. Hence, the delayed pLMS (DpLMS) is obtained. Simulation and synthesis results highlight the superior performance of the DpLMS in presenting a high throughput architecture while preserving accelerated convergence, low steady state error and low computational complexity. DpLMS operates at a maximum frequency of 208.33 MHz and is obtained at the cost of a marginal increase in resource requirements, i.e. additional delay registers compared to the original pLMS design.},\n  keywords = {Computer architecture;Throughput;Hardware;Steady-state;Delays;Field programmable gate arrays;Convergence;LMS;Parallel LMS;Relaxed Look-Ahead;FPGA;Antenna Array;Adaptive Beamforming},\n  doi = {10.23919/Eusipco47968.2020.9287770},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002363.pdf},\n}\n\n
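For context on the building block behind the pLMS/DpLMS architecture above, the sketch below is a plain LMS adaptive filter with the O(N) per-sample update; the two parallel stages, the error feedback and the relaxed look-ahead pipelining are not reproduced, and the step size and filter length are illustrative.

```python
import numpy as np

def lms(x, d, num_taps=8, mu=0.05):
    """Adapt an FIR filter w so that w @ x_k tracks the desired signal d."""
    w = np.zeros(num_taps)
    e = np.zeros(len(d))
    for k in range(num_taps - 1, len(d)):
        x_k = x[k - num_taps + 1:k + 1][::-1]   # most recent sample first
        e[k] = d[k] - w @ x_k
        w += mu * e[k] * x_k                    # O(N) update per sample
    return w, e

rng = np.random.default_rng(1)
x = rng.standard_normal(5000)
h_true = np.array([0.6, -0.3, 0.1, 0.05, 0.0, 0.0, 0.0, 0.0])
d = np.convolve(x, h_true)[:len(x)] + 0.01 * rng.standard_normal(len(x))
w, e = lms(x, d)
print(np.round(w, 2))   # should be close to h_true
```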
Distributed Adaptive Acoustic Contrast Control for Node-specific Sound Zoning in a Wireless Acoustic Sensor and Actuator Network. Van Rompaey, R.; and Moonen, M. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 481-485, Aug 2020.
@InProceedings{9287771,\n  author = {R. {Van Rompaey} and M. Moonen},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed Adaptive Acoustic Contrast Control for Node-specific Sound Zoning in a Wireless Acoustic Sensor and Actuator Network},\n  year = {2020},\n  pages = {481-485},\n  abstract = {This paper presents a distributed adaptive algorithm for node-specific sound zoning in a wireless acoustic sensor and actuator network (WASAN), based on a network-wide acoustic contrast control (ACC) method. The goal of the ACC method is to simultaneously create node-specific zones with high signal power (bright zones) while minimizing power leakage in other node-specific zones (dark zones). To obtain this, a network-wide objective involving the acoustic coupling between all the loudspeakers and microphones in the WASAN is proposed where the optimal solution is based on a centralized generalized eigenvalue decomposition (GEVD). To allow for distributed processing, a gradient based GEVD algorithm is first proposed that minimizes the same objective. This algorithm can then be modified to allow for a fully distributed implementation, involving in-network summations and simple local processing. The algorithm is referred to as the distributed adaptive gradient based ACC algorithm (DAGACC). The proposed algorithm outperforms the non-cooperative distributed solution after only a few iterations and converges to the centralized solution, as illustrated by computer simulations.},\n  keywords = {Wireless communication;Actuators;Wireless sensor networks;Computer simulation;Signal processing algorithms;Acoustics;Eigenvalues and eigenfunctions;Acoustic Contrast Control;Sound Zoning;Wireless Sensor Network;Wireless Sensor and Actuator Network (WASAN);Generalized Eigenvalue Decomposition (GEVD)},\n  doi = {10.23919/Eusipco47968.2020.9287771},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000481.pdf},\n}\n\n
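The centralized solution that the distributed DAGACC algorithm above approximates is a generalized eigenvalue problem. The sketch below shows only that centralized GEVD step, with random stand-in coupling matrices rather than measured room responses.

```python
import numpy as np
from scipy.linalg import eigh

rng = np.random.default_rng(2)
L = 6                                          # loudspeakers
G_bright = rng.standard_normal((40, L))        # stand-in plant: speakers to bright-zone mics
G_dark = rng.standard_normal((40, L))          # stand-in plant: speakers to dark-zone mics

R_bright = G_bright.T @ G_bright               # spatial correlation, bright zone
R_dark = G_dark.T @ G_dark + 1e-6 * np.eye(L)  # dark zone, lightly regularized

# Acoustic contrast control: maximize (w' R_bright w) / (w' R_dark w),
# i.e. take the principal generalized eigenvector of the pair (R_bright, R_dark).
eigvals, eigvecs = eigh(R_bright, R_dark)      # eigenvalues in ascending order
w = eigvecs[:, -1]                             # loudspeaker driving weights
contrast = (w @ R_bright @ w) / (w @ R_dark @ w)
print(f"acoustic contrast: {10 * np.log10(contrast):.1f} dB")
```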
Exploring Filterbank Learning for Keyword Spotting. López-Espejo, I.; Tan, Z.-H.; and Jensen, J. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 331-335, Aug 2020.
@InProceedings{9287772,\n  author = {I. López-Espejo and Z. -H. Tan and J. Jensen},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Exploring Filterbank Learning for Keyword Spotting},\n  year = {2020},\n  pages = {331-335},\n  abstract = {Despite their great performance over the years, handcrafted speech features are not necessarily optimal for any particular speech application. Consequently, with greater or lesser success, optimal filterbank learning has been studied for different speech processing tasks. In this paper, we fill in a gap by exploring filterbank learning for keyword spotting (KWS). Two approaches are examined: filterbank matrix learning in the power spectral domain and parameter learning of a psychoacoustically-motivated gammachirp filterbank. Filterbank parameters are optimized jointly with a modern deep residual neural network-based KWS back-end. Our experimental results reveal that, in general, there are no statistically significant differences, in terms of KWS accuracy, between using a learned filterbank and handcrafted speech features. Thus, while we conclude that the latter are still a wise choice when using modern KWS back-ends, we also hypothesize that this could be a symptom of information redundancy, which opens up new research possibilities in the field of small-footprint KWS.},\n  keywords = {Redundancy;Filter banks;Signal processing;Information filters;Speech processing;Task analysis;Spectral analysis;Filterbank learning;keyword spotting;end-to-end;gammachirp filterbank;gammatone filterbank},\n  doi = {10.23919/Eusipco47968.2020.9287772},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000331.pdf},\n}\n\n
Robust variable selection and distributed inference using τ-based estimators for large-scale data. Mozafari-Majd, E.; and Koivunen, V. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2453-2457, Aug 2020.
@InProceedings{9287773,\n  author = {E. Mozafari-Majd and V. Koivunen},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust variable selection and distributed inference using τ-based estimators for large-scale data},\n  year = {2020},\n  pages = {2453-2457},\n  abstract = {In this paper, we address the problem of performing robust statistical inference for large-scale data sets whose volume and dimensionality may be so high that distributed storage and processing is required. Here, the large-scale data are assumed to be contaminated by outliers and exhibit sparseness. We propose a distributed and robust two-stage statistical inference method. In the first stage, robust variable selection is done by exploiting τ-Lasso to find the sparse basis in each node with a distinct subset of data. The selected variables are communicated to a fusion center (FC) in which the variables for the complete data are chosen using a majority voting rule. In the second stage, confidence intervals and parameter estimates are found in each node using a robust τ-estimator combined with bootstrapping and then combined at the FC. The simulation results demonstrate the validity and reliability of the algorithm in variable selection and constructing confidence intervals even if the estimation problem in the subsets is slightly underdetermined.},\n  keywords = {inference mechanisms;parameter estimation;regression analysis;statistical analysis;robust variable selection;robust statistical inference;distributed storage;two-stage statistical inference method;sparse basis;parameter estimates;constructing confidence intervals;estimation problem;robust tau-estimator;exploiting tau-Lasso;fusion center;Error analysis;Input variables;Signal processing algorithms;Distributed databases;Inference algorithms;Classification algorithms;Reliability;statistical inference;robust;sparse;high-dimensional;large-scale data;variable selection;bootstrap},\n  doi = {10.23919/Eusipco47968.2020.9287773},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
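The two-stage idea above (local selection at each node, majority voting at the fusion center) can be sketched as follows; note that sklearn's ordinary Lasso is used here as a stand-in for the robust τ-Lasso, so the outlier robustness of the paper's estimator is not reproduced.

```python
import numpy as np
from sklearn.linear_model import Lasso

rng = np.random.default_rng(3)
p, n_nodes, n_per_node = 50, 5, 80
beta = np.zeros(p)
beta[[3, 7, 20]] = [2.0, -1.5, 1.0]               # sparse ground truth

votes = np.zeros(p, dtype=int)
for _ in range(n_nodes):                          # stage 1: selection at each node
    X = rng.standard_normal((n_per_node, p))
    y = X @ beta + 0.1 * rng.standard_normal(n_per_node)
    selected = np.flatnonzero(Lasso(alpha=0.1).fit(X, y).coef_ != 0)
    votes[selected] += 1                          # each node reports its support set

support = np.flatnonzero(votes > n_nodes // 2)    # stage 2: majority vote at the FC
print(support)                                    # typically recovers {3, 7, 20}
```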
ESA360 - Early SKIP Mode Decision Algorithm for Fast ERP 360 Video Coding. Storch, I.; Correa, G.; Zatt, B.; Agostini, L.; and Palomino, D. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 535-539, Aug 2020.
@InProceedings{9287774,\n  author = {I. Storch and G. Correa and B. Zatt and L. Agostini and D. Palomino},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {ESA360 - Early SKIP Mode Decision Algorithm for Fast ERP 360 Video Coding},\n  year = {2020},\n  pages = {535-539},\n  abstract = {This paper presents ESA360 - Early SKIP mode decision Algorithm for fast ERP 360 video coding. ESA360 performs early SKIP mode selection based on block position and homogeneity to avoid testing the remaining modes and reduce the encoder complexity. The algorithm is developed based on an analysis of the specific behavior of SKIP mode occurrences throughout the video frame when coding equirectangular projected (ERP) 360 videos. This analysis pointed out that due to ERP distortion, the homogeneity of the blocks and the region being coded poses a more significant influence in the selection of SKIP mode for ERP 360 videos when compared to conventional videos. Experimental results show that ESA360 is able to achieve up to 21.44% of complexity reduction with negligible coding efficiency loss, and it is competitive with related works.},\n  keywords = {Video coding;Signal processing algorithms;Tools;Distortion;Encoding;Complexity theory;Testing;360 video;early decision;complexity reduction},\n  doi = {10.23919/Eusipco47968.2020.9287774},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000535.pdf},\n}\n\n
Inferring the location of reflecting surfaces exploiting loudspeaker directivity. Zaccà, V.; Martínez-Nuevo, P.; Møller, M.; Martínez, J.; and Heusdens, R. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 61-65, Aug 2020.
@InProceedings{9287776,\n  author = {V. {Zaccà} and P. Martínez-Nuevo and M. Møller and J. Martínez and R. Heusdens},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Inferring the location of reflecting surfaces exploiting loudspeaker directivity},\n  year = {2020},\n  pages = {61-65},\n  abstract = {Accurate sound field reproduction in rooms is often limited by the lack of knowledge of the room characteristics. Information about the room shape or nearby reflecting boundaries can, in principle, be used to improve the accuracy of the reproduction. In this paper, we propose a method to infer the location of nearby reflecting boundaries from measurements on a microphone array. As opposed to traditional methods, we explicitly exploit the loudspeaker directivity model—beyond omnidirectional radiation—and the microphone array geometry. This approach does not require noiseless timing information of the echoes as input, nor a tailored loudspeaker-wall-microphone measurement step. Simulations show the proposed model outperforms current methods that disregard directivity in reverberant environments.},\n  keywords = {Loudspeakers;Geometry;Shape;Current measurement;Signal processing;Microphone arrays;Timing;Room geometry estimation;sparse recovery;beamforming;room acoustics;image source model;spatial room impulse response;loudspeaker directivity model},\n  doi = {10.23919/Eusipco47968.2020.9287776},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000061.pdf},\n}\n\n
Learning without Forgetting for Decentralized Neural Nets with Low Communication Overhead. Liang, X.; Javid, A. M.; Skoglund, M.; and Chatterjee, S. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2185-2189, Aug 2020.
@InProceedings{9287777,\n  author = {X. Liang and A. M. Javid and M. Skoglund and S. Chatterjee},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Learning without Forgetting for Decentralized Neural Nets with Low Communication Overhead},\n  year = {2020},\n  pages = {2185-2189},\n  abstract = {We consider the problem of training a neural net over a decentralized scenario with a low communication over-head. The problem is addressed by adapting a recently proposed incremental learning approach, called ‘learning without forgetting’. While an incremental learning approach assumes data availability in a sequence, nodes of the decentralized scenario can not share data between them and there is no master node. Nodes can communicate information about model parameters among neighbors. Communication of model parameters is the key to adapt the ‘learning without forgetting’ approach to the decentralized scenario. We use random walk based communication to handle a highly limited communication resource.},\n  keywords = {Training;Adaptation models;Neural networks;Signal processing algorithms;Europe;Signal processing;Feeds;Decentralized learning;feedforward neural net;learning without forgetting;low communication overhead},\n  doi = {10.23919/Eusipco47968.2020.9287777},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002185.pdf},\n}\n\n
Realistic Lip Animation from Speech for Unseen Subjects using Few-shot Cross-modal Learning. Agarwal, S.; Das, D.; and Bhowmick, B. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 690-694, Aug 2020.
@InProceedings{9287778,\n  author = {S. Agarwal and D. Das and B. Bhowmick},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Realistic Lip Animation from Speech for Unseen Subjects using Few-shot Cross-modal Learning},\n  year = {2020},\n  pages = {690-694},\n  abstract = {Recent advances in Convolutional Neural Network (CNN) based approaches have been able to generate convincing talking heads. Personalization of such talking heads requires training of the model with a large number of examples of the target person. This is also time consuming. In this paper, we propose a meta-learning based few-shot approach for generating personalized 2D talking heads where the lip animation is driven by a given audio. The idea is that the model is meta-trained with a dataset consisting of a large variety of subjects’ ethnicity and vocabulary. We show that our meta-trained model is then capable of generating realistic animation for previously unseen face and unseen audio when finetuned with only a few-shot examples for a very short time (180 seconds). Considering the fact that facial expressions driven by audio are mainly expressed through motion around lips, we restrict ourselves to animating lip only. We have done the experiments on two publicly available datasets: GRID and TCD-TIMIT and our own captured data of Asian people. Both qualitative and quantitative analysis show that animations generated by such meta-learned model surpasses the state-of-the-art methods both in terms of realism and time taken.},\n  keywords = {Training;Vocabulary;Statistical analysis;Lips;Two dimensional displays;Animation;Speech processing;MAML;lip animation;meta-learning},\n  doi = {10.23919/Eusipco47968.2020.9287778},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000690.pdf},\n}\n\n
End-to-end Learned Image Compression with Conditional Latent Space Modeling for Entropy Coding. Yeşilyurt, A. B.; and Kamışlı, F. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 501-505, Aug 2020.
@InProceedings{9287779,\n  author = {A. B. Yeşilyurt and F. Kamışlı},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {End-to-end Learned Image Compression with Conditional Latent Space Modeling for Entropy Coding},\n  year = {2020},\n  pages = {501-505},\n  abstract = {The use of neural networks in image compression enables transforms and probability models for entropy coding which can process images based on much more complex models than the simple Gauss-Markov models in traditional compression methods, all at the expense of higher computational complexity. In the neural-network based image compression literature, various methods to model the dependencies in the transform domain/latent space are proposed. This work uses an alternative method to exploit the dependencies of the latent representation. The joint density of the latent representation is modeled as a product of conditional densities, which are learned using neural networks. However, each latent variable is not conditioned on all previous latent variables as in the chain rule of factoring joint distributions, but only on a few previous variables, in particular the left, upper and upper-left spatial neighbor variables, based on a Markov property assumption for a simpler model and algorithm. The compression performance is comparable with the state-of-the-art compression models, while the conditional densities require a much simpler network and training time due to their simplicity and smaller number of parameters than their counterparts.},\n  keywords = {Image coding;Computational modeling;Neural networks;Rate-distortion;Transforms;Markov processes;Entropy coding;image compression;transform coding;deep learning;conditional modeling},\n  doi = {10.23919/Eusipco47968.2020.9287779},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000501.pdf},\n}\n\n
Adaptive Algorithms for Tracking Tensor-Train Decomposition of Streaming Tensors. Thanh, L. T.; Abed-Meraim, K.; Trung, N. L.; and Boyer, R. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 995-999, Aug 2020.
@InProceedings{9287780,\n  author = {L. T. Thanh and K. Abed-Meraim and N. L. Trung and R. Boyer},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive Algorithms for Tracking Tensor-Train Decomposition of Streaming Tensors},\n  year = {2020},\n  pages = {995-999},\n  abstract = {Tensor-train (TT) decomposition has been an efficient tool to find low order approximation of large-scale, high-order tensors. Existing TT decomposition algorithms are either of high computational complexity or operating in batch-mode, hence quite inefficient for (near) real-time processing. In this paper, we propose a novel adaptive algorithm for TT decomposition of streaming tensors whose slices are serially acquired over time. By leveraging the alternating minimization framework, our estimator minimizes an exponentially weighted least-squares cost function in an efficient way. The proposed method can yield an estimation accuracy very close to the error bound. Numerical experiments show that the proposed algorithm is capable of adaptive TT decomposition with a competitive performance evaluation on both synthetic and real data.},\n  keywords = {Tensors;Signal processing algorithms;Adaptive algorithms;Tools;Streaming media;Signal processing;Approximation algorithms;Tensor-train decomposition;adaptive algorithms;streaming tensors},\n  doi = {10.23919/Eusipco47968.2020.9287780},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000995.pdf},\n}\n\n
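As background for the entry above, the sketch below is a plain batch TT-SVD that makes the tensor-train format concrete; the adaptive streaming tracker that is the paper's actual contribution is not reproduced, and the ranks and test tensor are illustrative.

```python
import numpy as np

def tt_svd(tensor, ranks):
    """Decompose `tensor` into TT cores with the prescribed internal ranks."""
    dims = tensor.shape
    cores, r_prev = [], 1
    mat = tensor.reshape(r_prev * dims[0], -1)
    for k in range(len(dims) - 1):
        U, S, Vt = np.linalg.svd(mat, full_matrices=False)
        r = min(ranks[k], len(S))
        cores.append(U[:, :r].reshape(r_prev, dims[k], r))          # k-th TT core
        mat = (np.diag(S[:r]) @ Vt[:r]).reshape(r * dims[k + 1], -1)
        r_prev = r
    cores.append(mat.reshape(r_prev, dims[-1], 1))                  # last TT core
    return cores

# A rank-1 third-order tensor is represented exactly with TT ranks (1, 1).
a, b, c = np.random.default_rng(4).standard_normal((3, 8))
X = np.einsum('i,j,k->ijk', a, b, c)
cores = tt_svd(X, ranks=[1, 1])
print([core.shape for core in cores])                               # [(1, 8, 1)] * 3
print(np.allclose(np.einsum('aib,bjc,ckd->ijk', *cores), X))        # True
```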
Convolutional Neural Network for Material Decomposition in Spectral CT Scans. Bussod, S.; Abascal, J. F. P. J.; Arridge, S.; Hauptmann, A.; Chappard, C.; Ducros, N.; and Peyrin, F. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1259-1263, Aug 2020.
@InProceedings{9287781,\n  author = {S. Bussod and J. F. P. J. Abascal and S. Arridge and A. Hauptmann and C. Chappard and N. Ducros and F. Peyrin},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Convolutional Neural Network for Material Decomposition in Spectral CT Scans},\n  year = {2020},\n  pages = {1259-1263},\n  abstract = {Spectral computed tomography acquires energy-resolved data that allows recovery of densities of constituents of an object. This can be achieved by decomposing the measured spectral projection into material projections, and passing these decomposed projections through a tomographic reconstruction algorithm, to get the volumetric mass density of each material. Material decomposition is a nonlinear inverse problem that has been traditionally solved using model-based material decomposition algorithms. However, the forward model is difficult to estimate in real prototypes. Moreover, the traditional regularizers used to stabilize inversions are not fully relevant in the projection domain. In this study, we propose a deep-learning method for material decomposition in the projection domain. We validate our methodology with numerical phantoms of human knees that are created from synchrotron CT scans. We consider four different scans for training, and one for validation. The measurements are corrupted by Poisson noise, assuming that at most 10^5 photons hit the detector. Compared to a regularized Gauss-Newton algorithm, the proposed deep-learning approach provides a compromise between noise and resolution, which reduces the computation time by a factor of 100.},\n  keywords = {computerised tomography;convolutional neural nets;image reconstruction;inverse problems;learning (artificial intelligence);medical image processing;phantoms;energy-resolved data;measured spectral projection;spectral computed tomography;spectral CT scans;convolutional neural network;regularized Gauss-Newton algorithm;synchrotron CT scans;projection domain;model-based material decomposition algorithms;nonlinear inverse problem;volumetric mass density;tomographic reconstruction algorithm;decomposed projections;material projections;Training;Computed tomography;Volume measurement;Phantoms;Numerical models;Osteoarthritis;Photonics;Spectral computed tomography;Convolutional neural network;Material decomposition;Knee phantom},\n  doi = {10.23919/Eusipco47968.2020.9287781},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001259.pdf},\n}\n\n
A Riemannian approach to blind separation of t-distributed sources. Bouchard, F.; Breloy, A.; Ginolhac, G.; and Renaux, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 965-969, Aug 2020.
@InProceedings{9287783,\n  author = {F. Bouchard and A. Breloy and G. Ginolhac and A. Renaux},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Riemannian approach to blind separation of t-distributed sources},\n  year = {2020},\n  pages = {965-969},\n  abstract = {The blind source separation problem is considered through the approach based on non-stationarity and coloration. In both cases, the sources are usually assumed to be Gaussian. In this paper, we extend previous works in order to handle sources drawn from the multivariate Student t-distribution. After studying the structure of the parameter manifold in this case, a new blind source separation criterion based on the log-likelihood of the considered distribution is proposed. To solve the resulting optimization problem, Riemannian optimization on the parameter manifold is leveraged. Practical expressions of the mathematical tools required by first-order Riemannian optimization methods for this parameter manifold are derived to this end. The performance of the proposed method is illustrated on simulated data.},\n  keywords = {Manifolds;Optimization methods;Europe;Tools;Signal processing;Blind source separation;blind source separation;Student t-distribution;Riemannian optimization},\n  doi = {10.23919/Eusipco47968.2020.9287783},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000965.pdf},\n}\n\n
Deep Video Canonical Correlation Analysis for Steady State motion Visual Evoked Potential Feature Extraction. Karimi, R.; Mohammadi, A.; Rosero, L.; and Asif, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1130-1134, Aug 2020.
@InProceedings{9287784,
  author = {R. Karimi and A. Mohammadi and L. Rosero and A. Asif},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Deep Video Canonical Correlation Analysis for Steady State motion Visual Evoked Potential Feature Extraction},
  year = {2020},
  pages = {1130-1134},
  abstract = {Recently, there has been a surge of interest in development of Brain Computer Interface (BCI) systems based on Steady-State motion-Visual Evoked Potentials (SSmVEP), where motion stimulation is utilized to address high brightness and uncomfortably issues associated with conventional light-flashing/flickering. In this paper, we propose a deep learning-based classification model that extracts features of the SSmVEPs directly from the videos of stimuli. More specifically, the proposed deep architecture, referred to as the Deep Video Canonical Correlation Analysis (DvCCA), consists of a Video Feature Extractor (VFE) layer that uses characteristics of videos utilized for SSmVEP stimulation to fit the template EEG signals of each individual, independently. The proposed VFE layer extracts features that are more correlated with the stimulation video signal as such eliminates problems, typically, associated with deep networks such as overfitting and lack of availability of sufficient training data. The proposed DvCCA is evaluated based on a real EEG dataset and the results corroborate its superiority against recently proposed state-of-the-art deep models.},
  keywords = {brain-computer interfaces;electroencephalography;feature extraction;learning (artificial intelligence);medical signal processing;neurophysiology;signal classification;visual evoked potentials;brain computer interface systems;motion stimulation;deep learning-based classification model;deep architecture;SSmVEP stimulation;stimulation video signal;deep networks;video feature extractor layer;steady state motion visual evoked potential feature extraction;deep video canonical correlation analysis;EEG signals;EEG dataset;Correlation;Training data;Feature extraction;Brain modeling;Electroencephalography;Steady-state;Videos;Steady State Motion Evoked Potentials;EEG Signals;Brain Computer Interfaces;Deep CCA},
  doi = {10.23919/Eusipco47968.2020.9287784},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001130.pdf},
}

Non-Intrusive Estimation of Speech Signal Parameters using a Frame-based Machine Learning Approach. Sharma, D.; Berger, L.; Quillen, C.; and Naylor, P. A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 446-450, Aug 2020.
@InProceedings{9287785,
  author = {D. Sharma and L. Berger and C. Quillen and P. A. Naylor},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Non-Intrusive Estimation of Speech Signal Parameters using a Frame-based Machine Learning Approach},
  year = {2020},
  pages = {446-450},
  abstract = {We present a novel, non-intrusive method that jointly estimates acoustic signal properties associated with the perceptual speech quality, level of reverberation and noise in a speech signal. We explore various machine learning frameworks, consisting of popular feature extraction front-ends and two types of regression models and show the trade-off in performance that must be considered with each combination. We show that a short-time framework consisting of an 80-dimension log-Mel filter bank feature front-end employing spectral augmentation, followed by a 3 layer LSTM recurrent neural network model achieves a mean absolute error of 3.3 dB for C50, 2.3 dB for segmental SNR and 0.3 for PESQ estimation on the Libri Augmented (LA) database. The internal VAD for this system achieves an F1 score of 0.93 on this data. The proposed system also achieves a 2.4 dB mean absolute error for C50 estimation on the ACE test set. Furthermore, we show how each type of acoustic parameter correlates with ASR performance in terms of ground truth labels and additionally show that the estimated C50, SNR and PESQ from our proposed method have a high correlation (greater than 0.92) with WER on the LA test set.},
  keywords = {Estimation;Machine learning;Signal processing;Feature extraction;Complexity theory;Reverberation;Signal to noise ratio;deep neural networks;clarity index;speech quality},
  doi = {10.23919/Eusipco47968.2020.9287785},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000446.pdf},
}

CFAR Detector for Compressed Sensing Radar Based on l1-norm Minimisation. Kozlov, D.; and Ott, P. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2050-2054, Aug 2020.
@InProceedings{9287786,
  author = {D. Kozlov and P. Ott},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {CFAR Detector for Compressed Sensing Radar Based on l1-norm Minimisation},
  year = {2020},
  pages = {2050-2054},
  abstract = {Rapidly developing Compressed Sensing theory looks promising for many practical applications, since it allows us to reconstruct K-sparce signals and to reduce some hardware requirements. In this work, we consider the problem of changing noise properties after recovering and its influence on the radar false alarm rate. Due to nonlinearity of the recovering algorithm there is no analytical solution allowing finding a noise distribution after the reconstruction. Therefore, by an empirical approach we come to a solution, where the well-known cell averaging constant false alarm rate detector can be used for a compressed sensing radar. We analyze its performance by simulation and test it with real radar data.},
  keywords = {Signal processing algorithms;Radar detection;Radar;Detectors;Minimization;Solids;Compressed sensing;radar;compressed sensing;detection},
  doi = {10.23919/Eusipco47968.2020.9287786},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002050.pdf},
}

Successive Nonnegative Projection Algorithm for Linear Quadratic Mixtures. Kervazo, C.; Gillis, N.; and Dobigeon, N. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1951-1955, Aug 2020.
@InProceedings{9287788,
  author = {C. Kervazo and N. Gillis and N. Dobigeon},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Successive Nonnegative Projection Algorithm for Linear Quadratic Mixtures},
  year = {2020},
  pages = {1951-1955},
  abstract = {In this work, we tackle the problem of hyperspectral unmixing by departing from the usual linear model and focusing on a linear-quadratic (LQ) one. The algorithm we propose, coined Successive Nonnegative Projection Algorithm for Linear Quadratic mixtures (SNPALQ), extends the Successive Nonnegative Projection Algorithm (SNPA), specifically designed to address the unmixing problem under a linear non-negative model and the pure-pixel assumption (a.k.a. near-separable assumption). By explicitly modeling the product terms inherent to the LQ model along the iterations of the SNPA scheme, the nonlinear contributions of the mixing are mitigated, thus improving the separation quality. The approach is shown to be relevant in realistic numerical experiments, which further highlight that SNPALQ is robust to noise.},
  keywords = {Signal processing algorithms;Focusing;Signal processing;Robustness;Numerical models;Projection algorithms;Hyperspectral imaging;Nonnegative Matrix Factorization;Non-linear Hyperspectral Unmixing;Linear-Quadratic Models;Separability and Pure-Pixel Assumption;Non-linear Blind Source Separation},
  doi = {10.23919/Eusipco47968.2020.9287788},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001951.pdf},
}

Low-Complexity HEVC Transrating Based on Prediction Unit Mode Inheritance. Lindino, M.; Bubolz, T.; Zatt, B.; Palomino, D.; and Correa, G. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 550-554, Aug 2020.
@InProceedings{9287789,
  author = {M. Lindino and T. Bubolz and B. Zatt and D. Palomino and G. Correa},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Low-Complexity HEVC Transrating Based on Prediction Unit Mode Inheritance},
  year = {2020},
  pages = {550-554},
  abstract = {Video transcoding for bit rate adaptation has become mandatory for over-the-top applications that deliver multimedia content in heterogeneous environments under different network conditions and user capabilities. As transcoding requires sequentially decoding and re-encoding the video bitstream, the computational cost involved in the process is too high, especially when considering current state-of-the-art codecs, such as the High Efficiency Video Coding (HEVC). This work presents a fast HEVC transcoder for bit rate adaptation based on Prediction Unit (PU) mode inheritance, which uses information gathered from the HEVC decoding process to accelerate PU mode decision in the re-encoding process. Experimental results show that the proposed method achieves an average transrating time reduction of 42% at the cost of a bitrate increase of 0.54%.},
  keywords = {Statistical analysis;Bit rate;Transcoding;Streaming media;Decoding;Complexity theory;High efficiency video coding;HEVC;complexity reduction;transrating;transcoding},
  doi = {10.23919/Eusipco47968.2020.9287789},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000550.pdf},
}

Compressing Piecewise Smooth Images with the Mumford-Shah Cartoon Model. Jost, F.; Peter, P.; and Weickert, J. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 511-515, Aug 2020.
@InProceedings{9287790,
  author = {F. Jost and P. Peter and J. Weickert},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Compressing Piecewise Smooth Images with the Mumford-Shah Cartoon Model},
  year = {2020},
  pages = {511-515},
  abstract = {Compressing piecewise smooth images is important for many data types such as depth maps in 3D videos or optic flow fields for motion compensation. Specialised codecs that rely on explicitly stored segmentations excel in this task since they preserve discontinuities between smooth regions. However, current approaches rely on ad hoc segmentations that lack a clean interpretation in terms of energy minimisation. As a remedy, we derive a generic region merging algorithm from the Mumford-Shah cartoon model. It adapts the segmentation to arbitrary reconstruction operators for the segment content. In spite of its conceptual simplicity, our framework can outperform previous segment-based compression methods as well as BPG by up to 3 dB.},
  keywords = {Image segmentation;Adaptation models;Image coding;Three-dimensional displays;Motion segmentation;Task analysis;Videos;Compression;segmentation;inpainting},
  doi = {10.23919/Eusipco47968.2020.9287790},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000511.pdf},
}

Sample drop detection for asynchronous devices distributed in space. Raissi, T.; Pascual, S.; and Omologo, M. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 815-819, Aug 2020.
@InProceedings{9287791,
  author = {T. Raissi and S. Pascual and M. Omologo},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Sample drop detection for asynchronous devices distributed in space},
  year = {2020},
  pages = {815-819},
  abstract = {In many applications of multi-microphone multi-device processing, the synchronization among different input channels can be affected by the lack of a common clock and isolated drops of samples. In this work, we address the issue of sample drop detection in the context of a conversational speech scenario, recorded by a set of microphones distributed in space. The goal is to design a neural-based model that given a short window in the time domain, detects whether one or more devices have been subjected to a sample drop event. The candidate time windows are selected from a set of large time intervals, possibly including a sample drop, and by using a preprocessing step. The latter is based on the application of normalized cross-correlation between signals acquired by different devices. The architecture of the neural network relies on a CNN-LSTM encoder, followed by multi-head attention. The experiments are conducted using both artificial and real data. Our proposed approach obtained F1 score of 88% on an evaluation set extracted from the CHiME-5 corpus. A comparable performance was found in a larger set of experiments conducted on a set of multi-channel artificial scenes.},
  keywords = {Performance evaluation;Array signal processing;Speech recognition;Synchronization;Time-domain analysis;Task analysis;Microphones;Far-field speech recognition;conversational speech;microphone array synchronization;sample drop detection;CHiME-5 challenge},
  doi = {10.23919/Eusipco47968.2020.9287791},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000815.pdf},
}

Automatic Image Colorization based on Multi-Discriminators Generative Adversarial Networks. Mourchid, Y.; Donias, M.; and Berthoumieu, Y. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1532-1536, Aug 2020.
@InProceedings{9287792,
  author = {Y. Mourchid and M. Donias and Y. Berthoumieu},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Automatic Image Colorization based on Multi-Discriminators Generative Adversarial Networks},
  year = {2020},
  pages = {1532-1536},
  abstract = {This paper presents a deep automatic colorization approach which avoids any manual intervention. Recently Generative Adversarial Network (GANs) approaches have proven their effectiveness for image colorization tasks. Inspired by GANs methods, we propose a novel colorization model that produces more realistic quality results. The model employs an additional discriminator which works in the feature domain. Using a feature discriminator, our generator produces structural high-frequency features instead of noisy artifacts. To achieve the required level of details in the colorization process, we incorporate non-adversarial losses from recent image style transfer techniques. Besides, the generator architecture follows the general shape of U-Net, to transfer information more effectively between distant layers. The performance of the proposed model was evaluated quantitatively as well as qualitatively with places365 dataset. Results show that the proposed model achieves more realistic colors with less artifacts compared to the state-of-the-art approaches.},
  keywords = {Visualization;Shape;Signal processing;Generative adversarial networks;Generators;Noise measurement;Task analysis},
  doi = {10.23919/Eusipco47968.2020.9287792},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001532.pdf},
}

Flashlight CNN Image Denoising. Thanh Binh, P. H.; Cruz, C.; and Egiazarian, K. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 670-674, Aug 2020.
@InProceedings{9287793,
  author = {P. H. {Thanh Binh} and C. Cruz and K. Egiazarian},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Flashlight CNN Image Denoising},
  year = {2020},
  pages = {670-674},
  abstract = {This paper proposes a learning-based denoising method called FlashLight CNN (FLCNN) that implements a deep neural network for image denoising. The proposed approach is based on deep residual networks and inception networks and it is able to leverage many more parameters than residual networks alone for denoising grayscale images corrupted by additive white Gaussian noise (AWGN). FlashLight CNN demonstrates state of the art performance when compared quantitatively and visually with the current state of the art image denoising methods.},
  keywords = {AWGN;Noise reduction;Neural networks;Signal processing;Gray-scale;Image denoising;Residual neural networks;Image Denoising;Convolutional Neural Networks;Inception;Residual Learning;Gaussian Noise},
  doi = {10.23919/Eusipco47968.2020.9287793},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000670.pdf},
}

Revisiting SincNet: An Evaluation of Feature and Network Hyperparameters for Speaker Recognition. Oneață, D.; Georgescu, L.; Cucu, H.; Burileanu, D.; and Burileanu, C. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1-5, Aug 2020.
@InProceedings{9287794,
  author = {D. Oneață and L. Georgescu and H. Cucu and D. Burileanu and C. Burileanu},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Revisiting SincNet: An Evaluation of Feature and Network Hyperparameters for Speaker Recognition},
  year = {2020},
  pages = {1-5},
  abstract = {The SincNet architecture [1] was recently introduced as an approach to the speaker recognition task. Its main innovation was the sinc layer, an elegant and lightweight way of extracting features from speech. Despite good performance on multiple datasets, little information was provided on the architectural choices. In this work, we aim to shed some light on the importance of the network topology and various hyperparameters. We replace the original network trunk with a lightweight trunk inspired from residual networks (ResNets) and optimize its hyperparameters. Furthermore, we carry an extensive study on the sinc layer’s hyperparameters. Our main finding is that the stride and window size of the feature extractor plays a crucial role in obtaining good performance. Further experiments on conventional features, such as MFCCs and FBANKs, yield similar conclusions; in fact, by using optimal values for these two hyperparameters, traditional features are able to match the performance of sinc features. Surprisingly, the best results obtained go against conventional wisdom: an analysis window of only a couple of milliseconds and a stride of only a couple of samples are found to give the best results. Our code is available at https://bitbucket.org/doneata/sincnet.},
  keywords = {Technological innovation;Signal processing;Feature extraction;Speaker recognition;Task analysis;Optimization;Residual neural networks;deep learning;speaker recognition;features;hyperparameter optimization},
  doi = {10.23919/Eusipco47968.2020.9287794},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000361.pdf},
}

Sparse Array Receiver Beamformer Design for Multi-Functional Antenna. Hamza, S. A.; and Amin, M. G. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1836-1840, Aug 2020.
@InProceedings{9287795,
  author = {S. A. Hamza and M. G. Amin},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Sparse Array Receiver Beamformer Design for Multi-Functional Antenna},
  year = {2020},
  pages = {1836-1840},
  abstract = {Planning sensor locations can potentially economize the receiver cost by minimizing the hardware and computational needs, while satisfying a predetermined design criterion. In this paper, we consider a sparse array receive beamformer design approach for Multi-Functional Antennas (MFA), operating in different frequency bands. In this approach, antenna positions are selected from uniformly spaced locations that are served by a limited number of transceiver chains. The design objective is to maximize the beamformer output Signal-to-Interference-plus-noise-ratio (MaxSINR) for desired sources with disjoint frequency bands operating in a wideband jamming environment. The problem is solved efficiently through alternating direction method of multipliers (ADMM) and simplified to parallel quadratically constraint quadratic programs (QCQP) with a single associated constraint. The re-weighted group sparsity is adopted to ensure a common sparse configuration across all frequency bands. The efficacy of the proposed algorithm is demonstrated with the help of a design example.},
  keywords = {Receiving antennas;Signal processing algorithms;Convex functions;Transceivers;Planning;Wideband;Antenna arrays;Sparse arrays;QCQP;ADMM},
  doi = {10.23919/Eusipco47968.2020.9287795},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001836.pdf},
}

Multiple Speaker Localization using Mixture of Gaussian Model with Manifold-based Centroids. Bross, A.; Laufer-Goldshtein, B.; and Gannot, S. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 895-899, Aug 2020.
@InProceedings{9287796,
  author = {A. Bross and B. Laufer-Goldshtein and S. Gannot},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Multiple Speaker Localization using Mixture of Gaussian Model with Manifold-based Centroids},
  year = {2020},
  pages = {895-899},
  abstract = {A data-driven approach for multiple speakers localization in reverberant enclosures is presented. The approach combines semi-supervised learning on multiple manifolds with unsupervised maximum likelihood estimation. The relative transfer functions (RTFs) are used in both stages of the proposed algorithm as feature vectors, which are known to be related to source positions. The microphone positions are not known. In the training stage, a nonlinear, manifold-based, mapping between RTFs and source locations is inferred using single-speaker utterances. The inference procedure utilizes two RTF datasets: A small set of RTFs with their associated position labels; and a large set of unlabelled RTFs. This mapping is used to generate a dense grid of localized sources that serve as the centroids of a Mixture of Gaussians (MoG) model, used in the test stage of the algorithm to cluster RTFs extracted from multiple-speakers utterances. Clustering is applied by applying the expectation-maximization (EM) procedure that relies on the sparsity and intermittency of the speech signals. A preliminary experimental study, with either two or three overlapping speakers in various reverberation levels, demonstrates that the proposed scheme achieves high localization accuracy compared to a baseline method using a simpler propagation model.},
  keywords = {Training;Signal processing algorithms;Transfer functions;Signal processing;Semisupervised learning;Position measurement;Reverberation;Manifold-learning;semi-supervised inference;mixture of Gaussians},
  doi = {10.23919/Eusipco47968.2020.9287796},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000895.pdf},
}

Comparing Representations for Audio Synthesis Using Generative Adversarial Networks. Nistal, J.; Lattner, S.; and Richard, G. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 161-165, Aug 2020.
@InProceedings{9287799,
  author = {J. Nistal and S. Lattner and G. Richard},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Comparing Representations for Audio Synthesis Using Generative Adversarial Networks},
  year = {2020},
  pages = {161-165},
  abstract = {In this paper, we compare different audio signal representations, including the raw audio waveform and a variety of time-frequency representations, for the task of audio synthesis with Generative Adversarial Networks (GANs). We conduct the experiments on a subset of the NSynth dataset. The architecture follows the benchmark Progressive Growing Wasserstein GAN. We perform experiments both in a fully non-conditional manner as well as conditioning the network on the pitch information. We quantitatively evaluate the generated material utilizing standard metrics for assessing generative models, and compare training and sampling times. We show that complex-valued as well as the magnitude and Instantaneous Frequency of the Short-Time Fourier Transform achieve the best results, and yield fast generation and inversion times. The code for feature extraction, training and evaluating the model is available online.},
  keywords = {Training;Measurement;Time-frequency analysis;Generative adversarial networks;Signal representation;Task analysis;Standards;audio;representations;synthesis;generative;adversarial},
  doi = {10.23919/Eusipco47968.2020.9287799},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000161.pdf},
}

Identification of Ischemic Heart Disease by using machine learning technique based on parameters measuring Heart Rate Variability. Silveri, G.; Merlo, M.; Restivo, L.; De Paola, B.; Miladinović, A.; Ajčević, M.; Sinagra, G.; and Accardo, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1309-1312, Aug 2020.
@InProceedings{9287800,
  author = {G. Silveri and M. Merlo and L. Restivo and B. {De Paola} and A. Miladinović and M. Ajčević and G. Sinagra and A. Accardo},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Identification of Ischemic Heart Disease by using machine learning technique based on parameters measuring Heart Rate Variability},
  year = {2020},
  pages = {1309-1312},
  abstract = {The diagnosis of heart diseases is a difficult task generally addressed by an appropriate examination of patients’ clinical data. Recently, the use of heart rate variability (HRV) analysis as well as of some machine learning algorithms, has proved to be a valuable support in the diagnosis process. However, till now, ischemic heart disease (IHD) has been diagnosed on the basis of Artificial Neural Networks (ANN) applied only to signs, symptoms and sequential ECG and coronary angiography, an invasive tool, while could be probably identified in a non-invasive way by using parameters extracted from HRV, a signal easily obtained from the ECG. In this study, 18 non-invasive features (age, gender, left ventricular ejection fraction and 15 obtained from HRV) of 243 subjects (156 normal subjects and 87 IHD patients) were used to train and validate a series of several ANN, different for number of input and hidden nodes. The best result was obtained using 7 input parameters and 7 hidden nodes with an accuracy of 98.9% and 82% for the training and validation dataset, respectively.},
  keywords = {Heart;Training;Artificial neural networks;Electrocardiography;Tools;Heart rate variability;Diseases;artificial neural network;heart rate variability;ischemic heart disease},
  doi = {10.23919/Eusipco47968.2020.9287800},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001309.pdf},
}

Signal Analysis Using Local Polynomial Approximations. Wildhaber, R. A.; Ren, E.; Waldmann, F.; and Loeliger, H.-A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2239-2243, Aug 2020.
@InProceedings{9287801,
  author = {R. A. Wildhaber and E. Ren and F. Waldmann and H. -A. Loeliger},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Signal Analysis Using Local Polynomial Approximations},
  year = {2020},
  pages = {2239-2243},
  abstract = {Local polynomial approximations represent a versatile feature space for time-domain signal analysis. The parameters of such polynomial approximations can be computed by efficient recursions using autonomous linear state space models and often allow analytical solutions for quantities of interest. The approach is illustrated by practical examples including the estimation of the delay difference between two acoustic signals and template matching in electrocardiogram signals with local variations in amplitude and time scale.},
  keywords = {Analytical models;Parameter estimation;Estimation;Europe;Delays;Time-domain analysis;Signal resolution;localized polynomials;localized feature space;delay estimation;time-scale estimation;local signal approximation;autonomous linear state space models},
  doi = {10.23919/Eusipco47968.2020.9287801},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002239.pdf},
}

Deep Recurrent Neural Networks for Audio Classification in Construction Sites. Scarpiniti, M.; Comminiello, D.; Uncini, A.; and Lee, Y.-C. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 810-814, Aug 2020.
@InProceedings{9287802,
  author = {M. Scarpiniti and D. Comminiello and A. Uncini and Y. -C. Lee},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Deep Recurrent Neural Networks for Audio Classification in Construction Sites},
  year = {2020},
  pages = {810-814},
  abstract = {In this paper, we propose a Deep Recurrent Neural Network (DRNN) approach based on Long-Short Term Memory (LSTM) units for the classification of audio signals recorded in construction sites. Five classes of multiple vehicles and tools, normally used in construction sites, have been considered. The input provided to the DRNN consists in the concatenation of several spectral features, like MFCCs, mel-scaled spectrogram, chroma and spectral contrast. The proposed architecture and the feature extraction have been described. Some experimental results, obtained by using real-world recordings, demonstrate the effectiveness of the proposed idea. The final overall accuracy on the test set is up to 97% and overcomes other state-of-the-art approaches.},
  keywords = {Recurrent neural networks;Architecture;Tools;Time factors;Reliability;Spectrogram;Monitoring;Deep learning;Recurrent neural networks (RNNs);LSTM units;Audio processing;Environmental sound classification;Construction sites},
  doi = {10.23919/Eusipco47968.2020.9287802},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000810.pdf},
}

Manifold Optimization Based Beamforming for DoA and DoD Estimation with a Single Multi-Mode Antenna. Pöhlmann, R.; Zhang, S.; Dammann, A.; and Hoeher, P. A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1841-1845, Aug 2020.
@InProceedings{9287803,
  author = {R. Pöhlmann and S. Zhang and A. Dammann and P. A. Hoeher},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Manifold Optimization Based Beamforming for DoA and DoD Estimation with a Single Multi-Mode Antenna},
  year = {2020},
  pages = {1841-1845},
  abstract = {Both direction-of-arrival (DoA) and direction-of-departure (DoD) of a radio signal contain valuable information for localization. Their estimation with antenna arrays is well known. More recently, multi-mode antennas (MMAs), building on the theory of characteristic modes, have been investigated for DoA estimation. This paper introduces joint DoA and DoD estimation with a single MMA on transmitter and receiver side. In general, the polarization of a signal transmitted by an MMA varies with the direction, which makes an appropriate signal model necessary. For best performance, optimized transmit beamforming should be performed. We derive the Cramér-Rao bound (CRB) for DoA and DoD estimation with MMAs, propose an optimized beamformer (OBF), which minimizes the CRB, and evaluate its performance.},
  keywords = {Direction-of-arrival estimation;Array signal processing;Radio transmitters;Estimation;Receivers;US Department of Defense;Antenna arrays},
  doi = {10.23919/Eusipco47968.2020.9287803},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001841.pdf},
}

Efficient Iteratively Reweighted LASSO Algorithm for Cross-Products Penalized Sparse Solutions. Luengo, D.; Vía, J.; and Trigano, T. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2045-2049, Aug 2020.
@InProceedings{9287804,
  author = {D. Luengo and J. Vía and T. Trigano},
  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},
  title = {Efficient Iteratively Reweighted LASSO Algorithm for Cross-Products Penalized Sparse Solutions},
  year = {2020},
  pages = {2045-2049},
  abstract = {In this paper, we describe an efficient iterative algorithm for finding sparse solutions to a linear system. Apart from the well-known L1 norm regularization, we introduce an additional cost term promoting solutions without too-close activations. This additional term, which is expressed as a sum of cross-products of absolute values, makes the problem non-convex and difficult to solve. However, the application of the successive convex approximations approach allows us to obtain an efficient algorithm consisting in the solution of a sequence of iteratively reweighted LASSO problems. Numerical simulations on randomly generated waveforms and ECG signals show the good performance of the proposed method.},
  keywords = {approximation theory;convex programming;electrocardiography;iterative methods;medical signal processing;penalized sparse solutions;convex approximations;efficient iteratively reweighted LASSO algorithm;numerical simulations;ECG signals;Spectroscopy;Signal processing algorithms;Electrocardiography;Signal processing;Approximation algorithms;Numerical simulation;Iterative methods;sparsity-aware learning;LASSO;sparse coding;non-convex optimization},
  doi = {10.23919/Eusipco47968.2020.9287804},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002045.pdf},
}

Target Tracking on Sensing Surface with Electrical Impedance Tomography. Huuhtanen, T.; Lankinen, A.; and Jung, A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1817-1821, Aug 2020.
\n\n\n\n \n \n \"TargetPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287805,\n  author = {T. Huuhtanen and A. Lankinen and A. Jung},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Target Tracking on Sensing Surface with Electrical Impedance Tomography},\n  year = {2020},\n  pages = {1817-1821},\n  abstract = {An emerging class of applications uses sensing surfaces, where sensor data is collected from a 2-dimensional surface covering a large spatial area. Sensing surface applications range from observing human activity to detecting failures of construction materials. Electrical impedance tomography (EIT) is an imaging technology, which has been successfully applied to imaging in several important application domains such as medicine, geophysics, and process industry. EIT is a low-cost technology offering high temporal resolution, which makes it a potential technology sensing surfaces. In this paper, we evaluate the applicability of EIT algorithms for tracking a small moving object on a 2D sensing surface. We compare standard EIT algorithms for this purpose and develop a method which models the movement of a small target on a sensing surface using hidden Markov models (HMM). Existing EIT methods are geared towards high image quality instead of smooth target trajectories, which makes them suboptimal for target tracking. Numerical experiments indicate that our proposed method outperforms existing EIT methods in target tracking accuracy.},\n  keywords = {electric impedance imaging;hidden Markov models;image reconstruction;medical image processing;sensor fusion;target tracking;electrical impedance tomography;2-dimensional surface;surface applications;imaging technology;potential technology sensing surfaces;standard EIT algorithms;hidden Markov models;image quality;Surface impedance;Target tracking;Hidden Markov models;Signal processing algorithms;Kinematics;Sensors;Surface treatment;Electrical impedance tomography;hidden Markov models;sensing surface},\n  doi = {10.23919/Eusipco47968.2020.9287805},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001817.pdf},\n}\n\n
\n
\n\n\n
\n An emerging class of applications uses sensing surfaces, where sensor data is collected from a 2-dimensional surface covering a large spatial area. Sensing surface applications range from observing human activity to detecting failures of construction materials. Electrical impedance tomography (EIT) is an imaging technology which has been successfully applied in several important application domains such as medicine, geophysics, and the process industry. EIT is a low-cost technology offering high temporal resolution, which makes it a potential technology for sensing surfaces. In this paper, we evaluate the applicability of EIT algorithms for tracking a small moving object on a 2D sensing surface. We compare standard EIT algorithms for this purpose and develop a method which models the movement of a small target on a sensing surface using hidden Markov models (HMM). Existing EIT methods are geared towards high image quality rather than smooth target trajectories, which makes them suboptimal for target tracking. Numerical experiments indicate that our proposed method outperforms existing EIT methods in target tracking accuracy.\n
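A minimal sketch of HMM-style smoothing of per-frame position estimates on a discretized sensing surface: Gaussian emission scores around the raw estimates, a transition model that favours small movements, and Viterbi decoding of the most likely trajectory. The grid size, noise level and the way the raw estimates are obtained are illustrative assumptions; the EIT reconstruction itself is not shown.

```python
import numpy as np

def viterbi_track(raw_xy, grid, sigma=0.1, move_penalty=4.0):
    """raw_xy: (T, 2) noisy per-frame estimates; grid: (K, 2) candidate cells."""
    T, K = len(raw_xy), len(grid)
    # Log transition scores favour staying close to the previous cell.
    d2 = np.sum((grid[:, None, :] - grid[None, :, :]) ** 2, axis=-1)
    log_A = -move_penalty * d2
    # Log emission scores from the raw per-frame estimates.
    e2 = np.sum((raw_xy[:, None, :] - grid[None, :, :]) ** 2, axis=-1)
    log_B = -e2 / (2 * sigma ** 2)
    delta = log_B[0].copy()
    back = np.zeros((T, K), dtype=int)
    for t in range(1, T):
        scores = delta[:, None] + log_A            # (from, to)
        back[t] = np.argmax(scores, axis=0)
        delta = scores[back[t], np.arange(K)] + log_B[t]
    path = [int(np.argmax(delta))]
    for t in range(T - 1, 0, -1):
        path.append(int(back[t, path[-1]]))
    return grid[path[::-1]]

if __name__ == "__main__":
    g = np.stack(np.meshgrid(np.linspace(0, 1, 10), np.linspace(0, 1, 10)), -1).reshape(-1, 2)
    truth = np.linspace([0.1, 0.1], [0.9, 0.9], 30)
    noisy = truth + 0.08 * np.random.default_rng(1).standard_normal(truth.shape)
    print(viterbi_track(noisy, g)[:5])
```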
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Time Encoding Using the Hyperbolic Secant Kernel.\n \n \n \n \n\n\n \n Hilton, M.; Alexandru, R.; and Dragotti, P. L.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2304-2308, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"TimePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287806,\n  author = {M. Hilton and R. Alexandru and P. L. Dragotti},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Time Encoding Using the Hyperbolic Secant Kernel},\n  year = {2020},\n  pages = {2304-2308},\n  abstract = {We investigate the problem of reconstructing signals with finite rate of innovation from non-uniform samples obtained using an integrate-and-fire system. We assume that the signal is first filtered using the derivative of a hyperbolic secant as a sampling kernel. Timing information is then obtained using an integrator and a threshold detector. The reconstruction method we propose achieves perfect reconstruction of streams of K Diracs at arbitrary time locations, or equivalently piecewise constant signals with discontinuities at arbitrary time locations, using as few as 3K+1 non-uniform samples.},\n  keywords = {Technological innovation;Signal processing;Reconstruction algorithms;Information filters;Encoding;Timing;Kernel;Non-uniform sampling;time encoding;integrate-and-fire;finite rate of innovation;hyperbolic secant function},\n  doi = {10.23919/Eusipco47968.2020.9287806},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002304.pdf},\n}\n\n
\n
\n\n\n
\n We investigate the problem of reconstructing signals with finite rate of innovation from non-uniform samples obtained using an integrate-and-fire system. We assume that the signal is first filtered using the derivative of a hyperbolic secant as a sampling kernel. Timing information is then obtained using an integrator and a threshold detector. The reconstruction method we propose achieves perfect reconstruction of streams of K Diracs at arbitrary time locations, or equivalently piecewise constant signals with discontinuities at arbitrary time locations, using as few as 3K+1 non-uniform samples.\n
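As a minimal sketch of the integrate-and-fire time encoder mentioned above: the filtered input is integrated, a spike time is emitted whenever the accumulated integral crosses a threshold, and the integrator resets. The threshold value, sampling step and the way the Dirac stream is filtered with the derivative of a hyperbolic secant are illustrative assumptions; the reconstruction method of the paper is not shown.

```python
import numpy as np

def integrate_and_fire(y, dt, threshold):
    """Return spike times for input samples y on a uniform grid with step dt."""
    acc, times = 0.0, []
    for n, v in enumerate(y):
        acc += v * dt
        if abs(acc) >= threshold:
            times.append(n * dt)
            acc = 0.0                   # reset the integrator after each firing
    return np.array(times)

if __name__ == "__main__":
    dt = 1e-3
    t = np.arange(0, 1, dt)
    # Filter a stream of Diracs with the derivative of a hyperbolic secant.
    kernel = np.gradient(1.0 / np.cosh(40 * (t - 0.5)), t)
    x = np.zeros_like(t); x[[200, 650]] = 1.0
    filtered = np.convolve(x, kernel, mode="same")
    print(integrate_and_fire(filtered, dt, threshold=0.02))
```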
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Node Varying Regularization for Graph Signals.\n \n \n \n \n\n\n \n Yang, M.; Coutino, M.; Isufi, E.; and Leus, G.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 845-849, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"NodePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287807,\n  author = {M. Yang and M. Coutino and E. Isufi and G. Leus},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Node Varying Regularization for Graph Signals},\n  year = {2020},\n  pages = {845-849},\n  abstract = {While regularization on graphs has been successful for signal reconstruction, strategies for controlling the bias-variance trade-off of such methods have not been completely explored. In this work, we put forth a node varying regularizer for graph signal reconstruction and develop a minmax approach to design the vector of regularization parameters. The proposed design only requires as prior information an upper bound on the underlying signal energy; a reasonable assumption in practice. With such formulation, an iterative method is introduced to obtain a solution meeting global equilibrium. The approach is numerically efficient and has convergence guarantees. Numerical simulations using real data support the proposed design scheme.},\n  keywords = {Upper bound;Signal processing algorithms;Numerical simulation;Nash equilibrium;Signal reconstruction;Convergence;Signal to noise ratio;graph signal processing;bias-variance trade-off;graph regularization;graph signal denoising;minmax problems},\n  doi = {10.23919/Eusipco47968.2020.9287807},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000845.pdf},\n}\n\n
\n
\n\n\n
\n While regularization on graphs has been successful for signal reconstruction, strategies for controlling the bias-variance trade-off of such methods have not been completely explored. In this work, we put forth a node varying regularizer for graph signal reconstruction and develop a minmax approach to design the vector of regularization parameters. The proposed design only requires as prior information an upper bound on the underlying signal energy; a reasonable assumption in practice. With such formulation, an iterative method is introduced to obtain a solution meeting global equilibrium. The approach is numerically efficient and has convergence guarantees. Numerical simulations using real data support the proposed design scheme.\n
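A node-varying graph regularizer generalizes the usual single-parameter Tikhonov smoother by giving each node its own weight. The sketch below shows that basic form, solving (I + diag(mu) L) x = y on a toy path graph; the graph, signal and the hand-picked mu are illustrative, whereas the paper designs mu through a minmax problem with an energy bound.

```python
import numpy as np

def node_varying_smoother(y, L, mu):
    """Node-varying graph smoother x = (I + diag(mu) L)^{-1} y (illustrative form)."""
    n = len(y)
    return np.linalg.solve(np.eye(n) + np.diag(mu) @ L, y)

if __name__ == "__main__":
    # Laplacian of a 4-node path graph.
    A = np.array([[0, 1, 0, 0], [1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0]], float)
    L = np.diag(A.sum(1)) - A
    y = np.array([1.0, 0.8, 3.0, 1.1])            # noisy signal with one outlier node
    print(node_varying_smoother(y, L, mu=np.array([0.1, 0.1, 2.0, 0.1])))
```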
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stable Ptychographic Phase Retrieval via Lost Subspace Completion.\n \n \n \n \n\n\n \n Melnyk, O.; Forstner, A.; Krahmer, F.; and Sissouno, N.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 975-979, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"StablePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287808,\n  author = {O. Melnyk and A. Forstner and F. Krahmer and N. Sissouno},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Stable Ptychographic Phase Retrieval via Lost Subspace Completion},\n  year = {2020},\n  pages = {975-979},\n  abstract = {In this paper, we consider a special case of the phase retrieval problem called ptychography. Its is a popular technique of imaging, based on local illuminations of a specimen and further reconstruction from the far field diffraction patterns. The stability and success of the recovery process is heavily based on the choice of the illumination function commonly called a window. It describes the distribution of the light along the measured region. While for some windows the conditioning can be controlled, many important classes of windows, such as Gaussian windows, were not covered. We present a subspace completion method, which allows for a well-conditioned reconstruction for a much wider choice of windows, including Gaussian windows.},\n  keywords = {image reconstruction;light diffraction;lost subspace completion;phase retrieval problem;ptychography;local illuminations;field diffraction patterns;recovery process;illumination function;measured region;Gaussian windows;subspace completion method;stable ptychographic phase retrieval;Diffraction;Lighting;Imaging;Europe;Signal processing;Image reconstruction;phase retrieval;ptychography;regularization},\n  doi = {10.23919/Eusipco47968.2020.9287808},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000975.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we consider a special case of the phase retrieval problem called ptychography. It is a popular imaging technique based on local illuminations of a specimen and subsequent reconstruction from the far-field diffraction patterns. The stability and success of the recovery process depend heavily on the choice of the illumination function, commonly called a window, which describes the distribution of the light along the measured region. While for some windows the conditioning can be controlled, many important classes of windows, such as Gaussian windows, were not covered. We present a subspace completion method which allows for a well-conditioned reconstruction for a much wider choice of windows, including Gaussian windows.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Deep Multi-channel Speech Source Separation with Time-frequency Masking for Spatially Filtered Microphone Input Signal.\n \n \n \n \n\n\n \n Togami, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 266-270, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DeepPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287810,\n  author = {M. Togami},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Deep Multi-channel Speech Source Separation with Time-frequency Masking for Spatially Filtered Microphone Input Signal},\n  year = {2020},\n  pages = {266-270},\n  abstract = {In this paper, we propose a multi-channel speech source separation technique which connects an unsupervised spatial filtering without a deep neural network (DNN) to a DNN-based speech source separation in a cascade manner. In the speech source separation technique, estimation of a covariance matrix is a highly important part. Recent studies showed that it is effective to estimate the covariance matrix by multiplying cross-correlation of microphone input signal with a time-frequency mask (TFM) inferred by the DNN. However, this assumption is not valid actually and overlapping of multiple speech sources lead to degradation of estimation accuracy of the multi-channel covariance matrix. Instead, we propose a multichannel covariance matrix estimation technique which estimates the covariance matrix by a TFM for the separated speech signal by the unsupervised spatial filtering. Pre-filtered signal can reduce overlapping of multiple speech sources and increase estimation accuracy of the covariance matrix. Experimental results show that the proposed estimation technique of the multichannel covariance matrix is effective.},\n  keywords = {Time-frequency analysis;Source separation;Filtering;Neural networks;Estimation;Covariance matrices;Microphones;Speech source separation;time-frequency masking;deep neural network;multi-channel covariance matrix estimation},\n  doi = {10.23919/Eusipco47968.2020.9287810},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000266.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a multi-channel speech source separation technique which connects an unsupervised spatial filtering stage without a deep neural network (DNN) to a DNN-based speech source separation in a cascade manner. In speech source separation, estimation of a covariance matrix is a highly important part. Recent studies showed that it is effective to estimate the covariance matrix by multiplying the cross-correlation of the microphone input signal with a time-frequency mask (TFM) inferred by the DNN. However, this assumption is not actually valid, and the overlapping of multiple speech sources leads to degradation of the estimation accuracy of the multi-channel covariance matrix. Instead, we propose a multichannel covariance matrix estimation technique which estimates the covariance matrix with a TFM applied to the speech signal separated by the unsupervised spatial filtering. The pre-filtered signal reduces the overlapping of multiple speech sources and increases the estimation accuracy of the covariance matrix. Experimental results show that the proposed estimation technique for the multichannel covariance matrix is effective.\n
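A mask-weighted multichannel covariance estimate of the kind referred to above can be written in a few lines: average the outer products of STFT vectors across time frames, weighted by a time-frequency mask. The STFT shapes and the mask in the sketch are placeholders, and whether the mask is applied to the raw or to the spatially pre-filtered signal, which is the paper's contribution, is not shown.

```python
import numpy as np

def masked_covariance(X, mask, eps=1e-8):
    """X: (F, T, M) multichannel STFT; mask: (F, T) time-frequency mask in [0, 1].
    Returns one M x M covariance matrix per frequency bin."""
    F, T, M = X.shape
    R = np.zeros((F, M, M), dtype=complex)
    for f in range(F):
        # Weighted average of rank-one outer products x x^H over time frames.
        w = mask[f]
        R[f] = (w[:, None, None] * (X[f][:, :, None] * X[f][:, None, :].conj())).sum(0)
        R[f] /= w.sum() + eps
    return R

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.standard_normal((4, 50, 3)) + 1j * rng.standard_normal((4, 50, 3))
    mask = rng.uniform(size=(4, 50))
    print(masked_covariance(X, mask)[0].shape)    # (3, 3)
```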
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Robust LCMP Beamformer with Limited Snapshots.\n \n \n \n \n\n\n \n Mahadi, M.; Ballal, T.; Moinuddin, M.; Al-Naffouri, T. Y.; and Al-Saggaf, U.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1831-1835, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287811,\n  author = {M. Mahadi and T. Ballal and M. Moinuddin and T. Y. Al-Naffouri and U. Al-Saggaf},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Robust LCMP Beamformer with Limited Snapshots},\n  year = {2020},\n  pages = {1831-1835},\n  abstract = {This paper deals with the problem of automatic diagonal loading for the linear constrained minimum power beamformer. To find the beamformer’s weights, the linear constrained minimum power problem is reformulated into its generalized sidelobe canceller implementation, which is an unconstrained least-squares problem. To solve this problem, we utilize a bounded perturbation regularization approach where a perturbation matrix with a bounded norm is added to the linear transformation matrix of the least-squares problem in order to enhance the singular-value structure of the matrix. Compared to different diagonal loading methods, the proposed method shows superiority in performance when the number of snapshots is limited.},\n  keywords = {Perturbation methods;Simulation;Loading;Europe;Signal processing;Periodic structures;Robust adaptive beamforming;LCMP;generalized sidelobe canceller;diagonal loading},\n  doi = {10.23919/Eusipco47968.2020.9287811},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001831.pdf},\n}\n\n
\n
\n\n\n
\n This paper deals with the problem of automatic diagonal loading for the linear constrained minimum power beamformer. To find the beamformer’s weights, the linear constrained minimum power problem is reformulated into its generalized sidelobe canceller implementation, which is an unconstrained least-squares problem. To solve this problem, we utilize a bounded perturbation regularization approach where a perturbation matrix with a bounded norm is added to the linear transformation matrix of the least-squares problem in order to enhance the singular-value structure of the matrix. Compared to different diagonal loading methods, the proposed method shows superiority in performance when the number of snapshots is limited.\n
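For context, the LCMP weights with diagonal loading follow the textbook closed form w = (R + δI)^{-1} C (C^H (R + δI)^{-1} C)^{-1} f. The sketch below evaluates it for an illustrative array, sample covariance and single distortionless constraint; the loading level is fixed by hand here, whereas the paper selects it automatically via bounded perturbation regularization in the generalized sidelobe canceller form.

```python
import numpy as np

def lcmp_weights(R, C, f, loading=0.0):
    """LCMP beamformer w = R_l^{-1} C (C^H R_l^{-1} C)^{-1} f with R_l = R + loading*I."""
    R_l = R + loading * np.eye(R.shape[0])
    RinvC = np.linalg.solve(R_l, C)
    return RinvC @ np.linalg.solve(C.conj().T @ RinvC, f)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    M, N = 8, 20                      # sensors, snapshots (limited-snapshot regime)
    X = rng.standard_normal((M, N)) + 1j * rng.standard_normal((M, N))
    R_hat = X @ X.conj().T / N        # sample covariance
    a = np.exp(1j * np.pi * np.arange(M) * np.sin(np.deg2rad(10)))   # look direction
    C, f = a[:, None], np.array([1.0])
    w = lcmp_weights(R_hat, C, f, loading=0.1 * np.trace(R_hat).real / M)
    print(np.abs(w.conj() @ a))       # distortionless response, approximately 1
```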
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust 2d Indoor Positioning Algorithm in the Presence of Non-Line-of-Sight Signals.\n \n \n \n \n\n\n \n AlSharif, M. H.; Ahmed, M.; Felemban, A.; Zayat, A.; Muqaibel, A.; Masood, M.; and Al-Naffouri, T. Y.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1802-1806, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287812,\n  author = {M. H. AlSharif and M. Ahmed and A. Felemban and A. Zayat and A. Muqaibel and M. Masood and T. Y. Al-Naffouri},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust 2d Indoor Positioning Algorithm in the Presence of Non-Line-of-Sight Signals},\n  year = {2020},\n  pages = {1802-1806},\n  abstract = {The presence of non-line-of-sight (NLOS) signals in indoor positioning systems can severely degrade the positioning accuracy. This paper proposes a novel and computationally efficient algorithm to determine the line-of-sight (LOS) signals and the 2D position of a target in an indoor positioning system. The proposed algorithm was evaluated by simulating an indoor positioning system in 8 m × 8 m room under the presence of NLOS signals. When benchmarked with COFFEE and Triangle-Inequality methods, the proposed method shows significant improvement in computational time (151ms to 768ms) and marginal improvements over COFFEE in terms of F1-Score (at least 5% gain in F1-Score). The 2D position estimates are in less than 4.1 cm mean squared error. Moreover, the proposed algorithm was evaluated experimentally using a low-cost ultrasonic hardware.},\n  keywords = {Three-dimensional displays;Two dimensional displays;Signal processing algorithms;Benchmark testing;Signal processing;Hardware;Computational efficiency;positioning;localization;classification;clustering;line-of-sight;non-line-of-sight},\n  doi = {10.23919/Eusipco47968.2020.9287812},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001802.pdf},\n}\n\n
\n
\n\n\n
\n The presence of non-line-of-sight (NLOS) signals in indoor positioning systems can severely degrade the positioning accuracy. This paper proposes a novel and computationally efficient algorithm to determine the line-of-sight (LOS) signals and the 2D position of a target in an indoor positioning system. The proposed algorithm was evaluated by simulating an indoor positioning system in an 8 m × 8 m room in the presence of NLOS signals. When benchmarked against the COFFEE and Triangle-Inequality methods, the proposed method shows a significant improvement in computational time (151 ms to 768 ms) and marginal improvements over COFFEE in terms of F1-score (at least a 5% gain). The 2D position estimates achieve less than 4.1 cm mean squared error. Moreover, the proposed algorithm was evaluated experimentally using low-cost ultrasonic hardware.\n
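Once the LOS anchors have been identified, the 2D position itself is typically obtained from the range measurements by least squares. The sketch below shows only that final linearized step; the anchor layout and noise level are illustrative, and the LOS/NLOS classification, which is the focus of the paper, is assumed to have been done already.

```python
import numpy as np

def ls_position(anchors, ranges):
    """Linearized least-squares 2D position from ranges to known anchor points."""
    a0, r0 = anchors[0], ranges[0]
    # Subtract the first range equation to eliminate the quadratic term in x.
    A = 2 * (anchors[1:] - a0)
    b = (r0 ** 2 - ranges[1:] ** 2
         + np.sum(anchors[1:] ** 2, axis=1) - np.sum(a0 ** 2))
    return np.linalg.lstsq(A, b, rcond=None)[0]

if __name__ == "__main__":
    anchors = np.array([[0.0, 0.0], [8.0, 0.0], [0.0, 8.0], [8.0, 8.0]])
    target = np.array([3.0, 5.5])
    rng = np.random.default_rng(2)
    ranges = np.linalg.norm(anchors - target, axis=1) + 0.01 * rng.standard_normal(4)
    print(ls_position(anchors, ranges))           # close to [3.0, 5.5]
```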
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Maximum Likelihood Approach to Speed Estimation of Foreground Objects in Video Signals.\n \n \n \n \n\n\n \n Mattioli, V.; Alinovi, D.; and Raheli, R.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 715-719, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287813,\n  author = {V. Mattioli and D. Alinovi and R. Raheli},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Maximum Likelihood Approach to Speed Estimation of Foreground Objects in Video Signals},\n  year = {2020},\n  pages = {715-719},\n  abstract = {Motion and speed estimation play a key role in computer vision and video processing for various application scenarios. Existing algorithms are mainly based on projected and apparent motion models and are currently used in many contexts, such as automotive security and driver assistance, industrial automation and inspection systems, video surveillance, human activity tracking and biomedical solutions, including monitoring of vital signs. In this paper, a general Maximum Likelihood (ML) approach to speed estimation of foreground objects in video streams is proposed. Application examples are presented and the performance of the proposed algorithms is discussed and compared with more conventional solutions.},\n  keywords = {Maximum likelihood estimation;Maximum likelihood detection;Tracking;Signal processing algorithms;Streaming media;Signal processing;Video surveillance;Maximum likelihood;foreground detection;speed estimation;video signals},\n  doi = {10.23919/Eusipco47968.2020.9287813},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000715.pdf},\n}\n\n
\n
\n\n\n
\n Motion and speed estimation play a key role in computer vision and video processing for various application scenarios. Existing algorithms are mainly based on projected and apparent motion models and are currently used in many contexts, such as automotive security and driver assistance, industrial automation and inspection systems, video surveillance, human activity tracking and biomedical solutions, including monitoring of vital signs. In this paper, a general Maximum Likelihood (ML) approach to speed estimation of foreground objects in video streams is proposed. Application examples are presented and the performance of the proposed algorithms is discussed and compared with more conventional solutions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Full-Duplex mmWave Communication with Hybrid Precoding and Combining.\n \n \n \n \n\n\n \n López-Valcarce, R.; and Martínez-Cotelo, M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1752-1756, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Full-DuplexPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287814,\n  author = {R. López-Valcarce and M. Martínez-Cotelo},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Full-Duplex mmWave Communication with Hybrid Precoding and Combining},\n  year = {2020},\n  pages = {1752-1756},\n  abstract = {We investigate the design of hybrid precoders and combiners for a millimeter wave (mmWave) point-to-point bidirectional link in which both nodes transmit and receive simultaneously and on the same carrier frequency. In such full-duplex configuration, mitigation of self-interference (SI) becomes critical. Large antenna arrays provide an opportunity for spatial SI suppression in mmWave. We assume a phase-shifter based, fully connected architecture for the analog part of the precoder and combiner. The proposed design, which aims at cancelling SI in the analog domain to avoid frontend saturation, significantly improves on the performance of previous approaches.},\n  keywords = {Interference cancellation;Precoding;Millimeter wave technology;Europe;Signal processing;Millimeter wave communication;Antenna arrays;Millimeter wave communication;full-duplex;hybrid precoding and combining},\n  doi = {10.23919/Eusipco47968.2020.9287814},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001752.pdf},\n}\n\n
\n
\n\n\n
\n We investigate the design of hybrid precoders and combiners for a millimeter wave (mmWave) point-to-point bidirectional link in which both nodes transmit and receive simultaneously and on the same carrier frequency. In such full-duplex configuration, mitigation of self-interference (SI) becomes critical. Large antenna arrays provide an opportunity for spatial SI suppression in mmWave. We assume a phase-shifter based, fully connected architecture for the analog part of the precoder and combiner. The proposed design, which aims at cancelling SI in the analog domain to avoid frontend saturation, significantly improves on the performance of previous approaches.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Sequential Joint Detection and Estimation for non-Gaussian Noise.\n \n \n \n \n\n\n \n Reinhard, D.; and Zoubir, A. M.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2438-2442, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287815,\n  author = {D. Reinhard and A. M. Zoubir},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed Sequential Joint Detection and Estimation for non-Gaussian Noise},\n  year = {2020},\n  pages = {2438-2442},\n  abstract = {The problem of jointly testing a hypothesis and estimating a random parameter in non-Gaussian noise is investigated in a sequential and distributed setup. The non-Gaussian noise is modeled by a mixture of a completely known Gaussian distribution and an unknown contaminating distribution. Starting from the consensus+innovations approach, we present two robust communication schemes that are insensitive to the contaminating distribution. After deriving upper bounds for the variances of the estimators, a sequential scheme is designed at every sensor such that i) detection and estimation errors are limited for all possible contaminating distributions ii) the resulting scheme uses a minimum number of samples on average. A numerical example validates the proposed method.},\n  keywords = {Estimation error;Upper bound;Uncertainty;Gaussian distribution;Signal processing;Numerical models;Testing;joint detection and estimation;sequential analysis;distributed inference;statistical robustness},\n  doi = {10.23919/Eusipco47968.2020.9287815},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002438.pdf},\n}\n\n
\n
\n\n\n
\n The problem of jointly testing a hypothesis and estimating a random parameter in non-Gaussian noise is investigated in a sequential and distributed setup. The non-Gaussian noise is modeled by a mixture of a completely known Gaussian distribution and an unknown contaminating distribution. Starting from the consensus+innovations approach, we present two robust communication schemes that are insensitive to the contaminating distribution. After deriving upper bounds for the variances of the estimators, a sequential scheme is designed at every sensor such that i) detection and estimation errors are limited for all possible contaminating distributions, and ii) the resulting scheme uses a minimum number of samples on average. A numerical example validates the proposed method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n RNN With Stacked Architecture for sEMG based Sequence-to-Sequence Hand Gesture Recognition.\n \n \n \n \n\n\n \n Koch, P.; Dreier, M.; Maass, M.; Phan, H.; and Mertins, A.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1600-1604, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"RNNPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287828,\n  author = {P. Koch and M. Dreier and M. Maass and H. Phan and A. Mertins},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {RNN With Stacked Architecture for sEMG based Sequence-to-Sequence Hand Gesture Recognition},\n  year = {2020},\n  pages = {1600-1604},\n  abstract = {Having proven their suitability for real world applications, surface electromyography signals are the means of choice for hand gesture recognition especially in medical applications like upper limb prosthesis. So far, mostly hand-crafted features combined with a standard classifier or neural networks are adopted for signal analysis. However, the performance of the standard approaches is insufficient and the networks are inappropriate for embedded applications due to their sheer size. To address these problems, a small recurrent neural network is proposed to fully utilize the sequential nature of the biosignals. Our network architecture features a special recurrent neural network cell for feature learning and extraction instead of convolutional layers and another type of cell for further processing. To evaluate the suitability of this inhomogenously stacked recurrent neural network, experiments on three different databases were conducted. The results reveal that this small network significantly outperforms state-of-the-art systems and sets new records. In addition, we demonstrate that it is possible to achieve relatively equal performance across all subjects.},\n  keywords = {Recurrent neural networks;Microprocessors;Computer architecture;Feature extraction;Nonhomogeneous media;Standards;Prosthetics;hand movement classification;surface electromyography;recurrent neural network;hand prosthesis},\n  doi = {10.23919/Eusipco47968.2020.9287828},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001600.pdf},\n}\n\n
\n
\n\n\n
\n Having proven their suitability for real-world applications, surface electromyography signals are the means of choice for hand gesture recognition, especially in medical applications like upper limb prostheses. So far, mostly hand-crafted features combined with a standard classifier or neural networks have been adopted for signal analysis. However, the performance of the standard approaches is insufficient, and the networks are inappropriate for embedded applications due to their sheer size. To address these problems, a small recurrent neural network is proposed to fully utilize the sequential nature of the biosignals. Our network architecture features a special recurrent neural network cell for feature learning and extraction instead of convolutional layers, and another type of cell for further processing. To evaluate the suitability of this inhomogeneously stacked recurrent neural network, experiments on three different databases were conducted. The results reveal that this small network significantly outperforms state-of-the-art systems and sets new records. In addition, we demonstrate that it is possible to achieve relatively equal performance across all subjects.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Scale-free Texture Segmentation: Expert Feature-based versus Deep Learning strategies.\n \n \n \n \n\n\n \n Pascal, B.; Mauduit, V.; Pustelnik, N.; and Abry, P.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1367-1371, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Scale-freePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287829,\n  author = {B. Pascal and V. Mauduit and N. Pustelnik and P. Abry},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Scale-free Texture Segmentation: Expert Feature-based versus Deep Learning strategies},\n  year = {2020},\n  pages = {1367-1371},\n  abstract = {Texture segmentation constitutes a central task in image processing, classically based on two-step procedures consisting first in computing hand-crafted features devised from a priori expert knowledge and second in combining them into clustering algorithms. Deep learning approaches can be seen as merging these two steps into a single one with both discovering features and performing segmentation. Using fractal textures, often seen as relevant models in real-world applications, the present work compares a recently devised texture segmentation algorithm incorporating expert-driven scale-free features estimation into a Joint TV optimization framework against convolutional neural network architectures. From realistic synthetic textures, comparisons are drawn not only for segmentation performance, but also with respect to computational costs, architecture complexities and robustness against departures between training and testing datasets.},\n  keywords = {Deep learning;TV;Signal processing algorithms;Computer architecture;Robustness;Complexity theory;Optimization;Deep learning;CNN;Texture;Segmentation;Fractal;Total variation;Wavelets},\n  doi = {10.23919/Eusipco47968.2020.9287829},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001367.pdf},\n}\n\n
\n
\n\n\n
\n Texture segmentation constitutes a central task in image processing, classically based on two-step procedures consisting first in computing hand-crafted features devised from a priori expert knowledge and second in combining them into clustering algorithms. Deep learning approaches can be seen as merging these two steps into a single one with both discovering features and performing segmentation. Using fractal textures, often seen as relevant models in real-world applications, the present work compares a recently devised texture segmentation algorithm incorporating expert-driven scale-free features estimation into a Joint TV optimization framework against convolutional neural network architectures. From realistic synthetic textures, comparisons are drawn not only for segmentation performance, but also with respect to computational costs, architecture complexities and robustness against departures between training and testing datasets.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dementia Classification using Acoustic Descriptors Derived from Subsampled Signals.\n \n \n \n \n\n\n \n Triapthi, A.; Chakraborty, R.; and Kopparapu, S. K.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 91-95, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"DementiaPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287830,\n  author = {A. Triapthi and R. Chakraborty and S. K. Kopparapu},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Dementia Classification using Acoustic Descriptors Derived from Subsampled Signals},\n  year = {2020},\n  pages = {91-95},\n  abstract = {Dementia is a chronic syndrome characterized by deteriorating cognitive functions, thereby impacting the person's daily life. It is often confused with decline in normal behavior due to natural aging and hence is hard to diagnose. Although, prior research has shown that dementia affects the subject's speech, but it is not studied which frequency bands are being affected, and up to what extent, that in turn might influence identifying the different stages of dementia automatically. This work investigates the acoustic cues in different subsampled speech signals, to automatically differentiate Healthy Controls (HC) from stages of dementia such as Mild Cognitive Impairment (MCI) or Alzheimer's Disease (AD). We use the Pitt corpus of DementiaBank database, to identify a set of features best suited for distinguishing between HC, MCI and AD speech, and achieve an F-score of 0.857 which is an absolute improvement of 2.8% over the state of the art.},\n  keywords = {biomedical MRI;cognition;diseases;geriatrics;image classification;medical image processing;neurophysiology;natural aging;acoustic cues;subsampled speech signals;automatically differentiate healthy controls;mild cognitive impairment;dementia classification;acoustic descriptors;subsampled signals;chronic syndrome;cognitive functions;Alzheimer disease;Pitt corpus;DementiaBank database;Filtering;Databases;Europe;Aging;Signal processing;Acoustics;Dementia;Dementia;classification;feature reduction;Alzheimer’s disease;mild cognitive impairment},\n  doi = {10.23919/Eusipco47968.2020.9287830},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000091.pdf},\n}\n\n
\n
\n\n\n
\n Dementia is a chronic syndrome characterized by deteriorating cognitive functions, thereby impacting the person's daily life. It is often confused with the decline in normal behavior due to natural aging and hence is hard to diagnose. Although prior research has shown that dementia affects the subject's speech, it has not been studied which frequency bands are affected, and to what extent, which in turn might influence automatically identifying the different stages of dementia. This work investigates the acoustic cues in different subsampled speech signals to automatically differentiate Healthy Controls (HC) from stages of dementia such as Mild Cognitive Impairment (MCI) or Alzheimer's Disease (AD). We use the Pitt corpus of the DementiaBank database to identify a set of features best suited for distinguishing between HC, MCI and AD speech, and achieve an F-score of 0.857, which is an absolute improvement of 2.8% over the state of the art.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On The Use of Discrete Cosine Transform Polarity Spectrum in Speech Enhancement.\n \n \n \n \n\n\n \n Shi, S.; Busch, A.; Paliwal, K.; and Fickenscher, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 421-425, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287832,\n  author = {S. Shi and A. Busch and K. Paliwal and T. Fickenscher},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {On The Use of Discrete Cosine Transform Polarity Spectrum in Speech Enhancement},\n  year = {2020},\n  pages = {421-425},\n  abstract = {This paper investigates the use of short-time Discrete Cosine Transform (DCT) for speech enhancement. We denote the absolute values and signs of the DCT spectral coefficients as the Absolute Spectrum (AS) and Polarity Spectrum (PoS), respectively. We theoretically show that the noisy PoS is the best estimate of the original, under the constrained MMSE criterion. To verify this experimentally, the effect of using the noisy PoS for signal resynthesis is analysed through objective and subjective measures. The results show that when the Instantaneous SNR (ISNR) is above 0 dB, deemed as perfect, recovery of the original speech signal can be obtained only by modifying the DCT absolute spectrum. However, an accurate DFT Phase Spectrum (PhS) estimation might be required to achieve the same improvement in perceived speech quality. When the perceived quality is measured against the Segmental SNR (SSNR), it shows the PoS is more capable to conserve the speech quality than the PhS for the same level of global distortion. The results show that the noisy PoS can be used as an estimate of the clean PoS without perceivable degradation in speech quality, only if the ISNR of the noisy speech signal is above 0 dB or the SSNR is above 10.5 dB.},\n  keywords = {Discrete Fourier transforms;Speech enhancement;Discrete cosine transforms;Noise measurement;Phase distortion;Testing;Signal to noise ratio;Speech enhancement;Discrete cosine transform (DCT);Just noticeable difference (JND)},\n  doi = {10.23919/Eusipco47968.2020.9287832},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000421.pdf},\n}\n\n
\n
\n\n\n
\n This paper investigates the use of the short-time Discrete Cosine Transform (DCT) for speech enhancement. We denote the absolute values and signs of the DCT spectral coefficients as the Absolute Spectrum (AS) and Polarity Spectrum (PoS), respectively. We theoretically show that the noisy PoS is the best estimate of the original under the constrained MMSE criterion. To verify this experimentally, the effect of using the noisy PoS for signal resynthesis is analysed through objective and subjective measures. The results show that when the Instantaneous SNR (ISNR) is above 0 dB, recovery of the original speech signal deemed as perfect can be obtained by modifying only the DCT absolute spectrum. However, an accurate DFT Phase Spectrum (PhS) estimate might be required to achieve the same improvement in perceived speech quality. When the perceived quality is measured against the Segmental SNR (SSNR), the PoS is more capable of conserving the speech quality than the PhS for the same level of global distortion. The results show that the noisy PoS can be used as an estimate of the clean PoS without perceivable degradation in speech quality only if the ISNR of the noisy speech signal is above 0 dB or the SSNR is above 10.5 dB.\n
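The decomposition discussed above is simply the split of DCT coefficients into absolute values and signs. The sketch below resynthesizes one frame from a clean absolute spectrum combined with a noisy polarity spectrum; the frame length, window, tone and noise level are illustrative assumptions, and no claim is made about matching the paper's listening-test setup.

```python
import numpy as np
from scipy.fft import dct, idct

rng = np.random.default_rng(0)
n = 256
t = np.arange(n) / 8000.0
clean = np.sin(2 * np.pi * 440 * t) * np.hanning(n)      # one windowed frame
noisy = clean + 0.1 * rng.standard_normal(n)

C_clean, C_noisy = dct(clean, norm="ortho"), dct(noisy, norm="ortho")
abs_spec = np.abs(C_clean)           # Absolute Spectrum (AS) of the clean frame
polarity = np.sign(C_noisy)          # Polarity Spectrum (PoS) taken from the noisy frame

# Resynthesis with clean AS and noisy PoS.
recon = idct(abs_spec * polarity, norm="ortho")
print("relative error:", np.linalg.norm(recon - clean) / np.linalg.norm(clean))
```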
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast Sparse Coding Algorithms for Piece-wise Smooth Signals.\n \n \n \n \n\n\n \n Gkillas, A.; Ampeliotis, D.; and Berberidis, K.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2040-2044, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"FastPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287833,\n  author = {A. Gkillas and D. Ampeliotis and K. Berberidis},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Fast Sparse Coding Algorithms for Piece-wise Smooth Signals},\n  year = {2020},\n  pages = {2040-2044},\n  abstract = {The problem of computing a proper sparse representation matrix for a signal matrix that obeys some local smoothness property, given an over-complete dictionary, is considered. The focus is on piece-wise smooth signals, defined as signals that comprise a number of blocks that each fulfills the considered smoothness property. A computationally efficient sparse coding algorithm is derived by limiting the number of times that a new support set of dictionary atoms is computed, exploiting the smoothness of the signal. Furthermore, a new, total-variation regularized problem is proposed for computing the required sparse coding coefficients, exploiting further the smoothness priors of the signals. The considered problem is solved using the alternating direction method of multipliers. Finally, numerical results considering hyperspectral images are provided, that demonstrate the applicability and complexity -denoising performance benefits of the novel algorithms.},\n  keywords = {Dictionaries;Noise reduction;Signal processing algorithms;Cost function;Encoding;Computational efficiency;Sparse matrices;Sparse coding;piece-wise smooth signals;total variation;hyperspectral imaging;Alternating Direction Method of Multipliers},\n  doi = {10.23919/Eusipco47968.2020.9287833},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002040.pdf},\n}\n\n
\n
\n\n\n
\n The problem of computing a proper sparse representation matrix for a signal matrix that obeys some local smoothness property, given an over-complete dictionary, is considered. The focus is on piece-wise smooth signals, defined as signals comprising a number of blocks that each fulfill the considered smoothness property. A computationally efficient sparse coding algorithm is derived by limiting the number of times that a new support set of dictionary atoms is computed, exploiting the smoothness of the signal. Furthermore, a new, total-variation regularized problem is proposed for computing the required sparse coding coefficients, exploiting further the smoothness priors of the signals. The considered problem is solved using the alternating direction method of multipliers. Finally, numerical results on hyperspectral images are provided that demonstrate the applicability and the complexity and denoising performance benefits of the novel algorithms.\n
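The efficiency argument above rests on computing a new support set only occasionally and reusing it for neighbouring (smooth) signal columns. The sketch below illustrates that idea with a plain OMP support computation followed by least-squares refits on the frozen support; the dictionary, block size and refresh rule are illustrative, and the paper's total-variation regularized ADMM refinement is not shown.

```python
import numpy as np

def omp_support(D, y, k):
    """Plain orthogonal matching pursuit: return the k selected atom indices."""
    r, support = y.copy(), []
    for _ in range(k):
        support.append(int(np.argmax(np.abs(D.T @ r))))
        coeffs, *_ = np.linalg.lstsq(D[:, support], y, rcond=None)
        r = y - D[:, support] @ coeffs
    return support

def block_sparse_code(D, Y, k, refresh_every=8):
    """Code columns of Y, recomputing the support only every `refresh_every` columns."""
    X = np.zeros((D.shape[1], Y.shape[1]))
    support = None
    for j in range(Y.shape[1]):
        if j % refresh_every == 0:
            support = omp_support(D, Y[:, j], k)        # compute a new support set
        coeffs, *_ = np.linalg.lstsq(D[:, support], Y[:, j], rcond=None)
        X[support, j] = coeffs                           # reuse support for smooth neighbours
    return X

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    D = rng.standard_normal((64, 128)); D /= np.linalg.norm(D, axis=0)
    X_true = np.zeros((128, 16)); X_true[[3, 40, 90], :] = rng.uniform(0.5, 1.5, (3, 16))
    Y = D @ X_true
    X_hat = block_sparse_code(D, Y, k=3)
    print(np.linalg.norm(D @ X_hat - Y) / np.linalg.norm(Y))
```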
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n MRI Vocal Tract Sagittal Slices Estimation During Speech Production of CV.\n \n \n \n \n\n\n \n Douros, I. K.; Kulkarni, A.; Xie, Y.; Dourou, C.; Felblinger, J.; Isaieva, K.; Vuissoz, P. -.; and Laprie, Y.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1115-1119, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"MRIPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287834,\n  author = {I. K. Douros and A. Kulkarni and Y. Xie and C. Dourou and J. Felblinger and K. Isaieva and P. -A. Vuissoz and Y. Laprie},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {MRI Vocal Tract Sagittal Slices Estimation During Speech Production of CV},\n  year = {2020},\n  pages = {1115-1119},\n  abstract = {In this paper we propose an algorithm for estimating vocal tract para sagittal slices in order to have a better overview of the behaviour of the articulators during speech production. The first step is to align the consonant-vowel (CV) data of the sagittal plains between them for the train speaker. Sets of transformations that connect the midsagittal frames with the neighbouring ones is acquired for the train speaker. Another set of transformations is calculated which transforms the midsagittal frames of the train speaker to the corresponding midsagittal frames of the test speaker and is used to adapt to the test speaker domain the previously computed sets of transformations. The newly adapted transformations are applied to the midsagittal frames of the test speaker in order to estimate the neighbouring sagittal frames. Several mono speaker models are combined to produce the final frame estimation. To evaluate the results, image cross-correlation between the original and the estimated frames was used. Results show good agreement between the original and the estimated frames.},\n  keywords = {biomedical MRI;medical image processing;speaker recognition;speech;speech processing;speech recognition;MRI vocal tract sagittal slices estimation;speech production;vocal tract para sagittal slices;consonant-vowel;sagittal plains;train speaker;neighbouring ones;corresponding midsagittal frames;test speaker domain;newly adapted transformations;neighbouring sagittal frames;mono speaker models;final frame estimation;estimated frames;Signal processing algorithms;Estimation;Production;Transforms;Signal processing;Numerical models;Standards;image transformation;rtMRI data;speech resources enrichment;vocal tract},\n  doi = {10.23919/Eusipco47968.2020.9287834},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001115.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we propose an algorithm for estimating vocal tract parasagittal slices in order to have a better overview of the behaviour of the articulators during speech production. The first step is to align the consonant-vowel (CV) data of the sagittal planes between them for the train speaker. Sets of transformations that connect the midsagittal frames with the neighbouring ones are acquired for the train speaker. Another set of transformations is calculated which transforms the midsagittal frames of the train speaker to the corresponding midsagittal frames of the test speaker and is used to adapt the previously computed sets of transformations to the test speaker domain. The newly adapted transformations are applied to the midsagittal frames of the test speaker in order to estimate the neighbouring sagittal frames. Several mono-speaker models are combined to produce the final frame estimation. To evaluate the results, image cross-correlation between the original and the estimated frames was used. Results show good agreement between the original and the estimated frames.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Short-Term Prediction of the Attenuation in a Commercial Microwave Link Using LSTM-based RNN.\n \n \n \n \n\n\n \n Jacoby, D.; Ostrometzky, J.; and Messer, H.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1628-1632, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Short-TermPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287835,\n  author = {D. Jacoby and J. Ostrometzky and H. Messer},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Short-Term Prediction of the Attenuation in a Commercial Microwave Link Using LSTM-based RNN},\n  year = {2020},\n  pages = {1628-1632},\n  abstract = {The signals of microwave links used for wireless communications are prone to attenuation that can be significant due to rain. This attenuation may limit the capacity of the communication channel and cause irreversible damage. Accurate prediction of the attenuation opens the possibility to take appropriate actions to minimize such damage. In this paper, we present the use of the Long Short Time Memory (LSTM) machine learning method for short term prediction of the attenuation in commercial microwave links (CMLs), where only past measurements of the attenuation in a given link are used to predict future attenuation, with no side information. We demonstrate the operation of the proposed method on real-data signal level measurements of CMLs during rain events in Sweden. Moreover, this method is compared to a widely used statistical method for time series forecasting, the Auto-Regression Moving Average (ARIMA). The results show that learning patterns from previous attenuation values during rain events in a given CML are sufficient for generating accurate attenuation predictions.},\n  keywords = {Microwave measurement;Rain;Microwave communication;Attenuation measurement;Attenuation;Microwave theory and techniques;Forecasting;RNN;Machine Learning Applications;Rain Attenuation Prediction;Time Series;ARIMA},\n  doi = {10.23919/Eusipco47968.2020.9287835},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001628.pdf},\n}\n\n
\n
\n\n\n
\n The signals of microwave links used for wireless communications are prone to attenuation that can be significant due to rain. This attenuation may limit the capacity of the communication channel and cause irreversible damage. Accurate prediction of the attenuation opens the possibility to take appropriate actions to minimize such damage. In this paper, we present the use of the Long Short-Term Memory (LSTM) machine learning method for short-term prediction of the attenuation in commercial microwave links (CMLs), where only past measurements of the attenuation in a given link are used to predict future attenuation, with no side information. We demonstrate the operation of the proposed method on real-data signal level measurements of CMLs during rain events in Sweden. Moreover, this method is compared to a widely used statistical method for time series forecasting, the Auto-Regressive Integrated Moving Average (ARIMA). The results show that learning patterns from previous attenuation values during rain events in a given CML are sufficient for generating accurate attenuation predictions.\n
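As a sketch of the classical baseline the abstract compares against, the snippet below performs walk-forward one-step-ahead ARIMA forecasting of an attenuation series with statsmodels. The synthetic "rain event" series, the (p, d, q) order and the train/test split are illustrative assumptions, and the paper's LSTM predictor is not shown.

```python
import numpy as np
from statsmodels.tsa.arima.model import ARIMA

rng = np.random.default_rng(0)
t = np.arange(400)
# Synthetic rain event: slowly varying attenuation plus noise, in dB.
atten = 2.0 + 6.0 * np.exp(-((t - 200) / 60.0) ** 2) + 0.3 * rng.standard_normal(t.size)

train, test = atten[:350], atten[350:]
history = list(train)
preds = []
for obs in test:                        # walk-forward one-step-ahead forecasting
    model = ARIMA(history, order=(2, 1, 1)).fit()
    preds.append(model.forecast(steps=1)[0])
    history.append(obs)

rmse = np.sqrt(np.mean((np.array(preds) - test) ** 2))
print("one-step-ahead RMSE [dB]:", rmse)
```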
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Selective Adaptation of End-to-End Speech Recognition using Hybrid CTC/Attention Architecture for Noise Robustness.\n \n \n \n \n\n\n \n Do, C. -.; Zhang, S.; and Hain, T.\n\n\n \n\n\n\n In 2020 28th European Signal Processing Conference (EUSIPCO), pages 321-325, Aug 2020. \n \n\n\n\n
\n\n\n\n \n \n \"SelectivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{9287836,\n  author = {C. -T. Do and S. Zhang and T. Hain},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Selective Adaptation of End-to-End Speech Recognition using Hybrid CTC/Attention Architecture for Noise Robustness},\n  year = {2020},\n  pages = {321-325},\n  abstract = {This paper investigates supervised adaptation of end-to-end speech recognition, which uses hybrid connectionist temporal classification (CTC)/Attention architecture, for noise robustness. The components of the architecture, namely the shared encoder, the attention decoder’s long short-term memory (LSTM) layers, and the soft-max layers of the CTC part and attention part, are adapted separately or together using limited amount of adaptation data. When adapting the shared encoder, we propose to adapt only the connections of the memory cells in the memory blocks of bidirectional LSTM (BLSTM) layers to improve performance and reduce the time for adapting the models. In within-domain and cross-domain adaptation scenarios, experimental results show that adaptation of end-to-end speech recognition using the hybrid CTC/Attention architecture is effective even when the amount of adaptation data is limited. In cross-domain adaptation, substantial performance improvement can be achieved with only 2.4 minutes of adaptation data. In both adaptation scenarios, adapting only the memory cells of the BLSTM layers in the shared encoder yields comparable or slightly better performance while yielding smaller adaptation time than the adaptation of other components or the whole architecture, especially when the amount of adaptation data is less than or equal to 10 minutes.},\n  keywords = {Adaptation models;Memory management;Europe;Speech recognition;Signal processing;Noise robustness;Speech processing;End-to-end speech recognition;noise robustness;adaptation;connectionist temporal classification;attention},\n  doi = {10.23919/Eusipco47968.2020.9287836},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000321.pdf},\n}\n\n
\n
\n\n\n
\n This paper investigates supervised adaptation of end-to-end speech recognition, which uses a hybrid connectionist temporal classification (CTC)/Attention architecture, for noise robustness. The components of the architecture, namely the shared encoder, the attention decoder’s long short-term memory (LSTM) layers, and the soft-max layers of the CTC part and attention part, are adapted separately or together using a limited amount of adaptation data. When adapting the shared encoder, we propose to adapt only the connections of the memory cells in the memory blocks of the bidirectional LSTM (BLSTM) layers to improve performance and reduce the time for adapting the models. In within-domain and cross-domain adaptation scenarios, experimental results show that adaptation of end-to-end speech recognition using the hybrid CTC/Attention architecture is effective even when the amount of adaptation data is limited. In cross-domain adaptation, substantial performance improvement can be achieved with only 2.4 minutes of adaptation data. In both adaptation scenarios, adapting only the memory cells of the BLSTM layers in the shared encoder yields comparable or slightly better performance while requiring less adaptation time than adapting the other components or the whole architecture, especially when the amount of adaptation data is less than or equal to 10 minutes.\n
Image Brightness Quantification for HDR. Ploumis, S.; Boitard, R.; and Nasiopoulos, P. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 640-644, Aug 2020.
@InProceedings{9287838,\n  author = {S. Ploumis and R. Boitard and P. Nasiopoulos},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Image Brightness Quantification for HDR},\n  year = {2020},\n  pages = {640-644},\n  abstract = {Images’ brightness quantification is challenging due to the perceptual nature of brightness. In Standard Dynamic Range (SDR), the most common brightness quantification metrics are the Average Picture Level (APL) and the Frame Average Luminance Level (FALL). These metrics rely on simple image statistics and they adequately quantify SDR images’ brightness. However, they fail to sufficiently characterize the High Dynamic Range (HDR) broader luminance range and larger color volume. In this work, we propose a novel HDR image brightness quantification metric that weights each pixel contribution to overall brightness based on their color intensity and location. The proposed method is computationally inexpensive, thus suitable for real time applications. Results show that the proposed method outperforms the state-of-the-art HDR brightness quantification metrics.},\n  keywords = {Image color analysis;Brightness;Europe;Dynamic range;Signal processing;Real-time systems;Standards;brightness quantification;brightness perception;high dynamic range},\n  doi = {10.23919/Eusipco47968.2020.9287838},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000640.pdf},\n}\n\n
Image brightness quantification is challenging due to the perceptual nature of brightness. In Standard Dynamic Range (SDR), the most common brightness quantification metrics are the Average Picture Level (APL) and the Frame Average Luminance Level (FALL). These metrics rely on simple image statistics and adequately quantify the brightness of SDR images. However, they fail to sufficiently characterize the broader luminance range and larger color volume of High Dynamic Range (HDR) content. In this work, we propose a novel HDR image brightness quantification metric that weights each pixel's contribution to overall brightness based on its color intensity and location. The proposed method is computationally inexpensive and thus suitable for real-time applications. Results show that the proposed method outperforms the state-of-the-art HDR brightness quantification metrics.
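A purely illustrative sketch of an intensity- and location-weighted brightness measure (the weighting functions below are assumptions, not the ones defined in the paper):

```python
import numpy as np

def weighted_brightness(luminance, sigma_frac=0.3):
    """Toy HDR brightness score: each pixel's luminance (e.g. cd/m^2) is weighted
    by its own intensity and by a Gaussian falloff from the image centre.
    Illustrative only; the paper defines its own colour/location weighting."""
    h, w = luminance.shape
    ys, xs = np.mgrid[0:h, 0:w]
    cy, cx = (h - 1) / 2.0, (w - 1) / 2.0
    sigma = sigma_frac * max(h, w)
    spatial_w = np.exp(-((ys - cy) ** 2 + (xs - cx) ** 2) / (2 * sigma ** 2))
    intensity_w = luminance / (luminance.max() + 1e-12)   # brighter pixels weigh more
    w_total = spatial_w * intensity_w
    return float(np.sum(w_total * luminance) / np.sum(w_total))

frame = np.random.rand(216, 384) * 1000.0    # synthetic HDR luminance map
print(weighted_brightness(frame))
```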
Instantaneous PSD Estimation for Speech Enhancement based on Generalized Principal Components. Dietzen, T.; Moonen, M.; and van Waterschoot, T. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 191-195, Aug 2020.
@InProceedings{9287839,\n  author = {T. Dietzen and M. Moonen and T. v. Waterschoot},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Instantaneous PSD Estimation for Speech Enhancement based on Generalized Principal Components},\n  year = {2020},\n  pages = {191-195},\n  abstract = {Power spectral density (PSD) estimates of various microphone signal components are essential to many speech enhancement procedures. As speech is highly non-nonstationary, performance improvements may be gained by maintaining time-variations in PSD estimates. In this paper, we propose an instantaneous PSD estimation approach based on generalized principal components. Similarly to other eigenspace-based PSD estimation approaches, we rely on recursive averaging in order to obtain a microphone signal correlation matrix estimate to be decomposed. However, instead of estimating the PSDs directly from the temporally smooth generalized eigenvalues of this matrix, yielding temporally smooth PSD estimates, we propose to estimate the PSDs from newly defined instantaneous generalized eigenvalues, yielding instantaneous PSD estimates. The instantaneous generalized eigenvalues are defined from the generalized principal components, i.e. a generalized eigenvector-based transform of the microphone signals. We further show that the smooth generalized eigenvalues can be understood as a recursive average of the instantaneous generalized eigenvalues. Simulation results comparing the multi-channel Wiener filter (MWF) with smooth and instantaneous PSD estimates indicate better speech enhancement performance for the latter. A MATLAB implementation is available online.},\n  keywords = {Wiener filters;Estimation;Transforms;Speech enhancement;Eigenvalues and eigenfunctions;Matrix decomposition;Microphones;speech enhancement;instantaneous PSD estimation;generalized eigenvalue decomposition;generalized principal components},\n  doi = {10.23919/Eusipco47968.2020.9287839},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000191.pdf},\n}\n\n
Power spectral density (PSD) estimates of various microphone signal components are essential to many speech enhancement procedures. As speech is highly nonstationary, performance improvements may be gained by maintaining time-variations in PSD estimates. In this paper, we propose an instantaneous PSD estimation approach based on generalized principal components. Similarly to other eigenspace-based PSD estimation approaches, we rely on recursive averaging in order to obtain a microphone signal correlation matrix estimate to be decomposed. However, instead of estimating the PSDs directly from the temporally smooth generalized eigenvalues of this matrix, yielding temporally smooth PSD estimates, we propose to estimate the PSDs from newly defined instantaneous generalized eigenvalues, yielding instantaneous PSD estimates. The instantaneous generalized eigenvalues are defined from the generalized principal components, i.e., a generalized eigenvector-based transform of the microphone signals. We further show that the smooth generalized eigenvalues can be understood as a recursive average of the instantaneous generalized eigenvalues. Simulation results comparing the multi-channel Wiener filter (MWF) with smooth and instantaneous PSD estimates indicate better speech enhancement performance for the latter. A MATLAB implementation is available online.
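A minimal numerical sketch of the relation stated in the abstract (not the authors' MATLAB code; toy data and a white-noise correlation matrix are assumed): the generalized principal components yield instantaneous eigenvalues whose recursive average reproduces the smooth generalized eigenvalues.

```python
import numpy as np
from scipy.linalg import eigh

rng = np.random.default_rng(0)
M, T, alpha = 4, 2000, 0.98
Rn = np.eye(M)                                   # assumed noise correlation (white)
y = (rng.standard_normal((M, T)) + 1j * rng.standard_normal((M, T))) / np.sqrt(2)

# Recursively averaged microphone correlation matrix
Ry = np.eye(M, dtype=complex)
for t in range(T):
    Ry = alpha * Ry + (1 - alpha) * np.outer(y[:, t], y[:, t].conj())

lam_smooth, Q = eigh(Ry, Rn)                     # GEVD: Ry q = lam Rn q, Q^H Rn Q = I
z = Q.conj().T @ y                               # generalized principal components
lam_inst = np.abs(z) ** 2                        # instantaneous generalized eigenvalues

# Recursive average of the instantaneous eigenvalues (with Q held fixed)
avg = np.ones(M)
for t in range(T):
    avg = alpha * avg + (1 - alpha) * lam_inst[:, t]
print(np.allclose(avg, lam_smooth))              # True: averages match the smooth GEVs
```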
Transmit Beampattern Synthesis for MIMO Radar with One-Bit DACs. Wei, T.; Liao, B.; Xiao, P.; and Cheng, Z. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1827-1830, Aug 2020.
@InProceedings{9287840,\n  author = {T. Wei and B. Liao and P. Xiao and Z. Cheng},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Transmit Beampattern Synthesis for MIMO Radar with One-Bit DACs},\n  year = {2020},\n  pages = {1827-1830},\n  abstract = {In this paper, the problem of transmit beampattern synthesis (i.e., transmit beamforming) in multiple input multiple output (MIMO) radar which deploys one-bit digital-to-analog converts (DACs) is investigated. We aim to design appropriate transmit signal sequences, which are quantized by one-bit DACs, such that the amount of transmit energy can be focused into mainlobe region as much as possible, meanwhile, the leakage power of sidelobe region is minimized. It is shown that these requirements can be simultaneously fulfilled by minimizing the integrated sidelobe to mainlobe ratio (ISMR) of transmit beampattern with discrete binary constraints. According to this concept, we utilize the alternating direction multiplier method (ADMM) framework to solve the resulting nonconvex problem. Simulation results will demonstrate the effectiveness and improved performance of the proposed method.},\n  keywords = {MIMO radar;Focusing;MIMO communication;Linear programming;Two dimensional displays;Simulation;Radio frequency;Multiple-input multiple-output (MIMO) radar;transmit beampattern synthesis;one-bit DAC},\n  doi = {10.23919/Eusipco47968.2020.9287840},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001827.pdf},\n}\n\n
In this paper, the problem of transmit beampattern synthesis (i.e., transmit beamforming) in multiple-input multiple-output (MIMO) radar with one-bit digital-to-analog converters (DACs) is investigated. We aim to design transmit signal sequences, quantized by one-bit DACs, such that as much transmit energy as possible is focused into the mainlobe region while the leakage power in the sidelobe region is minimized. It is shown that these requirements can be fulfilled simultaneously by minimizing the integrated sidelobe-to-mainlobe ratio (ISMR) of the transmit beampattern under discrete binary constraints. We solve the resulting nonconvex problem within the alternating direction method of multipliers (ADMM) framework. Simulation results demonstrate the effectiveness and improved performance of the proposed method.
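A small sketch of the ISMR figure of merit itself (the ADMM-based waveform design is not reproduced; a random one-bit sequence and a half-wavelength uniform linear array are assumptions used only for illustration):

```python
import numpy as np

rng = np.random.default_rng(1)
M, L = 10, 64                                    # antennas, samples per pulse
X = np.sign(rng.standard_normal((M, L)))         # one-bit (+/-1) transmit sequences
R = X @ X.T / L                                  # transmit covariance

theta = np.deg2rad(np.arange(-90, 91))
A = np.exp(1j * np.pi * np.outer(np.arange(M), np.sin(theta)))   # ULA steering vectors
pattern = np.real(np.einsum('mt,mn,nt->t', A.conj(), R, A))      # P(theta) = a^H R a

mainlobe = np.abs(np.rad2deg(theta)) <= 10       # e.g. mainlobe region [-10, 10] degrees
ismr = pattern[~mainlobe].sum() / pattern[mainlobe].sum()
print(f"ISMR = {ismr:.3f}")                      # the quantity the paper minimizes
```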
Multimodal Integration for Large-Vocabulary Audio-Visual Speech Recognition. Yu, W.; Zeiler, S.; and Kolossa, D. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 341-345, Aug 2020.
@InProceedings{9287841,\n  author = {W. Yu and S. Zeiler and D. Kolossa},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Multimodal Integration for Large-Vocabulary Audio-Visual Speech Recognition},\n  year = {2020},\n  pages = {341-345},\n  abstract = {For many small- and medium-vocabulary tasks, audio-visual speech recognition can significantly improve the recognition rates compared to audio-only systems. However, there is still an ongoing debate regarding the best combination strategy for multi-modal information, which should allow for the translation of these gains to large-vocabulary recognition. While an integration at the level of state-posterior probabilities, using dynamic stream weighting, is almost universally helpful for small-vocabulary systems, in large-vocabulary speech recognition, the recognition accuracy remains difficult to improve. In the following, we specifically consider the large-vocabulary task of the LRS2 database, and we investigate a broad range of integration strategies, comparing early integration and end-to-end learning with many versions of hybrid recognition and dynamic stream weighting. One aspect, which is shown to provide much benefit here, is the use of dynamic stream reliability indicators, which allow for hybrid architectures to strongly profit from the inclusion of visual information whenever the audio channel is distorted even slightly.},\n  keywords = {Visualization;Image recognition;Speech recognition;Streaming media;Topology;Reliability;Task analysis;Audiovisual Speech Recognition;Multi-modal Integration;Dynamic Stream Weighting},\n  doi = {10.23919/Eusipco47968.2020.9287841},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000341.pdf},\n}\n\n
For many small- and medium-vocabulary tasks, audio-visual speech recognition can significantly improve the recognition rates compared to audio-only systems. However, there is still an ongoing debate regarding the best combination strategy for multi-modal information, which should allow for the translation of these gains to large-vocabulary recognition. While integration at the level of state-posterior probabilities, using dynamic stream weighting, is almost universally helpful for small-vocabulary systems, in large-vocabulary speech recognition the recognition accuracy remains difficult to improve. In the following, we specifically consider the large-vocabulary task of the LRS2 database, and we investigate a broad range of integration strategies, comparing early integration and end-to-end learning with many versions of hybrid recognition and dynamic stream weighting. One aspect, which is shown to provide much benefit here, is the use of dynamic stream reliability indicators, which allow hybrid architectures to profit strongly from the inclusion of visual information whenever the audio channel is distorted even slightly.
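A minimal sketch of dynamic stream weighting at the state-posterior level (an assumption-laden illustration: the reliability indicator is taken as given, whereas the paper compares several learned and signal-based indicators):

```python
import numpy as np

def fuse_posteriors(log_p_audio, log_p_video, reliability):
    """Per-frame log-linear combination of audio and video state posteriors.
    `reliability` in [0, 1] is assumed to come from an audio-reliability
    indicator (e.g. an SNR estimate)."""
    lam = reliability[:, None]                           # (T, 1) frame-wise weight
    log_p = lam * log_p_audio + (1.0 - lam) * log_p_video
    log_p -= np.logaddexp.reduce(log_p, axis=1, keepdims=True)   # renormalise per frame
    return log_p

T, S = 5, 8                                              # frames, HMM states
rng = np.random.default_rng(2)
la = np.log(rng.dirichlet(np.ones(S), size=T))           # toy audio posteriors
lv = np.log(rng.dirichlet(np.ones(S), size=T))           # toy video posteriors
rel = np.linspace(1.0, 0.2, T)                           # audio degrades over time
print(np.exp(fuse_posteriors(la, lv, rel)).sum(axis=1))  # each row sums to 1
```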
Migrating Monarch Butterfly Localization Using Multi-Modal Sensor Fusion Neural Networks. Yang, M.; Hsiao, R.; Carichner, G.; Ernst, K.; Lim, J.; Green, D. A.; Lee, I.; Blaauw, D.; and Kim, H.-S. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1792-1796, Aug 2020.
@InProceedings{9287842,\n  author = {M. Yang and R. Hsiao and G. Carichner and K. Ernst and J. Lim and D. A. Green and I. Lee and D. Blaauw and H. -S. Kim},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Migrating Monarch Butterfly Localization Using Multi-Modal Sensor Fusion Neural Networks},\n  year = {2020},\n  pages = {1792-1796},\n  abstract = {Details of Monarch butterfly migration from the U.S. to Mexico remain a mystery due to lack of a proper localization technology to accurately localize and track butterfly migration. In this paper, we propose a deep learning based butterfly localization algorithm that can estimate a butterfly’s daily location by analyzing a light and temperature sensor data log continuously obtained from an ultra-low power, millimeter (mm)-scale sensor attached to the butterfly. To train and test the proposed neural network based multi-modal sensor fusion localization algorithm, we collected over 1500 days of real world sensor measurement data by 82 volunteers all over the U.S. The proposed algorithm exhibits a mean absolute error of < 1.7° in latitude and < 0.6° in longitude Earth coordinate, satisfying our target goal for the Monarch butterfly migration study.},\n  keywords = {Temperature sensors;Temperature measurement;Multimodal sensors;Neural networks;Signal processing algorithms;Low-power electronics;Testing;light-level geolocation;Monarch migration;neu-ral networks;maximum likelihood estimation},\n  doi = {10.23919/Eusipco47968.2020.9287842},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001792.pdf},\n}\n\n
Details of Monarch butterfly migration from the U.S. to Mexico remain a mystery due to the lack of a localization technology that can accurately localize and track migrating butterflies. In this paper, we propose a deep-learning-based butterfly localization algorithm that estimates a butterfly’s daily location by analyzing a light and temperature sensor data log continuously obtained from an ultra-low-power, millimeter (mm)-scale sensor attached to the butterfly. To train and test the proposed neural-network-based multi-modal sensor fusion localization algorithm, we collected over 1500 days of real-world sensor measurement data from 82 volunteers across the U.S. The proposed algorithm exhibits a mean absolute error of < 1.7° in latitude and < 0.6° in longitude in Earth coordinates, satisfying our target goal for the Monarch butterfly migration study.
Piecewise Linear Regression under Noise Level Variation via Convex Optimization. Kuroda, H.; and Ogata, J. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2259-2263, Aug 2020.
@InProceedings{9287844,\n  author = {H. Kuroda and J. Ogata},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Piecewise Linear Regression under Noise Level Variation via Convex Optimization},\n  year = {2020},\n  pages = {2259-2263},\n  abstract = {Piecewise linear regression is a fundamental challenge in science and engineering. For typical applications where noise level varies in observations, the problem becomes much more challenging. In this paper, we propose a convex optimization based piecewise linear regression method which incorporates variation of the noise level. More precisely, we newly design a convex data-fidelity function as a weighted sum of approximation errors to mitigate effect of the noise level variation. The weights are automatically adjusted to the varying noise level within the framework of convex optimization. Numerical examples show performance improvements by the proposed method.},\n  keywords = {Linear regression;Europe;Signal processing;Approximation error;Convex functions;Noise level;Piecewise linear regression;noise level variation;convex optimization;change detection},\n  doi = {10.23919/Eusipco47968.2020.9287844},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002259.pdf},\n}\n\n
Piecewise linear regression is a fundamental challenge in science and engineering. For typical applications where the noise level varies across observations, the problem becomes much more challenging. In this paper, we propose a convex-optimization-based piecewise linear regression method which incorporates variation of the noise level. More precisely, we design a convex data-fidelity function as a weighted sum of approximation errors to mitigate the effect of noise level variation. The weights are automatically adjusted to the varying noise level within the framework of convex optimization. Numerical examples show performance improvements by the proposed method.
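One plausible convex instantiation of this idea, sketched under assumptions: fixed noise-level-dependent weights and an l1 penalty on second differences (l1 trend filtering). The paper's actual data-fidelity design and its automatic weight adjustment inside the optimization are not reproduced here.

```python
import numpy as np
import cvxpy as cp

rng = np.random.default_rng(3)
n = 200
t = np.linspace(0, 1, n)
truth = np.piecewise(t, [t < 0.5, t >= 0.5], [lambda s: 2 * s, lambda s: 2 - 2 * s])
sigma = np.where(t < 0.5, 0.05, 0.30)            # noise level changes halfway through
y = truth + sigma * rng.standard_normal(n)

w = 1.0 / sigma**2                                # assumed known here; the paper adjusts
                                                  # the weights within the optimization
D2 = np.diff(np.eye(n), n=2, axis=0)              # second-difference operator
x = cp.Variable(n)
obj = cp.Minimize(cp.sum(cp.multiply(w, cp.square(y - x))) + 5.0 * cp.norm1(D2 @ x))
cp.Problem(obj).solve()
print(np.max(np.abs(x.value - truth)))            # fit error of the piecewise-linear estimate
```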
Explicit quaternion Krawtchouk moment invariants for finger-spelling sign language recognition. Elouariachi, I.; Benouini, R.; Zenkouar, K.; Zarghili, A.; and El Fadili, H. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 620-624, Aug 2020.
@InProceedings{9287845,\n  author = {I. Elouariachi and R. Benouini and K. Zenkouar and A. Zarghili and H. {El Fadili}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Explicit quaternion krawtchouk moment invariants for finger-spelling sign language recognition},\n  year = {2020},\n  pages = {620-624},\n  abstract = {Sign recognition is a difficult task due to the complexity of its composition which uses signs of different levels, words, facial expression, body posture and finger-spelling to convey meaning. With the development of recent technologies, such as Kinect sensor, new opportunities have emerged in the field of human computer interaction and sign language, allowing to capture both RGB and Depth (RGB-D) information. In the regard to feature extraction, the traditional methods process the RGB and Depth images independently. In this paper, we propose a robust static finger-spelling sign language recognition system adopting the Quaternion algebra that provide a more robust and holistical representation, based on fusing RGB images and Depth information simultaneously. Indeed, we propose, for the first time, a new sets of Quaternion Krawtchouk moments(QKMs) and Explicit Quaternion Krawtchouk Moment Invariants (EQKMIs). The proposed system is evaluated on three well-known finger-spelling datasets, demonstrate the performance of the novel method compared to other methods used in the literature, against geometrical distortion, noisy conditions and complex background, indicating that it could be highly effective for many other computer vision applications.},\n  keywords = {Quaternions;Assistive technology;Signal processing algorithms;Gesture recognition;Robustness;Noise measurement;Task analysis;Finger-spelling Recognition;Moment Invariants;Krawtchouk;Quaternion Algebra;RST Invariants},\n  doi = {10.23919/Eusipco47968.2020.9287845},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000620.pdf},\n}\n\n
Sign recognition is a difficult task due to the complexity of its composition, which uses signs of different levels, words, facial expression, body posture and finger-spelling to convey meaning. With the development of recent technologies, such as the Kinect sensor, new opportunities have emerged in the fields of human-computer interaction and sign language, allowing both RGB and Depth (RGB-D) information to be captured. With regard to feature extraction, traditional methods process the RGB and Depth images independently. In this paper, we propose a robust static finger-spelling sign language recognition system adopting quaternion algebra, which provides a more robust and holistic representation based on fusing RGB images and Depth information simultaneously. Indeed, we propose, for the first time, new sets of Quaternion Krawtchouk Moments (QKMs) and Explicit Quaternion Krawtchouk Moment Invariants (EQKMIs). The proposed system is evaluated on three well-known finger-spelling datasets, demonstrating the performance of the novel method compared to other methods in the literature under geometrical distortion, noisy conditions and complex backgrounds, and indicating that it could be highly effective for many other computer vision applications.
Entropy-based Sample Selection for Online Continual Learning. Wiewel, F.; and Yang, B. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1477-1481, Aug 2020.
@InProceedings{9287846,\n  author = {F. Wiewel and B. Yang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Entropy-based Sample Selection for Online Continual Learning},\n  year = {2020},\n  pages = {1477-1481},\n  abstract = {Deep neural networks (DNNs) suffer from catastrophic forgetting, a rapid decrease in performance when trained on a sequence of tasks where only data of the most recent task is available. Most previous research has focused on the case where all data of a task is available simultaneously and boundaries between tasks are known. In this paper, we focus on the online setting where data arrives one-by-one or in small batches ordered by tasks and task boundaries are unknown. Avoiding catastrophic forgetting in such a setting is of great interest since it would allow DNNs to accumulate knowledge without the need to store all previously seen data even if task boundaries are unknown. For this, we propose a novel rehearsal algorithm for online continual learning that is derived from basic concepts of information theory. We demonstrate on commonly used data sets that our method can avoid catastrophic forgetting, achieve competitive results when compared with the current state-of-the-art and even outperform it in most cases.},\n  keywords = {Neural networks;Signal processing algorithms;Europe;Signal processing;Entropy;Task analysis;Information theory;Online Continual Learning;Entropy;Rehearsal},\n  doi = {10.23919/Eusipco47968.2020.9287846},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001477.pdf},\n}\n\n
Deep neural networks (DNNs) suffer from catastrophic forgetting, a rapid decrease in performance when trained on a sequence of tasks where only data of the most recent task is available. Most previous research has focused on the case where all data of a task is available simultaneously and boundaries between tasks are known. In this paper, we focus on the online setting where data arrives one-by-one or in small batches ordered by tasks and task boundaries are unknown. Avoiding catastrophic forgetting in such a setting is of great interest since it would allow DNNs to accumulate knowledge without the need to store all previously seen data even if task boundaries are unknown. For this, we propose a novel rehearsal algorithm for online continual learning that is derived from basic concepts of information theory. We demonstrate on commonly used data sets that our method can avoid catastrophic forgetting, achieve competitive results when compared with the current state-of-the-art and even outperform it in most cases.
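The abstract does not spell out the exact selection criterion, so the following is only one plausible entropy-based rehearsal-buffer rule, sketched under that assumption: when the buffer is full, replace a sample so that the label distribution kept in memory has maximal entropy.

```python
import numpy as np
from collections import Counter

def entropy(counts):
    p = np.array(list(counts.values()), dtype=float)
    p = p[p > 0] / p.sum()
    return float(-(p * np.log(p)).sum())

class EntropyBuffer:
    """Toy replay buffer: when full, drop a sample from the class whose removal keeps
    the label-distribution entropy highest. One plausible entropy-based rule only;
    the paper derives its own criterion."""
    def __init__(self, capacity):
        self.capacity, self.data = capacity, []

    def add(self, x, y):
        if len(self.data) < self.capacity:
            self.data.append((x, y))
            return
        counts = Counter(lbl for _, lbl in self.data)
        best_c, best_h = None, -1.0
        for c in counts:                      # entropy if one sample of class c is replaced
            cand = counts.copy()
            cand[c] -= 1
            cand[y] = cand.get(y, 0) + 1
            h = entropy(cand)
            if h > best_h:
                best_c, best_h = c, h
        idx = next(i for i, (_, lbl) in enumerate(self.data) if lbl == best_c)
        self.data[idx] = (x, y)

buf = EntropyBuffer(capacity=6)
rng = np.random.default_rng(4)
for step in range(100):
    label = 0 if step < 50 else rng.integers(0, 3)   # task distribution shifts mid-stream
    buf.add(rng.standard_normal(2), int(label))
print(Counter(lbl for _, lbl in buf.data))           # classes stay roughly balanced
```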
Finding Meaningful Detections: False Discovery Rate Control in Correlated Detection Maps. Flasseur, O.; Denis, L.; Thiébaut, É.; and Langlois, M. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1896-1900, Aug 2020.
@InProceedings{9287847,\n  author = {O. Flasseur and L. Denis and É. Thiébaut and M. Langlois},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Finding Meaningful Detections: False Discovery Rate Control in Correlated Detection Maps},\n  year = {2020},\n  pages = {1896-1900},\n  abstract = {The detection of faint sources is a key step in several areas of signal and image processing. The reliability of the detection depends on two key components: (i) the detection criterion used to derive detection maps in which the signature of a source takes the form of a detection peak, and (ii) the extraction procedure identifying the meaningful detections.In this work, the expected false discovery rate guides the selection of meaningful detections. A procedure is designed to account for correlations in the detection maps. This prevents the issue of the multiple detections of a single source and corrects the number of effective independent tests performed. The proposed approach is evaluated on an astrophysical application: the detection of exoplanets by high-contrast imaging.},\n  keywords = {biomedical MRI;maximum likelihood estimation;medical image processing;sensitivity analysis;statistical analysis;meaningful detections;detection maps;multiple detections;false discovery rate control;correlated detection;detection criterion;detection peak;expected false discovery rate guides;Correlation;Image processing;Extrasolar planets;Imaging;Europe;Signal processing;Reliability;detection;FDR;correlated data;matched filter},\n  doi = {10.23919/Eusipco47968.2020.9287847},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001896.pdf},\n}\n\n
The detection of faint sources is a key step in several areas of signal and image processing. The reliability of the detection depends on two key components: (i) the detection criterion used to derive detection maps in which the signature of a source takes the form of a detection peak, and (ii) the extraction procedure identifying the meaningful detections. In this work, the expected false discovery rate guides the selection of meaningful detections. A procedure is designed to account for correlations in the detection maps. This prevents the issue of the multiple detections of a single source and corrects the number of effective independent tests performed. The proposed approach is evaluated on an astrophysical application: the detection of exoplanets by high-contrast imaging.
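For reference, the standard Benjamini-Hochberg step that FDR-controlled extraction typically builds on; the paper's correlation-aware correction of the effective number of tests is not reproduced in this sketch.

```python
import numpy as np

def benjamini_hochberg(p_values, alpha=0.05):
    """Benjamini-Hochberg step-up procedure: controls the expected false discovery
    rate for independent tests. The paper additionally accounts for correlations in
    the detection map, which is not shown here."""
    p = np.asarray(p_values)
    order = np.argsort(p)
    m = p.size
    k = np.arange(1, m + 1)
    below = p[order] <= alpha * k / m            # compare sorted p-values to the BH line
    reject = np.zeros(m, dtype=bool)
    if below.any():
        reject[order[: np.nonzero(below)[0][-1] + 1]] = True
    return reject

rng = np.random.default_rng(5)
pv = np.concatenate([rng.uniform(size=9990),      # background pixels
                     rng.uniform(0, 1e-5, size=10)])   # a few genuine sources
print(benjamini_hochberg(pv).sum(), "detections kept at FDR 0.05")
```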
Kernel Bi-Linear Modeling for Reconstructing Data on Manifolds: The Dynamic-MRI Case. Shetty, G. N.; Slavakis, K.; Nakarmi, U.; Scutari, G.; and Ying, L. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1482-1486, Aug 2020.
@InProceedings{9287848,\n  author = {G. N. Shetty and K. Slavakis and U. Nakarmi and G. Scutari and L. Ying},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Kernel Bi-Linear Modeling for Reconstructing Data on Manifolds: The Dynamic-MRI Case},\n  year = {2020},\n  pages = {1482-1486},\n  abstract = {This paper establishes a kernel-based framework for reconstructing data on manifolds, tailored to fit the dynamic-(d)MRI-data recovery problem. The proposed methodology exploits simple tangent-space geometries of manifolds in reproducing kernel Hilbert spaces, and follows classical kernel-approximation arguments to form the data-recovery task as a bilinear inverse problem. Departing from mainstream approaches, the proposed methodology uses no training data, employs no graph Laplacian matrix to penalize the optimization task, uses no costly (kernel) preimaging step to map feature points back to the input space, and utilizes complex-valued kernel functions to account for k-space data. The framework is validated on synthetically generated dMRI data, where comparisons against state-of-the-art schemes highlight the rich potential of the proposed approach in data-recovery problems.},\n  keywords = {biomedical MRI;geometry;graph theory;Hilbert spaces;inverse problems;matrix algebra;medical image processing;dynamic-MRI case;kernel-based framework;simple tangent-space geometries;kernel Hilbert spaces;classical kernel-approximation arguments;data-recovery task;bilinear inverse problem;graph Laplacian matrix;optimization task;k-space data;dMRI data;data-recovery problems;Kernel Bi-linear modeling;Manifolds;Laplace equations;Training data;Signal processing;Kernel;Task analysis;Optimization;Manifold;kernel;signal recovery;dynamic MRI;low rank;sparsity;dimensionality reduction},\n  doi = {10.23919/Eusipco47968.2020.9287848},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001482.pdf},\n}\n\n
This paper establishes a kernel-based framework for reconstructing data on manifolds, tailored to fit the dynamic-(d)MRI-data recovery problem. The proposed methodology exploits simple tangent-space geometries of manifolds in reproducing kernel Hilbert spaces, and follows classical kernel-approximation arguments to form the data-recovery task as a bilinear inverse problem. Departing from mainstream approaches, the proposed methodology uses no training data, employs no graph Laplacian matrix to penalize the optimization task, uses no costly (kernel) preimaging step to map feature points back to the input space, and utilizes complex-valued kernel functions to account for k-space data. The framework is validated on synthetically generated dMRI data, where comparisons against state-of-the-art schemes highlight the rich potential of the proposed approach in data-recovery problems.
Tucker-Regularized Tensor Bregman Co-clustering. Forero, P. A.; and Baxley, P. A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1497-1501, Aug 2020.
@InProceedings{9287850,\n  author = {P. A. Forero and P. A. Baxley},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Tucker-Regularized Tensor Bregman Co-clustering},\n  year = {2020},\n  pages = {1497-1501},\n  abstract = {Co-clustering of tensor data is an unsupervised learning task aiming to identify multidimensional structures hidden in a tensor. These structures are critical for understanding interdependencies across variables belonging to different tensor dimensions, often referred to as modes, which are frequently disregarded when tensor data are represented via one- or two-dimensional data structures. This work proposes a new tensor co-clustering algorithm that uses a class of Bregman divergences to measure the coherence of co-clusters on an individual mode basis, while ensuring that the interactions of their prototyping elements capture the tensor intra-modal structure. A co-clustering algorithm based on the alternating-direction method of multipliers is developed. The proposed algorithm decouples the co-clustering problem into an iterative two-step process whose steps are reminiscent of classical one-way clustering and Tucker decomposition problems. The performance of the proposed method is illustrated via numerical tests.},\n  keywords = {Tensors;Signal processing algorithms;Clustering algorithms;Signal processing;Iterative algorithms;Task analysis;Unsupervised learning;Tucker decomposition;Tensor co-clustering;Bregman co-clustering},\n  doi = {10.23919/Eusipco47968.2020.9287850},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001497.pdf},\n}\n\n
Co-clustering of tensor data is an unsupervised learning task aiming to identify multidimensional structures hidden in a tensor. These structures are critical for understanding interdependencies across variables belonging to different tensor dimensions, often referred to as modes, which are frequently disregarded when tensor data are represented via one- or two-dimensional data structures. This work proposes a new tensor co-clustering algorithm that uses a class of Bregman divergences to measure the coherence of co-clusters on an individual mode basis, while ensuring that the interactions of their prototyping elements capture the tensor intra-modal structure. A co-clustering algorithm based on the alternating-direction method of multipliers is developed. The proposed algorithm decouples the co-clustering problem into an iterative two-step process whose steps are reminiscent of classical one-way clustering and Tucker decomposition problems. The performance of the proposed method is illustrated via numerical tests.
Neural Discrete Abstraction of High-Dimensional Spaces: A Case Study In Reinforcement Learning. Giannakopoulos, P.; Pikrakis, A.; and Cotronis, Y. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1517-1521, Aug 2020.
@InProceedings{9287851,\n  author = {P. Giannakopoulos and A. Pikrakis and Y. Cotronis},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Neural Discrete Abstraction of High-Dimensional Spaces: A Case Study In Reinforcement Learning},\n  year = {2020},\n  pages = {1517-1521},\n  abstract = {We employ Neural Discrete Representation Learning to map a high-dimensional state space, made up from raw video frames of a Reinforcement Learning agent’s interactions with the environment, into a low-dimensional state space made up from learned discrete latent representations. We show experimentally that the discrete latents learned by the encoder of a Vector Quantized Auto-Encoder (VQ-AE) model trained to reconstruct the raw video frames making up the high-dimensional state space, can serve as meaningful abstractions of clusters of correlated frames. A low-dimensional state space can then be successfully constructed, where each individual state is a quantized vector encoding representing a cluster of correlated frames of the high-dimensional state space. Experimental results for a 3D navigation task in a maze environment constructed in Minecraft demonstrate that this discrete mapping can be used in addition to, or in place of, the high-dimensional space to improve the agent’s learning performance.},\n  keywords = {Three-dimensional displays;Navigation;Computational modeling;Signal processing algorithms;Reinforcement learning;Task analysis;Signal to noise ratio;state abstraction;discrete representations;reinforcement learning;vector-quantized auto-encoder},\n  doi = {10.23919/Eusipco47968.2020.9287851},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001517.pdf},\n}\n\n
We employ Neural Discrete Representation Learning to map a high-dimensional state space, made up of raw video frames of a Reinforcement Learning agent’s interactions with the environment, into a low-dimensional state space made up of learned discrete latent representations. We show experimentally that the discrete latents learned by the encoder of a Vector Quantized Auto-Encoder (VQ-AE) model, trained to reconstruct the raw video frames making up the high-dimensional state space, can serve as meaningful abstractions of clusters of correlated frames. A low-dimensional state space can then be successfully constructed, where each individual state is a quantized vector encoding representing a cluster of correlated frames of the high-dimensional state space. Experimental results for a 3D navigation task in a maze environment constructed in Minecraft demonstrate that this discrete mapping can be used in addition to, or in place of, the high-dimensional space to improve the agent’s learning performance.
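A minimal sketch of the vector-quantization step that turns encoder outputs into discrete state indices (codebook and encoder outputs are random placeholders; training of the VQ auto-encoder is not shown):

```python
import numpy as np

def quantize(z_e, codebook):
    """VQ-VAE-style quantisation: map each encoder output vector to the index of its
    nearest codebook entry (Euclidean distance). The discrete indices play the role
    of low-dimensional states for the RL agent."""
    d = ((z_e[:, None, :] - codebook[None, :, :]) ** 2).sum(-1)   # (N, K) distances
    idx = d.argmin(axis=1)
    return idx, codebook[idx]

rng = np.random.default_rng(6)
codebook = rng.standard_normal((512, 64))        # K=512 embeddings of dimension 64
z_e = rng.standard_normal((10, 64))              # encoder outputs for 10 video frames
idx, z_q = quantize(z_e, codebook)
print(idx)                                       # discrete state ids for the frames
```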
Clock-Offset and Microphone Gain Mismatch Invariant Beamforming. Kotti, S.-E.; Heusdens, R.; and Hendriks, R. C. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 176-180, Aug 2020.
@InProceedings{9287852,\n  author = {S. -E. Kotti and R. Heusdens and R. C. Hendriks},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Clock-Offset and Microphone Gain Mismatch Invariant Beamforming},\n  year = {2020},\n  pages = {176-180},\n  abstract = {The use of wireless acoustic sensor networks (WASNs) has received increased attention over the last decade. The advantages of WASNs over stand-alone multi-microphone devices are that the microphone array is not anymore limited by the dimensions of a single device, and that microphones can be placed at arbitrary locations. One of the disadvantages, however, is that for many applications, like beamforming, the clocks of all devices in the network need to be synchronised and that the microphone gains need to be equalised. In this paper we will prove that a specific class of beamformers is clock-offset and gain mismatch invariant. The parameters for these beamformers (acoustic transfer function and power spectral density matrices) can be estimated directly from the uncalibrated microphone signals, instead of first synchronising the clocks and equalising the gains and then estimating them. The resulting beamformers are applied to the non-calibrated microphone signals. We will substantiate, by means of computer simulations, that the proposed approach gives identical results compared to the setup where microphone signals are first calibrated, so that clock-offset compensation and microphone gain equalisation becomes unnecessary.},\n  keywords = {Wireless communication;Wireless sensor networks;Array signal processing;Transfer functions;Acoustic sensors;Microphones;Clocks;Beamforming;clock synchronisation;micro-phone gain equalisation;wireless acoustic sensor networks},\n  doi = {10.23919/Eusipco47968.2020.9287852},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000176.pdf},\n}\n\n
The use of wireless acoustic sensor networks (WASNs) has received increased attention over the last decade. The advantages of WASNs over stand-alone multi-microphone devices are that the microphone array is no longer limited by the dimensions of a single device and that microphones can be placed at arbitrary locations. One of the disadvantages, however, is that for many applications, like beamforming, the clocks of all devices in the network need to be synchronised and the microphone gains need to be equalised. In this paper we prove that a specific class of beamformers is clock-offset and gain-mismatch invariant. The parameters for these beamformers (acoustic transfer function and power spectral density matrices) can be estimated directly from the uncalibrated microphone signals, instead of first synchronising the clocks, equalising the gains and then estimating them. The resulting beamformers are applied to the non-calibrated microphone signals. We substantiate, by means of computer simulations, that the proposed approach gives results identical to the setup where microphone signals are first calibrated, so that clock-offset compensation and microphone gain equalisation become unnecessary.
A Small-Scale Network for Seismic Patterns Classification. da Silva, M.; Charléty, J.; Fraysse, A.; and Pesquet, J.-C. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1402-1406, Aug 2020.
@InProceedings{9287853,\n  author = {M. {da Silva} and J. Charléty and A. Fraysse and J. -C. Pesquet},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Small-Scale Network for Seismic Patterns Classification},\n  year = {2020},\n  pages = {1402-1406},\n  abstract = {Deep Convolutional Neural Networks (DCNNs) correspond to the state-of-art for image classification. However to train such systems it is necessary to have access to a large number of samples and powerful computational resources, given the huge number of involved parameters. In the field of seismic images, large and freely available databases are scarce due to their strategic interest. In this situation, large architectures lead to hardly tractable problems in terms of overfitting. In this paper, we propose a reduced-size CNN with low computational cost that allows high accuracy performance on two small seismic datasets. The results are compared with KNN, SVM and LeNet.},\n  keywords = {Support vector machines;Databases;Pattern classification;Europe;Signal processing;Convolutional neural networks;Image classification;Seismic Data Analysis;Machine Learning;Deep Neural Network;Classification;Small Datasets},\n  doi = {10.23919/Eusipco47968.2020.9287853},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001402.pdf},\n}\n\n
Deep Convolutional Neural Networks (DCNNs) represent the state of the art in image classification. However, to train such systems it is necessary to have access to a large number of samples and powerful computational resources, given the huge number of parameters involved. In the field of seismic images, large and freely available databases are scarce due to their strategic interest. In this situation, large architectures lead to hardly tractable problems in terms of overfitting. In this paper, we propose a reduced-size CNN with low computational cost that achieves high accuracy on two small seismic datasets. The results are compared with KNN, SVM and LeNet.
Distributed Extended Object Tracking Based on Diffusion Strategy. Ren, Y.; and Xia, W. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 2338-2342, Aug 2020.
@InProceedings{9287854,\n  author = {Y. Ren and W. Xia},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed Extended Object Tracking Based on Diffusion Strategy},\n  year = {2020},\n  pages = {2338-2342},\n  abstract = {In this work, we study the problem of ellipse extended object tracking with multiple measurements. We propose a distributed extended object tracking algorithm for heterogeneous networks based on the diffusion extended Kalman filter. We use a set of nodes with different parameters to estimate the kinematic state and extension of the extended object simultaneously. Simulation results verify that the proposed distributed approach could outperform the method without cooperation.},\n  keywords = {Simulation;Signal processing algorithms;Kinematics;Heterogeneous networks;Trajectory;Object tracking;Kalman filters;object tracking;extended object;extended Kalman filter;diffusion strategy;distributed network},\n  doi = {10.23919/Eusipco47968.2020.9287854},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002338.pdf},\n}\n\n
In this work, we study the problem of ellipse extended object tracking with multiple measurements. We propose a distributed extended object tracking algorithm for heterogeneous networks based on the diffusion extended Kalman filter. We use a set of nodes with different parameters to estimate the kinematic state and extension of the extended object simultaneously. Simulation results verify that the proposed distributed approach could outperform the method without cooperation.
Rank Detection Thresholds for Hankel or Toeplitz Data Matrices. van der Veen, A.-J.; Romme, J.; and Cui, Y. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1911-1915, Aug 2020.
@InProceedings{9287856,\n  author = {A. -J. v. {der Veen} and J. Romme and Y. Cui},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Rank Detection Thresholds for Hankel or Toeplitz Data Matrices},\n  year = {2020},\n  pages = {1911-1915},\n  abstract = {In Principal Component Analysis (PCA), the dimension of the signal subspace is detected by counting the number of eigenvalues of a covariance matrix that are above a threshold. Random matrix theory provides accurate estimates for this threshold if the underlying data matrix has independent identically distributed columns. However, in time series analysis, the underlying data matrix has a Hankel or Toeplitz structure, and the columns are not independent. Using an empirical approach, we observe that the largest eigenvalue is fitted well by a Generalized Extreme Value (GEV) distribution, and we obtain accurate estimates for the thresholds to be used in a sequential rank detection test. In contrast to AIC or MDL, this provides a parameter that controls the probability of false alarm. Also a lower bound is presented for the rank detection rate of threshold-based detection for rank-1 problems.},\n  keywords = {Time series analysis;Europe;Signal processing;Harmonic analysis;Eigenvalues and eigenfunctions;Covariance matrices;Principal component analysis;PCA;structured Wishart matrix;rank detection;Generalized Extreme Value},\n  doi = {10.23919/Eusipco47968.2020.9287856},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001911.pdf},\n}\n\n
In Principal Component Analysis (PCA), the dimension of the signal subspace is detected by counting the number of eigenvalues of a covariance matrix that are above a threshold. Random matrix theory provides accurate estimates for this threshold if the underlying data matrix has independent identically distributed columns. However, in time series analysis, the underlying data matrix has a Hankel or Toeplitz structure, and the columns are not independent. Using an empirical approach, we observe that the largest eigenvalue is fitted well by a Generalized Extreme Value (GEV) distribution, and we obtain accurate estimates for the thresholds to be used in a sequential rank detection test. In contrast to AIC or MDL, this provides a parameter that controls the probability of false alarm. A lower bound is also presented for the rank detection rate of threshold-based detection for rank-1 problems.
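A sketch of the empirical calibration idea (toy white-noise Hankel data and Monte-Carlo sizes are assumptions; the paper's sequential test and exact matrix construction are not reproduced): fit a GEV law to the largest eigenvalue under noise only, then place the detection threshold at a chosen false-alarm probability.

```python
import numpy as np
from scipy.stats import genextreme
from scipy.linalg import hankel

def largest_eig_noise(n_samples, rows, rng):
    """Largest eigenvalue of the sample covariance of a Hankel matrix built from
    white Gaussian noise (a noise-only calibration run)."""
    x = rng.standard_normal(n_samples)
    H = hankel(x[:rows], x[rows - 1:])            # rows x (n_samples - rows + 1)
    cov = H @ H.T / H.shape[1]
    return np.linalg.eigvalsh(cov)[-1]

rng = np.random.default_rng(7)
rows, n = 16, 256
samples = np.array([largest_eig_noise(n, rows, rng) for _ in range(2000)])

# Fit a Generalized Extreme Value law and set the threshold for a target Pfa
shape, loc, scale = genextreme.fit(samples)
p_fa = 1e-2
threshold = genextreme.ppf(1.0 - p_fa, shape, loc=loc, scale=scale)
print(f"rank-detection threshold for Pfa={p_fa}: {threshold:.3f}")
```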
Evaluating Transfer Learning for Macular Fluid Detection with Limited Data. Cazañas-Gordón, A.; Parra-Mora, E.; and da Silva Cruz, L. A. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 1348-1352, Aug 2020.
@InProceedings{9287859,\n  author = {A. Cazañas-Gordón and E. Parra-Mora and L. A. {da Silva Cruz}},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Evaluating Transfer Learning for Macular Fluid Detection with Limited Data},\n  year = {2020},\n  pages = {1348-1352},\n  abstract = {The ability to transfer knowledge learned with large datasets to train classifiers where labeled data is scarce, has made transfer learning a prime step on deep learning applications. The availability of models pre-trained on large datasets makes possible to apply deep learning to a wide variety of computer aided diagnosis tasks including retinal image processing. However, with dozens of deep convolutional network architectures, choosing the right model for transfer learning is not a trivial task. While current art implies that the deeper the model the better, here we verified this assertion by fine-tuning a suite of deep convolutional network architectures on a scenario with limited resources. Contrary to what was expected, deeper architectures did not perform as well with limited data as with large datasets.},\n  keywords = {Deep learning;Fluids;Convolution;Computational modeling;Computer architecture;Network architecture;Task analysis;transfer learning;fine tuning;convolutional neural networks;macular fluid detection;optical coherence tomography},\n  doi = {10.23919/Eusipco47968.2020.9287859},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001348.pdf},\n}\n\n
The ability to transfer knowledge learned from large datasets to train classifiers where labeled data is scarce has made transfer learning a prime step in deep learning applications. The availability of models pre-trained on large datasets makes it possible to apply deep learning to a wide variety of computer-aided diagnosis tasks, including retinal image processing. However, with dozens of deep convolutional network architectures, choosing the right model for transfer learning is not a trivial task. While the current state of the art suggests that deeper models perform better, here we verified this assertion by fine-tuning a suite of deep convolutional network architectures in a scenario with limited resources. Contrary to what was expected, deeper architectures did not perform as well with limited data as with large datasets.
Performance Requirements for Cough Classifiers in Real-World Applications. den Brinker, A. C.; Coman, M.; Ouweltjes, O.; Crooks, M. G.; Thackray-Nocera, S.; and Morice, A. H. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 96-100, Aug 2020.
@InProceedings{9287860,\n  author = {A. C. {den Brinker} and M. Coman and O. Ouweltjes and M. G. Crooks and S. Thackray-Nocera and A. H. Morice},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Performance Requirements for Cough Classifiers in Real-World Applications},\n  year = {2020},\n  pages = {96-100},\n  abstract = {In the context of monitoring respiratory diseases, an unobtrusive cough monitor is an attractive tool. Preferably, such tool requires little or no customization. We address the question of the feasibility of such a device. A large database of sounds including coughs and other events was available. Using deep learning, a general cough classifier was constructed. The plug-and-play feasibility of such cough classifier is addressed by a leave-one-patient-out procedure. For a large part of the cohort (80%), the performance of the classifier is excellent meaning an area under the curve (AUC) of larger than 0.9. On top of that, estimates are derived for its success in practical scenarios by considering the prevalence of cough and the required specificity. It is shown that the acoustic environment can be harsh, requiring very high specificities. From the results, we argue that for real-world applications customization will be required. For part of the population, it suffices to set a patient-specific operation point in generic cough classifier, but for some part a personalized cough classifier will be needed.},\n  keywords = {diseases;learning (artificial intelligence);medical computing;patient diagnosis;patient monitoring;pneumodynamics;performance requirements;respiratory diseases;unobtrusive cough monitor;attractive tool;general cough classifier;leave-one-patient-out procedure;real-world applications customization;patient-specific operation point;generic cough classifier;personalized cough classifier;Deep learning;Sociology;Tools;Signal processing;Acoustics;Statistics;Monitoring;Respiratory diseases;COPD;cough;machine learning;deep learning},\n  doi = {10.23919/Eusipco47968.2020.9287860},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000096.pdf},\n}\n\n
In the context of monitoring respiratory diseases, an unobtrusive cough monitor is an attractive tool. Preferably, such a tool requires little or no customization. We address the question of the feasibility of such a device. A large database of sounds including coughs and other events was available. Using deep learning, a general cough classifier was constructed. The plug-and-play feasibility of such a cough classifier is addressed by a leave-one-patient-out procedure. For a large part of the cohort (80%), the performance of the classifier is excellent, with an area under the curve (AUC) larger than 0.9. On top of that, estimates are derived for its success in practical scenarios by considering the prevalence of cough and the required specificity. It is shown that the acoustic environment can be harsh, requiring very high specificities. From the results, we argue that for real-world applications customization will be required. For part of the population, it suffices to set a patient-specific operating point in the generic cough classifier, but for some part a personalized cough classifier will be needed.
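A worked illustration of why low cough prevalence forces very high specificity (basic precision/prevalence arithmetic; the figures are examples, not taken from the paper):

```python
def precision(prevalence, sensitivity, specificity):
    """Positive predictive value of an event detector: the fraction of raised detections
    that are true coughs, given how rare coughs are among all candidate sound events."""
    tp = sensitivity * prevalence
    fp = (1.0 - specificity) * (1.0 - prevalence)
    return tp / (tp + fp)

# With coughs making up only 1% of sound events and 90% sensitivity, even 99%
# specificity still leaves roughly half of all detections being false alarms:
for spec in (0.95, 0.99, 0.999):
    print(f"specificity={spec:.3f} -> precision={precision(0.01, 0.9, spec):.2f}")
```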
Numerically stable multi-channel depth scene flow with adaptive weighting of regularization terms. Kameda, Y.; Matsuda, I.; and Itoh, S. In 2020 28th European Signal Processing Conference (EUSIPCO), pages 605-609, Aug 2020.
@InProceedings{9287861,\n  author = {Y. Kameda and I. Matsuda and S. Itoh},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Numerically stable multi-channel depth scene flow with adaptive weighting of regularization terms},\n  year = {2020},\n  pages = {605-609},\n  abstract = {Scene flow is a three-dimensional (3D) vector field with velocity in the depth direction and optical flow that represents the apparent motion, which can be estimated from RGB-D videos. Scene flow can be used to estimate the 3D motion of objects with a camera; thus, it is used for obstacle detection and self-localization. It can potentially be applied to inter prediction in 3D video coding. The scene-flow estimation method based on the variational method requires numerical computations of nonlinear equations that control the regularization strength to prevent excessive smoothing due to scene-flow regularization. Because numerical stability depends on multi-channel images and computational parameters such as regularization weights, it is difficult to determine appropriate parameters that satisfy the stability requirements. Therefore, we propose a numerical computation method to derive a numerical stability condition that does not depend on the color of the image or the weight of the regularization term. This simplifies the traditional method and facilitates the setting up of various regularization weight functions. Finally, we evaluate the performance of the proposed method.},\n  keywords = {Video coding;Three-dimensional displays;Smoothing methods;Estimation;Signal processing;Numerical stability;Videos;numerical stability;scene flow;RGB-D;variational method;multi-channel},\n  doi = {10.23919/Eusipco47968.2020.9287861},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000605.pdf},\n}\n\n
@InProceedings{9287863,\n  author = {H. -H. Chang and Y. -H. Chao},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Fast Volumetric Registration in MR Images Based on an Accelerated Viscous Fluid Model},\n  year = {2020},\n  pages = {1284-1288},\n  abstract = {Medical image registration plays an essential role in subsequent image processing and analysis. The demand of direct 3D registration of volumetric image data has been arisen due to the increasing amount of medical image data. This paper investigates a fast volumetric image registration algorithm based on an incompressible viscous fluid model. While direct extension and implementation from 2D fluid registration is inaccessible, we develop numerical techniques to accelerate the computation based on the alternating direction implicit (ADI) scheme. In consequence, the computational complexity is significantly reduced from O(N3) to O(N). Not only does the computation time expedite, but the memory usage is also cut down. Massive experiments with both simulated and clinical magnetic resonance (MR) image data were administered to qualitatively and quantitatively evaluate the proposed 3D image registration framework. Experimental results suggested that our accelerated image registration algorithm produced high accuracy on both 2D and 3D image registration scenarios and outperformed competing methods. We believe that the proposed volumetric image registration scheme is promising in processing MR image volumes for further medical applications.},\n  keywords = {biomedical MRI;image registration;medical image processing;fast volumetric registration;accelerated viscous fluid model;medical image registration;subsequent image processing;direct 3D registration;volumetric image data;medical image data;fast volumetric image registration algorithm;incompressible viscous fluid model;direct extension;2D fluid registration;alternating direction implicit scheme;computational complexity;computation time;3D image registration framework;accelerated image registration algorithm;3D image registration scenarios;volumetric image registration scheme;MR image volumes;medical applications;Image registration;Fluids;Three-dimensional displays;Two dimensional displays;Signal processing algorithms;Numerical models;Acceleration;volumetric image registration;viscous fluid model;acceleration;alternating direction implicit (ADI);MRI},\n  doi = {10.23919/Eusipco47968.2020.9287863},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001284.pdf},\n}\n\n
@InProceedings{9287864,\n  author = {A. Carini and M. V. S. Lima and H. Yazdanpanah and S. Orcioni and S. Cecchi},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A Variable Step-Size for Sparse Nonlinear Adaptive Filters},\n  year = {2020},\n  pages = {2383-2387},\n  abstract = {The paper deals with the identification of nonlinear systems with adaptive filters. In particular, adaptive filters for functional link polynomial (FLiP) filters, a broad class of linear-in-the-parameters (LIP) nonlinear filters, are considered. FLiP filters include many popular LIP filters, as the Volterra filters, the Wiener nonlinear filters, and many others. Given the large number of coefficients of these filters modeling real systems, especially for high orders, the solution is often very sparse. Thus, an adaptive filter exploiting sparsity is considered, the improved proportionate NLMS algorithm (IPNLMS), and an optimal step-size is obtained for the filter. The optimal step-size alters the characteristics of the IPNLMS algorithm and provides a novel gradient descent adaptive filter. Simulation results involving the identification of a real nonlinear device illustrate the achievable performance in comparison with competing similar approaches.},\n  keywords = {Adaptive filters;Signal processing algorithms;Stochastic processes;Nonlinear filters;Filtering algorithms;Nonlinear systems;Signal to noise ratio;Adaptive filters;linear-in-the-parameters non-linear filters;functional link polynomial filters;optimal step-size},\n  doi = {10.23919/Eusipco47968.2020.9287864},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002383.pdf},\n}\n\n
@InProceedings{9287865,\n  author = {K. Georgiadis and D. A. Adamos and S. Nikolopoulos and N. Laskaris and I. Kompatsiaris},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {A graph-theoretic sensor-selection scheme for covariance-based Motor Imagery (MI) decoding},\n  year = {2020},\n  pages = {1234-1238},\n  abstract = {Optimal sensor selection is an issue of paramount importance in brain decoding. When associated with estimates of covariance, its implications concern not only classification accuracy, but also computational efficiency. However, very few attempts have been made so far, since it constitutes a challenging mathematical problem. Herein, we propose an efficient heuristic scheme that combines discriminative learning (from a small training dataset of labelled trials) with unsupervised learning (the automated detection of sensors that collectively maximize the trial discriminability of the induced Covariance structure). The approach is motivated from a complex network modelling perspective. Its efficacy and efficiency are demonstrated experimentally, based on BCI-competition datasets concerning MI-tasks, and compared against popular techniques in the field.},\n  keywords = {electroencephalography;graph theory;medical signal processing;signal classification;unsupervised learning;complex network modelling perspective;MI-tasks;graph-theoretic sensor-selection scheme;optimal sensor selection;brain decoding;computational efficiency;challenging mathematical problem;efficient heuristic scheme;discriminative learning;training dataset;unsupervised learning;trial discriminability;induced covariance structure;covariance-based motor imagery decoding;Training;Europe;Signal processing;Decoding;Computational efficiency;Mathematical model;Unsupervised learning;discriminative learning;complex network modelling;graph clustering},\n  doi = {10.23919/Eusipco47968.2020.9287865},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001234.pdf},\n}\n\n
@InProceedings{9287866,\n  author = {H. -H. Chang and S. -J. Yeh and M. -C. Chiang and S. -T. Hsieh},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Automated Brain Extraction and Separation in Triphenyltetrazolium Chloride-Stained Rat Images},\n  year = {2020},\n  pages = {1362-1366},\n  abstract = {Ischemic stroke is one of the leading causes of death among aged population worldwide. To understand the mechanism and damage of cerebral ischemia, the middle cerebral artery occlusion (MCAO) model in rodents has been generally adopted. For its celerity and veracity, the 2,3,5-triphenyltetrazolium chloride (TTC) staining has been widely utilized to visualize the infarct lesion. An important precursor is to segment the brain regions and compute the midline that separates the brain for subsequent processing. This paper develops an automated brain extraction and hemisphere separation framework in TTC-stained rat images captured by a smartphone. A saliency feature detection scheme associated with superpixels is exploited to extract the brain region into individual slices from the compound image. A chain of edge detection, morphological operation, and polynomial regression methods are introduced to compute the midline. Massive experiments were conducted to quantitatively evaluate the proposed framework. Experimental results indicated that our brain extraction algorithm outperformed competitive methods and our hemisphere separation scheme provided high accuracy.},\n  keywords = {biodiffusion;biomedical MRI;blood vessels;brain;diseases;edge detection;feature extraction;haemorheology;image segmentation;medical image processing;neurophysiology;automated brain extraction;triphenyltetrazolium chloride-stained rat images;ischemic stroke;aged population worldwide;cerebral ischemia;middle cerebral artery occlusion model;celerity;veracity;2,3,5-triphenyltetrazolium chloride staining;brain region;separation framework;TTC-stained rat images;saliency feature detection scheme;compound image;hemisphere separation scheme;Image segmentation;Visualization;Image edge detection;Feature detection;Signal processing algorithms;Rats;Feature extraction;brain extraction;hemisphere separation;image segmentation;saliency feature;TTC},\n  doi = {10.23919/Eusipco47968.2020.9287866},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001362.pdf},\n}\n\n
@InProceedings{9287867,\n  author = {A. Rajoriya and V. Katiyar and R. Budhiraja},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {User Activity And Data Detection For MIMO Uplink C-RAN Using Bayesian Learning},\n  year = {2020},\n  pages = {1742-1746},\n  abstract = {We investigate user activity and data detection problem in a multiple-input multiple-output uplink cloud-radio access network, where the data matrix over a time-frame has overlapped burst sparsity due to sporadic user activity. We exploit this sparsity to recover data by proposing a weighted prior-sparse Bayesian learning algorithm. The proposed algorithm, due to carefully selected prior, captures not only the overlapped burst sparsity across time but also the block sparsity due to multi-user antennas. We also derive hyperparameter updates, and estimate the weight parameters using the support estimated via index-wise log-likelihood ratio test. We numerically demonstrate that the proposed algorithm has much lower bit error rate than the state-of-the-art competing algorithms.},\n  keywords = {Cloud-radio access network (C-RAN);compressive sensing;sparse Bayesian learning (SBL)},\n  doi = {10.23919/Eusipco47968.2020.9287867},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001742.pdf},\n}\n\n
@InProceedings{9287868,\n  author = {H. Choi and H. Yang and S. Lee and W. Seong},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Type/position classification of inter-floor noise in residential buildings with a single microphone via supervised learning},\n  year = {2020},\n  pages = {86-90},\n  abstract = {Inter-floor noise propagates through a building structure from a noise source to neighbors on other floors. Identification of type/position of inter-floor noise in a building is difficult for human hearing. A convolutional neural network-based inter-floor noise type/position classification method was proposed in [Appl. Sci. 9, 3735 (2019)] to identify inter-floor noise. The method was evaluated against inter-floor noise collected in a single campus building as a feasibility test. In this work, the generalizability of the method was addressed through numerous tasks using new datasets collected in two real apartment buildings. These datasets contain inter-floor noise generated in rooms and at positions with three-dimensional spatial diversity, which was not studied in the previous work. Furthermore, type classification knowledge transfer between two individual apartment building domains was studied.},\n  keywords = {Spatial diversity;Buildings;Supervised learning;Signal processing;Task analysis;Knowledge transfer;Microphones;Inter-floor noise;single sensor acoustics;convolutional neural network;knowledge transfer},\n  doi = {10.23919/Eusipco47968.2020.9287868},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000086.pdf},\n}\n\n
@InProceedings{9287869,\n  author = {V. W. Neo and C. Evers and P. A. Naylor},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Speech Dereverberation Performance of a Polynomial-EVD Subspace Approach},\n  year = {2020},\n  pages = {221-225},\n  abstract = {The degradation of speech arising from additive background noise and reverberation affects the performance of important speech applications such as telecommunications, hearing aids, voice-controlled systems and robot audition. In this work, we focus on dereverberation. It is shown that the parameterized polynomial matrix eigenvalue decomposition (PEVD)-based speech enhancement algorithm exploits the lack of correlation between speech and the late reflections to enhance the speech component associated with the direct path and early reflections. The algorithm's performance is evaluated using simulations involving measured acoustic impulse responses and noise from the ACE corpus. The simulations and informal listening examples have indicated that the PEVD-based algorithm performs dereverberation over a range of SNRs without introducing any noticeable processing artefacts.},\n  keywords = {Analytical models;Signal processing algorithms;Speech enhancement;Reflection;Noise measurement;Reverberation;Matrix decomposition;Dereverberation;polynomial matrix eigenvalue decomposition;convolutive noise;broadband signal processing;microphone array},\n  doi = {10.23919/Eusipco47968.2020.9287869},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000221.pdf},\n}\n\n
@InProceedings{9287870,\n  author = {T. Joubaud and G. Pallone},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Electroacoustic method for the calibration of a heterogeneous distributed speaker system},\n  year = {2020},\n  pages = {476-480},\n  abstract = {We present an electroacoustic method to calibrate a heterogeneous distributed speaker system such as e.g. various Bluetooth speakers interconnected in a client-server architecture. This method allows to extract parameters necessary for an appropriate clock coordination (synchronization/syntonization), equalization and spatial configuration in a single calibration operation. This approach enables immersive audio or multiroom experiences without having to buy a closed dedicated system.},\n  keywords = {Bluetooth;Europe;Signal processing;Calibration;Immersive audio;Clocks;Calibration;distributed speakers;synchronization;equalization;cartography},\n  doi = {10.23919/Eusipco47968.2020.9287870},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000476.pdf},\n}\n\n
@InProceedings{9287871,\n  author = {M. Nakano and R. Shibue and K. Kashino and S. Tsukada and H. Tomoike},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Gaussian process with physical laws for 3D cardiac modeling},\n  year = {2020},\n  pages = {1452-1456},\n  abstract = {This paper introduces some physical laws into a Gaussian process for a statistical three-dimensional (3D) cardiac computational model. The 3D cardiac shape modeling is still challenging, since it involves personality and diversity. However, in spite of such variety, the heart shape must be ruled by some physical laws, which should be an important clue for the statistical shape estimation. Specifically, we introduce the Frank-Starling laws into the Gaussian process as a linear constraint, whose resulting process also follows a Gaussian process. For demonstration, we apply our model into the pipeline that estimates the heart shape from cardiovascular magnetic resonance (CMR) imaging, by combining it with the deep neural networks-based anatomical segmentation of CMR imaging.},\n  keywords = {biomedical MRI;cardiovascular system;Gaussian processes;image segmentation;medical image processing;neural nets;physiological models;Gaussian process;physical laws;3D cardiac modeling;three-dimensional cardiac computational model;3D cardiac shape modeling;heart shape;statistical shape estimation;Frank-Starling laws;cardiovascular magnetic resonance imaging;CMR imaging;deep neural networks-based anatomical segmentation;Heart;Solid modeling;Three-dimensional displays;Shape;Computational modeling;Imaging;Gaussian processes;Gaussian process;Statistical shape model;Cardiac modeling;Frank-Starling laws},\n  doi = {10.23919/Eusipco47968.2020.9287871},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001452.pdf},\n}\n\n
@InProceedings{9287872,\n  author = {B. Xiong and Q. Zhang and V. Baltazart},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Analysis of Quadratic Surface Fitting for Subpixel Motion Extraction from Video Images},\n  year = {2020},\n  pages = {695-699},\n  abstract = {Digital image correlation is a popular method for estimating object displacement in successive images. At the pixel level, displacement is estimated by maximizing the cross-correlation between two images. To achieve subpixel accuracy, displacement estimation can be refined in the vicinity of the cross-correlation peak. Among existing refinement methods, quadratic surface fitting provides a good trade-off between accuracy and computational burden. The purpose of this paper is to analyze the quadratic surface fitting method. It is shown that the quadratic surface fitted to the cross-correlation values in the vicinity of the cross-correlation peak does not always have a maximum. Then the conditions ensuring the existence of a maximum are analyzed. The reported results consolidate the theoretic basis of the quadratic surface fitting method for subpixel motion extraction.},\n  keywords = {Correlation;Digital images;Fitting;Signal processing algorithms;Signal processing;Surface fitting;Surface treatment;Digital image correlation;quadratic surface fitting;subpixel refinement},\n  doi = {10.23919/Eusipco47968.2020.9287872},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000695.pdf},\n}\n\n
@InProceedings{9287873,\n  author = {H. Bârzan and V. V. Moca and A. -M. Ichim and R. C. Muresan},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Fractional Superlets},\n  year = {2020},\n  pages = {2220-2224},\n  abstract = {The Continuous Wavelet Transform (CWT) provides a multi-resolution representation of a signal by scaling a mother wavelet and convolving it with the signal. The scalogram (squared modulus of the CWT) then represents the spread of the signal's energy as a function of time and scale. The scalogram has constant relative temporal resolution but, as the scale is compressed (frequency increased), it loses frequency resolution. To compensate for this, the recently-introduced superlets geometrically combine a set of wavelets with increasing frequency resolution to achieve time-frequency super-resolution. The number of wavelets in the set is called the order of the superlet and was initially defined as an integer number. This creates a series of issues when adaptive superlets are implemented, i.e. superlets whose order depends on frequency. In particular, adaptive superlets generate representations that suffer from {"}banding{"} because the order is adjusted in discrete steps as the frequency increases. Here, by relying on the weighted geometric mean, we introduce fractional superlets, which allow the order to be a fractional number. We show that fractional adaptive superlets provide high-resolution representations that are smooth across the entire spectrum and are clearly superior to representations based on the discrete adaptive superlets.},\n  keywords = {Time-frequency analysis;Continuous wavelet transforms;Energy resolution;Europe;Signal resolution;continuous wavelet transform;scalogram;superlet transform;adaptive superlets},\n  doi = {10.23919/Eusipco47968.2020.9287873},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002220.pdf},\n}\n\n
@InProceedings{9287875,\n  author = {D. {Dhanunjaya Varma} and R. Padmanabhan and A. D. Dileep},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Learning to Separate: Soundscape Classification using Foreground and Background},\n  year = {2020},\n  pages = {21-25},\n  abstract = {This paper applies the framework of robust principal components analysis (RPCA) to the problem of classifying acoustic soundscapes. RPCA provides a mechanism to decompose a data matrix as the sum of a low-rank matrix and a sparse matrix. In the context of data representing acoustic soundscapes, the low-rank matrix represents the slow-changing background sound events, and the sparse matrix represents the occasional foreground sound events. The data representations are obtained as feature embeddings from pretrained deep convolutional networks. The paper investigates the effectiveness of classifying acoustic soundscapes by using the foreground or background information alone. Further, by using the subspace projection technique of nuisance attribute projection (NAP), the undesired components from the foreground or background are removed. Our results indicate that RPCA and subspace projections in-deed provide benefits in improving discrimination for classifying acoustic soundscapes.},\n  keywords = {Image analysis;Convolution;Europe;Acoustics;Sparse matrices;Matrix decomposition;Principal component analysis;Acoustic scene classification;robust PCA;sub-space projections},\n  doi = {10.23919/Eusipco47968.2020.9287875},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000021.pdf},\n}\n\n
@InProceedings{9287876,\n  author = {E. H. S. Diop and K. Skretting and A. -O. Boudraa},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {AM-FM Image Analysis based on Sparse Coding Frequency Separation Approach},\n  year = {2020},\n  pages = {610-614},\n  abstract = {We propose here an extension to images of a sparse coding frequency separation method. The approach is based on a 2D multicomponent amplitude modulation (AM)-frequency modulation (FM) image modeling, where the 2D monocomponent parts are obtained by sparse approximations that are solved with matching pursuits. For synthetic images, a separable dictionary is built, while a patch-based dictionary learning method is adopted for real images. In fact, the total variation (TV) norm is applied on patches to select the decomposition modes with highest TV-norm, doing so yields to an interesting image analysis tool that properly separates the image frequency contents. The proposed approach turns out to share the same behaviors with the well known empirical mode decomposition (EMD) method. Obtained results are encouraging for feature and texture analysis, and for image denoising as well.},\n  keywords = {Image coding;Image analysis;Frequency modulation;TV;Two dimensional displays;Matching pursuit algorithms;Tools;Sparse coding;2D AM-FM;2D Frequency separation;Orthogonal matching pursuit},\n  doi = {10.23919/Eusipco47968.2020.9287876},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0000610.pdf},\n}\n\n
@InProceedings{9287877,\n  author = {F. Fallah and F. Wiewel and B. Yang},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Semi-supervised Riemannian Dimensionality Reduction and Classification Using a Manifold-based Random Walker Graph},\n  year = {2020},\n  pages = {1120-1124},\n  abstract = {Efficient classification of manifold-based data demands dimensionality reduction. However, the optimal lower dimension is mostly unknown. Also, supervised classification demands a collection of expert-annotated samples. This collection is tedious, costly, and error prone. Thus, semi-supervised classifications are motivated. In this paper, we propose a principled method to determine the optimal lower dimension and to reduce dimension of manifold-based data for their semi-supervised classification. This method relies on a manifold-based random walker graph and enhances the discriminative power of the classifier in the reduced dimensional space. The reduced dimensional data can be classified by any method or the same graph after adapting edge weights of the graph to them. Thus, the proposed dimensionality reduction is independent of the classifier type. This method is evaluated on segmenting tissues on fat-water (2-channel) magnetic resonance images.},\n  keywords = {biomedical MRI;graph theory;image classification;image segmentation;medical image processing;random processes;supervised learning;manifold-based random walker graph;reduced dimensional space;reduced dimensional data;manifold-based data;expert-annotated samples;semisupervised Riemannian dimensionality reduction;semisupervised Riemannian dimensionality classification;fat-water magnetic resonance images;Dimensionality reduction;Image segmentation;Image edge detection;Magnetic resonance;Europe;Signal processing;Covariance matrices;Riemannian dimensionality reduction;semi-supervised classification;random walker graph;symmetric positive definite matrices;region covariance descriptors},\n  doi = {10.23919/Eusipco47968.2020.9287877},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0001120.pdf},\n}\n\n
@InProceedings{9287878,\n  author = {E. Piliouras and T. -M. Laleg-Kirati},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Quantum-based interval selection of the Semi-classical Signal Analysis method},\n  year = {2020},\n  pages = {2294-2298},\n  abstract = {Semi-classical Signal Analysis (SCSA) is a signal representation algorithm utilizing the Schrödinger eigenvalue problem. The algorithm has found many applications, from signal processing to machine learning and denoising due to its adaptive and localized nature. So far, the algorithm’s design parameter was tuned heuristically, without using the knowledge of the quantum mechanical principles residing in the SCSA formulation. In this work, we extend the SCSA framework by calculating the bounds of the reconstruction parameter. The derived bounds are effectively the sampling theorem for SCSA, which is of paramount importance for the application of the theory. Moreover, guidelines towards an optimal choice of the parameter are provided, eliminating the heuristic scanning step.},\n  keywords = {Machine learning algorithms;Upper bound;Two dimensional displays;Signal processing algorithms;Eigenvalues and eigenfunctions;Signal analysis;Image reconstruction;SCSA;signal decomposition;semi-classical approximation;quantum mechanics;sampling theorem},\n  doi = {10.23919/Eusipco47968.2020.9287878},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002294.pdf},\n}\n\n
@InProceedings{9287879,\n  author = {S. Fortunati and A. Renaux and F. Pascal},\n  booktitle = {2020 28th European Signal Processing Conference (EUSIPCO)},\n  title = {Properties of a new R-estimator of shape matrices},\n  year = {2020},\n  pages = {2443-2447},\n  abstract = {This paper aims at presenting a simulative analysis of the main properties of a new R-estimator of shape matrices in Complex Elliptically Symmetric (CES) distributed observations. First proposed by Hallin, Oja and Paindaveine for the real-valued case and then extended to the complex field in our recent work, this R-estimator has the remarkable property to be, at the same time, distributionally robust and semiparametric efficient. Here, the efficiency of different possible configurations of this R-estimator are investigated by comparing the resulting Mean Square Error (MSE) with the Constrained Semiparametric Cramér-Rao Bound (CSCRB). Moreover, its robustness to outliers is assessed and compared with the one of the celebrated Tyler’s estimator.},\n  keywords = {Symmetric matrices;Shape;Europe;Mean square error methods;Signal processing;Robustness;CES distributions;scatter matrix estimation;semiparametric models;R-estimator},\n  doi = {10.23919/Eusipco47968.2020.9287879},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2020/pdfs/0002443.pdf},\n}\n\n
\n"}; document.write(bibbase_data.data);