Patch-Based Colour Transfer with Optimal Transport.
Alghamdi, H.; Grogan, M.; and Dahyot, R.
In 2019 27th European Signal Processing Conference (EUSIPCO), pages 1-5, Sep. 2019.
Github: https://github.com/leshep/PCT_OT
@INPROCEEDINGS{8902611,
  author = {H. {Alghamdi} and M. {Grogan} and R. {Dahyot}},
  booktitle = {2019 27th European Signal Processing Conference (EUSIPCO)},
  title = {Patch-Based Colour Transfer with Optimal Transport},
  url = {https://www.eurasip.org/Proceedings/Eusipco/eusipco2019/Proceedings/papers/1570533179.pdf},
  note = {Github: https://github.com/leshep/PCT_OT},
  abstract = {This paper proposes a new colour transfer method with Optimal transport to transfer the colour of a source image to match the colour of a target image of the same scene. We propose to formulate the problem in higher dimensional spaces (than colour spaces) by encoding overlapping neighborhoods of pixels containing colour information as well as spatial information. Since several recoloured candidates are now generated for each pixel in the source image, we define an original procedure to efficiently merge these candidates which allows denoising and artifact removal as well as colour transfer. Experiments show quantitative and qualitative improvements over previous colour transfer methods. Our method can be applied to different contexts of colour transfer such as transferring colour between different camera models, camera settings, illumination conditions and colour retouch styles for photographs.},
  year = {2019},
  pages = {1-5},
  keywords = {optimal transport;colour transfer;image enhancement;JPEG compression blocks},
  doi = {10.23919/EUSIPCO.2019.8902611},
  ISSN = {2219-5491},
  month = {Sep.}
}
Abstract: This paper proposes a new colour transfer method with Optimal transport to transfer the colour of a source image to match the colour of a target image of the same scene. We propose to formulate the problem in higher dimensional spaces (than colour spaces) by encoding overlapping neighborhoods of pixels containing colour information as well as spatial information. Since several recoloured candidates are now generated for each pixel in the source image, we define an original procedure to efficiently merge these candidates which allows denoising and artifact removal as well as colour transfer. Experiments show quantitative and qualitative improvements over previous colour transfer methods. Our method can be applied to different contexts of colour transfer such as transferring colour between different camera models, camera settings, illumination conditions and colour retouch styles for photographs.
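For illustration only, a minimal Python sketch of the idea summarised above: overlapping patches carrying colour and spatial information are matched with optimal transport, and the several recoloured candidates covering each pixel are merged by averaging. The patch size, the use of the POT library and its EMDTransport solver are my assumptions, not the authors' implementation from the repository above, and in this naive form it is only practical for small images or subsampled patch sets.

import numpy as np
import ot  # POT: Python Optimal Transport

def patch_features(img, p=4):
    # Overlapping p x p patches flattened to (colour..., y, x) feature vectors.
    H, W, _ = img.shape
    feats, coords = [], []
    for y in range(H - p + 1):
        for x in range(W - p + 1):
            colour = img[y:y + p, x:x + p].reshape(-1)        # colour information
            feats.append(np.concatenate([colour, [y / H, x / W]]))  # + spatial information
            coords.append((y, x))
    return np.asarray(feats), coords

def patch_colour_transfer(source, target, p=4):
    fs, coords = patch_features(source, p)
    ft, _ = patch_features(target, p)
    mapping = ot.da.EMDTransport()           # exact OT between the two patch clouds
    mapping.fit(Xs=fs, Xt=ft)
    fs_new = mapping.transform(Xs=fs)        # recoloured patch candidates
    # Merge step: every pixel is covered by several overlapping patches; average them.
    out = np.zeros_like(source, dtype=float)
    count = np.zeros(source.shape[:2] + (1,))
    for f, (y, x) in zip(fs_new, coords):
        out[y:y + p, x:x + p] += f[:-2].reshape(p, p, -1)
        count[y:y + p, x:x + p] += 1
    return out / count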

Harmonic Networks with Limited Training Samples.
Ulicny, M.; Krylov, V. A.; and Dahyot, R.
In 2019 27th European Signal Processing Conference (EUSIPCO), pages 1-5, Sep. 2019.
Github: https://github.com/matej-ulicny/harmonic-networks. Paper also on arXiv: http://arxiv.org/abs/1905.00135 and https://www.eurasip.org/Proceedings/Eusipco/eusipco2019/Proceedings/papers/1570533913.pdf
@INPROCEEDINGS{8902831,
  author = {M. {Ulicny} and V. A. {Krylov} and R. {Dahyot}},
  booktitle = {2019 27th European Signal Processing Conference (EUSIPCO)},
  url = {https://mural.maynoothuniversity.ie/15158/1/RD_harmonic%20networks.pdf},
  title = {Harmonic Networks with Limited Training Samples},
  year = {2019},
  pages = {1-5},
  abstract = {Convolutional neural networks (CNNs) are very popular nowadays for image processing. CNNs allow one to learn optimal filters in a (mostly) supervised machine learning context. However this typically requires abundant labelled training data to estimate the filter parameters. Alternative strategies have been deployed for reducing the number of parameters and / or filters to be learned and thus decrease overfitting. In the context of reverting to preset filters, we propose here a computationally efficient harmonic block that uses Discrete Cosine Transform (DCT) filters in CNNs. In this work we examine the performance of harmonic networks in limited training data scenario. We validate experimentally that its performance compares well against scattering networks that use wavelets as preset filters.},
  keywords = {Lapped Discrete Cosine Transform;harmonic network;convolutional filter;limited data},
  doi = {10.23919/EUSIPCO.2019.8902831},
  note = {Github: https://github.com/matej-ulicny/harmonic-networks and paper also on arxiv http://arxiv.org/abs/1905.00135 and https://www.eurasip.org/Proceedings/Eusipco/eusipco2019/Proceedings/papers/1570533913.pdf},
  archivePrefix = {arXiv},
  eprint = {1905.00135},
  ISSN = {2219-5491},
  month = {Sep.}
}
Abstract: Convolutional neural networks (CNNs) are very popular nowadays for image processing. CNNs allow one to learn optimal filters in a (mostly) supervised machine learning context. However this typically requires abundant labelled training data to estimate the filter parameters. Alternative strategies have been deployed for reducing the number of parameters and / or filters to be learned and thus decrease overfitting. In the context of reverting to preset filters, we propose here a computationally efficient harmonic block that uses Discrete Cosine Transform (DCT) filters in CNNs. In this work we examine the performance of harmonic networks in limited training data scenario. We validate experimentally that its performance compares well against scattering networks that use wavelets as preset filters.
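As a rough illustration of the harmonic-block idea (a PyTorch sketch with my own assumptions, not the code from the repository above): responses to a fixed bank of 2-D DCT basis filters replace learned spatial filters, and only their 1x1 combination is learned.

import math
import torch
import torch.nn as nn
import torch.nn.functional as F

def dct_filters(k=3):
    # k*k fixed 2-D DCT-II basis filters of spatial size k x k.
    basis = torch.zeros(k * k, k, k)
    n = 0
    for u in range(k):
        for v in range(k):
            for y in range(k):
                for x in range(k):
                    basis[n, y, x] = (math.cos(math.pi * u * (2 * y + 1) / (2 * k)) *
                                      math.cos(math.pi * v * (2 * x + 1) / (2 * k)))
            n += 1
    return basis

class HarmonicBlock(nn.Module):
    def __init__(self, in_ch, out_ch, k=3):
        super().__init__()
        filters = dct_filters(k).unsqueeze(1)                # (k*k, 1, k, k)
        self.register_buffer("dct", filters.repeat(in_ch, 1, 1, 1))
        self.in_ch, self.k = in_ch, k
        # learned combination of the fixed spectral responses
        self.combine = nn.Conv2d(in_ch * k * k, out_ch, kernel_size=1)

    def forward(self, x):
        # depthwise convolution with the preset DCT filters, then 1x1 mixing
        spectral = F.conv2d(x, self.dct, padding=self.k // 2, groups=self.in_ch)
        return self.combine(spectral)

Usage example: HarmonicBlock(3, 64)(torch.randn(1, 3, 32, 32)) produces a (1, 64, 32, 32) feature map without any learned spatial filter.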

Super-Resolution on Degraded Low-Resolution Images Using Convolutional Neural Networks.
Albluwi, F.; Krylov, V. A.; and Dahyot, R.
In 2019 27th European Signal Processing Conference (EUSIPCO), pages 1-5, Sep. 2019.
Github: https://github.com/Fatma-Albluwi/DBSR
@INPROCEEDINGS{8903000,
  author = {F. {Albluwi} and V. A. {Krylov} and R. {Dahyot}},
  booktitle = {2019 27th European Signal Processing Conference (EUSIPCO)},
  title = {Super-Resolution on Degraded Low-Resolution Images Using Convolutional Neural Networks},
  year = {2019},
  abstract = {Single Image Super-Resolution (SISR) has witnessed a dramatic improvement in recent years through the use of deep learning and, in particular, convolutional neural networks (CNN). In this work we address reconstruction from low-resolution images and consider as well degrading factors in images such as blurring. To address this challenging problem, we propose a new architecture to tackle blur with the down-sampling of images by extending the DBSRCNN architecture. We validate our new architecture (DBSR) experimentally against several state of the art super-resolution techniques.},
  note = {Github: https://github.com/Fatma-Albluwi/DBSR},
  url = {https://www.eurasip.org/Proceedings/Eusipco/eusipco2019/Proceedings/papers/1570533420.pdf},
  pages = {1-5},
  keywords = {Image super-resolution;image deblurring;deep learning;CNN},
  doi = {10.23919/EUSIPCO.2019.8903000},
  ISSN = {2219-5491},
  month = {Sep.}
}
Abstract: Single Image Super-Resolution (SISR) has witnessed a dramatic improvement in recent years through the use of deep learning and, in particular, convolutional neural networks (CNN). In this work we address reconstruction from low-resolution images and consider as well degrading factors in images such as blurring. To address this challenging problem, we propose a new architecture to tackle blur with the down-sampling of images by extending the DBSRCNN architecture. We validate our new architecture (DBSR) experimentally against several state of the art super-resolution techniques.
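For context, a generic SRCNN-style baseline in PyTorch (illustrative only; the DBSR architecture proposed in the paper differs and is available in the repository above): the blurred low-resolution input is bicubically upsampled and a small CNN learns a residual correction that restores detail.

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinySR(nn.Module):
    def __init__(self, channels=3, scale=2):
        super().__init__()
        self.scale = scale
        self.net = nn.Sequential(
            nn.Conv2d(channels, 64, 9, padding=4), nn.ReLU(inplace=True),
            nn.Conv2d(64, 32, 5, padding=2), nn.ReLU(inplace=True),
            nn.Conv2d(32, channels, 5, padding=2),
        )

    def forward(self, lr_blurred):
        # upsample first, then predict a residual correction on top of it
        up = F.interpolate(lr_blurred, scale_factor=self.scale, mode="bicubic",
                           align_corners=False)
        return up + self.net(up)

# Training would minimise e.g. nn.MSELoss() between the output and the clean HR image.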

Mini-Batch VLAD for Visual Place Retrieval.
Aljuaidi, R.; Su, J.; and Dahyot, R.
In 2019 30th Irish Signals and Systems Conference (ISSC), pages 1-6, June 2019.
Awarded Best Student Paper at ISSC 2019. Github: https://github.com/ReemTCD/Mini_Batch_VLAD
@INPROCEEDINGS{8904931,
  author = {R. {Aljuaidi} and J. {Su} and R. {Dahyot}},
  booktitle = {2019 30th Irish Signals and Systems Conference (ISSC)},
  title = {Mini-Batch VLAD for Visual Place Retrieval},
  note = {Awarded Best Student Paper at ISSC 2019. Github: https://github.com/ReemTCD/Mini_Batch_VLAD},
  year = {2019},
  pages = {1-6},
  abstract = {This study investigates the visual place retrieval of an image query using a geotagged image dataset. Vector of Locally Aggregated Descriptors (VLAD) is one of the local features that can be used for image place recognition. VLAD describes an image by the difference of its local feature descriptors from an already computed codebook. Generally, a visual codebook is generated from k-means clustering of the descriptors. However, the dimensionality of visual features is not trivial and the computational load of sample distances in a large image dataset is challenging. In order to design an accurate image retrieval method with affordable computation expenses, we propose to use the mini-batch k-means clustering to compute the VLAD descriptor (MB-VLAD). The proposed MB-VLAD technique shows advantage in retrieval accuracy in comparison with the state of the art techniques.},
  keywords = {feature extraction;content-based image retrieval;image processing},
  doi = {10.1109/ISSC.2019.8904931},
  url = {https://mural.maynoothuniversity.ie/15129/1/RD_mini%20batch.pdf},
  ISSN = {2688-1446},
  month = {June}
}
Abstract: This study investigates the visual place retrieval of an image query using a geotagged image dataset. Vector of Locally Aggregated Descriptors (VLAD) is one of the local features that can be used for image place recognition. VLAD describes an image by the difference of its local feature descriptors from an already computed codebook. Generally, a visual codebook is generated from k-means clustering of the descriptors. However, the dimensionality of visual features is not trivial and the computational load of sample distances in a large image dataset is challenging. In order to design an accurate image retrieval method with affordable computation expenses, we propose to use the mini-batch k-means clustering to compute the VLAD descriptor (MB-VLAD). The proposed MB-VLAD technique shows advantage in retrieval accuracy in comparison with the state of the art techniques.
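A minimal sketch of the MB-VLAD idea (assumptions mine, not the repository code): the visual codebook is learned with scikit-learn's mini-batch k-means instead of full k-means, and VLAD aggregates the residuals of each image's local descriptors to their nearest codebook centre.

import numpy as np
from sklearn.cluster import MiniBatchKMeans

def fit_codebook(descriptors, k=64, batch_size=1024):
    # descriptors: (N, D) local features (e.g. SIFT) pooled over the whole dataset.
    return MiniBatchKMeans(n_clusters=k, batch_size=batch_size).fit(descriptors)

def vlad(image_descriptors, codebook):
    centres = codebook.cluster_centers_                    # (k, D)
    assign = codebook.predict(image_descriptors)           # nearest centre per descriptor
    v = np.zeros_like(centres)
    for i, c in enumerate(assign):
        v[c] += image_descriptors[i] - centres[c]          # accumulate residuals
    v = np.sign(v) * np.sqrt(np.abs(v))                    # power normalisation
    v = v.flatten()
    return v / (np.linalg.norm(v) + 1e-12)                 # L2 normalisation

# Retrieval then compares the query's VLAD vector to database VLADs, e.g. by dot product.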

Entropic Regularisation of Robust Optimal Transport.
Dahyot, R.; Alghamdi, H.; and Grogan, M.
In Irish Machine Vision and Image Processing conference 2019, volume abs/1905.12678, 2019.
@inproceedings{DBLP:journals/corr/abs-1905-12678,
  author = {Rozenn Dahyot and Hana Alghamdi and Mair{\'{e}}ad Grogan},
  title = {Entropic Regularisation of Robust Optimal Transport},
  abstract = {Grogan et al have recently proposed a solution to colour transfer by minimising the Euclidean distance L2 between two probability density functions capturing the colour distributions of two images (palette and target). It was shown to be very competitive to alternative solutions based on Optimal Transport for colour transfer. We show that in fact Grogan et al's formulation can also be understood as a new robust Optimal Transport based framework with entropy regularisation over marginals.},
  booktitle = {Irish Machine Vision and Image Processing conference 2019},
  volume = {abs/1905.12678},
  year = {2019},
  url = {https://arxiv.org/pdf/1905.12678.pdf},
  archivePrefix = {arXiv},
  doi = {10.21427/w611-mb37},
  eprint = {1905.12678},
  timestamp = {Mon, 03 Jun 2019 13:42:33 +0200},
  biburl = {https://dblp.org/rec/bib/journals/corr/abs-1905-12678},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
Abstract: Grogan et al have recently proposed a solution to colour transfer by minimising the Euclidean distance L2 between two probability density functions capturing the colour distributions of two images (palette and target). It was shown to be very competitive to alternative solutions based on Optimal Transport for colour transfer. We show that in fact Grogan et al's formulation can also be understood as a new robust Optimal Transport based framework with entropy regularisation over marginals.
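For background, a standard entropically regularised OT (Sinkhorn) iteration in NumPy. This is the classical formulation, shown only for context; the paper's contribution, interpreting Grogan et al's L2 formulation as robust OT with entropy regularisation over the marginals, is not reproduced here.

import numpy as np

def sinkhorn(a, b, cost, eps=0.05, n_iter=500):
    # a, b: source/target histograms (each sums to 1); cost: pairwise cost matrix.
    K = np.exp(-cost / eps)                  # Gibbs kernel
    u = np.ones_like(a)
    for _ in range(n_iter):
        v = b / (K.T @ u)                    # scale to match the target marginal
        u = a / (K @ v)                      # scale to match the source marginal
    plan = u[:, None] * K * v[None, :]       # regularised transport plan
    return plan, np.sum(plan * cost)         # plan and its transport cost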

Denoising RENOIR Image Dataset with DBSR.
Albluwi, F.; Krylov, V. A.; and Dahyot, R.
In Irish Machine Vision and Image Processing (IMVIP 2019), pages 76-79, Technological University Dublin, 28-30 August 2019. ISBN 978-0-9934207-4-0.
@inproceedings{IMVIP2019Albluwi,
  title = {Denoising RENOIR Image Dataset with DBSR},
  author = {Fatma Albluwi and Vladimir A. Krylov and R. Dahyot},
  abstract = {Noise reduction algorithms have often been evaluated using images degraded by artificially synthesised noise. The RENOIR image dataset provides an alternative way for testing noise reduction algorithms on real noisy images and we propose in this paper to assess our CNN called De-Blurring Super-Resolution (DBSR) to reduce the natural noise due to low light conditions in a RENOIR dataset.},
  booktitle = {Irish Machine Vision and Image Processing (IMVIP 2019)},
  address = {Technological University Dublin},
  month = {28-30 August},
  year = {2019},
  url = {https://arrow.tudublin.ie/cgi/viewcontent.cgi?article=1006&context=impstwo},
  doi = {10.21427/g34k-8r27},
  pages = {76-79},
  volume = {ISBN 978-0-9934207-4-0}
}
Abstract: Noise reduction algorithms have often been evaluated using images degraded by artificially synthesised noise. The RENOIR image dataset provides an alternative way for testing noise reduction algorithms on real noisy images and we propose in this paper to assess our CNN called De-Blurring Super-Resolution (DBSR) to reduce the natural noise due to low light conditions in a RENOIR dataset.
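Denoising quality on real noisy/clean pairs such as RENOIR is commonly reported with PSNR; a minimal NumPy helper for that metric (illustrative only, not the paper's evaluation code):

import numpy as np

def psnr(clean, restored, peak=255.0):
    # Peak signal-to-noise ratio in dB between a clean reference and a restored image.
    mse = np.mean((clean.astype(np.float64) - restored.astype(np.float64)) ** 2)
    return float("inf") if mse == 0 else 10.0 * np.log10(peak ** 2 / mse)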

Harmonic Networks for Image Classification.
Ulicny, M.; Krylov, V.; and Dahyot, R.
In British Machine Vision Conference (BMVC), Cardiff UK, 9-12 September 2019.
Github: https://github.com/matej-ulicny/harmonic-networks
@inproceedings{BMVC2019,
  title = {Harmonic Networks for Image Classification},
  author = {M. Ulicny and V. Krylov and R. Dahyot},
  booktitle = {British Machine Vision Conference (BMVC)},
  address = {Cardiff UK},
  month = {9-12 September},
  abstract = {Convolutional neural networks (CNNs) learn filters in order to capture local correlation patterns in feature space. In contrast, in this paper we propose harmonic blocks that produce features by learning optimal combinations of responses to preset spectral filters. We rely on the use of the Discrete Cosine Transform filters which have excellent energy compaction properties and are widely used for image compression. The proposed harmonic blocks are intended to replace conventional convolutional layers to produce partially or fully harmonic versions of new or existing CNN architectures. We demonstrate how the harmonic networks can be efficiently compressed by exploiting redundancy in spectral domain and truncating high-frequency information. We extensively validate our approach and show that the introduction of harmonic blocks into state-of-the-art CNN models results in improved classification performance on CIFAR and ImageNet datasets.},
  url = {https://bmvc2019.org/wp-content/uploads/papers/0628-paper.pdf},
  note = {Github: https://github.com/matej-ulicny/harmonic-networks},
  year = {2019}
}
Abstract: Convolutional neural networks (CNNs) learn filters in order to capture local correlation patterns in feature space. In contrast, in this paper we propose harmonic blocks that produce features by learning optimal combinations of responses to preset spectral filters. We rely on the use of the Discrete Cosine Transform filters which have excellent energy compaction properties and are widely used for image compression. The proposed harmonic blocks are intended to replace conventional convolutional layers to produce partially or fully harmonic versions of new or existing CNN architectures. We demonstrate how the harmonic networks can be efficiently compressed by exploiting redundancy in spectral domain and truncating high-frequency information. We extensively validate our approach and show that the introduction of harmonic blocks into state-of-the-art CNN models results in improved classification performance on CIFAR and ImageNet datasets.
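A small self-contained sketch of the truncation idea mentioned above (my reading, not the paper's code): keeping only the low-frequency members of the k x k DCT basis reduces the number of spectral responses that the learned 1x1 combination has to mix, which compresses the harmonic block.

import math
import torch

def truncated_dct_filters(k=3, level=2):
    # Keep only DCT basis functions with frequency index u + v <= level.
    filters = []
    for u in range(k):
        for v in range(k):
            if u + v > level:                # drop high-frequency basis functions
                continue
            y = torch.arange(k, dtype=torch.float32)
            cy = torch.cos(math.pi * u * (2 * y + 1) / (2 * k))
            cx = torch.cos(math.pi * v * (2 * y + 1) / (2 * k))
            filters.append(cy[:, None] * cx[None, :])   # separable 2-D basis function
    return torch.stack(filters)              # (n_kept, k, k) with n_kept < k*k

print(truncated_dct_filters(3, 2).shape)      # torch.Size([6, 3, 3]) instead of 9 filters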

L2 Divergence for robust colour transfer.
Grogan, M.; and Dahyot, R.
Computer Vision and Image Understanding, 2019.
Github: https://github.com/groganma/gmm-colour-transfer
@article{GROGAN2019,
  title = {L2 Divergence for robust colour transfer},
  journal = {Computer Vision and Image Understanding},
  year = {2019},
  note = {Github: https://github.com/groganma/gmm-colour-transfer},
  issn = {1077-3142},
  doi = {10.1016/j.cviu.2019.02.002},
  url = {https://mural.maynoothuniversity.ie/15103/1/RB_L2.pdf},
  author = {Mairead Grogan and Rozenn Dahyot},
  keywords = {Colour Transfer, L2 Registration, Re-colouring, Colour Grading},
  abstract = {Optimal Transport (OT) is a very popular framework for performing colour transfer in images and videos. We have proposed an alternative framework where the cost function used for inferring a parametric transfer function is defined as the robust L2 divergence between two probability density functions. In this paper, we show that our approach combines many advantages of state of the art techniques and outperforms many recent algorithms as measured quantitatively with standard quality metrics, and qualitatively using perceptual studies. Mathematically, our formulation is presented in contrast to the OT cost function that shares similarities with our cost function. Our formulation, however, is more flexible as it allows colour correspondences that may be available to be taken into account and performs well despite potential occurrences of correspondence outlier pairs. Our algorithm is shown to be fast, robust and it easily allows for user interaction providing freedom for artists to fine tune the recoloured images and videos.}
}
Abstract: Optimal Transport (OT) is a very popular framework for performing colour transfer in images and videos. We have proposed an alternative framework where the cost function used for inferring a parametric transfer function is defined as the robust L2 divergence between two probability density functions. In this paper, we show that our approach combines many advantages of state of the art techniques and outperforms many recent algorithms as measured quantitatively with standard quality metrics, and qualitatively using perceptual studies. Mathematically, our formulation is presented in contrast to the OT cost function that shares similarities with our cost function. Our formulation, however, is more flexible as it allows colour correspondences that may be available to be taken into account and performs well despite potential occurrences of correspondence outlier pairs. Our algorithm is shown to be fast, robust and it easily allows for user interaction providing freedom for artists to fine tune the recoloured images and videos.
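A minimal NumPy sketch of such an L2 cost under simplifying assumptions of mine (isotropic, equal-bandwidth Gaussian mixtures with uniform weights centred on the colour samples); in the paper the parameters of a transfer function applied to one set of samples are optimised to minimise a cost of this kind.

import numpy as np

def gauss_cross_term(X, Y, sigma):
    # sum_ij of integral N(x; X_i, sigma^2 I) N(x; Y_j, sigma^2 I) dx for d-dimensional samples.
    d = X.shape[1]
    sq = ((X[:, None, :] - Y[None, :, :]) ** 2).sum(-1)
    return np.sum(np.exp(-sq / (4 * sigma ** 2))) / ((4 * np.pi * sigma ** 2) ** (d / 2))

def l2_divergence(X, Y, sigma=0.1):
    # L2 distance between the two equal-weight Gaussian mixtures fitted to X and Y:
    # integral (p - q)^2 = integral p^2 - 2 integral p q + integral q^2 (closed form).
    n, m = len(X), len(Y)
    return (gauss_cross_term(X, X, sigma) / n**2
            - 2 * gauss_cross_term(X, Y, sigma) / (n * m)
            + gauss_cross_term(Y, Y, sigma) / m**2)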

Object Geolocation from Crowdsourced Street Level Imagery.
Krylov, V. A.; and Dahyot, R.
In Alzate, C.; Monreale, A.; Assem, H.; Bifet, A.; Buda, T. S.; Caglayan, B.; Drury, B.; García-Martín, E.; Gavaldà, R.; Kramer, S.; Lavesson, N.; Madden, M.; Molloy, I.; Nicolae, M.; and Sinn, M., editors, ECML PKDD 2018 Workshops, pages 79–83, Cham, 2019. Springer International Publishing.
@InProceedings{10.1007/978-3-030-13453-2_7,
  author = {Krylov, Vladimir A. and Dahyot, Rozenn},
  editor = {Alzate, Carlos and Monreale, Anna and Assem, Haytham and Bifet, Albert and Buda, Teodora Sandra and Caglayan, Bora and Drury, Brett and Garc{\'i}a-Mart{\'i}n, Eva and Gavald{\`a}, Ricard and Kramer, Stefan and Lavesson, Niklas and Madden, Michael and Molloy, Ian and Nicolae, Maria-Irina and Sinn, Mathieu},
  doi = {10.1007/978-3-030-13453-2_7},
  url = {https://mural.maynoothuniversity.ie/15249/1/RD_object.pdf},
  title = {Object Geolocation from Crowdsourced Street Level Imagery},
  booktitle = {ECML PKDD 2018 Workshops},
  year = {2019},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {79--83},
  abstract = {We explore the applicability and limitations of a state-of-the-art object detection and geotagging system [4] applied to crowdsourced image data. Our experiments with imagery from Mapillary crowdsourcing platform demonstrate that with increasing amount of images, the detection accuracy is getting close to that obtained with high-end street level data. Nevertheless, due to excessive camera position noise, the estimated geolocation (position) of the detected object is less accurate on crowdsourced Mapillary imagery than with high-end street level imagery obtained by Google Street View.},
  isbn = {978-3-030-13453-2}
}
Abstract: We explore the applicability and limitations of a state-of-the-art object detection and geotagging system [4] applied to crowdsourced image data. Our experiments with imagery from Mapillary crowdsourcing platform demonstrate that with increasing amount of images, the detection accuracy is getting close to that obtained with high-end street level data. Nevertheless, due to excessive camera position noise, the estimated geolocation (position) of the detected object is less accurate on crowdsourced Mapillary imagery than with high-end street level imagery obtained by Google Street View.