2020 (2)

Computer Vision for X-Ray Testing. Mery, D.; and Pieringer, C. Springer, Second edition, 2020.

@book{Mery2020:SpringerBook,
  author = {D. Mery and C. Pieringer},
  title = {{Computer Vision for X-Ray Testing}},
  publisher = {Springer},
  edition = {Second},
  year = {2020},
  abstract = {This accessible textbook presents an introduction to computer vision algorithms for industrially-relevant applications of X-ray testing. Covering complex topics in an easy-to-understand way, without requiring any prior knowledge in the field, the book provides a concise review of the key methodologies in computer vision (including deep learning) for solving important problems in industrial radiology. The theoretical coverage is supported by numerous examples, each of which can be tested and evaluated by the reader using a freely-available Python Library and X-ray image database.}
}

This accessible textbook presents an introduction to computer vision algorithms for industrially-relevant applications of X-ray testing. Covering complex topics in an easy-to-understand way, without requiring any prior knowledge in the field, the book provides a concise review of the key methodologies in computer vision (including deep learning) for solving important problems in industrial radiology. The theoretical coverage is supported by numerous examples, each of which can be tested and evaluated by the reader using a freely-available Python Library and X-ray image database.

Aluminum Casting Inspection using Deep Learning: A method based on Convolutional Neural Networks. Mery, D. Journal of Nondestructive Evaluation. 2020. (accepted in Jan. 2020)

@article{Mery2020:JNDE-Castings,
  author = {Mery, Domingo},
  title = {Aluminum Casting Inspection using Deep Learning: A method based on Convolutional Neural Networks},
  journal = {Journal of Nondestructive Evaluation},
  year = {2020},
  note = {(accepted in Jan. 2020)},
  issn = {1573-4862},
  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2020-JNDE-Castings.pdf},
  doi = {10.1007/s10921-020-0655-9},
  abstract = {In recent years, many computer vision algorithms have been proposed for baggage inspection using X-ray images. In these approaches, the idea is to automatically detect threat objects. In baggage inspection, however, a single view is insufficient because there could be occluded parts or intricate projections that cannot be observed with a single view. In order to avoid a misinterpretation based on a single view, we propose the use of mono-energetic multiple X-ray views. Our approach computes a 3D reconstruction using Space Carving, a method that reconstructs a 3D object from its 2D silhouettes (which have been segmented using Geodesic Active Contours). The detection is performed by analyzing 3D features obtained from the 3D reconstruction. Instead of dual energy, which is typically used in baggage inspection to analyze the material of the reconstructed objects, we propose to simply use mono-energy for the detection of threat objects that can be recognized by analyzing their shape, such as handguns. The approach has been successfully tested on X-ray images of travel bags that contain handguns. In the evaluation of our method we used sequences of X-ray images for the 3D reconstruction of objects inside travel bags, where each sequence consists of 90 X-ray images. We obtained 0.964 in both precision and recall. We strongly believe that it is possible to design an automated aid for the human inspection task using these computer vision algorithms.}
}

In recent years, many computer vision algorithms have been proposed for baggage inspection using X-ray images. In these approaches, the idea is to automatically detect threat objects. In baggage inspection, however, a single view is insufficient because there could be occluded parts or intricate projections that cannot be observed with a single view. In order to avoid a misinterpretation based on a single view, we propose the use of mono-energetic multiple X-ray views. Our approach computes a 3D reconstruction using Space Carving, a method that reconstructs a 3D object from its 2D silhouettes (which have been segmented using Geodesic Active Contours). The detection is performed by analyzing 3D features obtained from the 3D reconstruction. Instead of dual energy, which is typically used in baggage inspection to analyze the material of the reconstructed objects, we propose to simply use mono-energy for the detection of threat objects that can be recognized by analyzing their shape, such as handguns. The approach has been successfully tested on X-ray images of travel bags that contain handguns. In the evaluation of our method we used sequences of X-ray images for the 3D reconstruction of objects inside travel bags, where each sequence consists of 90 X-ray images. We obtained 0.964 in both precision and recall. We strongly believe that it is possible to design an automated aid for the human inspection task using these computer vision algorithms.

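The Space Carving step summarized above removes every voxel whose projection falls outside at least one silhouette. A minimal sketch of that test, assuming calibrated 3x4 projection matrices and pre-segmented binary silhouettes (the function and argument names are illustrative, not the paper's code):

import numpy as np

def space_carving(voxels, silhouettes, projections):
    """voxels: (N,3) world points; silhouettes: list of HxW bool masks;
    projections: list of 3x4 camera matrices for the X-ray views."""
    keep = np.ones(len(voxels), dtype=bool)
    hom = np.hstack([voxels, np.ones((len(voxels), 1))])   # homogeneous coords
    for sil, P in zip(silhouettes, projections):
        uvw = hom @ P.T                                    # project voxels
        uv = np.rint(uvw[:, :2] / uvw[:, 2:3]).astype(int)
        h, w = sil.shape
        inside = (uv[:, 0] >= 0) & (uv[:, 0] < w) & (uv[:, 1] >= 0) & (uv[:, 1] < h)
        hit = np.zeros(len(voxels), dtype=bool)
        hit[inside] = sil[uv[inside, 1], uv[inside, 0]]    # silhouette test
        keep &= hit                                        # carve away misses
    return keep

The silhouette segmentation itself (Geodesic Active Contours in the paper) and the 3D feature analysis are not shown.
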
2019 (13)

Characterization of spinal cord damage based on automatic video analysis of froglet swimming. De Vidts, S.; Mendez-Olivos, E. E.; Palacios, M.; Larrain, J.; and Mery, D. Biology Open, 2019(8): 1-10. 2019.

@article{Mery2019:BIOLOPEN,
  author = {S. De Vidts and E.E. Mendez-Olivos and M. Palacios and J. Larrain and D. Mery},
  title = {Characterization of spinal cord damage based on automatic video analysis of froglet swimming},
  journal = {Biology Open},
  year = {2019},
  volume = {2019},
  number = {8},
  pages = {1-10},
  doi = {10.1242/bio.042960},
  url = {https://bio.biologists.org/content/8/12/bio042960},
  abstract = {Xenopus laevis frogs are a widely used organism to study aspects of modern biology. Their central nervous system is particularly interesting, because in certain stages of metamorphosis the spinal cord can regenerate after injury and recover swimming. With this in mind, automatic gait analysis could help evaluate the regenerative performance by means of a method that automatically and quantitatively establishes the degree of froglet limb movement. Here, we present an algorithm that characterizes spinal cord damage in froglets. The proposed method tracks the position of the limbs throughout videos and extracts kinematic features, which subsequently serve to differentiate froglets with different levels of damage to the spinal cord. The detection algorithm and kinematic features chosen were validated in a pattern recognition experiment in which 90 videos (divided equally into three classes: uninjured, hemisected and transected) were classified. We conclude that our system is effective in the characterization of damage to the spinal cord through video analysis of a swimming froglet, with 97\% accuracy. These results potentially validate this methodology to automatically compare the recovery of spinal cord function after different treatments without the need to manually process videos. In addition, the procedure could be used to measure the kinematics and behavioral response of froglets to different experimental conditions such as nutritional state, stress, genetic background and age.}
}

Xenopus laevis frogs are a widely used organism to study aspects of modern biology. Their central nervous system is particularly interesting, because in certain stages of metamorphosis the spinal cord can regenerate after injury and recover swimming. With this in mind, automatic gait analysis could help evaluate the regenerative performance by means of a method that automatically and quantitatively establishes the degree of froglet limb movement. Here, we present an algorithm that characterizes spinal cord damage in froglets. The proposed method tracks the position of the limbs throughout videos and extracts kinematic features, which subsequently serve to differentiate froglets with different levels of damage to the spinal cord. The detection algorithm and kinematic features chosen were validated in a pattern recognition experiment in which 90 videos (divided equally into three classes: uninjured, hemisected and transected) were classified. We conclude that our system is effective in the characterization of damage to the spinal cord through video analysis of a swimming froglet, with 97% accuracy. These results potentially validate this methodology to automatically compare the recovery of spinal cord function after different treatments without the need to manually process videos. In addition, the procedure could be used to measure the kinematics and behavioral response of froglets to different experimental conditions such as nutritional state, stress, genetic background and age.

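A hedged sketch of the pipeline shape described above: per-frame limb positions reduced to a few kinematic features and fed to a three-class classifier. The feature set, frame rate, and data below are placeholders, not the authors' implementation.

import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

def kinematic_features(track, fps=30.0):
    """track: (T,2) limb position per frame -> assumed features:
    mean speed, speed variance, and movement amplitude per axis."""
    v = np.diff(track, axis=0) * fps
    speed = np.linalg.norm(v, axis=1)
    amplitude = track.max(axis=0) - track.min(axis=0)
    return np.array([speed.mean(), speed.var(), *amplitude])

rng = np.random.default_rng(0)
tracks = [rng.normal(size=(120, 2)).cumsum(axis=0) for _ in range(30)]  # fake tracks
X = np.stack([kinematic_features(t) for t in tracks])
y = rng.integers(0, 3, size=30)          # 0=uninjured, 1=hemisected, 2=transected
print(cross_val_score(SVC(kernel="rbf"), X, y, cv=3))
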
Reconocimiento Facial: es bueno o malo? [Facial Recognition: Is It Good or Bad?]. Mery, D. In La Tercera (06/09/19). 2019.

@incollection{Mery2019:LaTercera,
  author = {Mery, D.},
  title = {Reconocimiento Facial: es bueno o malo?},
  booktitle = {La Tercera (06/09/19)},
  url = {https://www.latercera.com/opinion/noticia/reconocimiento-facial-bueno-malo/},
  year = {2019}
}

Face Analysis: State of the Art and Ethical Challenges. Mery, D. In Proceedings of the Pacific Rim Symposium on Image and Video Technology (PSIVT 2019): Workshop Vision-Tech, 2019.

@INPROCEEDINGS{Mery2019:PSIVT,
  author = {Mery, D.},
  booktitle = {Proceedings of the Pacific Rim Symposium on Image and Video Technology (PSIVT 2019): Workshop Vision-Tech},
  title = {Face Analysis: State of the Art and Ethical Challenges},
  year = {2019},
  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2019-PSIVT.pdf}
}

A Robust Face Recognition System for One Sample Problem. Meena, M.; Singh, P.; Rana, A.; Mery, D.; and Prasad, M. In Proceedings of the Pacific Rim Symposium on Image and Video Technology (PSIVT 2019), 2019.

@INPROCEEDINGS{Mery2019:PSIVT-FR,
  author = {Meena, Mahendra and Singh, Priti and Rana, Ajay and Mery, Domingo and Prasad, Mukesh},
  booktitle = {Proceedings of the Pacific Rim Symposium on Image and Video Technology (PSIVT 2019)},
  title = {A Robust Face Recognition System for One Sample Problem},
  year = {2019},
  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2019-PSIVT-FR.pdf}
}

Prostate Cancer Classification based on Best First Search and Taguchi Feature Selection Method. Rahman, M. A.; Singh, P.; Chandren Muniyandi, R.; Mery, D.; and Prasad, M. In Proceedings of the Pacific Rim Symposium on Image and Video Technology (PSIVT 2019), 2019.

@INPROCEEDINGS{Mery2019:PSIVT-ProstateCancer,
  author = {Rahman, Md Akizur and Singh, Priyanka and Chandren Muniyandi, Ravie and Mery, Domingo and Prasad, Mukesh},
  booktitle = {Proceedings of the Pacific Rim Symposium on Image and Video Technology (PSIVT 2019)},
  title = {Prostate Cancer Classification based on Best First Search and Taguchi Feature Selection Method},
  year = {2019},
  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2019-PSIVT-ProstateCancer.pdf}
}

Handgun Detection in Single-spectrum Multiple X-ray Views Based on 3D Object Recognition. Journal of Nondestructive Evaluation, 38(66): 1–11. 2019.

Digital rock approach to model the permeability in an artificially heated and fractured granodiorite from the Liquiñe Geothermal System (39°S). Molina, E.; Arancibia, G.; Sepulveda, J.; Roquer, T.; Mery, D.; and Morata, D. Rock Mechanics and Rock Engineering, 1-26. 2019.

@article{Mery2019:RMRE,
  author = {E. Molina and G. Arancibia and J. Sepulveda and T. Roquer and D. Mery and D. Morata},
  title = {Digital rock approach to model the permeability in an artificially heated and fractured granodiorite from the Liquiñe Geothermal System (39°S)},
  journal = {Rock Mechanics and Rock Engineering},
  year = {2019},
  pages = {1-26},
  url = {https://doi.org/10.1007/s00603-019-01967-6},
  abstract = {The Southern Volcanic Zone of the Andes has a high potential in terms of geothermal resources and is an exceptional and poorly explored natural laboratory to study the interplay between tectonic stresses, thermal damage, low-permeable crystalline rocks, and fluid flow. Permeability is mostly related to the damage zones associated with the faults controlling regional tectonics, namely, the Liquiñe-Ofqui Fault System and Andean Transverse Faults. This research presents a laboratory approach comprising a characterization of the analogue host rock from a shallow, low-to-medium temperature geothermal system surrounding the Liquiñe area in Southern Chile (39°S) to better constrain intrinsic and extrinsic factors which allow permeable pathways to exist. We analyse the effect of thermal stress at 25, 150, and 210 °C in a granodiorite, measuring some petrophysical properties before and after applying thermal damage, and then load the samples until failure. We also compared petrophysical properties with the fracture network characterization using X-ray microcomputed tomography imaging, segmentation, and fluid flow computational simulations. The results show that thermal stress produces intercrystalline microcracks, which result in: (1) an increase in capillary absorption; (2) a decrease in ultrasonic wave velocities; (3) a decrease in compressive strength; (4) a decrease in fracture aperture; and (5) fluid flow simulations indicate that permeability is similar at different temperatures. We conclude that for the granodiorite host rock of the Liquiñe geothermal system, the combined effect of thermal stress, even at low temperature, may constitute an effective mechanism for sustaining permeability at the shallowest depths.}
}

The Southern Volcanic Zone of the Andes has a high potential in terms of geothermal resources and is an exceptional and poorly explored natural laboratory to study the interplay between tectonic stresses, thermal damage, low-permeable crystalline rocks, and fluid flow. Permeability is mostly related to the damage zones associated with the faults controlling regional tectonics, namely, the Liquiñe-Ofqui Fault System and Andean Transverse Faults. This research presents a laboratory approach comprising a characterization of the analogue host rock from a shallow, low-to-medium temperature geothermal system surrounding the Liquiñe area in Southern Chile (39°S) to better constrain intrinsic and extrinsic factors which allow permeable pathways to exist. We analyse the effect of thermal stress at 25, 150, and 210 °C in a granodiorite, measuring some petrophysical properties before and after applying thermal damage, and then load the samples until failure. We also compared petrophysical properties with the fracture network characterization using X-ray microcomputed tomography imaging, segmentation, and fluid flow computational simulations. The results show that thermal stress produces intercrystalline microcracks, which result in: (1) an increase in capillary absorption; (2) a decrease in ultrasonic wave velocities; (3) a decrease in compressive strength; (4) a decrease in fracture aperture; and (5) fluid flow simulations indicate that permeability is similar at different temperatures. We conclude that for the granodiorite host rock of the Liquiñe geothermal system, the combined effect of thermal stress, even at low temperature, may constitute an effective mechanism for sustaining permeability at the shallowest depths.

One-dimensional local binary pattern based color descriptor to classify stress values from photoelasticity videos. Brinez de Leon, J. C.; Restrepo, A.; Branch, J. W.; and Mery, D. In SPIE Conference Optics and Photonics for Information Processing XIII, 2019.

@INPROCEEDINGS{Mery2019:SPIE,
  author = {Brinez de Leon, J.C. and Restrepo, A. and Branch, J.W. and Mery, D.},
  booktitle = {SPIE Conference Optics and Photonics for Information Processing XIII},
  title = {One-dimensional local binary pattern based color descriptor to classify stress values from photoelasticity videos},
  year = {2019}
}

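For readers unfamiliar with the technique named in this title, a one-dimensional LBP encodes each sample of a signal by thresholding its temporal neighbors against it; the descriptor is the histogram of the resulting codes. An illustrative sketch (the radius and intended usage are assumptions, since no abstract is available for this entry):

import numpy as np

def lbp_1d(signal, radius=2):
    """signal: 1-D intensity sequence (e.g., one color channel of a pixel
    over time). Returns a histogram of binary codes as the descriptor."""
    codes = []
    for t in range(radius, len(signal) - radius):
        neighbors = np.concatenate([signal[t - radius:t], signal[t + 1:t + 1 + radius]])
        bits = (neighbors >= signal[t]).astype(int)      # threshold against center
        codes.append(int("".join(map(str, bits)), 2))    # binary code
    return np.bincount(codes, minlength=2 ** (2 * radius))
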
Face Recognition in Low-Quality Images using Adaptive Sparse Representations. Image and Vision Computing, 46-58. 2019.

On Low Resolution Face Recognition in the Wild: Comparisons and New Techniques. Li, P.; Prieto, L.; Mery, D.; and Flynn, P. IEEE Transactions on Information Forensics and Security. 2019. (accepted in Dec. 2018)

@article{Mery2019:IEEE-TIFS,
  author = {Li, Pei and Prieto, Loreto and Mery, Domingo and Flynn, Patrick},
  title = {On Low Resolution Face Recognition in the Wild: Comparisons and New Techniques},
  journal = {IEEE Transactions on Information Forensics and Security},
  year = {2019},
  note = {(accepted in Dec. 2018)},
  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2019-TIFS-WildFR.pdf}
}

Student Attendance System in Crowded Classrooms using a Smartphone Camera. Mery, D.; Mackenney, I.; and Villalobos, E. In 2019 IEEE Winter Conference on Applications of Computer Vision (WACV2019), 2019.

@INPROCEEDINGS{Mery2019:WACV,
  author = {Mery, D. and Mackenney, I. and Villalobos, E.},
  booktitle = {2019 IEEE Winter Conference on Applications of Computer Vision (WACV2019)},
  title = {Student Attendance System in Crowded Classrooms using a Smartphone Camera},
  year = {2019},
  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2019-WACV.pdf},
  abstract = {Tracking student attendance is a major concern in many educational institutions. The manual management of attendance sheets is laborious for crowded classrooms. In this paper, we propose and evaluate a general methodology for an automated student attendance system that can be used in crowded classrooms, in which the session images are taken by a smartphone camera. We release a realistic, fully annotated dataset of images of a classroom with around 70 students in 25 sessions, taken during 15 weeks. Ten face recognition algorithms based on learned and handcrafted features are evaluated using a protocol that takes into account the number of face images per subject used in the gallery. In our experiments, the best one was FaceNet, a method based on deep learning features, achieving around 95\% accuracy with only one enrollment image per subject. We believe that our automated student attendance system based on face recognition can be used to save time for both teachers and students and to prevent fake attendance.}
}

Tracking student attendance is a major concern in many educational institutions. The manual management of attendance sheets is laborious for crowded classrooms. In this paper, we propose and evaluate a general methodology for an automated student attendance system that can be used in crowded classrooms, in which the session images are taken by a smartphone camera. We release a realistic, fully annotated dataset of images of a classroom with around 70 students in 25 sessions, taken during 15 weeks. Ten face recognition algorithms based on learned and handcrafted features are evaluated using a protocol that takes into account the number of face images per subject used in the gallery. In our experiments, the best one was FaceNet, a method based on deep learning features, achieving around 95% accuracy with only one enrollment image per subject. We believe that our automated student attendance system based on face recognition can be used to save time for both teachers and students and to prevent fake attendance.

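A minimal sketch of the matching stage such a system needs, assuming some face-embedding model (FaceNet-style, the paper's best performer) has already produced gallery and session embeddings; the threshold and helper names are illustrative:

import numpy as np

def mark_attendance(gallery, session_faces, threshold=0.7):
    """gallery: {student_id: embedding}; session_faces: embeddings of the
    faces detected in the classroom photo. Returns recognized IDs."""
    ids = list(gallery)
    G = np.stack([gallery[i] for i in ids])
    G /= np.linalg.norm(G, axis=1, keepdims=True)
    present = set()
    for f in session_faces:
        sims = G @ (f / np.linalg.norm(f))        # cosine similarity
        j = int(np.argmax(sims))
        if sims[j] >= threshold:                  # accept confident matches only
            present.add(ids[j])
    return present
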
Palaeopermeability anisotropy and geometrical properties of sealed-microfractures from micro-CT analyses: An open-source implementation. Gomila, R.; Bracke, R.; Arancibia, G.; Mery, D.; Morata, D.; and Nehler, M. Micron, 117(2019): 29-39. 2019.

@article{Mery2018:Micron,
  author = {Gomila, R. and Bracke, R. and Arancibia, G. and Mery, D. and Morata, D. and Nehler, M.},
  title = {Palaeopermeability anisotropy and geometrical properties of sealed-microfractures from micro-CT analyses: An open-source implementation},
  journal = {Micron},
  year = {2019},
  volume = {117},
  number = {2019},
  pages = {29-39},
  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2019-Micron.pdf},
  abstract = {Fault zone permeability and the real 3D-spatial distribution of the fault-related fracture networks are critical in the assessment of fault zones behavior for fluids. The study of the real 3D-spatial distribution of the microfracture network, using X-ray micro-computed tomography, is a crucial factor to unravel the real structural permeability conditions of a fault zone. Despite the availability of several commercial software for rock properties estimation from X-ray micro-computed tomography scanning, their high cost and lack of programmability encourage the use of open-source data treatment. This work presents the implementation of a methodology flow for the quantification of both structural and geometrical parameters (fracture density, fracture aperture, fracture porosity, and fracture surface area), and the modeling of palaeopermeability of fault-related fractured samples, with a focus on the proper spatial orientation of both the sample and the results. This is performed with an easy-to-follow step-by-step implementation, by a combination of open-source software, newly implemented codes, and numerical methods. This approach keeps track of the sample's spatial orientation from the physical to the virtual world, thus assessing any fault-related palaeopermeability anisotropy.}
}

Fault zone permeability and the real 3D-spatial distribution of the fault-related fracture networks are critical in the assessment of fault zones behavior for fluids. The study of the real 3D-spatial distribution of the microfracture network, using X-ray micro-computed tomography, is a crucial factor to unravel the real structural permeability conditions of a fault zone. Despite the availability of several commercial software for rock properties estimation from X-ray micro-computed tomography scanning, their high cost and lack of programmability encourage the use of open-source data treatment. This work presents the implementation of a methodology flow for the quantification of both structural and geometrical parameters (fracture density, fracture aperture, fracture porosity, and fracture surface area), and the modeling of palaeopermeability of fault-related fractured samples, with a focus on the proper spatial orientation of both the sample and the results. This is performed with an easy-to-follow step-by-step implementation, by a combination of open-source software, newly implemented codes, and numerical methods. This approach keeps track of the sample's spatial orientation from the physical to the virtual world, thus assessing any fault-related palaeopermeability anisotropy.

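As a flavor of the open-source quantification the paper advocates, here is a hedged sketch computing two of the listed parameters (fracture porosity and a crude surface-area estimate) from a segmented binary micro-CT volume; the voxel size and face-counting scheme are assumptions, not the paper's workflow:

import numpy as np

def fracture_stats(vol, voxel_mm=0.01):
    """vol: 3-D bool array from a segmented micro-CT scan, True = fracture."""
    porosity = vol.mean()                         # fracture volume fraction
    faces = sum(np.count_nonzero(np.diff(vol.astype(np.int8), axis=a))
                for a in range(3))                # exposed voxel faces
    return porosity, faces * voxel_mm ** 2        # porosity, rough area (mm^2)
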
Detecting and characterizing upwelling filaments in a numerical ocean model. Artal, O.; Sepúlveda, H.; Mery, D.; and Pieringer, C. Computers and Geosciences, 2019(122): 25-34. 2019.

@article{Mery2019:CAGEO,
  author = {Artal, O. and Sep\'ulveda, H.H. and Mery, D. and Pieringer, C.},
  title = {Detecting and characterizing upwelling filaments in a numerical ocean model},
  journal = {Computers and Geosciences},
  year = {2019},
  volume = {2019},
  number = {122},
  pages = {25-34},
  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2019-CAGEO.pdf},
  abstract = {Upwelling filaments are long (approx. 100s of km), narrow (approx. 10 km) structures in the coastal ocean. They export nutrients and prevent the movement of larvae along the coast. Filaments can be observed in satellite images and in numerical models, but their manual identification and characterization are complex and time-consuming. Here we present a Matlab code for a manual method to assist experts in this task, and a code for an automatic filament detection method (AFD) based on image processing and pattern recognition to identify and extract features in output files from a numerical ocean model. AFD was tested with a simulation of northern Chile. AFD had a similar performance in filament detection to that of human experts. AFD provides substantial time savings when analyzing a large number of images from a numerical ocean model. AFD is open source and freely available.}
}

Upwelling filaments are long (approx. 100s of km), narrow (approx. 10 km) structures in the coastal ocean. They export nutrients and prevent the movement of larvae along the coast. Filaments can be observed in satellite images and in numerical models, but their manual identification and characterization are complex and time-consuming. Here we present a Matlab code for a manual method to assist experts in this task, and a code for an automatic filament detection method (AFD) based on image processing and pattern recognition to identify and extract features in output files from a numerical ocean model. AFD was tested with a simulation of northern Chile. AFD had a similar performance in filament detection to that of human experts. AFD provides substantial time savings when analyzing a large number of images from a numerical ocean model. AFD is open source and freely available.

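A hedged sketch of what an AFD-like detector can look like: threshold a 2D sea-surface-temperature anomaly field, label connected components, and keep long, narrow ones. The paper's released code is Matlab and far more complete; the thresholds and elongation test below are illustrative assumptions:

import numpy as np
from scipy import ndimage

def detect_filaments(sst_anom, thresh=-0.5, min_elong=4.0):
    """sst_anom: 2-D sea-surface-temperature anomaly field (deg C)."""
    mask = sst_anom < thresh                      # cold upwelled water
    labels, _ = ndimage.label(mask)
    hits = []
    for sl in ndimage.find_objects(labels):
        h = sl[0].stop - sl[0].start
        w = sl[1].stop - sl[1].start
        if max(h, w) / max(min(h, w), 1) >= min_elong:   # long and narrow
            hits.append(sl)
    return hits
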
2018 (8)

Computer Vision for Fault Detection in Aluminum Castings. Mery, D. In Encyclopedia of Aluminum and Its Alloys, pages 332-377. CRC Press, 2018.

@inbook{Mery2018-EAIA,
  author = {Domingo Mery},
  title = {Computer Vision for Fault Detection in Aluminum Castings},
  chapter = {140000157-23},
  booktitle = {Encyclopedia of Aluminum and Its Alloys},
  publisher = {CRC Press},
  year = {2018},
  pages = {332-377},
  doi = {10.1201/9781351045636-140000157},
  url = {http://dmery.sitios.ing.uc.cl/Prints/Chapters/2018-EAIA-Castings.pdf}
}

Science and technology for Chile's future. Mery, D.; and Kuzmicic, J. Journal I3, School of Engineering, 2018(10): 2-3. 2018.

@article{Mery2018:I3-Editorial,
  author = {Mery, D. and Kuzmicic, J.},
  title = {Science and technology for Chile's future},
  journal = {Journal I3, School of Engineering},
  year = {2018},
  volume = {2018},
  number = {10},
  pages = {2-3},
  url = {http://dmery.sitios.ing.uc.cl/Prints/Other-Journals/2018-JournalI3-Editorial.pdf}
}

Augmenting intensity change to recognize faces in unrecognizable images. Fontaine, A.; and Mery, D. Journal I3, School of Engineering, 2018(10): 46-57. 2018.

@article{Mery2018:I3-AFontaine,
  author = {Fontaine, A. and Mery, D.},
  title = {Augmenting intensity change to recognize faces in unrecognizable images},
  journal = {Journal I3, School of Engineering},
  year = {2018},
  volume = {2018},
  number = {10},
  pages = {46-57},
  url = {http://dmery.sitios.ing.uc.cl/Prints/Other-Journals/2018-JournalI3-AFontaine.pdf}
}

Face Recognition in Low Quality Images: A Survey. Li, P.; Prieto, L.; Mery, D.; and Flynn, P. arXiv preprint arXiv:1805.11519. 2018.

@article{Mery2018:arXiv_c,
  author = {Li, Pei and Prieto, Loreto and Mery, Domingo and Flynn, Patrick},
  title = {Face Recognition in Low Quality Images: A Survey},
  journal = {arXiv preprint arXiv:1805.11519},
  year = {2018},
  url = {https://arxiv.org/pdf/1805.11519},
  abstract = {Low-resolution face recognition (LRFR) has received increasing attention over the past few years. It is widely applicable in real-world environments where high-resolution or high-quality images are hard to capture. One of the biggest demands for LRFR technologies is video surveillance. As the number of surveillance cameras in the city increases, the videos they capture will need to be processed automatically. However, those videos or images are usually captured with large standoffs, arbitrary illumination conditions, and diverse angles of view. Faces in these images are generally small in size. Several studies have addressed this problem using techniques like super resolution, deblurring, or learning a relationship between different resolution domains. In this paper, we provide a comprehensive review of approaches to low-resolution face recognition in the past five years. First, a general problem definition is given. Then, a systematic analysis of the works on this topic is presented by category. In addition to describing the methods, we also focus on datasets and experiment settings. We further address related work on unconstrained low-resolution face recognition and compare it with results that use synthetic low-resolution data. Finally, we summarize the general limitations and speculate on priorities for future effort.}
}

Low-resolution face recognition (LRFR) has received increasing attention over the past few years. It is widely applicable in real-world environments where high-resolution or high-quality images are hard to capture. One of the biggest demands for LRFR technologies is video surveillance. As the number of surveillance cameras in the city increases, the videos they capture will need to be processed automatically. However, those videos or images are usually captured with large standoffs, arbitrary illumination conditions, and diverse angles of view. Faces in these images are generally small in size. Several studies have addressed this problem using techniques like super resolution, deblurring, or learning a relationship between different resolution domains. In this paper, we provide a comprehensive review of approaches to low-resolution face recognition in the past five years. First, a general problem definition is given. Then, a systematic analysis of the works on this topic is presented by category. In addition to describing the methods, we also focus on datasets and experiment settings. We further address related work on unconstrained low-resolution face recognition and compare it with results that use synthetic low-resolution data. Finally, we summarize the general limitations and speculate on priorities for future effort.

Low Resolution Face Recognition in the Wild. Li, P.; Prieto, L.; Mery, D.; and Flynn, P. arXiv preprint arXiv:1805.11529. 2018.

@article{Mery2018:arXiv_b,
  author = {Li, Pei and Prieto, Loreto and Mery, Domingo and Flynn, Patrick},
  title = {Low Resolution Face Recognition in the Wild},
  journal = {arXiv preprint arXiv:1805.11529},
  year = {2018},
  url = {https://arxiv.org/pdf/1805.11529},
  abstract = {Although face recognition systems have achieved impressive performance in recent years, the low-resolution face recognition (LRFR) task remains challenging, especially when the LR faces are captured under non-ideal conditions, as is common in surveillance-based applications. Faces captured in such conditions are often contaminated by blur, nonuniform lighting, and nonfrontal face pose. In this paper, we analyze face recognition techniques using data captured under low-quality conditions in the wild. We provide a comprehensive analysis of experimental results for two of the most important applications in real surveillance applications, and demonstrate practical approaches to handle both cases that show promising performance. The following three contributions are made: {\em (i)} we conduct experiments to evaluate super-resolution methods for low-resolution face recognition; {\em (ii)} we study face re-identification on various public face datasets including real surveillance and low-resolution subsets of large-scale datasets, present a baseline result for several deep learning based approaches, and improve them by introducing a GAN pre-training approach and fully convolutional architecture; and {\em (iii)} we explore low-resolution face identification by employing a state-of-the-art supervised discriminative learning approach. Evaluations are conducted on challenging portions of the SCFace and UCCSface datasets.}
}

Although face recognition systems have achieved impressive performance in recent years, the low-resolution face recognition (LRFR) task remains challenging, especially when the LR faces are captured under non-ideal conditions, as is common in surveillance-based applications. Faces captured in such conditions are often contaminated by blur, nonuniform lighting, and nonfrontal face pose. In this paper, we analyze face recognition techniques using data captured under low-quality conditions in the wild. We provide a comprehensive analysis of experimental results for two of the most important applications in real surveillance applications, and demonstrate practical approaches to handle both cases that show promising performance. The following three contributions are made: (i) we conduct experiments to evaluate super-resolution methods for low-resolution face recognition; (ii) we study face re-identification on various public face datasets including real surveillance and low-resolution subsets of large-scale datasets, present a baseline result for several deep learning based approaches, and improve them by introducing a GAN pre-training approach and fully convolutional architecture; and (iii) we explore low-resolution face identification by employing a state-of-the-art supervised discriminative learning approach. Evaluations are conducted on challenging portions of the SCFace and UCCSface datasets.

Seed-Point Detection of Clumped Convex Objects by Short-Range Attractive Long-Range Repulsive Particle Clustering. Kapaldo, J.; Han, X.; and Mery, D. arXiv preprint arXiv:1804.04071. 2018.

@article{Mery2018:arXiv_a,
  author = {Kapaldo, James and Han, Xu and Mery, Domingo},
  title = {Seed-Point Detection of Clumped Convex Objects by Short-Range Attractive Long-Range Repulsive Particle Clustering},
  journal = {arXiv preprint arXiv:1804.04071},
  year = {2018},
  url = {https://arxiv.org/pdf/1804.04071},
  abstract = {Locating the center of convex objects is important in both image processing and unsupervised machine learning/data clustering fields. The automated analysis of biological images uses both of these fields for locating cell nuclei and for discovering new biological effects or cell phenotypes. In this work, we develop a novel clustering method for locating the centers of overlapping convex objects by modeling particles that interact by a short-range attractive and long-range repulsive potential and are confined to a potential well created from the data. We apply this method to locating the centers of clumped nuclei in cultured cells, where we show that it results in a significant improvement over existing methods (8.2\% in F1 score); and we apply it to unsupervised learning on a difficult data set that has rare classes without local density maxima, and show that it is able to locate cluster centers well when other clustering techniques fail.}
}

Locating the center of convex objects is important in both image processing and unsupervised machine learning/data clustering fields. The automated analysis of biological images uses both of these fields for locating cell nuclei and for discovering new biological effects or cell phenotypes. In this work, we develop a novel clustering method for locating the centers of overlapping convex objects by modeling particles that interact by a short-range attractive and long-range repulsive potential and are confined to a potential well created from the data. We apply this method to locating the centers of clumped nuclei in cultured cells, where we show that it results in a significant improvement over existing methods (8.2% in F1 score); and we apply it to unsupervised learning on a difficult data set that has rare classes without local density maxima, and show that it is able to locate cluster centers well when other clustering techniques fail.

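The core mechanism is particles that settle into a data-defined potential well while interacting through a short-range attractive, long-range repulsive pairwise force. A toy sketch of one update step (the functional form of the force and all constants are assumptions, not the paper's model):

import numpy as np

def salr_step(pos, grad_well, dt=0.05, a=1.0, r=0.3):
    """pos: (N,2) particle positions; grad_well(p) -> (N,2) gradient of the
    confining potential built from the data (assumed given)."""
    d = pos[:, None, :] - pos[None, :, :]            # pairwise displacements
    dist = np.linalg.norm(d, axis=-1)
    np.fill_diagonal(dist, np.inf)                   # no self-interaction
    # attractive inside radius r, repulsive decay outside: an assumed form
    mag = np.where(dist < r, -a * (r - dist), a * r / dist**2)
    force = (mag[..., None] * d / dist[..., None]).sum(axis=1)
    return pos + dt * (-grad_well(pos) + force)      # settle into the well

Iterating this step clumps particles near each density peak while keeping distinct clumps apart; the clump positions are the seed points.
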
Recognition of Faces and Facial Attributes using Accumulative Local Sparse Representations. Mery, D.; and Banerjee, S. In International Conference on Acoustics, Speech, and Signal Processing (ICASSP 2018), 2018. Calgary, Canada, 15-20/Apr.

@INPROCEEDINGS{Mery2018:ICASSP,
  author = {Mery, D. and Banerjee, S.},
  booktitle = {International Conference on Acoustics, Speech, and Signal Processing (ICASSP 2018)},
  title = {Recognition of Faces and Facial Attributes using Accumulative Local Sparse Representations},
  year = {2018},
  note = {Calgary, Canada, 15-20/Apr.},
  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2018-ICASSP.pdf},
  abstract = {This paper addresses the problem of automated recognition of faces and facial attributes by proposing a new general approach called Accumulative Local Sparse Representation (ALSR). In the learning stage, we build a general dictionary of patches that are extracted from face images in a dense manner on a grid. In the testing stage, patches of the query image are sparsely represented using a {\em local dictionary}. This dictionary contains similar atoms of the general dictionary that are spatially in the same neighborhood. If the sparsity concentration index of the query patch is high enough, we build a descriptor by using a sum-pooling operator that evaluates the contribution provided by the atoms of each class. The classification is performed by maximizing the sum of the descriptors of all selected patches. ALSR can learn a model for each recognition task dealing with more variability in ambient lighting, pose, expression, occlusion, face size, etc. Experiments on three popular face databases (LFW for faces, AR for gender and Oulu-CASIA for expressions), show that ALSR outperforms representative methods in the literature, when a huge number of training images is not available.}
}

This paper addresses the problem of automated recognition of faces and facial attributes by proposing a new general approach called Accumulative Local Sparse Representation (ALSR). In the learning stage, we build a general dictionary of patches that are extracted from face images in a dense manner on a grid. In the testing stage, patches of the query image are sparsely represented using a local dictionary. This dictionary contains similar atoms of the general dictionary that are spatially in the same neighborhood. If the sparsity concentration index of the query patch is high enough, we build a descriptor by using a sum-pooling operator that evaluates the contribution provided by the atoms of each class. The classification is performed by maximizing the sum of the descriptors of all selected patches. ALSR can learn a model for each recognition task dealing with more variability in ambient lighting, pose, expression, occlusion, face size, etc. Experiments on three popular face databases (LFW for faces, AR for gender and Oulu-CASIA for expressions), show that ALSR outperforms representative methods in the literature, when a huge number of training images is not available.

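A hedged sketch of the scoring idea: sparse-code each query patch over a dictionary whose atoms carry class labels, keep only patches with a high sparsity concentration index (SCI), and sum-pool the per-class contributions. For simplicity this codes against the full dictionary rather than the spatially local dictionaries ALSR uses; the thresholds are illustrative:

import numpy as np
from sklearn.linear_model import orthogonal_mp

def alsr_score(patches, D, atom_class, n_classes, k=5, sci_min=0.2):
    """patches: (m,d) query patches; D: (d,n) dictionary with one class
    label per atom in atom_class. Returns the predicted class."""
    score = np.zeros(n_classes)
    for y in patches:
        x = orthogonal_mp(D, y, n_nonzero_coefs=k)       # sparse code
        contrib = np.array([np.abs(x[atom_class == c]).sum()
                            for c in range(n_classes)])
        total = contrib.sum() + 1e-12
        sci = (n_classes * contrib.max() / total - 1) / (n_classes - 1)
        if sci >= sci_min:                               # confident patches only
            score += contrib / total                     # sum-pooling per class
    return int(np.argmax(score))
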
On-line Reinforcement Learning Detection System. Prasad, M.; Sundaram, S.; and Mery, D. In 3rd INNS Conference on Big Data and Deep Learning (INNS BDDL 2018), 2018. Sanur, Bali, Indonesia, 17-19/Apr.

@INPROCEEDINGS{Mery2018:INNS-BDDL,
  author = {Prasad, M. and Sundaram, S. and Mery, D.},
  booktitle = {3rd INNS Conference on Big Data and Deep Learning (INNS BDDL 2018)},
  title = {On-line Reinforcement Learning Detection System},
  year = {2018},
  note = {Sanur, Bali, Indonesia, 17-19/Apr.}
}

2017 (15)

Women Engineers to the Power. Mery, D.; and Kuzmicic, J. Journal I3, School of Engineering, 2017(9): 2-3. 2017.

@article{Mery2017:I3-Editorial,
  author = {Mery, D. and Kuzmicic, J.},
  title = {Women Engineers to the Power},
  journal = {Journal I3, School of Engineering},
  year = {2017},
  volume = {2017},
  number = {9},
  pages = {2-3},
  url = {http://dmery.sitios.ing.uc.cl/Prints/Other-Journals/2017-JournalI3-Editorial.pdf}
}

Method for Automatic Surface Inspection using Models-Based 3D Descriptor. Madrigal, C.; Branch, J.; Restrepo, A.; and Mery, D. Sensors, 17(10): 2262. 2017.

@article{Mery2017:Sensors,
  author = {Madrigal, C.A. and Branch, J.W. and Restrepo, A. and Mery, D.},
  title = {Method for Automatic Surface Inspection using Models-Based 3D Descriptor},
  journal = {Sensors},
  year = {2017},
  volume = {17},
  number = {10},
  pages = {2262},
  url = {http://www.mdpi.com/1424-8220/17/10/2262/pdf},
  doi = {10.3390/s17102262},
  abstract = {Automatic visual inspection allows identifying surface defects in manufactured parts. Nevertheless, when defects are on a sub-millimeter scale, detecting and recognizing them is a challenge, in particular when the defect generates topological deformations that are not shown as a strong contrast in the 2D image. In this paper, we present a method to recognize surface defects in 3D point clouds. First, we propose a novel 3D local descriptor called MPFH (Model Point Feature Histogram) for defect detection. Our descriptor is inspired by earlier ones such as PFH (Point Feature Histogram). To construct the MPFH descriptor, the models that best fit the local surface and their normal vectors are estimated. For each surface model, its contribution weight to the formation of the surface region is calculated, and from the relative difference between models of the same region a histogram is generated representing the underlying surface changes. Second, through a classification stage, the points on the surface are labeled as one of 5 types of primitives and the defect is detected. Third, the connected components of primitives are projected to a plane, forming a 2D image. Finally, 2D geometrical features are extracted and, by a support vector machine, the defects are recognized. The database used is composed of 3D simulated surfaces, 3D reconstructions of defects in welding, artificial teeth, indentations in materials, ceramics and 3D models of defects. The quantitative and qualitative results showed that the proposed description method is robust to noise and to the scale factor and sufficiently discriminative to detect some surface defects. The performance evaluation of the proposed method was performed on a classification task of 3D point clouds into primitives, reporting an accuracy of 95\%, higher than other state-of-the-art descriptors. The rate of recognition of defects was close to 94\%.}
}

Automatic visual inspection allows identifying surface defects in manufactured parts. Nevertheless, when defects are on a sub-millimeter scale, detecting and recognizing them is a challenge, in particular when the defect generates topological deformations that are not shown as a strong contrast in the 2D image. In this paper, we present a method to recognize surface defects in 3D point clouds. First, we propose a novel 3D local descriptor called MPFH (Model Point Feature Histogram) for defect detection. Our descriptor is inspired by earlier ones such as PFH (Point Feature Histogram). To construct the MPFH descriptor, the models that best fit the local surface and their normal vectors are estimated. For each surface model, its contribution weight to the formation of the surface region is calculated, and from the relative difference between models of the same region a histogram is generated representing the underlying surface changes. Second, through a classification stage, the points on the surface are labeled as one of 5 types of primitives and the defect is detected. Third, the connected components of primitives are projected to a plane, forming a 2D image. Finally, 2D geometrical features are extracted and, by a support vector machine, the defects are recognized. The database used is composed of 3D simulated surfaces, 3D reconstructions of defects in welding, artificial teeth, indentations in materials, ceramics and 3D models of defects. The quantitative and qualitative results showed that the proposed description method is robust to noise and to the scale factor and sufficiently discriminative to detect some surface defects. The performance evaluation of the proposed method was performed on a classification task of 3D point clouds into primitives, reporting an accuracy of 95%, higher than other state-of-the-art descriptors. The rate of recognition of defects was close to 94%.

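PFH-style descriptors such as the proposed MPFH start from per-point surface normals. A self-contained sketch of that building block (normal estimation from the PCA of local neighborhoods); the neighborhood size is an assumption, and the MPFH histogram itself is not shown:

import numpy as np
from scipy.spatial import cKDTree

def estimate_normals(points, k=20):
    """points: (N,3) cloud. Returns one unit normal per point."""
    tree = cKDTree(points)
    _, idx = tree.query(points, k=k)
    normals = np.empty_like(points)
    for i, nb in enumerate(idx):
        nbrs = points[nb] - points[nb].mean(axis=0)
        # eigenvector of the smallest eigenvalue of the local covariance
        w, v = np.linalg.eigh(nbrs.T @ nbrs)
        normals[i] = v[:, 0]
    return normals
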
MEG source reconstruction method impacts the source-space connectivity estimation: A comparison between minimum-norm solution and beamforming. Hincapie, A.; Kujala, J.; Mattout, J.; Pascarella, A.; Daligault, S.; Delpuech, C.; Mery, D.; Cosmelli, D.; and Jerbi, K. In International Conference on Basic and Clinical Multimodal Imaging (BaCI2017), 2017. Bern, 29/Aug-02/Sep.

@INPROCEEDINGS{Mery2017:BACI,
  author = {Hincapie, A.S. and Kujala, J. and Mattout, J. and Pascarella, A. and Daligault, S. and Delpuech, C. and Mery, D. and Cosmelli, D. and Jerbi, K.},
  booktitle = {International Conference on Basic and Clinical Multimodal Imaging (BaCI2017)},
  title = {{MEG} source reconstruction method impacts the source-space connectivity estimation: A comparison between minimum-norm solution and beamforming},
  year = {2017},
  note = {Bern, 29/Aug-02/Sep}
}

Modeling Search Behaviors during the Acquisition of Expertise in a Sequential Decision-Making Task. Moenne-Loccoz, C.; Vergara, R.; Lopez, V.; Mery, D.; and Cosmelli, D. Frontiers in Computational Neuroscience, 11(80). 2017.

@article{Mery2017:Frontiers,
  author = {Moenne-Loccoz, C. and Vergara, R.C. and Lopez, V. and Mery, D. and Cosmelli, D.},
  title = {Modeling Search Behaviors during the Acquisition of Expertise in a Sequential Decision-Making Task},
  journal = {Frontiers in Computational Neuroscience},
  year = {2017},
  volume = {11},
  number = {80},
  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2017-FNCOM.pdf},
  abstract = {Our daily interaction with the world is plagued with situations in which we develop experience through self-motivated repetition of the same task. In many of these interactions, and especially when dealing with computer and machine interfaces, we must deal with sequences of decisions and actions. For instance, when drawing cash from an ATM machine, choices are presented in a step-by-step fashion and a specific sequence of choices must be performed in order to produce the expected outcome. But, as we become experts in the use of such interfaces, is it possible to identify specific search and learning strategies? And if so, can we use this information to predict future actions? In addition to better understanding the cognitive processes underlying sequential decision making, this could allow building adaptive interfaces that can facilitate interaction at different moments of the learning curve. Here we tackle the question of modeling sequential decision-making behavior in a simple human-computer interface that instantiates a 4-level binary decision tree (BDT) task. We record behavioral data from voluntary participants while they attempt to solve the task. Using a Hidden Markov Model-based approach that capitalizes on the hierarchical structure of behavior, we then model their performance during the interaction. Our results show that partitioning the problem space into a small set of hierarchically related stereotyped strategies can potentially capture a host of individual decision making policies. This allows us to follow how participants learn and develop expertise in the use of the interface. Moreover, using a Mixture of Experts based on these stereotyped strategies, the model is able to predict the behavior of participants that master the task.}
}

Our daily interaction with the world is plagued with situations in which we develop experience through self-motivated repetition of the same task. In many of these interactions, and especially when dealing with computer and machine interfaces, we must deal with sequences of decisions and actions. For instance, when drawing cash from an ATM machine, choices are presented in a step-by-step fashion and a specific sequence of choices must be performed in order to produce the expected outcome. But, as we become experts in the use of such interfaces, is it possible to identify specific search and learning strategies? And if so, can we use this information to predict future actions? In addition to better understanding the cognitive processes underlying sequential decision making, this could allow building adaptive interfaces that can facilitate interaction at different moments of the learning curve. Here we tackle the question of modeling sequential decision-making behavior in a simple human-computer interface that instantiates a 4-level binary decision tree (BDT) task. We record behavioral data from voluntary participants while they attempt to solve the task. Using a Hidden Markov Model-based approach that capitalizes on the hierarchical structure of behavior, we then model their performance during the interaction. Our results show that partitioning the problem space into a small set of hierarchically related stereotyped strategies can potentially capture a host of individual decision making policies. This allows us to follow how participants learn and develop expertise in the use of the interface. Moreover, using a Mixture of Experts based on these stereotyped strategies, the model is able to predict the behavior of participants that master the task.

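The modeling relies on hidden Markov machinery over sequences of discrete choices. As a self-contained illustration of one ingredient, here is a minimal Viterbi decoder for the most likely hidden-state path (all probabilities below are placeholders, not the paper's fitted model):

import numpy as np

def viterbi(obs, pi, A, B):
    """obs: observation indices; pi: (S,) initial probs; A: (S,S) transition
    probs; B: (S,O) emission probs. Returns the most likely state path."""
    logd = np.log(pi) + np.log(B[:, obs[0]])
    back = []
    for o in obs[1:]:
        cand = logd[:, None] + np.log(A)       # score of (from, to) pairs
        back.append(cand.argmax(axis=0))       # best predecessor per state
        logd = cand.max(axis=0) + np.log(B[:, o])
    path = [int(logd.argmax())]
    for bp in reversed(back):                  # backtrack to the start
        path.append(int(bp[path[-1]]))
    return path[::-1]
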
Comparing Neural and Attractiveness-based Visual Features for Artwork Recommendation. Dominguez, V.; Messina, P.; Parra, D.; Mery, D.; Trattner, C.; and Soto, A. arXiv preprint arXiv:1706.07515v1. 2017.

@article{RecSys2017:Artwork,\n  title={Comparing Neural and Attractiveness-based Visual Features for Artwork Recommendation},\n  author={Dominguez, V. and Messina, P. and Parra, D. and Mery, D. and Trattner, C. and Soto, A.},\n  journal={arXiv preprint arXiv:1706.07515v1},\n  url = {https://arxiv.org/abs/1706.07515},\n  abstract = {Advances in image processing and computer vision in recent years have brought about the use of visual features in artwork recommendation. Recent works have shown that visual features obtained from pre-trained deep neural networks (DNNs) perform very well for recommending digital art. Other recent works have shown that explicit visual features (EVF) based on attractiveness can perform well in preference prediction tasks, but no previous work has compared DNN features versus specific attractiveness-based visual features (e.g. brightness, texture) in terms of recommendation performance. In this work, we study and compare the performance of DNN and EVF features for the purpose of physical artwork recommendation using transaction data from UGallery, an online store of physical paintings. In addition, we perform an exploratory analysis to understand if DNN embedded features have some relation with certain EVF. Our results show that DNN features outperform EVF, that certain EVF features are more suited for physical artwork recommendation and, finally, we show evidence that certain neurons in the DNN might be partially encoding visual features such as brightness, providing an opportunity for explaining recommendations based on visual neural models.},\n  year={2017}\n}\n\n\n\n\n
\n
\n\n\n
\n Advances in image processing and computer vision in recent years have brought about the use of visual features in artwork recommendation. Recent works have shown that visual features obtained from pre-trained deep neural networks (DNNs) perform very well for recommending digital art. Other recent works have shown that explicit visual features (EVF) based on attractiveness can perform well in preference prediction tasks, but no previous work has compared DNN features versus specific attractiveness-based visual features (e.g. brightness, texture) in terms of recommendation performance. In this work, we study and compare the performance of DNN and EVF features for the purpose of physical artwork recommendation using transaction data from UGallery, an online store of physical paintings. In addition, we perform an exploratory analysis to understand if DNN embedded features have some relation with certain EVF. Our results show that DNN features outperform EVF, that certain EVF features are more suited for physical artwork recommendation and, finally, we show evidence that certain neurons in the DNN might be partially encoding visual features such as brightness, providing an opportunity for explaining recommendations based on visual neural models.\n
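A minimal sketch of the comparison set up above: an explicit attractiveness-based feature (brightness) and a generic embedding can be plugged into the same cosine-similarity ranking. The brightness formula and the random stand-in catalog are assumptions for illustration; the paper's neural features come from a pre-trained DNN, whose embedding vectors would simply replace the feature vectors here.

```python
import numpy as np

def brightness(img):
    """Explicit visual feature (EVF): mean luminance of an RGB image in [0, 1]."""
    r, g, b = img[..., 0], img[..., 1], img[..., 2]
    return np.mean(0.299 * r + 0.587 * g + 0.114 * b)

def cosine(u, v):
    return u @ v / (np.linalg.norm(u) * np.linalg.norm(v) + 1e-12)

def recommend(query_feat, catalog_feats, k=5):
    """Rank catalog items by cosine similarity to the query's feature vector."""
    sims = [cosine(query_feat, f) for f in catalog_feats]
    return np.argsort(sims)[::-1][:k]

# Toy catalog of random "paintings"; DNN embeddings would be swapped in
# for the 1-D brightness vectors without changing the ranking code.
catalog = [np.random.rand(64, 64, 3) for _ in range(100)]
evf = [np.array([brightness(im)]) for im in catalog]
print(recommend(evf[0], evf, k=5))
```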
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Learning Face Similarity for Re-identification from Real Surveillance Video: A Deep Metric Solution.\n \n \n \n\n\n \n Li, P.; Flynn, P.; Mery, D.; and Prieto, M.\n\n\n \n\n\n\n In International Joint Conference on Biometrics (IJCB2017), 2017. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Mery2017:IJCB, \nauthor={Li, P. and Flynn, P. and Mery, D. and Prieto, M.L.}, \nbooktitle={International Joint Conference on Biometrics (IJCB2017)},\ntitle={Learning Face Similarity for Re-identification from Real Surveillance Video: A Deep Metric Solution}, \nyear={2017},\nabstract = {Person re-identification (ReID) is the task of automatically matching persons across surveillance cameras with location or time differences. Nearly all proposed ReID approaches exploit body features. Even if successfully captured in the scene, faces are often assumed to be unhelpful to the ReID process. As cameras and surveillance systems improve, `Facial ReID' approaches deserve attention. The following contributions are made in this work: 1) We describe a high-quality dataset for person re-identification featuring faces. This dataset was collected from a real surveillance network in a municipal rapid transit system, and includes the same people appearing in multiple sites at multiple times wearing different attire. 2) We employ new DNN architectures and patch matching techniques to handle face misalignment in quality regimes where landmarking fails. We further boost the performance by adopting the fully convolutional structure and spatial pyramid pooling (SPP).\n}\n}\n\n\n
\n
\n\n\n
\n Person re-identification (ReID) is the task of automatically matching persons across surveillance cameras with location or time differences. Nearly all proposed ReID approaches exploit body features. Even if successfully captured in the scene, faces are often assumed to be unhelpful to the ReID process. As cameras and surveillance systems improve, `Facial ReID' approaches deserve attention. The following contributions are made in this work: 1) We describe a high-quality dataset for person re-identification featuring faces. This dataset was collected from a real surveillance network in a municipal rapid transit system, and includes the same people appearing in multiple sites at multiple times wearing different attire. 2) We employ new DNN architectures and patch matching techniques to handle face misalignment in quality regimes where landmarking fails. We further boost the performance by adopting the fully convolutional structure and spatial pyramid pooling (SPP). \n
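A hedged sketch of the kind of deep-metric objective such a solution typically rests on: a triplet margin loss that pulls same-identity face embeddings together and pushes different identities apart. The embedding dimension, margin, and random inputs are placeholders; the paper's actual architecture, patch matching, and SPP components are not reproduced here.

```python
import torch
import torch.nn.functional as F

def triplet_loss(anchor, positive, negative, margin=0.2):
    """Hinge on the gap between positive-pair and negative-pair distances."""
    d_pos = F.pairwise_distance(anchor, positive)
    d_neg = F.pairwise_distance(anchor, negative)
    return F.relu(d_pos - d_neg + margin).mean()

# Toy L2-normalized embeddings standing in for the output of a face CNN.
emb = lambda n: F.normalize(torch.randn(n, 128), dim=1)
print(triplet_loss(emb(8), emb(8), emb(8)).item())
# PyTorch also ships the same objective as a ready-made module:
print(torch.nn.TripletMarginLoss(margin=0.2)(emb(8), emb(8), emb(8)).item())
```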
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Biometría vs. Ficción.\n \n \n \n \n\n\n \n Mery, D.\n\n\n \n\n\n\n In Especial de Videovigilancia y Control de Acceso, El Mercurio (23/05/17). 2017.\n \n\n\n\n
\n\n\n\n \n \n \"BiometríaPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Mery2017:ElMercurio,\n\tAuthor = {Mery, D.},\n\tTitle = {{Biometr\\'ia vs. Ficci\\'on}},\n  booktitle ={ Especial de Videovigilancia y Control de Acceso, El Mercurio (23/05/17)},\n\turl = {http://impresa.elmercurio.com/Pages/NewsDetail.aspx?dt=2017-05-23&PaginaId=4&SupplementId=19&BodyId=17},\n\tYear = {2017}}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Logarithmic X-ray Imaging Model for Baggage Inspection: Simulation and Object Detection.\n \n \n \n \n\n\n \n Mery, D.; and Katsaggelos, A.\n\n\n \n\n\n\n In 13th IEEE CVPR Workshop on Perception Beyond the Visible Spectrum (PBVS 2017), 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Mery2017:PBVS, \nauthor={Mery, D. and Katsaggelos, A.K.}, \nbooktitle={13th IEEE CVPR Workshop on Perception Beyond the Visible Spectrum (PBVS 2017)},\ntitle={A Logarithmic X-ray Imaging Model for Baggage Inspection: Simulation and Object Detection}, \nyear={2017},\nabstract = {In recent years, many computer vision algorithms have been developed for X-ray testing tasks. Some of them deal with baggage inspection, in which the aim is to automatically detect target objects. The progress in automated baggage inspection, however, is modest and very limited compared to what is needed because X-ray screening systems are still being operated by human inspectors. In this work, we present an X-ray imaging model that can separate foreground from background in baggage screening. The model can be used in two main tasks: i) Simulation of new X-ray images, where simulated images can be used in training programs for human inspectors, or can be used to enhance datasets for computer vision algorithms. ii) Detection of (threat) objects, where new algorithms can be employed to perform automated baggage inspection or to aid a user in the inspection task by showing potential threats. In our model, rather than the multiplication of foreground and background that is typically used in X-ray imaging, we propose the addition of logarithmic images. This allows the use of linear strategies to superimpose images of threat objects onto X-ray images and the use of sparse representations in order to segment target objects. In our experiments, we simulate new X-ray images of handguns, shuriken and razor blades, in which it is impossible to distinguish simulated and real X-ray images. In addition, we show in our experiments the effective detection of shuriken, razor blades and handguns using the proposed algorithm.},\nurl = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2017-PBVS.pdf}\n}\n\n\n\n
\n
\n\n\n
\n In recent years, many computer vision algorithms have been developed for X-ray testing tasks. Some of them deal with baggage inspection, in which the aim is to automatically detect target objects. The progress in automated baggage inspection, however, is modest and very limited compared to what is needed because X-ray screening systems are still being operated by human inspectors. In this work, we present an X-ray imaging model that can separate foreground from background in baggage screening. The model can be used in two main tasks: i) Simulation of new X-ray images, where simulated images can be used in training programs for human inspectors, or can be used to enhance datasets for computer vision algorithms. ii) Detection of (threat) objects, where new algorithms can be employed to perform automated baggage inspection or to aid a user in the inspection task by showing potential threats. In our model, rather than the multiplication of foreground and background that is typically used in X-ray imaging, we propose the addition of logarithmic images. This allows the use of linear strategies to superimpose images of threat objects onto X-ray images and the use of sparse representations in order to segment target objects. In our experiments, we simulate new X-ray images of handguns, shuriken and razor blades, in which it is impossible to distinguish simulated and real X-ray images. In addition, we show in our experiments the effective detection of shuriken, razor blades and handguns using the proposed algorithm.\n
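The key idea above is that multiplicative X-ray attenuation becomes additive in the log domain, so a threat object can be superimposed linearly onto a background image. A minimal numpy sketch under that assumption (toy images and arbitrary absorption values, not the paper's simulation pipeline):

```python
import numpy as np

def superimpose_log(background, foreground, eps=1e-6):
    """Composite two X-ray transmission images (values in (0, 1]) by
    adding their logarithms, i.e. I = I_bg * I_fg in the linear domain."""
    log_bg = np.log(np.clip(background, eps, 1.0))
    log_fg = np.log(np.clip(foreground, eps, 1.0))
    return np.exp(log_bg + log_fg)

# Toy example: a strongly absorbing disc composited onto a "bag" image.
bag = 0.2 + 0.8 * np.random.rand(128, 128)
obj = np.ones((128, 128))
yy, xx = np.mgrid[:128, :128]
obj[(yy - 64) ** 2 + (xx - 64) ** 2 < 20 ** 2] = 0.3
composite = superimpose_log(bag, obj)
print(composite.min(), composite.max())
```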
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Threat Objects Detection in X-ray Images Using an Active Vision Approach.\n \n \n \n \n\n\n \n Riffo, V.; Flores, S.; and Mery, D.\n\n\n \n\n\n\n Journal of Nondestructive Evaluation, 36(3): 44. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"ThreatPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery2017:JNDE-Active,\nauthor="Riffo, Vladimir\nand Flores, Sebastian\nand Mery, Domingo",\ntitle="Threat Objects Detection in X-ray Images Using an Active Vision Approach",\njournal="Journal of Nondestructive Evaluation",\nyear="2017",\nvolume="36",\nnumber="3",\npages="44",\nabstract="X-ray testing for baggage inspection has been increasingly used at airports, reducing the risk of terrorist crimes and attacks. Nevertheless, this task is still being carried out by human inspectors and with limited technological support. The technology that is being used is not always effective, as it depends mainly on the position of the object of interest, occlusion, and the accumulated experience of the inspector. Due to this problem, we have developed an approach that inspects X-ray images using active vision in order to automatically detect objects that represent a threat. Our method includes three steps: detection of potential threat objects in single views based on the similarity of features and spatial distribution; estimation of the best-next-view using Q-learning; and elimination of false alarms based on multiple view constraints. We tested our algorithm on X-ray images that included handguns and razor blades. In the detection of handguns we registered good results for recall and precision (Re = 67{\\%}, Pr = 83{\\%}) along with a high performance in the detection of razor blades (Re = 82{\\%}, Pr = 100{\\%}) taking into consideration 360 inspections in each case. Our results indicate that non-destructive inspection actively using X-ray images leads to more effective object detection in complex environments, and helps to offset certain levels of occlusion and the internal disorder of baggage.",\nissn="1573-4862",\ndoi="10.1007/s10921-017-0419-3",\nurl="http://dx.doi.org/10.1007/s10921-017-0419-3"\n}\n\n
\n
\n\n\n
\n X-ray testing for baggage inspection has been increasingly used at airports, reducing the risk of terrorist crimes and attacks. Nevertheless, this task is still being carried out by human inspectors and with limited technological support. The technology that is being used is not always effective, as it depends mainly on the position of the object of interest, occlusion, and the accumulated experience of the inspector. Due to this problem, we have developed an approach that inspects X-ray images using active vision in order to automatically detect objects that represent a threat. Our method includes three steps: detection of potential threat objects in single views based on the similarity of features and spatial distribution; estimation of the best-next-view using Q-learning; and elimination of false alarms based on multiple view constraints. We tested our algorithm on X-ray images that included handguns and razor blades. In the detection of handguns we registered good results for recall and precision (Re = 67%, Pr = 83%) along with a high performance in the detection of razor blades (Re = 82%, Pr = 100%) taking into consideration 360 inspections in each case. Our results indicate that non-destructive inspection actively using X-ray images leads to more effective object detection in complex environments, and helps to offset certain levels of occlusion and the internal disorder of baggage.\n
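A minimal sketch of the tabular Q-learning step used for best-next-view estimation. The viewpoint encoding, the epsilon-greedy policy, and the stand-in reward are illustrative assumptions; in the paper the reward reflects how much a new view improves detection.

```python
import numpy as np

def q_update(Q, s, a, r, s_next, alpha=0.1, gamma=0.9):
    """One Q-learning step: Q(s,a) += alpha * (r + gamma*max_a' Q(s',a') - Q(s,a))."""
    Q[s, a] += alpha * (r + gamma * Q[s_next].max() - Q[s, a])

n_views = 12                      # states/actions: discretized viewpoints
Q = np.zeros((n_views, n_views))
rng = np.random.default_rng(0)
s = 0
for _ in range(1000):
    # epsilon-greedy choice of the next view to inspect
    a = int(rng.integers(n_views)) if rng.random() < 0.1 else int(Q[s].argmax())
    r = rng.random()              # stand-in for the detector's score gain
    q_update(Q, s, a, r, a)       # the chosen view becomes the next state
    s = a
print("best next view from viewpoint 0:", int(Q[0].argmax()))
```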
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The impact of MEG source reconstruction method on source-space connectivity estimation: A comparison between minimum-norm solution and beamforming.\n \n \n \n \n\n\n \n Hincapie, A.; Kujala, J.; Mattout, J.; Pascarella, A.; Daligault, S.; Delpuech, C.; Mery, D.; Cosmelli, D.; and Jerbi, K.\n\n\n \n\n\n\n Neuroimage, 156(August): 29-42. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery2017:Neuroimage,\n  title={The impact of MEG source reconstruction method on source-space connectivity estimation: A comparison between minimum-norm solution and beamforming},\nauthor={Hincapie, A.S. and Kujala, J. and Mattout, J. and Pascarella, A. and Daligault, S. and Delpuech, C. and Mery, D. and Cosmelli, D. and Jerbi, K.},\nyear = 2017,\njournal = {Neuroimage},\n  publisher={Elsevier},\nvolume = {156},\nnumber = {August},\npages = {29-42},\nurl = {http://dx.doi.org/10.1016/j.neuroimage.2017.04.038},\ndoi = {10.1016/j.neuroimage.2017.04.038}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Modelo pionero de entrenamiento en trauma vascular impreso en 3D en base a imágenes de pacientes reales: un trabajo interdisciplinario de simulación en educación quirúrgica.\n \n \n \n\n\n \n Achurra, P.; Mondragón, G.; Caro, I.; Figueroa, D.; Marine, L.; Mery, D.; and Martínez, J.\n\n\n \n\n\n\n Investigación en Educación Médica, 6(22): 133-134. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery2017:3DModel,\n  title={Modelo pionero de entrenamiento en trauma vascular impreso en 3D en base a im{\\'a}genes de pacientes reales: un trabajo interdisciplinario de simulaci{\\'o}n en educaci{\\'o}n quir{\\'u}rgica},\n  author={Achurra, Pablo and Mondrag{\\'o}n, Germ{\\'a}n and Caro, Iv{\\'a}n and Figueroa, Daniela and Marine, Leopoldo and Mery, Domingo and Mart{\\'i}nez, Jorge},\n  journal={Investigaci{\\'o}n en Educaci{\\'o}n M{\\'e}dica},\n  volume={6},\n  number={22},\n  pages={133-134},\n  year={2017},\n  publisher={Elsevier}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Face Recognition Using Sparse Fingerprint Classification Algorithm.\n \n \n \n \n\n\n \n Larrain, T.; Bernhard, J.; Mery, D.; and Bowyer, K.\n\n\n \n\n\n\n IEEE Transactions on Information Forensics and Security, 12(7): 1646-1657. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"FacePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery2017:IEEE-TIFS,\n  title={Face Recognition Using Sparse Fingerprint Classification Algorithm},\nauthor={Larrain, T. and Bernhard, J. and Mery, D. and Bowyer, K.}, \njournal = {IEEE Transactions on Information Forensics and Security},\nvolume = 12,\nnumber = 7,\npages = {1646-1657},\n  year = 2017,\n  abstract = {Unconstrained face recognition is still an open problem as state-of-the-art algorithms have not yet reached high recognition performance in real-world environments. This paper addresses this problem by proposing a new approach called Sparse Fingerprint Classification Algorithm (SFCA). In the training phase, for each enrolled subject, a grid of patches is extracted from each subject's face images in order to construct representative dictionaries. In the testing phase, a grid is extracted from the query image and every patch is transformed into a binary sparse representation using the dictionary, creating a fingerprint of the face. The binary coefficients vote for their corresponding classes and the maximum-vote class decides the identity of the query image. Experiments were carried out on seven widely-used face databases. The results demonstrate that when the size of the dataset is small or medium (i.e., the number of subjects is not greater than one hundred), SFCA is able to deal with a larger degree of variability in ambient lighting, pose, expression, occlusion, face size, and distance from the camera than other current state-of-the-art algorithms.},\n  url = {http://ieeexplore.ieee.org/document/7875165/},\n  doi = {10.1109/TIFS.2017.2680403}\n}\n\n\n\n
\n
\n\n\n
\n Unconstrained face recognition is still an open problem as state-of-the-art algorithms have not yet reached high recognition performance in real-world environments. This paper addresses this problem by proposing a new approach called Sparse Fingerprint Classification Algorithm (SFCA). In the training phase, for each enrolled subject, a grid of patches is extracted from each subject's face images in order to construct representative dictionaries. In the testing phase, a grid is extracted from the query image and every patch is transformed into a binary sparse representation using the dictionary, creating a fingerprint of the face. The binary coefficients vote for their corresponding classes and the maximum-vote class decides the identity of the query image. Experiments were carried out on seven widely-used face databases. The results demonstrate that when the size of the dataset is small or medium (i.e., the number of subjects is not greater than one hundred), SFCA is able to deal with a larger degree of variability in ambient lighting, pose, expression, occlusion, face size, and distance from the camera than other current state-of-the-art algorithms.\n
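A simplified sketch of the dictionary-and-voting idea behind SFCA: each query patch is sparsely coded over a dictionary of training patches, and the selected atoms vote for their subject. Two deliberate simplifications: the paper binarizes the sparse codes, whereas here votes are weighted by coefficient magnitude, and all sizes are toy assumptions.

```python
import numpy as np
from sklearn.linear_model import orthogonal_mp

def sparse_vote(patch, dictionary, atom_labels, k=5):
    """Code one face patch with OMP; nonzero atoms vote for their subject."""
    coef = orthogonal_mp(dictionary, patch, n_nonzero_coefs=k)
    votes = np.zeros(atom_labels.max() + 1)
    for idx in np.flatnonzero(coef):
        votes[atom_labels[idx]] += abs(coef[idx])
    return votes

# Toy setup: 200 unit-norm atoms (patch prototypes) from 10 subjects.
rng = np.random.default_rng(1)
D = rng.standard_normal((64, 200))
D /= np.linalg.norm(D, axis=0)
labels = np.repeat(np.arange(10), 20)          # 20 atoms per subject
query_patches = rng.standard_normal((64, 7))   # grid of patches from one face

total = sum(sparse_vote(query_patches[:, j], D, labels) for j in range(7))
print("predicted subject:", int(total.argmax()))
```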
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic Defect Recognition in X-ray Testing using Computer Vision.\n \n \n \n \n\n\n \n Mery, D.; and Arteta, C.\n\n\n \n\n\n\n In 2017 IEEE Winter Conference on Applications of Computer Vision (WACV2017), 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Mery2017:WACV, \nauthor={Mery, D. and Arteta, C.}, \nbooktitle={2017 IEEE Winter Conference on Applications of Computer Vision (WACV2017)},\ntitle={Automatic Defect Recognition in X-ray Testing using Computer Vision}, \nyear={2017},\nabstract = {To ensure safety in the construction of important metallic components for roadworthiness, it is necessary to check every component thoroughly using non-destructive testing. In recent decades, X-ray testing has been adopted as the principal non-destructive testing method to identify defects within a component which are undetectable to the naked eye. Nowadays, modern computer vision techniques, such as deep learning and sparse representations, are opening new avenues in automatic object recognition in optical images. These techniques have been broadly used in object and texture recognition by the computer vision community with promising results in optical images. However, a comprehensive evaluation in X-ray testing is required. In this paper, we release a new dataset containing around 47,500 cropped X-ray images of 32 x 32 pixels, with and without defects, in automotive components. Using this dataset, we evaluate and compare 24 computer vision techniques including deep learning, sparse representations, local descriptors and texture features, among others. We show in our experiments that the best performance was achieved by a simple LBP descriptor with a SVM-linear classifier obtaining 97\\% precision and 94\\% recall. We believe that the methodology presented could be used in similar projects that have to deal with automated detection of defects.},\nurl = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2017-WACV.pdf},\n}\n\n\n\n
\n
\n\n\n
\n To ensure safety in the construction of important metallic components for roadworthiness, it is necessary to check every component thoroughly using non-destructive testing. In recent decades, X-ray testing has been adopted as the principal non-destructive testing method to identify defects within a component which are undetectable to the naked eye. Nowadays, modern computer vision techniques, such as deep learning and sparse representations, are opening new avenues in automatic object recognition in optical images. These techniques have been broadly used in object and texture recognition by the computer vision community with promising results in optical images. However, a comprehensive evaluation in X-ray testing is required. In this paper, we release a new dataset containing around 47,500 cropped X-ray images of 32 x 32 pixels, with and without defects, in automotive components. Using this dataset, we evaluate and compare 24 computer vision techniques including deep learning, sparse representations, local descriptors and texture features, among others. We show in our experiments that the best performance was achieved by a simple LBP descriptor with a SVM-linear classifier obtaining 97% precision and 94% recall. We believe that the methodology presented could be used in similar projects that have to deal with automated detection of defects.\n
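The winning baseline, a uniform LBP histogram fed to a linear SVM, is easy to reproduce in outline. A sketch with random stand-in patches (in practice the 32 x 32 GDXray crops would replace them):

```python
import numpy as np
from skimage.feature import local_binary_pattern
from sklearn.svm import LinearSVC

def lbp_histogram(patch, P=8, R=1):
    """Uniform LBP code histogram of a grayscale patch (P+2 bins)."""
    codes = local_binary_pattern(patch, P, R, method="uniform")
    hist, _ = np.histogram(codes, bins=P + 2, range=(0, P + 2), density=True)
    return hist

# Random 32x32 patches standing in for defect / no-defect crops.
rng = np.random.default_rng(0)
X = rng.integers(0, 256, (200, 32, 32)).astype(np.uint8)
y = rng.integers(0, 2, 200)
feats = np.array([lbp_histogram(p) for p in X])
clf = LinearSVC().fit(feats[:150], y[:150])
print("toy accuracy:", clf.score(feats[150:], y[150:]))
```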
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Modern Computer Vision Techniques for X-ray Testing in Baggage Inspection.\n \n \n \n \n\n\n \n Mery, D.; Svec, E.; Arias, M.; Riffo, V.; Saavedra, J.; and Banerjee, S.\n\n\n \n\n\n\n IEEE Transactions on Systems, Man, and Cybernetics: Systems, 47(4): 682-692. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"ModernPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery2016:IEEE-SMCMa,\n  title={Modern Computer Vision Techniques for X-ray Testing in Baggage Inspection},\nauthor={Mery, D. and Svec, E. and Arias, M. and Riffo, V. and Saavedra, J.M. and Banerjee, S.}, \njournal = {IEEE Transactions on Systems, Man, and Cybernetics: Systems},\n  volume = 47,\n  number = 4,\n  pages = {682-692},\n  year = 2017,\n  abstract = {X-ray screening systems have been used to safeguard environments in which access control is of paramount importance. Security checkpoints have been placed at the entrances to many public places to detect prohibited items such as handguns and explosives. Generally, human operators are in charge of these tasks as automated recognition in baggage inspection is still far from perfect. Research and development on X-ray testing is, however, exploring new approaches based on computer vision that can be used to aid human operators. This paper attempts to make a contribution to the field of object recognition in X-ray testing by evaluating different computer vision strategies that have been proposed in recent years. We tested ten approaches. They are based on bag of words, sparse representations, deep learning and classic pattern recognition schemes among others. For each method, we i) present a brief explanation, ii) show experimental results on the same database, and iii) provide concluding remarks discussing pros and cons of each method. In order to make fair comparisons, we define a common experimental protocol based on training, validation and testing data (selected from the public GDXray database). The effectiveness of each method was tested in the recognition of three different threat objects: handguns, shuriken (ninja stars) and razor blades. In our experiments, the highest recognition rate was achieved by methods based on visual vocabularies and deep features with more than 95\\% accuracy. We strongly believe that it is possible to design an automated aid for the human inspection task using these computer vision algorithms.},\n  doi = {10.1109/TSMC.2016.2628381},\n  url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=7775025&tag=1}\n}\n\n\n\n
\n
\n\n\n
\n X-ray screening systems have been used to safeguard environments in which access control is of paramount importance. Security checkpoints have been placed at the entrances to many public places to detect prohibited items such as handguns and explosives. Generally, human operators are in charge of these tasks as automated recognition in baggage inspection is still far from perfect. Research and development on X-ray testing is, however, exploring new approaches based on computer vision that can be used to aid human operators. This paper attempts to make a contribution to the field of object recognition in X-ray testing by evaluating different computer vision strategies that have been proposed in recent years. We tested ten approaches. They are based on bag of words, sparse representations, deep learning and classic pattern recognition schemes among others. For each method, we i) present a brief explanation, ii) show experimental results on the same database, and iii) provide concluding remarks discussing pros and cons of each method. In order to make fair comparisons, we define a common experimental protocol based on training, validation and testing data (selected from the public GDXray database). The effectiveness of each method was tested in the recognition of three different threat objects: handguns, shuriken (ninja stars) and razor blades. In our experiments, the highest recognition rate was achieved by methods based on visual vocabularies and deep features with more than 95% accuracy. We strongly believe that it is possible to design an automated aid for the human inspection task using these computer vision algorithms.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Object recognition in X-ray testing using an efficient search algorithm in multiple views.\n \n \n \n \n\n\n \n Mery, D.; Riffo, V.; Zuccar, I.; and Pieringer, C.\n\n\n \n\n\n\n Insight - Non-Destructive Testing and Condition Monitoring, 59(2): 85-92. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"ObjectPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery2017:Insight,\n  title={Object recognition in X-ray testing using an efficient search algorithm in multiple views},\nauthor={Mery, D. and Riffo, V. and Zuccar, I. and Pieringer, C.}, \n  journal={Insight - Non-Destructive Testing and Condition Monitoring},\n  volume=59,\n  number = 2,\n  pages  ={85-92},\n  year = 2017,\n  abstract = {In order to reduce the security risk of a commercial aircraft, passengers are not allowed to take certain items in their carry-on baggage. For this reason, human operators are trained to detect prohibited items using a manually controlled baggage screening process. In this paper, we propose the use of an automated method based on multiple X-ray views to recognize certain regular objects with highly defined shapes and sizes. The method consists of two steps: `monocular analysis', to obtain possible detections in each view of a sequence, and `multiple view analysis', to recognize the objects of interest using matchings in all views. The search for matching candidates is efficiently performed using a lookup table that is computed off-line. In order to illustrate the effectiveness of the proposed method, experimental results on recognizing regular objects (clips, springs and razor blades) in pen cases are shown, achieving high precision and recall ($Pr = 95.7\\%$, $Re = 92.5\\%$) for 120 objects. We believe that it would be possible to design an automated aid in a target detection task using the proposed algorithm.},\n    url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2017-Insight-MultiX-ray.pdf},\n}\n\n\n\n
\n
\n\n\n
\n In order to reduce the security risk of a commercial aircraft, passengers are not allowed to take certain items in their carry-on baggage. For this reason, human operators are trained to detect prohibited items using a manually controlled baggage screening process. In this paper, we propose the use of an automated method based on multiple X-ray views to recognize certain regular objects with highly defined shapes and sizes. The method consists of two steps: `monocular analysis', to obtain possible detections in each view of a sequence, and `multiple view analysis', to recognize the objects of interest using matchings in all views. The search for matching candidates is efficiently performed using a lookup table that is computed off-line. In order to illustrate the effectiveness of the proposed method, experimental results on recognizing regular objects (clips, springs and razor blades) in pen cases are shown, achieving high precision and recall (Pr = 95.7%, Re = 92.5%) for 120 objects. We believe that it would be possible to design an automated aid in a target detection task using the proposed algorithm.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (11)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Adaptive Image Segmentation Based on Histogram Transition Zone Analysis.\n \n \n \n \n\n\n \n Gonzalez-Acuña, R.; Mery, D.; and Klette, R.\n\n\n \n\n\n\n International Journal of Fuzzy Logic and Intelligent Systems, 16(4): 299-307. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery2016:IJFIS,\n  title={Adaptive Image Segmentation Based on Histogram Transition Zone Analysis},\nauthor={Gonzalez-Acu\\~na, R. and  Mery, D. and Klette, R.}, \njournal = {International Journal of Fuzzy Logic and Intelligent Systems},\n  volume={16},\n  number={4},\n  pages={299-307},\n    year = 2016,\n  doi = {10.5391/IJFIS.2016.16.4.299},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Other-Journals/2016-IJFIS.pdf}\n  }\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Automatic Visual Inspection: An Approach with Multi-Instance Learning.\n \n \n \n\n\n \n Mera, C.; Orozco-Alzate, M.; Branch, J.; and Mery, D.\n\n\n \n\n\n\n Computers in Industry, 83(2016): 46-54. 2016.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery2016:ComIndustry,\n  title={Automatic Visual Inspection: An Approach with Multi-Instance Learning},\nauthor={Mera, C. and Orozco-Alzate, M. and Branch, J. and Mery, D.}, \n  journal={Computers in Industry},\n  volume=83,\n  number = 2016,\n  pages  ={46-54},\n  year = 2016,\n  publisher={Elsevier},\n  doi = {10.1016/j.compind.2016.09.002},\n  abstract = {One of the industrial applications of computer vision is automatic visual inspection. In the last decade, standard supervised learning methods have been used to detect defects in different kinds of products. These methods are trained with a set of images where every image has to be manually segmented and labeled by experts in the application domain. These manual segmentations require a large number of high-quality pixel-level delineations, which can be time-consuming and often difficult. Multi-Instance Learning (MIL), in contrast to standard supervised classifiers, avoids this task and can, therefore, be trained with weakly labeled images. In this paper, we propose an approach for automatic visual inspection that uses MIL for defect detection. The approach has been tested with data from three artificial benchmark datasets and three real-world industrial scenarios: inspection of artificial teeth, weld defect detection and fishbone detection. Results show that the proposed approach can be used with weakly labeled images for defect detection on automatic visual inspection systems. This approach is able to increase the area under the receiver-operating characteristic curve (AUC) by up to 6.3\\% compared with the naive MIL approach of propagating the bag labels.\n}\n}\n\n\n
\n
\n\n\n
\n One of the industrial applications of computer vision is automatic visual inspection. In the last decade, standard supervised learning methods have been used to detect defects in different kinds of products. These methods are trained with a set of images where every image has to be manually segmented and labeled by experts in the application domain. These manual segmentations require a large number of high-quality pixel-level delineations, which can be time-consuming and often difficult. Multi-Instance Learning (MIL), in contrast to standard supervised classifiers, avoids this task and can, therefore, be trained with weakly labeled images. In this paper, we propose an approach for automatic visual inspection that uses MIL for defect detection. The approach has been tested with data from three artificial benchmark datasets and three real-world industrial scenarios: inspection of artificial teeth, weld defect detection and fishbone detection. Results show that the proposed approach can be used with weakly labeled images for defect detection on automatic visual inspection systems. This approach is able to increase the area under the receiver-operating characteristic curve (AUC) by up to 6.3% compared with the naive MIL approach of propagating the bag labels. \n
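For orientation, this is the naive MIL baseline the abstract compares against: propagate each bag's label to all of its instances, train an ordinary instance classifier, and score a test bag by its maximum instance probability (a bag is positive if any region looks defective). Feature sizes and the synthetic bags are toy assumptions.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def naive_mil_fit_predict(bags, bag_labels, test_bags):
    """Naive MIL: bag labels are copied onto instances for training;
    a test bag is scored by its most defect-like instance."""
    X = np.vstack(bags)
    y = np.concatenate([[lab] * len(b) for lab, b in zip(bag_labels, bags)])
    clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
    return [clf.predict_proba(b)[:, 1].max() for b in test_bags]

# Toy bags of 5 instances (e.g., region descriptors) with 8 features each.
rng = np.random.default_rng(0)
bags = [rng.standard_normal((5, 8)) + (i % 2) for i in range(20)]
labels = [i % 2 for i in range(20)]
print(naive_mil_fit_predict(bags[:16], labels[:16], bags[16:]))
```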
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Object Recognition in X-ray testing Using Adaptive Sparse Representations.\n \n \n \n \n\n\n \n Mery, D.; Svec, E.; and Arias, M.\n\n\n \n\n\n\n Journal of Nondestructive Evaluation, 35(3): 1-19. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"ObjectPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery2016:JNDE-XASR,\n  title={Object Recognition in X-ray testing Using Adaptive Sparse Representations},\n  author={Mery, D. and Svec, E. and Arias, M.},\n  journal={Journal of Nondestructive Evaluation},\n  volume={35},\n  number={3},\n  pages={1-19},\n  year={2016},\n  publisher={Springer},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2016-JNDE-XASR.pdf},\n  abstract = {In recent years, X-ray screening systems have been used to safeguard environments in which access control is of paramount importance. Security checkpoints have been placed at the entrances to many public places to detect prohibited items such as handguns and explosives. Human operators complete these tasks because automated recognition in baggage inspection is far from perfect. Research and development on X-ray testing is, however, ongoing into new approaches that can be used to aid human operators. This paper attempts to make a contribution to the field of object recognition by proposing a new approach called Adaptive Sparse Representation (XASR+). It consists of two stages: learning and testing. In the learning stage, for each object of the training dataset, several patches are extracted from its X-ray images in order to construct representative dictionaries. A stop-list is used to remove very common words of the dictionaries. In the testing stage, test patches of the test image are extracted, and for each test patch a dictionary is built concatenating the `best' representative dictionary of each object. Using this adapted dictionary, each test patch is classified following the Sparse Representation Classification (SRC) methodology. Finally, the test image is classified by patch voting. Thus, our approach is able to deal with less constrained conditions including some contrast variability, pose, intra-class variability, size of the image and focal distance. We tested the effectiveness of our method for the detection of four different objects. In our experiments, the recognition rate was more than 97\\% in each class, and more than 94\\% if the object is occluded less than 15\\%. Results show that XASR+ deals well with unconstrained conditions, outperforming various representative methods in the literature.\n}\n}\n\n
\n
\n\n\n
\n In recent years, X-ray screening systems have been used to safeguard environments in which access control is of paramount importance. Security checkpoints have been placed at the entrances to many public places to detect prohibited items such as handguns and explosives. Human operators complete these tasks because automated recognition in baggage inspection is far from perfect. Research and development on X-ray testing is, however, ongoing into new approaches that can be used to aid human operators. This paper attempts to make a contribution to the field of object recognition by proposing a new approach called Adaptive Sparse Representation (XASR+). It consists of two stages: learning and testing. In the learning stage, for each object of the training dataset, several patches are extracted from its X-ray images in order to construct representative dictionaries. A stop-list is used to remove very common words of the dictionaries. In the testing stage, test patches of the test image are extracted, and for each test patch a dictionary is built concatenating the `best' representative dictionary of each object. Using this adapted dictionary, each test patch is classified following the Sparse Representation Classification (SRC) methodology. Finally, the test image is classified by patch voting. Thus, our approach is able to deal with less constrained conditions including some contrast variability, pose, intra-class variability, size of the image and focal distance. We tested the effectiveness of our method for the detection of four different objects. In our experiments, the recognition rate was more than 97% in each class, and more than 94% if the object is occluded less than 15%. Results show that XASR+ deals well with unconstrained conditions, outperforming various representative methods in the literature. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n System and method for nutrition analysis using food image recognition.\n \n \n \n \n\n\n \n Ortiz, C.; and Mery, D.\n\n\n \n\n\n\n May 24 2016.\n US Patent 9,349,297\n\n\n\n
\n\n\n\n \n \n \"SystemPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Mery2016:Patent,\n  title     = {System and method for nutrition analysis using food image recognition},\n  author    = {Ortiz, C.A. and Mery, D.},\n  url       = {http://www.google.com/patents/US9349297},\n  year      = {2016},\n  month     = {May 24},\n  publisher = {Google Patents},\n  note      = {US Patent 9,349,297},\n  abstract  = {The present disclosure provides a system and method for determining a nutritional value of a food item. The system and method utilizes a food container as a model to adjust various types of distortions that exist in an instant image of the food container that retains the food item. The instant image may be compared to the model image of the food container to correct any distortions. The food container includes a boundary which has a predetermined color. The predetermined color of the boundary can be used to adjust the color configuration of the instant image, thereby increasing the accuracy of the food identification.}\n}\n\n
\n
\n\n\n
\n The present disclosure provides a system and method for determining a nutritional value of a food item. The system and method utilizes a food container as a model to adjust various types of distortions that exist in an instant image of the food container that retains the food item. The instant image may be compared to the model image of the food container to correct any distortions. The food container includes a boundary which has a predetermined color. The predetermined color of the boundary can be used to adjust the color configuration of the instant image, thereby increasing the accuracy of the food identification.\n
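A minimal sketch of the color-adjustment idea: because the container boundary's true color is known, a per-channel gain can map the observed boundary color back to its reference value. The simple gain model and all values below are illustrative assumptions, not the patented procedure itself.

```python
import numpy as np

def correct_colors(image, boundary_observed, boundary_true):
    """Scale each RGB channel so the observed boundary color maps to the
    known reference color. Images are float arrays in [0, 1]."""
    gain = np.asarray(boundary_true) / np.maximum(np.asarray(boundary_observed), 1e-6)
    return np.clip(image * gain, 0.0, 1.0)

# Toy example: a photo with a color cast; the boundary should be mid-gray.
img = (np.random.rand(64, 64, 3) * np.array([0.8, 0.9, 1.1])).clip(0, 1)
observed = img[0, 0]          # stand-in for the measured boundary color
corrected = correct_colors(img, observed, (0.5, 0.5, 0.5))
print(corrected.mean(axis=(0, 1)))
```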
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On accuracy estimation and comparison of results in biometric research.\n \n \n \n \n\n\n \n Mery, D.; Zhao, Y.; and Bowyer, K.\n\n\n \n\n\n\n In Proceedings of the 8th IEEE International Conference on Biometrics: Theory, Applications, and Systems (BTAS 2016), 2016. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Mery2016:BTAS, \nauthor={Mery, D. and Zhao, Y. and Bowyer, K. }, \nbooktitle={Proceedings of the 8th IEEE International Conference on Biometrics: Theory, Applications, and Systems (BTAS 2016)}, \ntitle={On accuracy estimation and comparison of results in biometric research}, \nyear={2016},\nurl = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2016-BTAS.pdf},\nabstract = {The estimated accuracy of an algorithm is the most important element of the typical biometrics research publication. Comparisons between algorithms are commonly made based on estimated accuracies reported in different publications. However, even when the same dataset is used in two publications, the publications rarely use the same protocol for estimating algorithm accuracy. Using the example problems of face recognition, expression recognition and gender classification, we show that the variation in estimated performance on the same dataset across different protocols can be enormous. Based on these results, we make recommendations for how to obtain performance estimates that allow reliable comparison between algorithms.},\ndoi = {10.1109/BTAS.2016.7791188}\n}\n\n
\n
\n\n\n
\n The estimated accuracy of an algorithm is the most important element of the typical biometrics research publication. Comparisons between algorithms are commonly made based on estimated accuracies reported in different publications. However, even when the same dataset is used in two publications, the publications rarely use the same protocol for estimating algorithm accuracy. Using the example problems of face recognition, expression recognition and gender classification, we show that the variation in estimated performance on the same dataset across different protocols can be enormous. Based on these results, we make recommendations for how to obtain performance estimates that allow reliable comparison between algorithms.\n
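The paper's warning is easy to reproduce in miniature: the same data and the same classifier, evaluated under different protocols, yield noticeably different accuracy estimates. A sketch using scikit-learn's digits data as a stand-in for the biometric datasets:

```python
import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.svm import LinearSVC

X, y = load_digits(return_X_y=True)
clf = LinearSVC(max_iter=5000)
# Three "protocols" differing only in the train/test split size.
for train_frac in (0.2, 0.5, 0.8):
    cv = ShuffleSplit(n_splits=10, train_size=train_frac, random_state=0)
    scores = cross_val_score(clf, X, y, cv=cv)
    print(f"train fraction {train_frac:.1f}: "
          f"accuracy {scores.mean():.3f} +/- {scores.std():.3f}")
```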
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Action Recognition in Video Using Sparse Coding and Relative Features.\n \n \n \n \n\n\n \n Alfaro, A.; Mery, D.; and Soto, A.\n\n\n \n\n\n\n In Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR2016), 2016. \n \n\n\n\n
\n\n\n\n \n \n \"ActionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Mery2016:CVPR, \nauthor={Alfaro, A. and Mery, D. and Soto, A.}, \nbooktitle={Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR2016)}, \ntitle={Action Recognition in Video Using Sparse Coding and Relative Features}, \nyear={2016},\nurl = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2016-CVPR-Anali.pdf},\nabstract = {This work presents an approach to category-based action recognition in video using sparse coding techniques. The proposed approach includes two main contributions: i) A new method to handle intra-class variations by decomposing each video into a reduced set of representative atomic action acts or key-sequences, and ii) A new video descriptor, ITRA: Inter-Temporal Relational Act Descriptor, that exploits the power of comparative reasoning to capture relative similarity relations among key-sequences. In terms of the method to obtain key-sequences, we introduce a loss function that, for each video, leads to the identification of a sparse set of representative key-frames capturing both relevant particularities arising in the input video and relevant generalities arising in the complete class collection. In terms of the method to obtain the ITRA descriptor, we introduce a novel scheme to quantify relative intra- and inter-class similarities among local temporal patterns arising in the videos. The resulting ITRA descriptor proves highly effective at discriminating among action categories. As a result, the proposed approach reaches remarkable action recognition performance on several popular benchmark datasets, outperforming alternative state-of-the-art techniques by a large margin.},\ndoi = {10.1109/CVPR.2016.294}}\n\n
\n
\n\n\n
\n This work presents an approach to category-based action recognition in video using sparse coding techniques. The proposed approach includes two main contributions: i) A new method to handle intra-class variations by decomposing each video into a reduced set of representative atomic action acts or key-sequences, and ii) A new video descriptor, ITRA: Inter-Temporal Relational Act Descriptor, that exploits the power of comparative reasoning to capture relative similarity relations among key-sequences. In terms of the method to obtain key-sequences, we introduce a loss function that, for each video, leads to the identification of a sparse set of representative key-frames capturing both relevant particularities arising in the input video and relevant generalities arising in the complete class collection. In terms of the method to obtain the ITRA descriptor, we introduce a novel scheme to quantify relative intra- and inter-class similarities among local temporal patterns arising in the videos. The resulting ITRA descriptor proves highly effective at discriminating among action categories. As a result, the proposed approach reaches remarkable action recognition performance on several popular benchmark datasets, outperforming alternative state-of-the-art techniques by a large margin.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n MEG Connectivity and Power Detections with Minimum Norm Estimates Require Different Regularization Parameters.\n \n \n \n \n\n\n \n Hincapie, A.; Kujala, J.; Mattout, J.; Daligault, S.; Delpuech, C.; Mery, D.; Cosmelli, D.; and Jerbi, K.\n\n\n \n\n\n\n Computational intelligence and neuroscience, 2016. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"MEGPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery2016:Hindawi,\n  author={Hincapie, A.-S. and Kujala, J. and Mattout, J. and Daligault, S. and Delpuech, C. and Mery, D. and Cosmelli, D. and Jerbi, K.},\n  title={MEG Connectivity and Power Detections with Minimum Norm Estimates Require Different Regularization Parameters},\n  journal={Computational intelligence and neuroscience},\n  volume={2016},\n  year={2016},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2016-Hindawi.pdf},\nabstract = {Minimum Norm Estimation (MNE) is an inverse solution method widely used to reconstruct the source time series that underlie magnetoencephalography (MEG) data. MNE addresses the ill-posed nature of MEG source estimation through regularization (e.g., Tikhonov regularization). Selecting the best regularization parameter is a critical step. Generally, once set, it is common practice to keep the same coefficient throughout a study. However, it is not yet known whether the optimal lambda for spectral power analysis of MEG source data coincides with the optimal regularization for source-level oscillatory coupling analysis. We addressed this question via extensive Monte-Carlo simulations of MEG data, where we generated 21,600 configurations of pairs of coupled sources with varying sizes, signal-to-noise ratio (SNR), and coupling strengths. Then, we searched for the Tikhonov regularization coefficients (lambda) that maximize detection performance for (a) power and (b) coherence. For coherence, the optimal lambda was two orders of magnitude smaller than the best lambda for power. Moreover, we found that the spatial extent of the interacting sources and SNR, but not the extent of coupling, were the main parameters affecting the best choice for lambda. Our findings suggest using less regularization when measuring oscillatory coupling compared to power estimation.}\n}\n\n
\n
\n\n\n
\n Minimum Norm Estimation (MNE) is an inverse solution method widely used to reconstruct the source time series that underlie magnetoencephalography (MEG) data. MNE addresses the ill-posed nature of MEG source estimation through regularization (e.g., Tikhonov regularization). Selecting the best regularization parameter is a critical step. Generally, once set, it is common practice to keep the same coefficient throughout a study. However, it is not yet known whether the optimal lambda for spectral power analysis of MEG source data coincides with the optimal regularization for source-level oscillatory coupling analysis. We addressed this question via extensive Monte-Carlo simulations of MEG data, where we generated 21,600 configurations of pairs of coupled sources with varying sizes, signal-to-noise ratio (SNR), and coupling strengths. Then, we searched for the Tikhonov regularization coefficients (lambda) that maximize detection performance for (a) power and (b) coherence. For coherence, the optimal lambda was two orders of magnitude smaller than the best lambda for power. Moreover, we found that the spatial extent of the interacting sources and SNR, but not the extent of coupling, were the main parameters affecting the best choice for lambda. Our findings suggest using less regularization when measuring oscillatory coupling compared to power estimation.\n
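For reference, the Tikhonov-regularized minimum norm estimate discussed here has the closed form s_hat = G^T (G G^T + lambda I)^{-1} m, with G the lead field and m the sensor data. A toy numpy sketch comparing two regularization levels (lead field, source, and noise level are arbitrary assumptions):

```python
import numpy as np

def minimum_norm_estimate(G, m, lam):
    """MNE source estimate: s_hat = G.T @ inv(G @ G.T + lam * I) @ m."""
    K = G @ G.T + lam * np.eye(G.shape[0])
    return G.T @ np.linalg.solve(K, m)

rng = np.random.default_rng(0)
G = rng.standard_normal((50, 500))     # 50 sensors, 500 candidate sources
s_true = np.zeros(500)
s_true[42] = 1.0                       # a single active source
m = G @ s_true + 0.1 * rng.standard_normal(50)
for lam in (1e-2, 1e-4):
    s_hat = minimum_norm_estimate(G, m, lam)
    # Peak of the estimate; it may or may not hit index 42 in this toy setting.
    print(f"lambda={lam:g}: peak source index {int(np.abs(s_hat).argmax())}")
```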
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Quality Evaluation and Control of French Fries.\n \n \n \n \n\n\n \n Pedreschi, F.; Mery, D.; and Manrique, T.\n\n\n \n\n\n\n In Computer Vision Technology for Food Quality Evaluation, 22, pages 591-614. Ed. Da-Wen Sun, 2nd Edition, Elsevier, 2016.\n \n\n\n\n
\n\n\n\n \n \n \"QualityPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Mery2016:ElsevierChapter-a,\n  author    ={Pedreschi, F. and Mery, D. and Manrique, T.},\n  title     ={Quality Evaluation and Control of French Fries},\n  booktitle ={Computer Vision Technology for Food Quality Evaluation},\n  publisher ={Ed. Da-Wen Sun, 2nd Edition, Elsevier},\n  year      ={2016},\n  chapter   ={22},\n  pages     ={591-614},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Chapters/2016-DWSun-FrenchFrieds.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Grading of Potatoes.\n \n \n \n \n\n\n \n Pedreschi, F.; Mery, D.; and Manrique, T.\n\n\n \n\n\n\n In Computer Vision Technology for Food Quality Evaluation, 15, pages 369-384. Ed. Da-Wen Sun, 2nd Edition, Elsevier, 2016.\n \n\n\n\n
\n\n\n\n \n \n \"GradingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Mery2016:ElsevierChapter-b,\n  author    ={Pedreschi, F. and Mery, D. and Manrique, T.},\n  title     ={Grading of Potatoes},\n  booktitle ={Computer Vision Technology for Food Quality Evaluation},\n  publisher ={Ed. Da-Wen Sun, 2nd Edition, Elsevier},\n  year      ={2016},\n  chapter   ={15},\n  pages     ={369-384},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Chapters/2016-DWSun-Potatoes.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Un área en constante aprendizaje.\n \n \n \n \n\n\n \n Mery, D.\n\n\n \n\n\n\n In Especial de Biometría, El Mercurio (23/03/16). 2016.\n \n\n\n\n
\n\n\n\n \n \n \"UnPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Mery2016:ElMercurio,\n\tAuthor = {Mery, D.},\n\tTitle = {{Un \\'area en constante aprendizaje}},\n  booktitle ={ Especial de Biometr\\'ia, El Mercurio (23/03/16)},\n\turl = {http://dmery.sitios.ing.uc.cl/Prints/Other-Journals/2016-Biometria-ElMercurio.pdf},\n\tYear = {2016}}\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automated detection of threat objects using Adapted Implicit Shape Model.\n \n \n \n \n\n\n \n Riffo, V.; and Mery, D.\n\n\n \n\n\n\n IEEE Transactions on Systems, Man, and Cybernetics: Systems, 46(4): 472-482. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"AutomatedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery2016:IEEE-SMCMb,\nauthor = {Riffo, V. and Mery, D.},\ntitle = {{Automated detection of threat objects using Adapted Implicit Shape Model}},\njournal = {IEEE Transactions on Systems, Man, and Cybernetics: Systems},\n  volume={46},\n  number={4},\n  pages={472-482},\n  year={2016},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2015-IEEE-TSMCS.pdf},\n  abstract = {Baggage inspection using X-ray screening is a priority task that reduces the risk of crime and terrorist attacks. Manual detection of threat items is tedious because very few bags actually contain threat items and the process requires a high degree of concentration. An automated solution would be a welcome development in this field. We propose a methodology for automatic detection of threat objects using single X-ray images. Our approach is an adaptation of a methodology originally created for recognizing objects in photographs based on Implicit Shape Models. Our detection method uses a visual vocabulary and an occurrence structure generated from a training dataset that contains representative X-ray images of the threat object to be detected. Our method can be applied to single views of grayscale X-ray images obtained using a single energy acquisition system. We tested the effectiveness of our method for the detection of three different threat objects: razor blades, shuriken (ninja stars) and handguns. The testing dataset for each threat object consisted of 200 X-ray images of bags. The true positive and false positive rates (TPR, FPR) are: (0.99, 0.02) for razor blades, (0.97, 0.06) for shuriken and (0.89, 0.17) for handguns. If other representative training datasets were utilized, we believe that our methodology could aid in the detection of other kinds of threat objects.}\n}\n\n
\n
\n\n\n
\n Baggage inspection using X-ray screening is a priority task that reduces the risk of crime and terrorist attacks. Manual detection of threat items is tedious because very few bags actually contain threat items and the process requires a high degree of concentration. An automated solution would be a welcome development in this field. We propose a methodology for automatic detection of threat objects using single X-ray images. Our approach is an adaptation of a methodology originally created for recognizing objects in photographs based on Implicit Shape Models. Our detection method uses a visual vocabulary and an occurrence structure generated from a training dataset that contains representative X-ray images of the threat object to be detected. Our method can be applied to single views of grayscale X-ray images obtained using a single energy acquisition system. We tested the effectiveness of our method for the detection of three different threat objects: razor blades, shuriken (ninja stars) and handguns. The testing dataset for each threat object consisted of 200 X-ray images of bags. The true positive and false positive rates (TPR, FPR) are: (0.99, 0.02) for razor blades, (0.97, 0.06) for shuriken and (0.89, 0.17) for handguns. If other representative training datasets were utilized, we believe that our methodology could aid in the detection of other kinds of threat objects.\n
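A much-simplified sketch of the Implicit Shape Model voting step: each visual word detected in the image casts votes for the object center at the offsets it was observed with during training, and peaks in the vote map become detection hypotheses. The vocabulary, offsets, and bin size are toy assumptions.

```python
import numpy as np

def ism_vote(keypoints, word_ids, occurrences, img_shape, bin_size=8):
    """Accumulate Hough-style votes for object centers from word occurrences."""
    H, W = img_shape
    votes = np.zeros((H // bin_size + 1, W // bin_size + 1))
    for (x, y), w in zip(keypoints, word_ids):
        for dx, dy in occurrences.get(w, []):   # center offsets learned per word
            cx, cy = x + dx, y + dy
            if 0 <= cx < W and 0 <= cy < H:
                votes[int(cy) // bin_size, int(cx) // bin_size] += 1
    return votes

# Toy vocabulary: word 3 was always seen 10 px to the left of the center.
occ = {3: [(10, 0)]}
kps = [(50, 40), (52, 41), (90, 90)]
words = [3, 3, 7]
vmap = ism_vote(kps, words, occ, img_shape=(128, 128))
print("hypothesized center bin (row, col):",
      np.unravel_index(vmap.argmax(), vmap.shape))
```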
2015 (10)
Computer Vision for X-Ray Testing.
Mery, D.
Springer, 2015.

@book{Mery2015:SpringerBook,
  author    = {Mery, D.},
  title     = {{Computer Vision for X-Ray Testing}},
  publisher = {Springer},
  year      = {2015}
}

Abstract: This accessible textbook presents an introduction to computer vision algorithms for industrially relevant applications of X-ray testing. Covering complex topics in an easy-to-understand way, without requiring any prior knowledge of the field, the book provides a concise review of the key methodologies in computer vision for solving important problems in industrial radiology. The theoretical coverage is supported by numerous examples, each of which can be tested and evaluated by the reader using a freely available Matlab toolbox and X-ray image database.
GDXray: The Database of X-ray Images for Nondestructive Testing.
Mery, D.; Riffo, V.; Zscherpel, U.; Mondragón, G.; Lillo, I.; Zuccar, I.; Lobel, H.; and Carrasco, M.
Journal of Nondestructive Evaluation, 34(4): 1-12. 2015.

@article{Mery2015:JNDE-GDXray,
  author    = {Mery, Domingo and Riffo, Vladimir and Zscherpel, Uwe and Mondrag{\'o}n, German and Lillo, Iv{\'a}n and Zuccar, Irene and Lobel, Hans and Carrasco, Miguel},
  title     = {{GDXray}: The Database of {X-ray} Images for Nondestructive Testing},
  journal   = {Journal of Nondestructive Evaluation},
  volume    = {34},
  number    = {4},
  pages     = {1-12},
  year      = {2015},
  publisher = {Springer},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2015-JNDE-GDXray.pdf}
}

Abstract: In this paper, we present a new dataset consisting of 19,407 X-ray images. The images are organized in a public database called GDXray that can be used free of charge, but for research and educational purposes only. The database includes five groups of X-ray images: castings, welds, baggage, natural objects and settings. Each group has several series, and each series several X-ray images. Most of the series are annotated or labeled. In such cases, the coordinates of the bounding boxes of the objects of interest or the labels of the images are available in standard text files. The size of GDXray is 3.5 GB and it can be downloaded from our website. We believe that GDXray represents a relevant contribution to the X-ray testing community. On the one hand, students, researchers and engineers can use these X-ray images to develop, test and evaluate image analysis and computer vision algorithms without purchasing expensive X-ray equipment. On the other hand, these images can be used as a benchmark in order to test and compare the performance of different approaches on the same data. Moreover, the database can be used in the training programs of human inspectors.
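A minimal sketch of how one series of such a database could be read, assuming the group/series folder layout the abstract describes; the file names used here (*.png images, a ground_truth.txt with one row per annotated object) are illustrative assumptions, not the documented GDXray format.

from pathlib import Path
import numpy as np
from PIL import Image

def load_series(root, group='Baggage', series='B0001'):
    # group/series folder layout as described in the abstract
    series_dir = Path(root) / group / series
    images = [np.array(Image.open(p)) for p in sorted(series_dir.glob('*.png'))]
    gt = series_dir / 'ground_truth.txt'   # hypothetical annotation file name
    # assumed row format: image index followed by a bounding box
    boxes = np.loadtxt(gt, ndmin=2) if gt.exists() else None
    return images, boxes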
On accuracy estimation in face biometric problems.
Mery, D.; Zhao, Y.; and Bowyer, K.
In Workshop on Forensics Applications of Computer Vision and Pattern Recognition, in conjunction with the International Conference on Computer Vision (ICCV2015), 2015.

@inproceedings{Mery2015:ICCV-FACV-a,
  author    = {Mery, D. and Zhao, Y. and Bowyer, K.},
  title     = {On accuracy estimation in face biometric problems},
  booktitle = {Workshop on Forensics Applications of Computer Vision and Pattern Recognition, in conjunction with International Conference on Computer Vision (ICCV2015)},
  year      = {2015},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2015-FACV-Accuracy.pdf}
}
Blur Adaptive Sparse Representation of Random Patches for Face Recognition on Blurred Images.
Heinsohn, D.; and Mery, D.
In Workshop on Forensics Applications of Computer Vision and Pattern Recognition, in conjunction with the International Conference on Computer Vision (ICCV2015), 2015.

@inproceedings{Mery2015:ICCV-FACV-b,
  author    = {Heinsohn, D. and Mery, D.},
  title     = {Blur Adaptive Sparse Representation of Random Patches for Face Recognition on Blurred Images},
  booktitle = {Workshop on Forensics Applications of Computer Vision and Pattern Recognition, in conjunction with International Conference on Computer Vision (ICCV2015)},
  year      = {2015},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2015-FACV-bASR.pdf}
}
Object Recognition in Baggage Inspection Using Adaptive Sparse Representations of X-ray Images.
Mery, D.; Svec, E.; and Arias, M.
In Proceedings of the Pacific Rim Symposium on Image and Video Technology (PSIVT 2015), 2015.

@inproceedings{Mery2015:PSIVT-XASR,
  author    = {Mery, D. and Svec, E. and Arias, M.},
  title     = {Object Recognition in Baggage Inspection Using Adaptive Sparse Representations of {X-ray} Images},
  booktitle = {Proceedings of the Pacific Rim Symposium on Image and Video Technology (PSIVT 2015)},
  year      = {2015},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2015-PSIVT-XASR.pdf}
}

Abstract: In recent years, X-ray screening systems have been used to safeguard environments in which access control is of paramount importance. Security checkpoints have been placed at the entrances to many public places to detect prohibited items such as handguns and explosives. Human operators complete these tasks because automated recognition in baggage inspection is far from perfect. Research and development on X-ray testing is, however, ongoing into new approaches that can be used to aid human operators. This paper attempts to make a contribution to the field of object recognition by proposing a new approach called Adaptive Sparse Representation (XASR+). It consists of two stages: learning and testing. In the learning stage, for each object of the training dataset, several random patches are extracted from its X-ray images in order to construct representative dictionaries. A stop-list is used to remove very common words of the dictionaries. In the testing stage, random test patches of the query image are extracted, and for each test patch a dictionary is built by concatenating the 'best' representative dictionary of each object. Using this adapted dictionary, each test patch is classified following the Sparse Representation Classification (SRC) methodology. Finally, the query image is classified by patch voting. Thus, our approach is able to deal with less constrained conditions, including some contrast variability, pose, intra-class variability, size of the image and focal distance. We tested the effectiveness of our method for the detection of four different objects. In our experiments, the recognition rate was more than 95% in each class, and more than 85% if the object is occluded by less than 15%. Results show that XASR+ deals well with unconstrained conditions, outperforming various representative methods in the literature.
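The learning stage lends itself to a compact sketch. Everything below (patch size, patch counts, and especially the stop-list criterion, which here simply drops the patches closest to the class mean) is an assumption standing in for the paper's actual choices:

import numpy as np

def random_patches(image, n=200, size=16, rng=None):
    # Extract n random square patches and flatten each to a row vector.
    rng = rng or np.random.default_rng(0)
    H, W = image.shape
    rows = rng.integers(0, H - size, n)
    cols = rng.integers(0, W - size, n)
    return np.stack([image[r:r + size, c:c + size].ravel()
                     for r, c in zip(rows, cols)]).astype(float)

def build_dictionary(class_images, stop_quantile=0.05, **kw):
    # Concatenate normalised patches of one object class; columns are atoms.
    D = np.vstack([random_patches(im, **kw) for im in class_images])
    D /= np.linalg.norm(D, axis=1, keepdims=True) + 1e-12
    # crude stop-list: discard the patches most similar to the class mean,
    # mimicking the removal of "very common words" mentioned above
    m = D.mean(axis=0)
    sim = D @ (m / (np.linalg.norm(m) + 1e-12))
    return D[sim < np.quantile(sim, 1 - stop_quantile)].T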
Iris Segmentation using Geodesic Active Contours and GrabCut.
Banerjee, S.; and Mery, D.
In Proceedings of the Workshop on 2D & 3D Geometric Properties from Incomplete Data, in conjunction with the Pacific Rim Symposium on Image and Video Technology (PSIVT 2015), 2015.

@inproceedings{Mery2015:PSIVT-Iris,
  author    = {Banerjee, S. and Mery, D.},
  title     = {Iris Segmentation using Geodesic Active Contours and GrabCut},
  booktitle = {Proceedings of the Workshop on 2D \& 3D Geometric Properties from Incomplete Data, in conjunction with the Pacific Rim Symposium on Image and Video Technology (PSIVT 2015)},
  year      = {2015},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2015-PSIVT-IrisSegmentation.pdf}
}

Abstract: Iris segmentation is an important step in iris recognition, as inaccurate segmentation often leads to faulty recognition. We propose an unsupervised, intensity-based iris segmentation algorithm in this paper. The algorithm is fully automatic and can work for varied levels of occlusion, illumination and different shapes of the iris. A near-central point inside the pupil is first detected using intensity-based profiling of the eye image. Using that point as the center, we estimate the outer contour of the iris and the contour of the pupil using geodesic active contours, an iterative energy-minimization algorithm based on the gradient of intensities. The iris region is then segmented out using both these estimations by applying an automatic version of GrabCut, an energy-minimization algorithm from the graph-cut family that represents the image as a Markov random field. The final result is refined using an ellipse-fitting algorithm based on the geometry of the GrabCut segmentation. To test our method, experiments were performed on 600 near-infrared eye images from the GFI database. The following features of the iris image are estimated: center and radius of the pupil and the iris. In order to evaluate the performance, we compare the features obtained by our method and by the segmentation modules of three popular iris recognition systems with manual segmentation (ground truth). The results show that the proposed method performs as well as, and in many cases better than, these systems.
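The two main steps, a geodesic active contour grown from the pupil followed by a GrabCut refinement, can be sketched with scikit-image and OpenCV. The pupil centre is taken as given (the paper finds it by intensity profiling) and every parameter here is a placeholder, so treat this as an illustration of the pipeline rather than the authors' code:

import numpy as np
import cv2
from skimage.segmentation import (inverse_gaussian_gradient,
                                  morphological_geodesic_active_contour)

def segment_iris(gray, centre, radius_guess=60):
    # 1) geodesic active contour: grow a circle from the pupil centre,
    #    stopping at strong intensity gradients (the iris boundary)
    gimg = inverse_gaussian_gradient(gray.astype(float) / 255.0)
    init = np.zeros(gray.shape, dtype=np.uint8)
    cv2.circle(init, centre, radius_guess, 1, -1)
    contour = morphological_geodesic_active_contour(
        gimg, 150, init_level_set=init, smoothing=2, balloon=1)
    # 2) GrabCut refinement, seeded with the contour estimate
    mask = np.where(contour > 0, cv2.GC_PR_FGD, cv2.GC_PR_BGD).astype(np.uint8)
    bgd = np.zeros((1, 65), np.float64)
    fgd = np.zeros((1, 65), np.float64)
    rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    cv2.grabCut(rgb, mask, None, bgd, fgd, 5, cv2.GC_INIT_WITH_MASK)
    return np.isin(mask, (cv2.GC_FGD, cv2.GC_PR_FGD))

An ellipse would then be fitted to the resulting region (for instance with cv2.fitEllipse on its boundary) to obtain the centre and radii reported in the evaluation.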
Automatic facial attribute analysis via adaptive sparse representation of random patches.
Mery, D.; and Bowyer, K.
Pattern Recognition Letters, 68: 260-269. 2015.

@article{Mery2015:PLR,
  author    = {Mery, Domingo and Bowyer, Kevin},
  title     = {Automatic facial attribute analysis via adaptive sparse representation of random patches},
  journal   = {Pattern Recognition Letters},
  volume    = {68},
  pages     = {260-269},
  year      = {2015},
  publisher = {Elsevier},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2015-PLR.pdf}
}

Abstract: It is well known that some facial attributes (like soft biometric traits) can increase the performance of traditional biometric systems and help recognition based on human descriptions. In addition, other facial attributes, such as facial expressions, can be used in human-computer interfaces, image retrieval, talking heads and human emotion analysis. This paper addresses the problem of automated recognition of facial attributes by proposing a new general approach called Adaptive Sparse Representation of Random Patches (ASR+). The proposed method consists of two stages: in the learning stage, random patches are extracted from representative face images of each class (e.g., in gender recognition, a two-class problem, images of females/males) in order to construct representative dictionaries. A stop list is used to remove very common words of the dictionaries. In the testing stage, random test patches of the query image are extracted, and for each non-stopped test patch a dictionary is built by concatenating the 'best' representative dictionary of each class. Using this adapted dictionary, each non-stopped test patch is classified following the Sparse Representation Classification (SRC) methodology. Finally, the query image is classified by patch voting. Thus, our approach is able to learn a model for each recognition task dealing with a larger degree of variability in ambient lighting, pose, expression, occlusion, face size and distance from the camera. Experiments were carried out on eight face databases in order to recognize facial expression, gender, race, disguise and beard. Results show that ASR+ deals well with unconstrained conditions, outperforming various representative methods in the literature in many complex scenarios.
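The testing stage, sparse coding over the adapted dictionary followed by class-wise residuals and patch voting, can be outlined as below. scikit-learn's Orthogonal Matching Pursuit is used here as a stand-in for whichever sparse solver the SRC step actually employs:

import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit

def src_classify_patch(patch, dictionaries, n_nonzero=10):
    # dictionaries: {class_label: matrix whose columns are that class's atoms}
    labels = list(dictionaries)
    D = np.hstack([dictionaries[c] for c in labels])   # adapted dictionary
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero,
                                    fit_intercept=False).fit(D, patch)
    x = omp.coef_
    # reconstruct with each class's coefficients only; smallest residual wins
    resid, start = {}, 0
    for c in labels:
        k = dictionaries[c].shape[1]
        resid[c] = np.linalg.norm(patch - dictionaries[c] @ x[start:start + k])
        start += k
    return min(resid, key=resid.get)

def classify_image(patches, dictionaries):
    votes = [src_classify_patch(p, dictionaries) for p in patches]
    return max(set(votes), key=votes.count)            # patch voting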
Inspection of Complex Objects Using Multiple-X-Ray Views.
Mery, D.
IEEE/ASME Transactions on Mechatronics, 20(1): 338-347. 2015.

@article{Mery2015:IEEE-Mechatronics,
  author  = {Mery, D.},
  title   = {{Inspection of Complex Objects Using Multiple-X-Ray Views}},
  journal = {IEEE/ASME Transactions on Mechatronics},
  volume  = {20},
  number  = {1},
  pages   = {338-347},
  year    = {2015},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2014-IEEE-ComplexObjects.pdf}
}

Abstract: This paper presents a new methodology for identifying parts of interest inside of a complex object using multiple X-ray views. The proposed method consists of five steps: A) image acquisition, which acquires an image sequence where the parts of the object are captured from different viewpoints; B) geometric model estimation, which establishes a multiple-view geometric model used to find the correct correspondence among different views; C) single view detection, which segments potential regions of interest in each view; D) multiple view detection, which matches and tracks potential regions based on similarity and geometrical multiple-view constraints; and E) analysis, which analyzes the tracked regions using multiple-view information, filtering out false alarms without eliminating existing parts of interest. In order to evaluate the effectiveness of the proposed method, the algorithm was tested on 32 cases (five applications using different segmentation approaches), yielding promising results: precision and recall were 95.7% and 93.9%, respectively. Additionally, the multiple-view information obtained from the tracked parts was effectively used for recognition purposes. In our recognition experiments, we obtained an accuracy of 96.5%. Validation experiments show that our approach achieves better performance than other representative methods in the literature.
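Step D is where the multiple views pay off: a region segmented in one view is kept only if a geometrically consistent region exists in the others. A minimal sketch of that test, assuming the fundamental matrix F between two views comes from step B:

import numpy as np

def sampson_distance(F, p1, p2):
    # First-order geometric error of the correspondence p1 <-> p2 under F.
    x1 = np.array([p1[0], p1[1], 1.0])
    x2 = np.array([p2[0], p2[1], 1.0])
    Fx1, Ftx2 = F @ x1, F.T @ x2
    return (x2 @ F @ x1) ** 2 / (Fx1[0] ** 2 + Fx1[1] ** 2
                                 + Ftx2[0] ** 2 + Ftx2[1] ** 2)

def match_regions(F, centroids1, centroids2, max_err=2.0):
    # Greedily pair region centroids across two views; regions that find no
    # geometrically consistent partner are candidate false alarms for step E.
    matches = []
    for i, c1 in enumerate(centroids1):
        errs = [sampson_distance(F, c1, c2) for c2 in centroids2]
        j = int(np.argmin(errs))
        if errs[j] < max_err:
            matches.append((i, j))
    return matches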
Visual Recognition to Access and Analyze People Density and Flow Patterns in Indoor Environments.
Ruz, C.; Pieringer, C.; Peralta, B.; Lillo, I.; Espinace, P.; Gonzalez, R.; Wendt, B.; Mery, D.; and Soto, A.
In 2015 IEEE Winter Conference on Applications of Computer Vision (WACV2015), pages 1-8, 2015. IEEE.

@inproceedings{Mery2015:WACV,
  author       = {Ruz, C. and Pieringer, Christian and Peralta, Billy and Lillo, Ivan and Espinace, Pablo and Gonzalez, R. and Wendt, B. and Mery, Domingo and Soto, Alvaro},
  title        = {Visual Recognition to Access and Analyze People Density and Flow Patterns in Indoor Environments},
  booktitle    = {2015 IEEE Winter Conference on Applications of Computer Vision (WACV2015)},
  pages        = {1-8},
  year         = {2015},
  organization = {IEEE},
  url          = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2015-WACV.pdf}
}

Abstract: This work describes our experience developing a system to access density and flow of people in large indoor spaces using a network of RGB cameras. The proposed system is based on a set of overlapped and calibrated cameras. This facilitates the use of geometric constraints that help to reduce visual ambiguities. These constraints are combined with classifiers based on visual appearance to produce an efficient and robust method to detect and track humans. In this work, we argue that flow and density of people are low-level measurements that need to be complemented with suitable analytic tools to bridge semantic gaps and become useful information for a target application. Consequently, we also propose a set of analytic tools that help a human user to effectively take advantage of the measurements provided by the system. Finally, we report results that demonstrate the relevance of the proposed ideas.
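One way the overlapping, calibrated cameras can reduce ambiguity is to project each camera's detections onto a common ground plane and merge those that coincide. A sketch under that assumption (per-camera homographies given, merge radius arbitrary):

import numpy as np

def to_ground(H, point):
    # Map an image point (x, y) to ground-plane coordinates via homography H.
    p = H @ np.array([point[0], point[1], 1.0])
    return p[:2] / p[2]

def fuse_detections(per_camera, homographies, merge_radius=0.5):
    # per_camera: {camera_id: list of detected foot points in that image}
    pts = [to_ground(homographies[c], p)
           for c, dets in per_camera.items() for p in dets]
    people, used = [], [False] * len(pts)
    for i, p in enumerate(pts):
        if used[i]:
            continue
        group = []
        for j, q in enumerate(pts):
            if not used[j] and np.linalg.norm(q - p) < merge_radius:
                used[j] = True
                group.append(q)
        people.append(np.mean(group, axis=0))
    return people   # density and flow statistics follow from these points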
Mucho más que ciencia ficción [Much more than science fiction].
Mery, D.
In Especial de Biometría, El Mercurio. 2015.

@incollection{Mery2015:ElMercurio,
  author    = {Mery, D.},
  title     = {{Mucho m\'as que ciencia ficci\'on}},
  booktitle = {Especial de Biometr\'ia, El Mercurio},
  year      = {2015},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Other-Journals/2015-Biometria-ElMercurio.pdf}
}
2014 (5)
Face recognition via adaptive sparse representations of random patches.
Mery, D.; and Bowyer, K.
In IEEE International Workshop on Information Forensics and Security (WIFS2014), pages 13-18, 2014. IEEE.

@inproceedings{Mery2014:WIFS,
  author       = {Mery, Domingo and Bowyer, Kevin},
  title        = {Face recognition via adaptive sparse representations of random patches},
  booktitle    = {IEEE International Workshop on Information Forensics and Security (WIFS2014)},
  pages        = {13-18},
  year         = {2014},
  organization = {IEEE},
  url          = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2014-WIFS-MeryBowyer.pdf}
}

Abstract: Unconstrained face recognition is still an open problem, as state-of-the-art algorithms have not yet reached high recognition performance in real-world environments (e.g., crowd scenes at the Boston Marathon). This paper addresses this problem by proposing a new approach called Adaptive Sparse Representation of Random Patches (ASR+). In the learning stage, for each enrolled subject, a number of random patches are extracted from the subject's gallery images in order to construct representative dictionaries. In the testing stage, random test patches of the query image are extracted, and for each test patch a dictionary is built by concatenating the 'best' representative dictionary of each subject. Using this adapted dictionary, each test patch is classified following the Sparse Representation Classification (SRC) methodology. Finally, the query image is classified by patch voting. Thus, our approach is able to deal with a larger degree of variability in ambient lighting, pose, expression, occlusion, face size and distance from the camera. Experiments were carried out on five widely-used face databases. Results show that ASR+ deals well with unconstrained conditions, outperforming various representative methods in the literature in many complex scenarios.
Recognition of facial attributes using adaptive sparse representations of random patches.
Mery, D.; and Bowyer, K.
In European Conference on Computer Vision, pages 778-792, 2014. Springer.

@inproceedings{Mery2014:ECCV-SoftBio,
  author       = {Mery, Domingo and Bowyer, Kevin},
  title        = {Recognition of facial attributes using adaptive sparse representations of random patches},
  booktitle    = {European Conference on Computer Vision},
  pages        = {778-792},
  year         = {2014},
  organization = {Springer},
  url          = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2014-ECCV-MeryBowyer.pdf}
}

Abstract: It is well known that some facial attributes (like soft biometric traits) can increase the performance of traditional biometric systems and help recognition based on human descriptions. In addition, other facial attributes (like facial expressions) can be used in human-computer interfaces, image retrieval, talking heads and human emotion analysis. This paper addresses the problem of automated recognition of facial attributes by proposing a new general approach called Adaptive Sparse Representation of Random Patches (ASR+). In the learning stage, random patches are extracted from representative face images of each class (e.g., in gender recognition, a two-class problem, images of females/males) in order to construct representative dictionaries. In the testing stage, random test patches of the query image are extracted, and for each test patch a dictionary is built by concatenating the 'best' representative dictionary of each class. Using this adapted dictionary, each test patch is classified following the Sparse Representation Classification (SRC) methodology. Finally, the query image is classified by patch voting. Thus, our approach is able to learn a model for each recognition task dealing with a larger degree of variability in ambient lighting, pose, expression, occlusion, face size and distance from the camera. Experiments were carried out on seven face databases in order to recognize facial expression, gender, race and disguise. Results show that ASR+ deals well with unconstrained conditions, outperforming various representative methods in the literature in many complex scenarios.
Automated Object Recognition using Multiple X-ray Views.
Mery, D.; and Riffo, V.
Materials Evaluation, 72(11): 1362-1372. 2014.

@article{Mery2014:MatEval,
  author    = {Mery, Domingo and Riffo, Vladimir},
  title     = {Automated Object Recognition using Multiple X-ray Views},
  journal   = {Materials Evaluation},
  volume    = {72},
  number    = {11},
  pages     = {1362-1372},
  year      = {2014},
  publisher = {The American Society for Nondestructive Testing},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2014-MatEval.pdf}
}

Abstract: In order to reduce the security risk of a commercial aircraft, passengers are not allowed to take certain items in their carry-on baggage. For this reason, human operators are trained to detect prohibited items using a manually controlled baggage screening process. The inspection process, however, is highly complex, as hazardous items are very difficult to detect when placed in closely packed bags, superimposed by other objects, and/or rotated so that they show an unrecognizable profile. In this paper, we review certain advances achieved by our research group in this field. Our methodology is based on multiple-view analysis, because it can be a powerful tool for examining complex objects in cases in which uncertainty can lead to misinterpretation. In our approach, multiple views (taken from fixed points of view, or using an active vision approach in which the best views are automatically selected) are analyzed in the detection of regular objects. In order to illustrate the effectiveness of the proposed method, experimental results on recognizing guns, razor blades, pins, clips and springs in baggage inspection are presented, achieving around 90% accuracy. We believe that it would be possible to design an automated aid in a target detection task using the proposed algorithm.
Oil Content Fraction in Tortilla Chips During Frying and their Prediction by Image Analysis Using Computer Vision.
Matiacevich, S. B.; Henriquez, O. C.; Mery, D.; and Pedreschi, F.
International Journal of Food Properties, 17(2): 261-272. 2014.

@article{Mery2014:JFoodProperties,
  author    = {Matiacevich, Silvia B and Henriquez, Olivia C and Mery, Domingo and Pedreschi, Franco},
  title     = {Oil Content Fraction in Tortilla Chips During Frying and their Prediction by Image Analysis Using Computer Vision},
  journal   = {International Journal of Food Properties},
  volume    = {17},
  number    = {2},
  pages     = {261-272},
  year      = {2014},
  publisher = {Taylor \& Francis},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2013-IJFP-Matiacevich.pdf}
}

Abstract: The increasing worldwide consumption of tortilla chips makes it relevant to design and optimize their industrial quality analysis. Surface, structural and total oil content of tortilla chips fried at 160, 175 and 190°C for different times were analyzed. The aim was to obtain a relationship between oil content and features extracted from their digital images. The results showed a high linear correlation (R>0.90) between oil content and image features at each frying temperature, indicating that trustworthy models can be developed, allowing the prediction of the oil content of tortilla chips from selected features extracted from their digital images, without the need to measure it directly. Cross-validation demonstrated the repeatability of each model and their good performance (>90%).
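The reported models are plain linear fits checked by cross-validation, which is straightforward to reproduce in outline; the features and data below are placeholders, not the paper's measurements:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
X = rng.random((60, 3))                     # rows = chips, cols = image features
y = 0.8 * X[:, 0] + 0.1 * rng.random(60)    # placeholder oil-content fractions

scores = cross_val_score(LinearRegression(), X, y, cv=5, scoring='r2')
print(f'cross-validated R^2: {scores.mean():.2f} +/- {scores.std():.2f}')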
Computer vision technology for X-ray testing.
Mery, D.
Insight-Non-Destructive Testing and Condition Monitoring, 56(3): 147-155. 2014.

@article{Mery2014:Insight,
  author  = {Mery, D.},
  title   = {{Computer vision technology for X-ray testing}},
  journal = {Insight-Non-Destructive Testing and Condition Monitoring},
  volume  = {56},
  number  = {3},
  pages   = {147-155},
  year    = {2014},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2014-Insight-XrayTesting.pdf}
}

Abstract: X-ray testing has been developed for the inspection of materials or objects, where the aim is to analyze, nondestructively, those inner parts that are undetectable to the naked eye. Thus, X-ray testing is used to determine whether a test object deviates from a given set of specifications. Typical applications are inspection of automotive parts, quality control of welds, screening of baggage, analysis of food products and inspection of cargo. In order to achieve efficient and effective X-ray testing, automated and semi-automated systems based on computer vision algorithms are being developed to execute this task. In this paper, we present a general overview of computer vision approaches that have been used in X-ray testing. In addition, we review some techniques that have been applied in certain relevant applications, and we introduce a public database of X-ray images that can be used for testing and evaluation of image analysis and computer vision algorithms. Finally, we conclude that there are some areas, like casting inspection, where automated systems are very effective; other application areas, such as baggage screening, where human inspection is still used; certain application areas (like weld and cargo inspection) where the process is semi-automatic; and some research in areas (including food analysis) where processes are beginning to be characterized by the use of X-ray imaging.
2013 (9)
Detection of regular objects in baggage using multiple X-ray views.
Mery, D.; Mondragon, G.; Riffo, V.; and Zuccar, I.
Insight-Non-Destructive Testing and Condition Monitoring, 55(1): 16-21. 2013.

@article{Mery-2013:Insight,
  author  = {Mery, D. and Mondragon, G. and Riffo, V. and Zuccar, I.},
  title   = {Detection of regular objects in baggage using multiple {X-ray} views},
  journal = {Insight-Non-Destructive Testing and Condition Monitoring},
  volume  = {55},
  number  = {1},
  pages   = {16-21},
  year    = {2013},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2013-Insight-BagaggeScreening.pdf}
}
Automated design of a computer vision system for visual food quality evaluation.
Mery, D.; Pedreschi, F.; and Soto, A.
Food and Bioprocess Technology, 6(8): 2093-2108. 2013.

@article{Mery2013:FABT,
  author  = {Mery, D. and Pedreschi, F. and Soto, A.},
  title   = {Automated design of a computer vision system for visual food quality evaluation},
  journal = {Food and Bioprocess Technology},
  volume  = {6},
  number  = {8},
  pages   = {2093-2108},
  year    = {2013},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2013-FABT-ComputerVisionSystem.pdf}
}
Joint Dictionary and Classifier learning for Categorization of Images using a Max-margin Framework.
Lobel, H.; Vidal, R.; Mery, D.; and Soto, A.
In Pacific-Rim Symposium on Image and Video Technology, pages 87-98, 2013.

@inproceedings{Mery2013:PSIVT-a,
  author    = {Lobel, H. and Vidal, R. and Mery, D. and Soto, A.},
  title     = {Joint Dictionary and Classifier learning for Categorization of Images using a Max-margin Framework},
  booktitle = {Pacific-Rim Symposium on Image and Video Technology},
  pages     = {87-98},
  year      = {2013},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2013-PSIVT-Lobel.pdf}
}
Human action recognition from inter-temporal dictionaries of key-sequences.
Alfaro, A.; Mery, D.; and Soto, A.
In Pacific-Rim Symposium on Image and Video Technology, pages 419-430, 2013.

@inproceedings{Mery2013:PSIVT-b,
  author    = {Alfaro, A. and Mery, D. and Soto, A.},
  title     = {Human action recognition from inter-temporal dictionaries of key-sequences},
  booktitle = {Pacific-Rim Symposium on Image and Video Technology},
  pages     = {419-430},
  year      = {2013},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2013-PSIVT-Alfaro.pdf}
}
Automated Object Recognition in Baggage Screening Using Multiple X-ray Views.
Mery, D.; and Riffo, V.
In 52nd Annual Conference of the British Institute for Non-Destructive Testing, Telford, UK, 2013.

@inproceedings{Mery2013:NDT,
  author    = {Mery, D. and Riffo, V.},
  title     = {Automated Object Recognition in Baggage Screening Using Multiple {X-ray} Views},
  booktitle = {52nd Annual Conference of the British Institute for Non-Destructive Testing, Telford, UK},
  year      = {2013},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2013-NDT-Telford.pdf}
}
Automated X-Ray Object Recognition Using an Efficient Search Algorithm in Multiple Views.
Mery, D.; Riffo, V.; Zuccar, I.; and Pieringer, C.
In IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW 2013), pages 368-374, 2013.

@inproceedings{Mery2013:PBVS-a,
  author    = {Mery, D. and Riffo, V. and Zuccar, I. and Pieringer, C.},
  title     = {Automated {X-Ray} Object Recognition Using an Efficient Search Algorithm in Multiple Views},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW 2013)},
  pages     = {368-374},
  year      = {2013},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2013-CPVR-PBVS-MultiX-ray.pdf}
}
X-Ray Testing by Computer Vision.
Mery, D.
In IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW 2013), pages 360-367, 2013.

@inproceedings{Mery2013:PBVS-b,
  author    = {Mery, D.},
  title     = {X-Ray Testing by Computer Vision},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW 2013)},
  pages     = {360-367},
  year      = {2013},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2013-CPVR-PBVS-XrayTesting.pdf}
}
Sensory Quality Classification of Sopaipillas by means of Computer Vision.
Bunger, A.; Pedreschi, F.; Mariotti, S.; Ahumada, A.; Mery, D.; and Lillo, I.
In Proceedings of 10th Pangborn Sensory Science Symposium, Rio de Janeiro, 11-13 Aug., 2013.

@inproceedings{Mery2013:Pangborn,
  author    = {Bunger, A. and Pedreschi, F. and Mariotti, S. and Ahumada, A. and Mery, D. and Lillo, I.},
  title     = {Sensory Quality Classification of Sopaipillas by means of Computer Vision},
  booktitle = {Proceedings of 10th Pangborn Sensory Science Symposium, Rio de Janeiro, 11-13 Aug.},
  year      = {2013}
}
X-ray Testing: The State of the Art.
Mery, D.
e-Journal of Nondestructive Testing. Sep 2013.

@article{Mery2013:eJNDT,
  author  = {Mery, D.},
  title   = {{X-ray Testing: The State of the Art}},
  journal = {e-Journal of Nondestructive Testing},
  month   = {Sep},
  year    = {2013},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/Other-Journals/2013-NDTNET.pdf}
}
2012 (6)
Active X-ray testing of Complex Objects.
Riffo, V.; and Mery, D.
Insight-Non-Destructive Testing and Condition Monitoring, 54(1): 28-35. 2012.

@article{Mery2012:Insight,
  author  = {Riffo, V. and Mery, D.},
  title   = {Active X-ray testing of Complex Objects},
  journal = {Insight-Non-Destructive Testing and Condition Monitoring},
  volume  = {54},
  number  = {1},
  pages   = {28-35},
  year    = {2012},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2011-Insight-ActiveXRay.pdf}
}
Prediction of Mechanical Properties of Corn and Tortilla Chips using Computer Vision.
Matiacevich, S.; Mery, D.; and Pedreschi, F.
International Journal of Food and Bioprocess Technology, 5(5): 2025-2030. 2012.

@article{Mery2012:FABT,
  author  = {Matiacevich, S.M. and Mery, D. and Pedreschi, F.},
  title   = {Prediction of Mechanical Properties of Corn and Tortilla Chips using Computer Vision},
  journal = {International Journal of Food and Bioprocess Technology},
  volume  = {5},
  number  = {5},
  pages   = {2025-2030},
  year    = {2012},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2012-FABT-Tortillas.pdf}
}
Indoor Mobile Robotics at Grima, PUC.
Caro, L.; Correa, J.; Espinace, P.; Maturana, D.; Mitnik, R.; Montabone, S.; Pszszolkowski, S.; Araneda, A.; Mery, D.; Torres, M.; and Soto, A.
Journal of Intelligent and Robotic Systems, 66(1-2): 151-165. 2012.

@article{Mery2012:JIRS,
  author  = {Caro, L. and Correa, J. and Espinace, P. and Maturana, D. and Mitnik, R. and Montabone, S. and Pszszolkowski, S. and Araneda, A. and Mery, D. and Torres, M. and Soto, A.},
  title   = {Indoor Mobile Robotics at {Grima}, {PUC}},
  journal = {Journal of Intelligent and Robotic Systems},
  volume  = {66},
  number  = {1-2},
  pages   = {151-165},
  year    = {2012},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2011-JINT-RoboticsGRIMA.pdf}
}
Automatic Landform Classification of Uplands based on Haralick's Textures.
Patino, D.; Mery, D.; Botero, V.; and Branch, J.
In Proceedings of XXXVIII Latin-American Conference on Informatics (CLEI2012), Medellin, Oct. 1-5, pages 1-8, 2012.

@inproceedings{Mery2012:CLEI,
  author    = {Patino, D. and Mery, D. and Botero, V. and Branch, J.},
  title     = {Automatic Landform Classification of Uplands based on Haralick's Textures},
  booktitle = {Proceedings of XXXVIII Latin-American Conference on Informatics (CLEI2012), Medellin, Oct. 1-5},
  pages     = {1-8},
  year      = {2012},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2012-CLEI-DPatino.pdf}
}
Procesando mas alla de lo visible [Processing beyond the visible].
Mery, D.
Revista Bits de Ciencias, 44-48. 2012.

@article{Mery-2012-BITS,
  author  = {Mery, D.},
  title   = {Procesando mas alla de lo visible},
  journal = {Revista Bits de Ciencias},
  pages   = {44-48},
  year    = {2012},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/Other-Journals/2012-Bits.pdf}
}
Head Modeling Using Multiple-views.
Pieringer, C.; Mery, D.; and Soto, A.
In CWPR 2012 - IV Chilean Workshop on Pattern Recognition, pages 23-37, 2012.

@inproceedings{Mery-2012-CWPR,
  author    = {Pieringer, C. and Mery, D. and Soto, A.},
  title     = {{Head Modeling Using Multiple-views}},
  booktitle = {{CWPR 2012 - IV Chilean Workshop on Pattern Recognition}},
  pages     = {23-37},
  year      = {2012},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/Local/2012-CWPR-Pieringer.pdf}
}
2011 (15)
Automated Detection of Welding Discontinuities without Segmentation.
Mery, D.
Materials Evaluation, (June): 657-663. 2011.

@article{Mery2011:MatEval,
  author  = {Mery, D.},
  title   = {Automated Detection of Welding Discontinuities without Segmentation},
  journal = {Materials Evaluation},
  number  = {June},
  pages   = {657-663},
  year    = {2011},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2011-MatEval-Welding.pdf}
}
\n \n\n \n \n \n \n \n \n BALU: A Matlab toolbox for computer vision, pattern recognition and image processing.\n \n \n \n \n\n\n \n Mery, D.\n\n\n \n\n\n\n 2011.\n \n\n\n\n
\n\n\n\n \n \n \"BALU:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Mery2011:Balu,\n\tAuthor = {Mery, D.},\n\tTitle = {{BALU}: A {Matlab} toolbox for computer vision, pattern recognition and image processing},\n\tYear = {2011},\n\turl = {http://dmery.ing.puc.cl/index.php/balu/}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automated Fish Bone Detection using X-ray Testing.\n \n \n \n \n\n\n \n Mery, D.; Lillo, I.; Riffo, V.; Soto, A.; Cipriano, A.; and Aguilera, J.\n\n\n \n\n\n\n Journal of Food Engineering, 105: 485-492. 2011.\n \n\n\n\n
\n\n\n\n \n \n \"AutomatedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 7 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery2011:JFoodEng,\n  author    = {Mery, D. and Lillo, I. and Riffo, V. and Soto, A. and Cipriano, A. and Aguilera, J.M.},\n  title = {Automated Fish Bone Detection using X-ray Testing},\n  journal = {Journal of Food Engineering},\n  volume = 105,\n  pages = {485-492},\n  year = {2011},\n url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2011-JFoodEng-SalmonX.pdf}\n }\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Computer Vision Classification of Potato Chips by Color.\n \n \n \n \n\n\n \n Pedreschi, F.; Mery, D.; Bunger, A.; and Yanez, V.\n\n\n \n\n\n\n Journal of Food Process Engineering, 34: 1714-1728. 2011.\n \n\n\n\n
\n\n\n\n \n \n \"ComputerPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery2011:JFPE,\n  author = {Pedreschi, F. and Mery, D. and Bunger, A. and Yanez, V.},\n  title = {Computer Vision Classification of Potato Chips by Color},\n  journal = {Journal of Food Process Engineering},\n  volume = 34,\n  pages = {1714-1728},\n  year = {2011},\n url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2010-JFPE.pdf}\n }\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic Multiple View Inspection using Geometrical Tracking and Feature Analysis in Aluminum Wheels.\n \n \n \n \n\n\n \n Carrasco, M.; and Mery, D.\n\n\n \n\n\n\n Machine Vision and Applications, 22: 157-170. 2011.\n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 7 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery2011:JMVA,\n  author = {Carrasco, M. and Mery, D.},\n  title = {Automatic Multiple View Inspection using Geometrical Tracking and Feature Analysis in Aluminum Wheels},\n  journal = {Machine Vision and Applications},\n  volume = 22,\n  pages = {157-170},\n  year = {2011},\n url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2011-JMVA.pdf}\n }\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bifocal Matching using Multiple Geometrical Solutions.\n \n \n \n \n\n\n \n Carrasco, M.; and Mery, D.\n\n\n \n\n\n\n In 5th Pacific-Rim Symposium on Image and Video Technology (PSIVT2011), IEEE Computer Society, Gwangju, South Korea, Nov. 20-23, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"BifocalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery2011:PSIVT,\n  author    = {Carrasco, M. and Mery, D.},\n  title     = {Bifocal Matching using Multiple Geometrical Solutions},\n  booktitle = {5th Pacific-Rim Symposium on Image and Video Technology (PSIVT2011), IEEE Computer Society, Gwangju, South Korea, Nov. 20-23},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2011-PSIVT-Carrasco.pdf},\n  year      = {2011}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dynamic Signature Recognition based on Fisher Discriminant.\n \n \n \n \n\n\n \n Schmidt, T.; and Mery, D.\n\n\n \n\n\n\n In 16th Iberoamerican Congress on Pattern Recognition (CIARP 2011), Pucon, Chile, Nov. 15-18, Lecture Notes in Computer Science, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"DynamicPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery2011:CIARP-a,\n  author    = {T. Schmidt and Domingo Mery},\n  title     = {Dynamic Signature Recognition based on Fisher Discriminant},\n  booktitle = {16th Iberoamerican Congress on Pattern Recognition (CIARP 2011), Pucon, Chile, Nov. 15-18},\n  series = {Lecture Notes in Computer Science},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2011-CIARP-Schmidt.pdf},\n  year      = {2011}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improving Tracking Algorithm using Saliency.\n \n \n \n \n\n\n \n Undurraga, C.; and Mery, D.\n\n\n \n\n\n\n In 16th Iberoamerican Congress on Pattern Recognition (CIARP 2011), Pucon, Chile, Nov. 15-18, Lecture Notes in Computer Science, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"ImprovingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery2011:CIARP-b,\n  author    = {C. Undurraga and Domingo Mery},\n  title     = {Improving Tracking Algorithm using Saliency},\n  booktitle = {16th Iberoamerican Congress on Pattern Recognition (CIARP 2011), Pucon, Chile, Nov. 15-18},\n  series = {Lecture Notes in Computer Science},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2011-CIARP-Undurraga.pdf},\n  year      = {2011}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Probabilistic Iterative Local Search Algorithm Applied to Full Model Selection.\n \n \n \n \n\n\n \n Cortazar, E.; and Mery, D.\n\n\n \n\n\n\n In 3rd Chilean Workshop on Pattern Recognition (CWPR 2011), Pucon, Chile, Nov. 15-18, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery2011:CIARP-c,\n  author    = {E. Cortazar and Domingo Mery},\n  title     = {A Probabilistic Iterative Local Search Algorithm Applied to Full Model Selection},\n  booktitle = {3rd Chilean Workshop on Pattern Recognition (CWPR 2011), Pucon, Chile, Nov. 15-18},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/Local/2011-CWPR-Cortazar.pdf},\n  year      = {2011}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Active Radioscopic Inspection of Complex Objects using Multiple Views (in Spanish).\n \n \n \n \n\n\n \n Riffo, V.; and Mery, D.\n\n\n \n\n\n\n In 3rd Chilean Workshop on Pattern Recognition (CWPR 2011), Pucon, Chile, Nov. 15-18, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"ActivePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery2011:CIARP-d,\n  author    = {V. Riffo and Domingo Mery},\n  title     = {Active Radioscopic Inspection of Complex Objects using Multiple Views (in Spanish)},\n  booktitle = {3rd Chilean Workshop on Pattern Recognition (CWPR 2011), Pucon, Chile, Nov. 15-18},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/Local/2011-CWPR-Riffo.pdf},\n  year      = {2011}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automated Detection in Complex Objects using a Tracking Algorithm in Multiple X-ray Views.\n \n \n \n \n\n\n \n Mery, D.\n\n\n \n\n\n\n In Proceedings of the 8th IEEE Workshop on Object Tracking and Classification Beyond the Visible Spectrum (OTCBVS 2011), in Conjunction with Computer Vision and Pattern Recognition (CVPR 2011), Colorado Springs, USA, pages 41-48, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"AutomatedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery2011:OTCBVS,\n  author    = {Domingo Mery},\n  title     = {Automated Detection in Complex Objects using a Tracking Algorithm in Multiple X-ray Views},\n  booktitle = {Proceedings of the 8th IEEE Workshop on Object Tracking and Classification Beyond the Visible Spectrum (OTCBVS 2011), in Conjunction with Computer Vision and Pattern Recognition (CVPR 2011), Colorado Springs, USA},\n  pages = {41-48},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2011-OTCBVS-MultiXray.pdf},\n  year      = {2011}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Discriminative Local Binary Patterns for Face Recognition.\n \n \n \n \n\n\n \n Maturana, D.; Mery, D.; and Soto, A.\n\n\n \n\n\n\n In Proceedings of the Ninth IEEE International Conference on Automatic Face and Gesture Recognition (FG-2011), 2011. \n \n\n\n\n
\n\n\n\n \n \n \"LearningPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 17 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery2011:FG,\n  author    = {D. Maturana and Domingo Mery and A. Soto},\n  title     = {Learning Discriminative Local Binary Patterns for Face Recognition},\n  booktitle = {Proceedings of the Ninth IEEE International Conference on Automatic Face and Gesture Recognition (FG-2011)},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2011-FG-FaceRecognition.pdf},\n  year      = {2011}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The automatic sorting using image processing improves postharvest blueberries storage quality.\n \n \n \n \n\n\n \n Leiva, G.; Mondragon, G.; Mery, D.; and Aguilera, J.\n\n\n \n\n\n\n In Proceedings of International Congress on Engineering and Food (ICEF-2011), 2011. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery2011:ICEF-a,\n  author    = {G. Leiva and G. Mondragon and Domingo Mery and J.M. Aguilera},\n  title     = {The automatic sorting using image processing improves postharvest blueberries storage quality},\n  booktitle = {Proceedings of International Congress on Engineering and Food (ICEF-2011)},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2011-ICEF-Blueberries.pdf},\n  year      = {2011}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automated detection of softening and hard columella in kiwifruits during postharvest using X-ray testing.\n \n \n \n \n\n\n \n Mondragon, G.; Leiva, G.; Aguilera, J.; and Mery, D.\n\n\n \n\n\n\n In Proceedings of International Congress on Engineering and Food (ICEF-2011), 2011. \n \n\n\n\n
\n\n\n\n \n \n \"AutomatedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery2011:ICEF-b,\n  author    = {G. Mondragon and G. Leiva and J.M. Aguilera and Domingo Mery },\n  title     = {Automated detection of softening and hard columella in kiwifruits during postharvest using X-ray testing},\n  booktitle = {Proceedings of International Congress on Engineering and Food (ICEF-2011)},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2011-ICEF-Kiwis.pdf},\n  year      = {2011}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n GRIMA: Grupo de Inteligencia de Maquina.\n \n \n \n \n\n\n \n Baier, J.; Mery, D.; Pichara, K.; and Soto, A.\n\n\n \n\n\n\n Revista Bits de Ciencias,65-67. 2011.\n \n\n\n\n
\n\n\n\n \n \n \"GRIMA:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery2011:BITS,\n  author = {J. Baier and Domingo Mery and K. Pichara and A. Soto},\n  title = {GRIMA: Grupo de Inteligencia de Maquina},\n  journal = {Revista Bits de Ciencias},\n  year = {2011},\n  pages = {65-67},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Other-Journals/2011-Bits.pdf}\n }\n\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (13)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n SCCC 2010, Proceedings of the XXIX International Conference of the Chilean Computer Science Society, Antofagasta, Chile, 15-19 November 2010.\n \n \n \n \n\n\n \n Ochoa, S.; Meza, F.; Mery, D.; and Cubillos, C.,\n editors.\n \n\n\n \n\n\n\n IEEE Computer Society. 2010.\n \n\n\n\n
\n\n\n\n \n \n \"SCCCLink\n  \n \n \n \"SCCCPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@proceedings{Mery-2010-SCCC,\n  editor    = {Ochoa, S. and\n               Meza, F. and\n               Mery, D. and\n               Cubillos, C.},\n  title     = {SCCC 2010, Proceedings of the XXIX International Conference\n               of the Chilean Computer Science Society, Antofagasta, Chile,\n               15-19 November 2010},\n  booktitle = {SCCC},\n  publisher = {IEEE Computer Society},\n  year      = {2010},\n  ee        = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=5750114},\n  bibsource = {DBLP, http://dblp.uni-trier.de},\n url = {http://dmery.sitios.ing.uc.cl/Prints/Books/2010-SCCC.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Visual Inspection of Glass Bottlenecks by Multiple-View Analysis.\n \n \n \n \n\n\n \n Carrasco, M.; Pizarro, L.; and Mery, D.\n\n\n \n\n\n\n International Journal of Computer Integrated Manufacturing, 23(10): 925-941. 2010.\n \n\n\n\n
\n\n\n\n \n \n \"VisualPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery-2010-IJCIM,\n  author = {M. Carrasco and L. Pizarro and Domingo Mery},\n  title = {Visual Inspection of Glass Bottlenecks by Multiple-View Analysis},\n  journal = {International Journal of Computer Integrated Manufacturing},\n  volume = 23,\n  number = 10,\n  pages = {925-941},\n  year = {2010},\n url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2010-IJCIM.pdf}\n }\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Quality Classification of Corn Tortillas using Computer Vision.\n \n \n \n \n\n\n \n Mery, D.; Chanona-Perez, J.; Soto, A.; Aguilera, J.; Cipriano, A.; Velez-Rivera, N.; Arzate-Vazquez, I.; and Gutierrez-Lopez, G.\n\n\n \n\n\n\n Journal of Food Engineering, 101(4): 357-364. 2010.\n \n\n\n\n
\n\n\n\n \n \n \"QualityPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery-2010-Tortillas,\n  author    = {Domingo Mery and J.J. Chanona-Perez and A. Soto and J.M. Aguilera and A. Cipriano and N. Velez-Rivera and I. Arzate-Vazquez and G.F. Gutierrez-Lopez},\n  title     = {Quality Classification of Corn Tortillas using Computer Vision},\njournal = {Journal of Food Engineering},\n  year      = {2010},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2010-JFoodEng-Tortillas.pdf},\nvolume = 101,\nnumber = 4,\npages = {357-364}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Flaw Detection in Aluminum Die Castings Using Simultaneous Combination of Multiple Views.\n \n \n \n \n\n\n \n Pieringer, C.; and Mery, D.\n\n\n \n\n\n\n Insight-Non-Destructive Testing and Condition Monitoring, 52(10): 548-552. 2010.\n \n\n\n\n
\n\n\n\n \n \n \"FlawPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery-2010-Insight,\n  author    = {C. Pieringer and Domingo Mery},\n  title     = {Flaw Detection in Aluminum Die Castings Using Simultaneous Combination of Multiple Views},\njournal = {Insight-Non-Destructive Testing and Condition Monitoring},\n  year      = {2010},\n  volume =  52,\n  number = 10,\n  pages = {548-552},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2010-Insight.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dynamic Signature Recognition and Classification with Computer Vision Algorithms.\n \n \n \n \n\n\n \n Schmidt, T.; Riffo, V.; Mery, D.; and Peralta, B.\n\n\n \n\n\n\n In Proceedings of the IX International Conference of the Peruvian Computing Society, Trujillo, Peru, October 2010. Peruvian Computing Society\n (in Spanish)\n\n\n\n
\n\n\n\n \n \n \"DynamicPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2010-Peru,\nauthor = {Teodoro Schmidt and Vladimir Riffo and Domingo Mery and Billy Peralta},\ntitle = {Dynamic Signature Recognition and Classification with Computer Vision Algorithms},\nbooktitle = {Proceedings of the IX International Conference of the Peruvian Computing Society},\nyear = {2010},\naddress = {Trujillo, Peru},\nmonth = {October},\nnote = {(in Spanish)},\norganization = {Peruvian Computing Society},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2010-Peru-Schmidt.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Head Tracking for 3D Audio using Nintendo Wii Remote.\n \n \n \n \n\n\n \n Ubilla, M.; Cadiz, R.; and Mery, D.\n\n\n \n\n\n\n In 2010 International Computer Music Conference (ICMC2010), New York City, June 1-5, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"HeadPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2010-ICMC,\n  author    = {M. Ubilla and R. Cadiz and Domingo Mery},\n  title     = {Head Tracking for 3D Audio using Nintendo Wii Remote},\n  booktitle = {2010 International Computer Music Conference (ICMC2010), New York City, June 1-5},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2010-ICMC2010-Ubilla.pdf},\n  year      = {2010}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Evaluation of Tortillas Quality using Image Analysis.\n \n \n \n \n\n\n \n Velez-Rivera, N.; Arzate-Vazquez, I.; Mery, D.; Gutierrez-Lopez, G.; and Chanona-Perez, J.\n\n\n \n\n\n\n In Congreso Internacional del Caribe Sobre Inocuidad, Calidad y Funcionalidad de los Alimentos en la Industria y Servicios de Alimentacion, 20 al 24 de Octubre del 2009. Cancun, 2010. \n (in Spanish)\n\n\n\n
\n\n\n\n \n \n \"EvaluationPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2010-Cancun,\n  author    = {N. Velez-Rivera and I. Arzate-Vazquez and Domingo Mery and G.F. Gutierrez-Lopez and J.J.  Chanona-Perez},\n  note = {(in Spanish)},\n  title     = {Evaluation of Tortillas Quality using Image Analysis},\n  booktitle = {Congreso Internacional del Caribe Sobre Inocuidad, Calidad y Funcionalidad de los Alimentos en la Industria y Servicios de Alimentacion, 20 al 24 de Octubre del 2009. Cancun},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2010-Cancun.pdf},\n  year      = {2010}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automated Detection of Fish Bones in Salmon Fillets using X-ray Testing.\n \n \n \n \n\n\n \n Mery, D.; Lillo, I.; Lobel, H.; Riffo, V.; Soto, A.; Cipriano, A.; and Aguilera, J.\n\n\n \n\n\n\n In 4th Pacific-Rim Symposium on Image and Video Technology (PSIVT2010), Singapore, Nov. 14-17, 2010, pages 46-51, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"AutomatedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2010-PSIVT-SalmonX,\n  author    = {Domingo Mery and I. Lillo and H. Lobel and V. Riffo and A. Soto and A. Cipriano and J.M. Aguilera},\n  title     = {Automated Detection of Fish Bones in Salmon Fillets using X-ray Testing},\n  booktitle = {4th Pacific-Rim Symposium on Image and Video Technology (PSIVT2010), Singapore, Nov. 14-17, 2010},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2010-PSIVT-SalmonX.pdf},\n  pages = {46-51},\n  year      = {2010}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Object Tracking based on Covariance Descriptors and an On-Line Naive Bayes Nearest Neighbor Classifier.\n \n \n \n \n\n\n \n Cortez, P.; Mery, D.; and Sucar, E.\n\n\n \n\n\n\n In 4th Pacific-Rim Symposium on Image and Video Technology (PSIVT2010), Singapore, Nov. 14-17, 2010, pages 139-144, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"ObjectPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2010-PSIVT-ONBNN,\n  author    = {P. Cortez and Domingo Mery and E. Sucar},\n  title     = {Object Tracking based on Covariance Descriptors and an On-Line Naive Bayes Nearest Neighbor Classifier},\n  booktitle = {4th Pacific-Rim Symposium on Image and Video Technology (PSIVT2010), Singapore, Nov. 14-17, 2010},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2010-PSIVT-ONBNN.pdf},\n  pages = {139-144},\n  year      = {2010}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Face Recognition with Decision Tree-based Local Binary Patterns.\n \n \n \n \n\n\n \n Maturana, D.; Soto, A.; and Mery, D.\n\n\n \n\n\n\n In Asian Conference on Computer Vision (ACCV2010), Queenstown, Nov. 08-12, 2010, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"FacePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2010-ACCV-FaceRecognition,\n  author    = {D. Maturana and A. Soto and Domingo Mery},\n  title     = {Face Recognition with Decision Tree-based Local Binary Patterns},\n  booktitle = {Asian Conference on Computer Vision (ACCV2010), Queenstown, Nov. 08-12, 2010},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2010-ACCV-FaceRecognition.pdf},\n  year      = {2010}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Saliency Model using the Covariance Descriptor.\n \n \n \n \n\n\n \n Undurraga, C.; Mery, D.; and Sucar, E.\n\n\n \n\n\n\n In Proceedings of the 2nd Chilean Workshop on Pattern Recognition, 2010. \n (in Spanish)\n\n\n\n
\n\n\n\n \n \n \"SaliencyPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Undurraga-2010-CWPR,\n  author = {C. Undurraga and Domingo Mery and E. Sucar},\n  title = {Saliency Model using the Covariance Descriptor},\n  booktitle = {Proceedings of the 2nd Chilean Workshop on Pattern Recognition},\n  year = {2010},\n  note = {(in Spanish)},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/Local/2010-CWPR-Undurraga.pdf}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Face Recognition with Optimized Tree-Structured Local Binary Patterns.\n \n \n \n \n\n\n \n Maturana, D.; Mery, D.; and Soto, A.\n\n\n \n\n\n\n In Proceedings of the 2nd Chilean Workshop on Pattern Recognition, 2010. \n (in Spanish)\n\n\n\n
\n\n\n\n \n \n \"FacePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Maturana-2010-CWPR,\n  author = {D. Maturana and Domingo Mery and A. Soto},\n  title = {Face Recognition with Optimized Tree-Structured Local Binary Patterns},\n  booktitle = {Proceedings of the 2nd Chilean Workshop on Pattern Recognition},\n  year = {2010},\n  note = {(in Spanish)},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/Local/2010-CWPR-Maturana.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Object recognition from a large set of visual features and 3D range data.\n \n \n \n \n\n\n \n Espinace, P.; Mery, D.; and Soto, A.\n\n\n \n\n\n\n In Proceedings of the 2nd Chilean Workshop on Pattern Recognition, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"ObjectPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Espinace-2010-CWPR,\n  author = {P. Espinace and Domingo Mery and A. Soto},\n  title = {Object recognition from a large set of visual features and 3D range data},\n  booktitle = {Proceedings of the 2nd Chilean Workshop on Pattern Recognition},\n  year = {2010},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/Local/2010-CWPR-Espinace.pdf}\n}\n\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2009\n \n \n (10)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Analysis of Mechanical Properties in Tortillas using Computer Vision.\n \n \n \n\n\n \n Matiacevich, S.; Inostroza, M.; Mery, D.; and Pedreschi, F.\n\n\n \n\n\n\n Alimentos Hoy, Asociacion Colombiana de Ciencia y Tecnologia de los Alimentos, 1(30): B1. 2009.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery-2009-Matiacevich-d,\n  author    = {S.B. Matiacevich and M.P. Inostroza and Domingo Mery and F. Pedreschi},\n  title     = {Analysis of Mechanical Properties in Tortillas using Computer Vision},\njournal = {Alimentos Hoy, Asociacion Colombiana de Ciencia y Tecnologia de los Alimentos},\n  year      = {2009},\n  volume = {1},\n  number = {30},\n  pages = {B1}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Survey of Land Mine Detection Technology.\n \n \n \n \n\n\n \n Robledo, L.; Carrasco, M.; and Mery, D.\n\n\n \n\n\n\n International Journal of Remote Sensing, 30(9): 2399-2410. 2009.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery-2009-IJRS,\n  author = {L.F. Robledo and M. Carrasco and Domingo Mery},\n  title = {A Survey of Land Mine Detection Technology},\n  journal = {International Journal of Remote Sensing},\n  year = {2009},\n  volume = {30},\n  pages = {2399-2410},\n  number = {9},\n url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2009-MineDetection.pdf}\n }\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Face Recognition with Decision Tree-based Local Binary Patterns.\n \n \n \n \n\n\n \n Maturana, D.; Mery, D.; and Soto, A.\n\n\n \n\n\n\n In EVIC 2009: Escuela de Verano Latinoamericana IEEE en Inteligencia Computacional y Robotica, 2009. \n \n\n\n\n
\n\n\n\n \n \n \"FacePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2009-EVIC,\n  author    = {D. Maturana and Domingo Mery and A. Soto},\n  title     = {Face Recognition with Decision Tree-based Local Binary Patterns},\n  booktitle = {EVIC 2009: Escuela de Verano Latinoamericana IEEE en Inteligencia Computacional y Robotica},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2009-EVIC-DMaturana.pdf},\n  year      = {2009}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance evaluation of the Covariance descriptor for target detection.\n \n \n \n \n\n\n \n Cortez, P.; Undurraga, C.; Mery, D.; and Soto, A.\n\n\n \n\n\n\n In Proceedings of the XXVIII International Conference of the Chilean Computer Science Society, IEEE CS Society, 2009. \n \n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2009-SCCC-CortezUndurraga,\n  author    = {P. Cortez and C. Undurraga and Domingo Mery and A. Soto},\n  title     = {Performance evaluation of the Covariance descriptor for target detection},\n  booktitle = {Proceedings of the XXVIII International Conference of the Chilean Computer Science Society, IEEE CS Society},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2009-SCCC-CortezUndurraga.pdf},\n  year      = {2009}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Face Recognition with Local Binary Patterns, Spatial Pyramid Histograms and Naive Bayes Nearest Neighbor classification.\n \n \n \n \n\n\n \n Maturana, D.; Mery, D.; and Soto, A.\n\n\n \n\n\n\n In Proceedings of the XXVIII International Conference of the Chilean Computer Science Society, IEEE CS Society, 2009. \n \n\n\n\n
\n\n\n\n \n \n \"FacePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2009-CWPR-DMaturana,\n  author    = {D. Maturana and Domingo Mery and A. Soto},\n  title     = {Face Recognition with Local Binary Patterns, Spatial Pyramid Histograms and Naive Bayes Nearest Neighbor classification},\n  booktitle = {Proceedings of the XXVIII International Conference of the Chilean Computer Science Society, IEEE CS Society},\n  year      = {2009},\n  url     = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/Local/2009-CWPR-DMaturana.pdf},\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Prediction of oil content in tortillas chips using computer vision.\n \n \n \n\n\n \n Matiacevich, S.; Inostroza, M.; Mery, D.; and Pedreschi, F.\n\n\n \n\n\n\n In International Symposium on Food Processing, Monitoring Technology in Bioprocesses and Food Quality Management (CIGR 2009). Aug. 31 - Sep. 2, Potsdam, 2009. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2009-Matiacevich-a,\n  author    = {S.B. Matiacevich and M.P. Inostroza and Domingo Mery and F. Pedreschi},\n  title     = {Prediction of oil content in tortillas chips using computer vision},\n  booktitle = {International Symposium on Food Processing, Monitoring Technology in Bioprocesses and Food Quality Management (CIGR 2009). Aug. 31 - Sep. 2, Potsdam},\n  year      = {2009}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Prediction of Physical Properties in Tortillas Chips using Computer Vision.\n \n \n \n \n\n\n \n Matiacevich, S.; Inostroza, M.; Mery, D.; and Pedreschi, F.\n\n\n \n\n\n\n In Congreso Iberoamericano de Ingeniería de Alimentos (CIBIA VII), 6-9 Sept. Bogota, 2009. \n \n\n\n\n
\n\n\n\n \n \n \"PredictionPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2009-Matiacevich-b,\n  author    = {S.B. Matiacevich and M.P. Inostroza and Domingo Mery and F. Pedreschi},\n  title     = {Prediction of Physical Properties in Tortillas Chips using Computer Vision},\n  booktitle = {Congreso Iberoamericano de Ingeniería de Alimentos (CIBIA VII), 6-9 Sept. Bogota},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2009-CIBIA.pdf},\n  year      = {2009}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Analysis of Mechanical Properties in Tortillas using Computer Vision.\n \n \n \n\n\n \n Matiacevich, S.; Inostroza, M.; Mery, D.; and Pedreschi, F.\n\n\n \n\n\n\n In Congreso Internacional de Ciencia y Tecnología de los Alimentos. Apr. 15-17, Cordoba, 2009. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2009-Matiacevich-c,\n  author    = {S.B. Matiacevich and M.P. Inostroza and Domingo Mery and F. Pedreschi},\n  title     = {Analysis of Mechanical Properties in Tortillas using Computer Vision},\n  booktitle = {Congreso Internacional de Ciencia y Tecnología de los Alimentos. Apr. 15-17, Cordoba},\n  year      = {2009}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automated classification of blueberries with fungal infection.\n \n \n \n \n\n\n \n Leiva, G.; Mondragon, G.; Arrieta, C.; and Mery, D.\n\n\n \n\n\n\n In Proceedings of the 1st Chilean Workshop on Pattern Recognition, 2009. \n (in Spanish)\n\n\n\n
\n\n\n\n \n \n \"AutomatedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Mery-2009-CWPR-a,\n  author = {G. Leiva and G. Mondragon and C. Arrieta and Domingo Mery},\n  title = {Automated classification of blueberries with fungal infection},\n  booktitle = {Proceedings of the 1st Chilean Workshop on Pattern Recognition},\n  year = {2009},\n  note = {(in Spanish)},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/Local/2009-CWPR-GLeiva.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance Evaluation of Covariance Descriptors for Target Detection.\n \n \n \n \n\n\n \n Cortez, P.; Undurraga, C.; Mery, D.; and Soto, A.\n\n\n \n\n\n\n In Proceedings of the 1st Chilean Workshop on Pattern Recognition, 2009. \n (in Spanish)\n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Mery-2009-CWPR-b,\n  author = {P. Cortez and C. Undurraga and Domingo Mery and A. Soto},\n  title = {Performance Evaluation of Covariance Descriptors for Target Detection},\n  booktitle = {Proceedings of the 1st Chilean Workshop on Pattern Recognition},\n  year = {2009},\n  note = {(in Spanish)},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/Local/2009-CWPR-CortezUndurraga.pdf}\n}\n\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2008\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Detection and classification of weld defects in radiographic images: Part III - phenomenological analysis.\n \n \n \n \n\n\n \n Padua, G.; Silva, R.; Mery, D.; Siqueira, M.; Rebello, J.; and Caloba, L.\n\n\n \n\n\n\n Materials Evaluation, 66(2): 145-159. 2008.\n \n\n\n\n
\n\n\n\n \n \n \"DetectionPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery-2008-MatEval-3,\n  author = {G.X. Padua and R. Silva and D. Mery and M.H.S. Siqueira and J.M.A. Rebello and L.P. Caloba},\n  title = {Detection and classification of weld defects in radiographic images: Part {III} - phenomenological analysis},\n  journal = {Materials Evaluation},\n  year = {2008},\n  volume = {66},\n  pages = {145-159},\n  number = {2},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2008-MatEval-WeldDiscontinuities-3.pdf}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Evaluation of Acoustic Emission Signal Parameters for Identifying the Propagation of Defects in Pressurized Tubes.\n \n \n \n \n\n\n \n Silva, R.; Mery, D.; and Soares, S.\n\n\n \n\n\n\n Materials Evaluation, 66(5): 493-500. 2008.\n \n\n\n\n
\n\n\n\n \n \n \"EvaluationPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery-2008-MatEval-AcousticEmision,\n  author = {R. Silva and D. Mery and S.D. Soares},\n  title = {Evaluation of Acoustic Emission Signal Parameters for Identifying the Propagation of Defects in Pressurized Tubes},\n  journal = {Materials Evaluation},\n  year = {2008},\n  volume = {66},\n  pages = {493-500},\n  number = {5},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2008-MatEval-AcousticEmision.pdf}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust automated multiple view inspection.\n \n \n \n \n\n\n \n Pizarro, L.; Mery, D.; Delpiano, R.; and Carrasco, M.\n\n\n \n\n\n\n Pattern Analysis and Applications, 11(1): 21-32. 2008.\n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n \n \"RobustLink\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Mery-2008-PatternAnalApplic,\n  author    = {Luis Pizarro and\n               Domingo Mery and\n               Rafael Delpiano and\n               Miguel Carrasco},\n  title     = {Robust automated multiple view inspection},\n  journal   = {Pattern Analysis and Applications},\n  volume    = {11},\n  number    = {1},\n  year      = {2008},\n  pages     = {21-32},\n  url      = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2008-PatternAnalApplic.pdf},\n  ee        = {http://dx.doi.org/10.1007/s10044-007-0075-9},\n  bibsource = {DBLP, http://dblp.uni-trier.de}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Features: The more the better.\n \n \n \n \n\n\n \n Mery, D.; and Soto, A.\n\n\n \n\n\n\n In The 7th WSEAS International Conference on Signal Processing, Computational Geometry and Artificial Vision (ISCGAV-2008), Rodos Island, Greece, 2008. \n \n\n\n\n
\n\n\n\n \n \n \"Features:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2008-ISCGAV-a,\n  author    = {Domingo Mery and A. Soto},\n  title     = {Features: The more the better},\n  booktitle = {The 7th WSEAS International Conference on Signal Processing, Computational Geometry and Artificial Vision (ISCGAV-2008), Rodos Island, Greece},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2008-ISCGAV-TheMoreTheBetter.pdf},\n  year      = {2008}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Image Acquisition and Automated Inspection of Wine Bottlenecks by Tracking on Multiple Views.\n \n \n \n \n\n\n \n Carrasco, M.; Pizarro, L.; and Mery, D.\n\n\n \n\n\n\n In The 7th WSEAS International Conference on Signal Processing, Computational Geometry and Artificial Vision (ISCGAV-2008), Rodos Island, Greece, 2008. \n \n\n\n\n
\n\n\n\n \n \n \"ImagePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2008-ISCGAV-b,\n  author    = {M. Carrasco and L. Pizarro and Domingo Mery},\n  title     = { Image Acquisition and Automated Inspection of Wine Bottlenecks by Tracking on Multiple Views},\n  booktitle = {The 7th WSEAS International Conference on Signal Processing, Computational Geometry and Artificial Vision (ISCGAV-2008), Rodos Island, Greece},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2008-ISCGAV-CarrascoPizarro.pdf},\n  year      = {2008}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Computer Vision Classification of Potato Chips.\n \n \n \n \n\n\n \n Pedreschi, F.; Mery, D.; Bunger, A.; and Yanez, V.\n\n\n \n\n\n\n In CIGR - International Conference of Agricultural Engineering, Brazil, August 31 - September 4, 2008. \n \n\n\n\n
\n\n\n\n \n \n \"ComputerPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2008-ICAE,\n  author    = {F. Pedreschi and Domingo Mery and A. Bunger and V. Yanez},\n  title     = {Computer Vision Classification of Potato Chips},\n  booktitle = {CIGR - International Conference of Agricultural Engineering, Brazil, August 31 - September 4},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2008-ICAE-Pedreschi.pdf},\n  year      = {2008}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Quality Evaluation and Control of French Fries.\n \n \n \n \n\n\n \n Pedreschi, F.; Mery, D.; and Manrique, T.\n\n\n \n\n\n\n In Computer Vision Technology for Food Quality Evaluation, 16, pages 701-738. Ed. Da-Wen Sun, Elsevier, 2008.\n \n\n\n\n
\n\n\n\n \n \n \"QualityPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Mery2008-Chapter-Food-a,\n  author    ={Pedreschi, F. and Mery, D. and Manrique, T.},\n  title     ={Quality Evaluation and Control of French Fries},\n  booktitle ={Computer Vision Technology for Food Quality Evaluation},\n  publisher ={Ed. Da-Wen Sun, Elsevier},\n  chapter   ={16},\n  pages     ={701-738},\n  year      ={2008},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Chapters/2008-DWSun-FrenchFrieds.pdf}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Grading of Potatoes.\n \n \n \n \n\n\n \n Pedreschi, F.; Mery, D.; and Manrique, T.\n\n\n \n\n\n\n In Computer Vision Technology for Food Quality Evaluation, 16, pages 701-738. Ed. Da-Wen Sun, Elsevier, 2008.\n \n\n\n\n
\n\n\n\n \n \n \"GradingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Mery2008-Chapter-Food-b,\n  author    ={Pedreschi, F. and Mery, D. and Manrique, T.},\n  title     ={Grading of Potatoes},\n  booktitle ={Computer Vision Technology for Food Quality Evaluation},\n  publisher ={Ed. Da-Wen Sun, Elsevier},\n  chapter   ={16},\n  pages     ={701-738},\n  year      ={2008},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Chapters/2008-DWSun-Potatoes.pdf}\n}\n\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2007\n \n \n (15)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n State-of-the-Art of Weld Seam Inspection by Radiographic Testing: Part II - Pattern Recognition.\n \n \n \n \n\n\n \n Silva, R.; and Mery, D.\n\n\n \n\n\n\n Materials Evaluation, 65(9): 833-838. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"State-of-the-ArtPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery-2008-MatEval-Welding-2,\n  author = {R. Silva and D. Mery},\n  title = {State-of-the-Art of Weld Seam Inspection by Radiographic Testing: Part {II} - Pattern Recognition},\n  journal = {Materials Evaluation},\n  year = {2007},\n  volume = {65},\n  pages = {833-838},\n  number = {9},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2007-MatEval-Welding-2.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n State-of-the-Art of Weld Seam Inspection by Radiographic Testing: Part I - Image Processing.\n \n \n \n \n\n\n \n Silva, R.; and Mery, D.\n\n\n \n\n\n\n Materials Evaluation, 65(6): 643-647. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"State-of-the-ArtPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery-2008-MatEval-Welding-1,\n  author = {R. Silva and D. Mery},\n  title = {State-of-the-Art of Weld Seam Inspection by Radiographic Testing: Part {I} - Image Processing},\n  journal = {Materials Evaluation},\n  year = {2007},\n  volume = {65},\n  pages = {643-647},\n  number = {6},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2007-MatEval-Welding-1.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Color kinetics and acrylamide formation in NaCl soaked potato chips.\n \n \n \n \n\n\n \n Pedreschi, F.; Bustos, O.; Mery, D.; Moyano, P.; Kaack, K.; and Granby, K.\n\n\n \n\n\n\n Journal of Food Engineering, 79(3): 989 - 997. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"ColorLink\n  \n \n \n \"ColorPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Mery-2007-JFoodEng-ColorKinetics,\ntitle = "Color kinetics and acrylamide formation in NaCl soaked potato chips",\njournal = "Journal of Food Engineering",\nvolume = "79",\nnumber = "3",\npages = "989 - 997",\nyear = "2007",\nissn = "0260-8774",\ndoi = "10.1016/j.jfoodeng.2006.03.020",\nee = "http://www.sciencedirect.com/science/article/B6T8J-4JT836T-5/2/fad071c7622d88c711dbce649a8c166a",\nurl = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2007-JFoodEng-ColorKinetics.pdf},\nauthor = "F. Pedreschi and O. Bustos and Domingo Mery and P. Moyano and K. Kaack and K. Granby",\nkeywords = "Potato slices, Frying, Color, Kinetics, Soaking, NaCl, Acrylamide"\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Color development and acrylamide content of pre-dried potato chips.\n \n \n \n \n\n\n \n Pedreschi, F.; Leon, J.; Mery, D.; Moyano, P.; Pedreschi, R.; Kaack, K.; and Granby, K.\n\n\n \n\n\n\n Journal of Food Engineering, 79(3): 786 - 793. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"ColorLink\n  \n \n \n \"ColorPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Mery-2007-JFoodEng-ColorDevelopment,\ntitle = "Color development and acrylamide content of pre-dried potato chips",\njournal = "Journal of Food Engineering",\nvolume = "79",\nnumber = "3",\npages = "786 - 793",\nyear = "2007",\nissn = "0260-8774",\ndoi = "10.1016/j.jfoodeng.2006.03.001",\nee = "http://www.sciencedirect.com/science/article/B6T8J-4JRVB1M-4/2/bda82d8dafffa2e60f3f9c03daee6e5b",\nurl = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2007-JFoodEng-ColorDevelopment.pdf},\nauthor = "Franco Pedreschi and Jorge Leon and Domingo Mery and Pedro Moyano and Romina Pedreschi and Karl Kaack and Kit Granby",\nkeywords = "Potato chips, Frying, Color, Pre-drying, Blanching, Acrylamide"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Detection and classification of weld defects in radiographic images: Part II - unsupervised learning.\n \n \n \n \n\n\n \n Padua, G.; Silva, R.; Mery, D.; Siqueira, M.; Rebello, J.; and Caloba, L.\n\n\n \n\n\n\n Materials Evaluation, 65(12): 1230-1237. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"DetectionPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery-2007-MatEval-2,\n  author = {G.X. Padua and R. Silva and D. Mery and M.H.S. Siqueira and J.M.A. Rebello and L.P. Caloba},\n  title = {Detection and classification of weld defects in radiographic images: Part {II} - unsupervised learning},\n  journal = {Materials Evaluation},\n  year = {2007},\n  volume = {65},\n  pages = {1230-1237},\n  number = {12},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2007-MatEval-WeldDiscontinuities-2.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Detection and classification of weld defects in radiographic images: Part I - supervised learning.\n \n \n \n \n\n\n \n Padua, G.; Silva, R.; Mery, D.; Siqueira, M.; Rebello, J.; and Caloba, L.\n\n\n \n\n\n\n Materials Evaluation, 65(11): 1139-1145. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"DetectionPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery-2007-MatEval-1,\n  author = {G.X. Padua and R. Silva and D. Mery and M.H.S. Siqueira and J.M.A. Rebello and L.P. Caloba},\n  title = {Detection and classification of weld defects in radiographic images: Part {I} - supervised learning},\n  journal = {Materials Evaluation},\n  year = {2007},\n  volume = {65},\n  pages = {1139-1145},\n  number = {11},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2007-MatEval-WeldDiscontinuities-1.pdf}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Accuracy estimation of detection of casting defects in X-ray images using some statistical techniques.\n \n \n \n \n\n\n \n Silva, R.; and Mery, D.\n\n\n \n\n\n\n Insight-Non-Destructive Testing and Condition Monitoring, 49(10): 603-609. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"AccuracyPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery-2007-Insight-a,\n  author = {R. Silva and D. Mery},\n  title = {Accuracy estimation of detection of casting defects in {X}-ray images using some statistical techniques},\n  journal = {Insight-Non-Destructive Testing and Condition Monitoring},\n  year = {2007},\n  volume = {49},\n  pages = {603-609},\n  number = {10},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2007-Insight-Bootstrap.pdf}\n }\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Progress in Pattern Recognition, Image Analysis and Applications, 12th Iberoamerican Congress on Pattern Recognition, CIARP 2007, Valparaiso, Chile, November 13-16, 2007, Proceedings.\n \n \n \n \n\n\n \n Rueda, L.; Mery, D.; and Kittler, J.,\n editors.\n \n\n\n \n\n\n\n Volume 4756 of Lecture Notes in Computer Science. Springer. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"ProgressLink\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@proceedings{Mery-2007-CIARP,\n  editor    = {Rueda, L. and\n               Mery, D. and\n               Kittler, J.},\n  title     = {Progress in Pattern Recognition, Image Analysis and Applications,\n               12th Iberoamerican Congress on Pattern Recognition, CIARP\n               2007, Valparaiso, Chile, November 13-16, 2007, Proceedings},\n  booktitle = {CIARP},\n  publisher = {Springer},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {4756},\n  year      = {2007},\n  isbn      = {978-3-540-76724-4},\n  ee        = {http://books.google.com/books?id=JMQk1HJmhv0C&printsec=frontcover&source=gbs_v2_summary_r&cad=0#v=onepage&q=&f=false},\n  bibsource = {DBLP, http://dblp.uni-trier.de}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Advances in Image and Video Technology, Second Pacific Rim Symposium, PSIVT 2007, Santiago, Chile, December 17-19, 2007, Proceedings.\n \n \n \n \n\n\n \n Mery, D.; and Rueda, L.,\n editors.\n \n\n\n \n\n\n\n Volume 4872 of Lecture Notes in Computer Science. Springer. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"AdvancesLink\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@proceedings{Mery-2007-PSIVT,\n  editor    = {Mery, D. and\n               Rueda, L.},\n  title     = {Advances in Image and Video Technology, Second Pacific Rim\n               Symposium, PSIVT 2007, Santiago, Chile, December 17-19,\n               2007, Proceedings},\n  booktitle = {PSIVT},\n  publisher = {Springer},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {4872},\n  year      = {2007},\n  ee        = {http://books.google.com/books?id=vkNfw8SsU3oC&printsec=frontcover&source=gbs_v2_summary_r&cad=0#v=onepage&q=&f=false},\n  isbn      = {978-3-540-77128-9},\n  bibsource = {DBLP, http://dblp.uni-trier.de}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bimodal Biometric Person Identification System Under Perturbations.\n \n \n \n \n\n\n \n Carrasco, M.; Pizarro, L.; and Mery, D.\n\n\n \n\n\n\n In Advances in Image and Video Technology, Second Pacific Rim Symposium, PSIVT 2007, Santiago, Chile, December 17-19, 2007, Proceedings, pages 114-127, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"BimodalLink\n  \n \n \n \"BimodalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2007-PSIVTb,\n  author    = {Miguel Carrasco and\n               Luis Pizarro and\n               Domingo Mery},\n  title     = {Bimodal Biometric Person Identification System Under Perturbations},\n  booktitle = {Advances in Image and Video Technology, Second Pacific Rim\n               Symposium, PSIVT 2007, Santiago, Chile, December 17-19,\n               2007, Proceedings},\n  year      = {2007},\n  pages     = {114-127},\n  ee        = {http://dx.doi.org/10.1007/978-3-540-77129-6_14},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2007-PSIVT-CarrascoPizarroMery.pdf},\n  bibsource = {DBLP, http://dblp.uni-trier.de}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic Multiple Visual Inspection on Non-calibrated Image Sequence with Intermediate Classifier Block.\n \n \n \n \n\n\n \n Carrasco, M.; and Mery, D.\n\n\n \n\n\n\n In Advances in Image and Video Technology, Second Pacific Rim Symposium, PSIVT 2007, Santiago, Chile, December 17-19, 2007, Proceedings, pages 371-384, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n \n \"AutomaticLink\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2007-PSIVTa,\n  author    = {Miguel Carrasco and\n               Domingo Mery},\n  title     = {Automatic Multiple Visual Inspection on Non-calibrated Image\n               Sequence with Intermediate Classifier Block},\n  booktitle = {Advances in Image and Video Technology, Second Pacific Rim\n               Symposium, PSIVT 2007, Santiago, Chile, December 17-19,\n               2007, Proceedings},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2007-PSIVT-CarrascoMery.pdf},\n  year      = {2007},\n  pages     = {371-384},\n  ee        = {http://dx.doi.org/10.1007/978-3-540-77129-6_34},\n  bibsource = {DBLP, http://dblp.uni-trier.de}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust Tree-Ring Detection.\n \n \n \n \n\n\n \n Cerda, M.; Hitschfeld-Kahler, N.; and Mery, D.\n\n\n \n\n\n\n In Advances in Image and Video Technology, Second Pacific Rim Symposium, PSIVT 2007, Santiago, Chile, December 17-19, 2007, Proceedings, pages 575-585, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n \n \"RobustLink\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2007-PSIVTc,\n  author    = {Mauricio Cerda and\n               Nancy Hitschfeld-Kahler and\n               Domingo Mery},\n  title     = {Robust Tree-Ring Detection},\n  booktitle = {Advances in Image and Video Technology, Second Pacific Rim\n               Symposium, PSIVT 2007, Santiago, Chile, December 17-19,\n               2007, Proceedings},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2007-PSIVT-CerdaHitschfeldMery.pdf},\n  year      = {2007},\n  pages     = {575-585},\n  ee        = {http://dx.doi.org/10.1007/978-3-540-77129-6_50},\n  bibsource = {DBLP, http://dblp.uni-trier.de}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Accuracy Estimation of Detection of Casting Defects in X-Ray Images Using Some Statistical Techniques.\n \n \n \n \n\n\n \n da Silva, R. R.; and Mery, D.\n\n\n \n\n\n\n In Advances in Image and Video Technology, Second Pacific Rim Symposium, PSIVT 2007, Santiago, Chile, December 17-19, 2007, Proceedings, pages 639-650, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"AccuracyPaper\n  \n \n \n \"AccuracyLink\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2007-PSIVTd,\n  author    = {Romeu Ricardo da Silva and\n               Domingo Mery},\n  title     = {Accuracy Estimation of Detection of Casting Defects in X-Ray\n               Images Using Some Statistical Techniques},\n  booktitle = {Advances in Image and Video Technology, Second Pacific Rim\n               Symposium, PSIVT 2007, Santiago, Chile, December 17-19,\n               2007, Proceedings},\n  year      = {2007},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2007-PSIVT-SilvaMery.pdf},\n  pages     = {639-650},\n  ee        = {http://dx.doi.org/10.1007/978-3-540-77129-6_55},\n  bibsource = {DBLP, http://dblp.uni-trier.de}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Computer Vision for Quality Control in Latin American Food Industry, A Case Study.\n \n \n \n \n\n\n \n Aguilera, J.; Cipriano, A.; Erana, M.; Lillo, I.; Mery, D.; and Soto, A.\n\n\n \n\n\n\n In International Conference on Computer Vision (ICCV2007): Workshop on Computer Vision Applications for Developing Countries, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"ComputerPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Mery-2007-ICCV-Food,\n  author = {J.M. Aguilera and A. Cipriano and M. Erana and I. Lillo and\n\tDomingo Mery and A. Soto},\n  title = {Computer Vision for Quality Control in Latin American Food Industry,\n\tA Case Study},\n  booktitle = {International Conference on Computer Vision (ICCV2007): Workshop on Computer Vision Applications for Developing Countries},\n  year = {2007},\n  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2007-ICCV-Food.pdf}\n  }\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n High contrast pixels: a new feature for defect detection in X-ray testing.\n \n \n \n \n\n\n \n Mery, D.\n\n\n \n\n\n\n Insight-Non-Destructive Testing and Condition Monitoring, 46(12): 751-753. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"HighPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Mery-2006-Insight-HCP,\n  author = {D. Mery},\n  title = {High contrast pixels: a new feature for defect detection in X-ray testing},\n  journal = {Insight-Non-Destructive Testing and Condition Monitoring},\n  year = {2007},\n  volume = {46},\n  pages = {751-753},\n  number = {12},\n  url = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2006-Insight-HighContrastPixels.pdf}\n }\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2006\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Development of a computer vision system to measure the color of potato chips.\n \n \n \n \n\n\n \n Pedreschi, F.; Leon, J.; Mery, D.; and Moyano, P.\n\n\n \n\n\n\n Food Research International, 39(10): 1092 - 1098. 2006.\n Physical Properties VI\n\n\n\n
\n\n\n\n \n \n \"DevelopmentLink\n  \n \n \n \"DevelopmentPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Mery-2006-FoodResInt-DevelopmentCVS,\ntitle = "Development of a computer vision system to measure the color of potato chips",\njournal = "Food Research International",\nvolume = "39",\nnumber = "10",\npages = "1092 - 1098",\nyear = "2006",\nnote = "Physical Properties VI",\nissn = "0963-9969",\ndoi = "10.1016/j.foodres.2006.03.009",\nee = "http://www.sciencedirect.com/science/article/B6T6V-4JRVR47-2/2/5fb514144910614a06446a5631d9e2a8",\nurl = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2006-FoodResInt-DevelopmentComputerVisionSystem.pdf},\nauthor = "Franco Pedreschi and Jorge Leon and Domingo Mery and Pedro Moyano",\nkeywords = "Potato chips, Frying, Color, Computer vision, L*a*b*, Image processing"\n}\n\n\n
Color Measurements in L*a*b* units from RGB Digital Images. Leon, K.; Mery, D.; Pedreschi, F.; and Leon, J. Food Research International, 39(10): 1084-1091. 2006. Physical Properties VI.

@ARTICLE{Mery-2006-FoodResInt-Lab,
  author   = {K. Leon and Domingo Mery and F. Pedreschi and J. Leon},
  title    = {Color Measurements in {L*a*b*} units from {RGB} Digital Images},
  journal  = {Food Research International},
  volume   = {39},
  number   = {10},
  pages    = {1084-1091},
  year     = {2006},
  note     = {Physical Properties VI},
  issn     = {0963-9969},
  doi      = {10.1016/j.foodres.2006.03.006},
  ee       = {http://www.sciencedirect.com/science/article/B6T6V-4JKR6C6-1/2/84e530b6462cfbea4a090a3a23687edc},
  url      = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2006-FoodResInt-ColorMeasurementLab.pdf},
  keywords = {Color, RGB, L*a*b*, Computer vision, Neural networks}
}
Automated Radioscopic Testing of Aluminum Die Castings. Mery, D. Materials Evaluation, 64(2): 135-143. 2006.

@ARTICLE{Mery-2006-MatEval,
  author  = {Domingo Mery},
  title   = {Automated Radioscopic Testing of Aluminum Die Castings},
  journal = {Materials Evaluation},
  year    = {2006},
  volume  = {64},
  pages   = {135-143},
  number  = {2},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2006-MatEval-a.pdf}
}
Automated visual inspection using trifocal analysis in an uncalibrated sequence of images. Carrasco, M.; and Mery, D. Materials Evaluation, 64(9): 900-906. 2006.

@ARTICLE{Mery-2006-MatEval-b,
  author  = {M. Carrasco and Domingo Mery},
  title   = {Automated visual inspection using trifocal analysis in an uncalibrated sequence of images},
  journal = {Materials Evaluation},
  year    = {2006},
  volume  = {64},
  number  = {9},
  pages   = {900-906},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2006-MatEval-b.pdf}
}
Advances on Automated Multiple View Inspection. Mery, D.; and Carrasco, M. In Advances in Image and Video Technology, First Pacific Rim Symposium, PSIVT 2006, Hsinchu, Taiwan, December 10-13, 2006, Proceedings, pages 513-522, 2006.

@inproceedings{Mery-2006-PSIVTa,
  author    = {Domingo Mery and Miguel Carrasco},
  title     = {Advances on Automated Multiple View Inspection},
  booktitle = {Advances in Image and Video Technology, First Pacific Rim Symposium, PSIVT 2006, Hsinchu, Taiwan, December 10-13, 2006, Proceedings},
  year      = {2006},
  pages     = {513-522},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2006-PSIVT-MeryCarrasco.pdf},
  ee        = {http://dx.doi.org/10.1007/11949534_51},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
Automatic Selection and Detection of Visual Landmarks Using Multiple Segmentations. Langdon, D.; Soto, A.; and Mery, D. In Advances in Image and Video Technology, First Pacific Rim Symposium, PSIVT 2006, Hsinchu, Taiwan, December 10-13, 2006, Proceedings, pages 601-610, 2006.

@inproceedings{Mery-2006-PSIVTb,
  author    = {Daniel Langdon and Alvaro Soto and Domingo Mery},
  title     = {Automatic Selection and Detection of Visual Landmarks Using Multiple Segmentations},
  booktitle = {Advances in Image and Video Technology, First Pacific Rim Symposium, PSIVT 2006, Hsinchu, Taiwan, December 10-13, 2006, Proceedings},
  year      = {2006},
  pages     = {601-610},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2006-PSIVT-LangdonSotoMery.pdf},
  ee        = {http://dx.doi.org/10.1007/11949534_60},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
2005 (6)
Image Processing for Fault Detection in Aluminum Castings. Mery, D.; Filbert, D.; and Jaeger, T. In Analytical Characterization of Aluminum and Its Alloys (C.S. MacKenzie and G.E. Totten, eds.), chapter 16, pages 701-738. CRC Press, Taylor and Francis, Florida, 2005.

@incollection{Mery2005-Chapter,
  author    = {Mery, D. and Filbert, D. and Jaeger, Th.},
  title     = {{Image Processing for Fault Detection in Aluminum Castings}},
  booktitle = {{Analytical Characterization of Aluminum and Its Alloys}},
  editor    = {C.S. MacKenzie and G.E. Totten},
  publisher = {CRC Press, Taylor and Francis},
  address   = {Florida},
  year      = {2005},
  chapter   = {16},
  pages     = {701-738}
}
Simulation of Defects in Aluminium Castings Using CAD Models of Flaws and Real X-ray Images. Mery, D.; Hahn, D.; and Hitschfeld, N. Insight-Non-Destructive Testing and Condition Monitoring, 47(10): 618-624. 2005. Ron Halmshaw Award 2005.

@ARTICLE{Mery-2005-Insight-b,
  author  = {D. Mery and D. Hahn and N. Hitschfeld},
  title   = {Simulation of Defects in Aluminium Castings Using CAD Models of Flaws and Real X-ray Images},
  journal = {Insight-Non-Destructive Testing and Condition Monitoring},
  year    = {2005},
  volume  = {47},
  pages   = {618-624},
  note    = {Ron Halmshaw Award 2005},
  number  = {10},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2005-Insight-b.pdf}
}
Segmentation of circular casting defects using a robust algorithm. Ghoreyshi, A.; Vidal, R.; and Mery, D. Insight-Non-Destructive Testing and Condition Monitoring, 47(10): 615-617. 2005.

@ARTICLE{Mery-2005-Insight-a,
  author  = {A. Ghoreyshi and R. Vidal and D. Mery},
  title   = {Segmentation of circular casting defects using a robust algorithm},
  journal = {Insight-Non-Destructive Testing and Condition Monitoring},
  year    = {2005},
  volume  = {47},
  pages   = {615-617},
  number  = {10},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2005-Insight-a.pdf}
}
Segmentation of colour food images using a robust algorithm. Mery, D.; and Pedreschi, F. Journal of Food Engineering, 66(3): 353-360. 2005.

@article{Mery-2005-JFoodEng-Segmentation,
  author   = {Domingo Mery and F. Pedreschi},
  title    = {Segmentation of colour food images using a robust algorithm},
  journal  = {Journal of Food Engineering},
  volume   = {66},
  number   = {3},
  pages    = {353-360},
  year     = {2005},
  issn     = {0260-8774},
  doi      = {10.1016/j.jfoodeng.2004.04.001},
  ee       = {http://www.sciencedirect.com/science/article/B6T8J-4CGM96K-3/2/3110ed744530fe1638443deb24dcbc05},
  url      = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2005-JFoodEng-Segmentation.pdf},
  keywords = {Image analysis, Image processing, Segmentation, Colour images}
}
Automated inspection of aluminium castings using fusion strategies. Mery, D.; Chacon, M.; Munoz, L.; and Gonzalez, L. Materials Evaluation, 63(2): 148-153. 2005.

@ARTICLE{Mery-2005-MatEval-a,
  author  = {Domingo Mery and M. Chacon and L. Munoz and L. Gonzalez},
  title   = {Automated inspection of aluminium castings using fusion strategies},
  journal = {Materials Evaluation},
  year    = {2005},
  volume  = {63},
  pages   = {148-153},
  number  = {2},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2005-MatEval-a.pdf}
}
Automated Multiple View Inspection Based on Uncalibrated Image Sequences. Mery, D.; and Carrasco, M. In Image Analysis, 14th Scandinavian Conference, SCIA 2005, Joensuu, Finland, June 19-22, 2005, Proceedings, pages 1238-1247, 2005.

@inproceedings{Mery-2005-SCIA,
  author    = {Domingo Mery and Miguel Carrasco},
  title     = {Automated Multiple View Inspection Based on Uncalibrated Image Sequences},
  booktitle = {Image Analysis, 14th Scandinavian Conference, SCIA 2005, Joensuu, Finland, June 19-22, 2005, Proceedings},
  year      = {2005},
  pages     = {1238-1247},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2005-SCIA-MeryCarrasco.pdf},
  ee        = {http://dx.doi.org/10.1007/11499145_125},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
2004 (8)
Vision por Computador. Mery, D. Departamento de Ciencia de la Computacion, Pontificia Universidad Catolica de Chile, Santiago de Chile, 2004.

@BOOK{Mery-2004-ApuntesComputerVision,
  author    = {Domingo Mery},
  title     = {Vision por Computador},
  publisher = {Departamento de Ciencia de la Computacion, Pontificia Universidad Catolica de Chile},
  year      = {2004},
  address   = {Santiago de Chile},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Books/2004-ApuntesVision.pdf}
}
Neural network method for failure detection with skewed class distribution. Carvajal, K.; Chacon, M.; Mery, D.; and Acuna, G. Insight-Non-Destructive Testing and Condition Monitoring, 46(7): 399-402. 2004.

@ARTICLE{Mery-2004-Insight,
  author  = {K. Carvajal and M. Chacon and Domingo Mery and G. Acuna},
  title   = {Neural network method for failure detection with skewed class distribution},
  journal = {Insight-Non-Destructive Testing and Condition Monitoring},
  year    = {2004},
  volume  = {46},
  pages   = {399-402},
  number  = {7},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2004-Insight.pdf}
}
Segmentation of welding defects using a robust algorithm. Carrasco, M.; and Mery, D. Materials Evaluation, 62(11): 1142-1147. 2004.

@ARTICLE{Mery-2004-MatEval,
  author  = {M. Carrasco and Domingo Mery},
  title   = {Segmentation of welding defects using a robust algorithm},
  journal = {Materials Evaluation},
  year    = {2004},
  volume  = {62},
  pages   = {1142-1147},
  number  = {11},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2004-MatEval.pdf}
}
Classification of Potato Chips using Pattern Recognition. Pedreschi, F.; Mery, D.; Mendoza, F.; and Aguilera, J. Journal of Food Science, 69(6): E264-E270. 2004.

@ARTICLE{Mery-2004-JFoodScience,
  author  = {F. Pedreschi and Domingo Mery and F. Mendoza and J.M. Aguilera},
  title   = {Classification of Potato Chips using Pattern Recognition},
  journal = {Journal of Food Science},
  year    = {2004},
  volume  = {69},
  pages   = {E264-E270},
  number  = {6},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2004-JFoodScience.pdf},
  ee      = {http://www3.interscience.wiley.com/journal/118756462/abstract?CRETRY=1&SRETRY=0}
}
Automated defect detection in aluminium castings and welds using neuro-fuzzy classifiers. Hernandez, S.; Saez, D.; Mery, D.; da Silva, R.; and Sequeira, M. In Proceedings of the 16th World Conference on Non-Destructive Testing (WCNDT-2004), Montreal, Aug. 30 - Sep 3, 2004.

@inproceedings{Mery-2004-WCNDT,
  author    = {S. Hernandez and D. Saez and Domingo Mery and R. da Silva and M. Sequeira},
  title     = {Automated defect detection in aluminium castings and welds using neuro-fuzzy classifiers},
  booktitle = {Proceedings of the 16th World Conference on Non-Destructive Testing (WCNDT-2004)},
  year      = {2004},
  address   = {Montreal},
  month     = {Aug. 30 - Sep 3},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2004-WCNDT-Hernandez.pdf}
}
Tracking of Points in a Calibrated and Noisy Image Sequence. Mery, D.; Ochoa, F.; and Vidal, R. In Image Analysis and Recognition: International Conference, ICIAR 2004, Porto, Portugal, September 29-October 1, 2004, Proceedings, Part II, pages 647-654, 2004.

@inproceedings{Mery-2004-ICIARa,
  author    = {Domingo Mery and Felipe Ochoa and Ren{\'e} Vidal},
  title     = {Tracking of Points in a Calibrated and Noisy Image Sequence},
  booktitle = {Image Analysis and Recognition: International Conference, ICIAR 2004, Porto, Portugal, September 29-October 1, 2004, Proceedings, Part II},
  year      = {2004},
  pages     = {647-654},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2004-LNCS-a.pdf},
  ee        = {http://springerlink.metapress.com/openurl.asp?genre=article{\&}issn=0302-9743{\&}volume=3211{\&}spage=647},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
Automated Visual Inspection of Glass Bottles Using Adapted Median Filtering. Mery, D.; and Medina, O. In Image Analysis and Recognition: International Conference, ICIAR 2004, Porto, Portugal, September 29-October 1, 2004, Proceedings, Part II, pages 818-825, 2004.

@inproceedings{Mery-2004-ICIARc,
  author    = {Domingo Mery and Olaya Medina},
  title     = {Automated Visual Inspection of Glass Bottles Using Adapted Median Filtering},
  booktitle = {Image Analysis and Recognition: International Conference, ICIAR 2004, Porto, Portugal, September 29-October 1, 2004, Proceedings, Part II},
  year      = {2004},
  pages     = {818-825},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2004-LNCS-c.pdf},
  ee        = {http://springerlink.metapress.com/openurl.asp?genre=article{\&}issn=0302-9743{\&}volume=3212{\&}spage=818},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
Neuro-Fuzzy Method for Automated Defect Detection in Aluminium Castings. Hernandez, S.; Saez, D.; and Mery, D. In Image Analysis and Recognition: International Conference, ICIAR 2004, Porto, Portugal, September 29-October 1, 2004, Proceedings, Part II, pages 826-833, 2004.

@inproceedings{Mery-2004-ICIARb,
  author    = {Sergio Hernandez and Doris Saez and Domingo Mery},
  title     = {Neuro-Fuzzy Method for Automated Defect Detection in Aluminium Castings},
  booktitle = {Image Analysis and Recognition: International Conference, ICIAR 2004, Porto, Portugal, September 29-October 1, 2004, Proceedings, Part II},
  year      = {2004},
  pages     = {826-833},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2004-LNCS-b.pdf},
  ee        = {http://springerlink.metapress.com/openurl.asp?genre=article{\&}issn=0302-9743{\&}volume=3212{\&}spage=826},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
2003 (7)
Pattern Recognition in the Automatic Inspection of Aluminium Castings. Mery, D.; Da-Silva, R.; Caloba, L.; and Rebello, J. Insight-Non-Destructive Testing and Condition Monitoring, 45(7): 431-439. 2003.

@ARTICLE{Mery-Insight-2003-a,
  author  = {Domingo Mery and R. Da-Silva and L. Caloba and J. Rebello},
  title   = {Pattern Recognition in the Automatic Inspection of Aluminium Castings},
  journal = {Insight-Non-Destructive Testing and Condition Monitoring},
  year    = {2003},
  volume  = {45},
  pages   = {431-439},
  number  = {7},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2003-Insight-a.pdf}
}
Exploiting Multiple View Geometry in X-ray Testing: Part I, Theory. Mery, D. Materials Evaluation, 61(11): 1226-1233. November 2003.

@ARTICLE{Mery-2003-MatEval-a,
  author  = {Domingo Mery},
  title   = {Exploiting Multiple View Geometry in {X-ray} Testing: Part {I}, Theory},
  journal = {Materials Evaluation},
  year    = {2003},
  volume  = {61},
  pages   = {1226-1233},
  number  = {11},
  month   = {November},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2003-MatEval-a.pdf}
}
Exploiting Multiple View Geometry in X-ray Testing: Part II, Applications. Mery, D. Materials Evaluation, 61(12): 1311-1314. December 2003.

@ARTICLE{Mery-2003-MatEval-b,
  author  = {Domingo Mery},
  title   = {Exploiting Multiple View Geometry in {X-ray} Testing: Part {II}, Applications},
  journal = {Materials Evaluation},
  year    = {2003},
  volume  = {61},
  pages   = {1311-1314},
  number  = {12},
  month   = {December},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2003-MatEval-b.pdf}
}
Explicit Geometric Model of a Radioscopic Imaging System. Mery, D. NDT & E International, 36(8): 587-599. 2003.

@ARTICLE{Mery-2003-NDTE,
  author  = {Domingo Mery},
  title   = {Explicit Geometric Model of a Radioscopic Imaging System},
  journal = {NDT \& E International},
  year    = {2003},
  volume  = {36},
  pages   = {587-599},
  number  = {8},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2003-NDTE.pdf}
}
Automatic detection of welding defects using texture features. Mery, D.; and Berti, M. Insight-Non-Destructive Testing and Condition Monitoring, 45(10): 678-681. 2003.

@ARTICLE{Mery-2003-Insight-b,
  author  = {Domingo Mery and M.A. Berti},
  title   = {Automatic detection of welding defects using texture features},
  journal = {Insight-Non-Destructive Testing and Condition Monitoring},
  year    = {2003},
  volume  = {45},
  pages   = {678-681},
  number  = {10},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2003-Insight-b.pdf},
  ee      = {http://www.atypon-link.com/BINT/doi/abs/10.1784/insi.45.10.676.52952}
}
A General Approach to Flaw Simulation in Castings by Superimposing Projections of 3D Models onto Real X-ray Images. Hahn, D.; and Mery, D. In International Conference on Computerized Tomography for Industrial Applications and Image Processing in Radiology, German Society of Non-Destructive Testing, pages 253-264, Berlin, June 23-25, 2003.

@inproceedings{Mery-2003-CTIPb,
  author    = {D. Hahn and Domingo Mery},
  title     = {A General Approach to Flaw Simulation in Castings by Superimposing Projections of {3D} Models onto Real {X-ray} Images},
  booktitle = {International Conference on Computerized Tomography for Industrial Applications and Image Processing in Radiology, German Society of Non-Destructive Testing},
  year      = {2003},
  pages     = {253-264},
  address   = {Berlin},
  month     = {June 23-25},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2003-CTIP-b.pdf}
}
Crossing Line Profile: A New Approach to Detecting Defects in Aluminium Die Castings. Mery, D. In Image Analysis, 13th Scandinavian Conference, SCIA 2003, Halmstad, Sweden, June 29 - July 2, 2003, Proceedings, pages 725-732, 2003.

@inproceedings{Mery-2003-SCIA,
  author    = {Domingo Mery},
  title     = {Crossing Line Profile: A New Approach to Detecting Defects in Aluminium Die Castings},
  booktitle = {Image Analysis, 13th Scandinavian Conference, SCIA 2003, Halmstad, Sweden, June 29 - July 2, 2003, Proceedings},
  year      = {2003},
  pages     = {725-732},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2003-LNCS.pdf},
  ee        = {http://springerlink.metapress.com/openurl.asp?genre=article{\&}issn=0302-9743{\&}volume=2749{\&}spage=725},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
2002 (6)
A Review of Methods for Automated Recognition of Casting Defects. Mery, D.; Jaeger, T.; and Filbert, D. Insight-Non-Destructive Testing and Condition Monitoring, 44(7): 428-436. 2002.

@ARTICLE{Mery-2002-Insight-a,
  author  = {Domingo Mery and Th. Jaeger and D. Filbert},
  title   = {A Review of Methods for Automated Recognition of Casting Defects},
  journal = {Insight-Non-Destructive Testing and Condition Monitoring},
  year    = {2002},
  volume  = {44},
  pages   = {428-436},
  number  = {7},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2002-Insight-a.pdf}
}
New Approaches for Defect Recognition with X-ray Testing. Mery, D. Insight-Non-Destructive Testing and Condition Monitoring, 44(10): 614-615. 2002.

@ARTICLE{Mery-2002-Insight,
  author  = {Domingo Mery},
  title   = {New Approaches for Defect Recognition with {X-ray} Testing},
  journal = {Insight-Non-Destructive Testing and Condition Monitoring},
  year    = {2002},
  volume  = {44},
  pages   = {614-615},
  number  = {10},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2002-Insight-a.pdf}
}
Automated flaw detection in aluminum castings based on the tracking of potential defects in a radioscopic image sequence. Mery, D.; and Filbert, D. IEEE Transactions on Robotics and Automation, 18(6): 890-901. 2002.

@article{Mery-2002-IEEE,
  author    = {Domingo Mery and Dieter Filbert},
  title     = {Automated flaw detection in aluminum castings based on the tracking of potential defects in a radioscopic image sequence},
  journal   = {IEEE Transactions on Robotics and Automation},
  volume    = {18},
  number    = {6},
  year      = {2002},
  pages     = {890-901},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2002-IEEE.pdf},
  ee        = {http://dx.doi.org/10.1109/TRA.2002.805646},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
Classification of Potential Defects in Automated Inspection of Aluminium Castings Using Statistical Pattern Recognition. Mery, D.; and Filbert, D. In 8th European Conference on Non-Destructive Testing (ECNDT 2002), Barcelona, 17-21 June, 2002.

@inproceedings{Mery-2002-ECNDT-b,
  author    = {Domingo Mery and D. Filbert},
  title     = {Classification of Potential Defects in Automated Inspection of Aluminium Castings Using Statistical Pattern Recognition},
  booktitle = {8th European Conference on Non-Destructive Testing (ECNDT 2002)},
  year      = {2002},
  address   = {Barcelona},
  month     = {17-21 June},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2002-ECNDT-b.pdf}
}
Automated Inspection of Moving Aluminium Castings. Mery, D.; and Filbert, D. In 8th European Conference on Non-Destructive Testing (ECNDT 2002), Barcelona, 17-21 June, 2002.

@inproceedings{Mery-2002-ECNDT-a,
  author    = {Domingo Mery and D. Filbert},
  title     = {Automated Inspection of Moving Aluminium Castings},
  booktitle = {8th European Conference on Non-Destructive Testing (ECNDT 2002)},
  year      = {2002},
  address   = {Barcelona},
  month     = {17-21 June},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2002-ECNDT-a.pdf}
}
Seguimiento de Puntos en imágenes ruidosas. Mery, D.; and Villanueva, M. In Proceedings of XXVIII Latin-American Conference on Informatics (CLEI2012), Medellin, Oct. 1-5, 2002.

@inproceedings{Mery-2002-CLEI,
  author    = {Domingo Mery and M. Villanueva},
  title     = {Seguimiento de Puntos en im\'agenes ruidosas},
  booktitle = {Proceedings of XXVIII Latin-American Conference on Informatics (CLEI2012), Medellin, Oct. 1-5.},
  year      = {2002}
}
2001 (5)
Automated Flaw Detection in Castings from Digital Radioscopic Image Sequences. Mery, D. Verlag Dr. Köster, Berlin, 2001. (Ph.D. Thesis in German)

@BOOK{Mery-2001-PhDThesis,
  author    = {Domingo Mery},
  title     = {Automated Flaw Detection in Castings from Digital Radioscopic Image Sequences},
  publisher = {Verlag Dr. K\"oster},
  year      = {2001},
  address   = {Berlin},
  note      = {(Ph.D. Thesis in German)},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Books/2001-Dissertation-TU-Berlin.pdf}
}
Automated Quality Control of Castings - State of the Art. Mery, D.; Jaeger, T.; and Filbert, D. tm - Technisches Messen, 68(7-8): 338-349. 2001. (in German)

@ARTICLE{Mery-2001-tm,
  author  = {Domingo Mery and Th. Jaeger and D. Filbert},
  title   = {Automated Quality Control of Castings - State of the Art},
  journal = {tm - Technisches Messen},
  year    = {2001},
  volume  = {68},
  pages   = {338-349},
  number  = {7-8},
  note    = {(in German)},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2001-tm.pdf}
}
Fully Automated X-Ray Inspection System: Non-destructive Testing of Castings. Mery, D.; Jaeger, T.; and Filbert, D. Materialpruefung, 43(11-12): 433-441. 2001. (in German)

@ARTICLE{Mery-2001-MatPruef,
  author  = {Domingo Mery and Th. Jaeger and D. Filbert},
  title   = {Fully Automated X-Ray Inspection System: Non-destructive Testing of Castings},
  journal = {Materialpruefung},
  year    = {2001},
  volume  = {43},
  pages   = {433-441},
  number  = {11-12},
  note    = {(in German)},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2001-MatPruef.pdf}
}
Flaw simulation in castings inspection by radioscopy. Mery, D. Insight-Non-Destructive Testing and Condition Monitoring, 43(10): 664-668. 2001.

@ARTICLE{Mery-2001-Insight,
  author  = {Domingo Mery},
  title   = {Flaw simulation in castings inspection by radioscopy},
  journal = {Insight-Non-Destructive Testing and Condition Monitoring},
  year    = {2001},
  volume  = {43},
  pages   = {664-668},
  number  = {10},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2001-Insight.pdf}
}
A new Algorithm for Flaw Simulation in Castings by Superimposing Projections of 3D Models onto X-Ray Images. Mery, D. In 21st International Conference of the Chilean Computer Science Society (SCCC 2001), 6-8 November 2001, Punta Arenas, Chile, pages 193-202, 2001.

@inproceedings{Mery-2001-SCCC,
  author    = {Domingo Mery},
  title     = {A new Algorithm for Flaw Simulation in Castings by Superimposing Projections of {3D} Models onto {X-Ray} Images},
  booktitle = {21st International Conference of the Chilean Computer Science Society (SCCC 2001), 6-8 November 2001, Punta Arenas, Chile},
  year      = {2001},
  pages     = {193-202},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2001-sccc.pdf},
  ee        = {http://csdl.computer.org/comp/proceedings/sccc/2001/1396/00/13960193abs.htm},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
2000 (4)
The epipolar Geometry in the Radioscopy: Theory and Application. Mery, D.; and Filbert, D. at - Automatisierungstechnik, 48(12): 588-596. 2000. (in German)

@ARTICLE{Mery-2000-at,
  author  = {Domingo Mery and D. Filbert},
  title   = {The epipolar Geometry in the Radioscopy: Theory and Application},
  journal = {at - Automatisierungstechnik},
  year    = {2000},
  volume  = {48},
  pages   = {588-596},
  number  = {12},
  note    = {(in German)},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/Other-Journals/2000-at.pdf}
}
Flaw Tracking in a Sequence of Digital X-ray Images: a New Method of Automated Quality Control of Castings. Mery, D.; and Filbert, D. tm - Technisches Messen, 67(4): 160-165. 2000. (in German)

@ARTICLE{Mery-2000-tm,
  author  = {Domingo Mery and D. Filbert},
  title   = {Flaw Tracking in a Sequence of Digital {X-ray} Images: a New Method of Automated Quality Control of Castings},
  journal = {tm - Technisches Messen},
  year    = {2000},
  volume  = {67},
  pages   = {160-165},
  number  = {4},
  note    = {(in German)},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/2000-tm.pdf}
}
A Fast Non-iterative Algorithm for the Removal of Blur Caused by Uniform Linear Motion in X-ray Images. Mery, D.; and Filbert, D. In Proceedings of the 15th World Conference on Non-Destructive Testing (WCNDT-2000), Rome, Oct. 15-21, 2000.

@inproceedings{Mery-2000-WCNDT-a,
  author    = {Domingo Mery and D. Filbert},
  title     = {A Fast Non-iterative Algorithm for the Removal of Blur Caused by Uniform Linear Motion in {X-ray} Images},
  booktitle = {Proceedings of the 15th World Conference on Non-Destructive Testing (WCNDT-2000)},
  year      = {2000},
  address   = {Rome},
  month     = {Oct. 15-21},
  ee        = {http://www.ndt.net/search/link.php?id=735&file=article/wcndt00/papers/idn230/idn230.htm},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2000-WCNDT-a.pdf}
}
Improvement in Automated Aluminum Casting Inspection by Finding Correspondence of Potential Flaws in Multiple Radioscopic Images. Mery, D.; Filbert, D.; and Parspour, N. In Proceedings of the 15th World Conference on Non-Destructive Testing (WCNDT-2000), Rome, Oct. 15-21, 2000.

@inproceedings{Mery-2000-WCNDT-b,
  author    = {Domingo Mery and D. Filbert and N. Parspour},
  title     = {Improvement in Automated Aluminum Casting Inspection by Finding Correspondence of Potential Flaws in Multiple Radioscopic Images},
  booktitle = {Proceedings of the 15th World Conference on Non-Destructive Testing (WCNDT-2000)},
  year      = {2000},
  address   = {Rome},
  month     = {Oct. 15-21},
  ee        = {http://www.ndt.net/search/link.php?id=734&file=article/wcndt00/papers/idn229/idn229.htm},
  url       = {http://dmery.sitios.ing.uc.cl/Prints/Conferences/International/2000-WCNDT-b.pdf}
}
1998 (1)
Real Time Visual Sensor for Supervision of Flotation Cells. Cipriano, A.; Guarini, M.; Vidal, R.; Soto, A.; Sepulveda, C.; Mery, D.; and Briseno, H. Minerals Engineering, 11(6): 489-499. 1998.

@ARTICLE{Mery-1998-MinEng,
  author  = {A. Cipriano and M. Guarini and R. Vidal and A. Soto and C. Sepulveda and Domingo Mery and H. Briseno},
  title   = {Real Time Visual Sensor for Supervision of Flotation Cells},
  journal = {Minerals Engineering},
  year    = {1998},
  volume  = {11},
  pages   = {489-499},
  number  = {6},
  url     = {http://dmery.sitios.ing.uc.cl/Prints/ISI-Journals/1998-MinEng.pdf}
}