Publications
2023
Chaves, Deisy; Fidalgo, Eduardo; González, Pablo Rodríguez; Fernández-Abia, Ana Isabel; Alegre, Enrique; Barreiro, Joaquín
Automatic classification of pores in aluminum castings using machine learning Artículo de revista
En: XLIV Jornadas de Automática, pp. 849–854, 2023, (Publisher: Universidade da Coruña. Servizo de Publicacións).
Resumen | Enlaces | BibTeX | Etiquetas: casting manufacturing, Image classification, porosity detection, SVM classifiers
@article{chaves_automatic_2023,
title = {Automatic classification of pores in aluminum castings using machine learning},
author = {Deisy Chaves and Eduardo Fidalgo and Pablo Rodríguez González and Ana Isabel Fernández-Abia and Enrique Alegre and Joaquín Barreiro},
url = {https://ruc.udc.es/dspace/handle/2183/33692},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {XLIV Jornadas de Automática},
pages = {849--854},
abstract = {This paper proposes automating the classification of porosity defects in aluminum parts manufactured by casting. Images of parts produced by traditional sand molding and the Binder Jetting (BJ) additive technique are analyzed. The method uses SIFT descriptors and BoVW features to train two SVM classifiers: one for detecting pores and another for classifying the type of porosity (gas-related or shrinkage-related). This automated approach improves inspection efficiency and accuracy compared to traditional manual methods.},
note = {Publisher: Universidade da Coruña. Servizo de Publicacións},
keywords = {casting manufacturing, Image classification, porosity detection, SVM classifiers},
pubstate = {published},
tppubtype = {article}
}
2021
Blanco-Medina, Pablo; Fidalgo, Eduardo; Alegre, Enrique; Carofilis-Vasco, Andrés; Jáñez-Martino, Francisco; Fidalgo-Villar, Víctor
Detecting vulnerabilities in critical infrastructures by classifying exposed industrial control systems using deep learning Artículo de revista
En: Applied Sciences, vol. 11, no 1, pp. 367, 2021, (Publisher: MDPI).
Resumen | Enlaces | BibTeX | Etiquetas: deep learning, Fine-tuning, Image classification, Industrial Control System, Transfer Learning
@article{blanco-medina_detecting_2021,
  title     = {Detecting vulnerabilities in critical infrastructures by classifying exposed industrial control systems using deep learning},
  author    = {Pablo Blanco-Medina and Eduardo Fidalgo and Enrique Alegre and Andrés Carofilis-Vasco and Francisco Jáñez-Martino and Víctor Fidalgo-Villar},
  url       = {https://www.mdpi.com/2076-3417/11/1/367},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  journal   = {Applied Sciences},
  volume    = {11},
  number    = {1},
  pages     = {367},
  abstract  = {This paper presents a deep learning pipeline to classify industrial control panel screenshots into three categories: internet technologies, operation technologies, and others. Using the CRINF-300 dataset, the authors compared CNN architectures and found that Inception-ResNet-V2 and VGG16 performed best, while MobileNet-V1 was recommended for time-sensitive systems with GPU availability.},
  note      = {Publisher: MDPI},
  keywords  = {deep learning, Fine-tuning, Image classification, Industrial Control System, Transfer Learning},
  pubstate  = {published},
  tppubtype = {article}
}
2020
Blanco-Medina, Pablo; Fidalgo, Eduardo; Alegre, Enrique; Jáñez-Martino, Francisco; Carofilis-Vasco, Andrés; Fidalgo-Villar, Víctor
Classification of Industrial Control Systems screenshots using Transfer Learning Artículo de revista
En: arXiv e-prints, pp. arXiv–2005, 2020.
Resumen | BibTeX | Etiquetas: Image classification, Industrial Control System, Transfer Learning
@article{blanco-medina_classification_2020,
title = {Classification of {Industrial Control Systems} screenshots using {Transfer Learning}},
author = {Pablo Blanco-Medina and Eduardo Fidalgo and Enrique Alegre and Francisco Jáñez-Martino and Andrés Carofilis-Vasco and Víctor Fidalgo-Villar},
year = {2020},
date = {2020-01-01},
journal = {arXiv e-prints},
pages = {arXiv–2005},
abstract = {This study evaluates CNN-based transfer learning for classifying Industrial Control System screenshots. Five pre-trained architectures are tested, with MobileNetV1 achieving the best balance of accuracy (97.95% F1-score) and CPU speed (0.47s). For GPU-dependent, time-critical tasks, VGG16 is faster (0.04s) but less accurate (87.67%).},
keywords = {Image classification, Industrial Control System, Transfer Learning},
pubstate = {published},
tppubtype = {article}
}
Biswas, Rubel; Carofilis-Vasco, Andrés; Fidalgo, Eduardo; Jáñez-Martino, Francisco; Blanco-Medina, Pablo
Perceptual Hashing applied to Tor domains recognition Artículo de revista
En: arXiv preprint arXiv:2005.10090, 2020.
Resumen | Enlaces | BibTeX | Etiquetas: Cybersecurity, DCT, Deep Web, Image classification, TOR
@article{biswas_perceptual_2020-1,
title = {Perceptual Hashing applied to {Tor} domains recognition},
author = {Rubel Biswas and Andrés Carofilis-Vasco and Eduardo Fidalgo and Francisco Jáñez-Martino and Pablo Blanco-Medina},
url = {https://arxiv.org/abs/2005.10090},
eprint = {2005.10090},
eprinttype = {arXiv},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {arXiv preprint arXiv:2005.10090},
abstract = {This paper introduces Frequency-Dominant Neighborhood Structure (F-DNS), a perceptual hashing method for automatically classifying Tor domains by their screenshots. F-DNS outperforms other methods, achieving better correlation coefficients, especially for rotated images. The method was tested on the Darknet Usage Service Images-2K (DUSI-2K) dataset and achieved an accuracy of 98.75%, surpassing other classification and hashing techniques.},
keywords = {Cybersecurity, DCT, Deep Web, Image classification, TOR},
pubstate = {published},
tppubtype = {article}
}
Fidalgo, Eduardo; Carofilis-Vasco, Andrés; Jáñez-Martino, Francisco; Blanco-Medina, Pablo
Classifying suspicious content in Tor Darknet Artículo de revista
En: arXiv preprint arXiv:2005.10086, 2020.
Resumen | Enlaces | BibTeX | Etiquetas: Computer vision, Criminal Activity Detection, Darknet Analysis, Image classification
@article{fidalgo_classifying_2020-1,
title = {Classifying suspicious content in {Tor} {Darknet}},
author = {Eduardo Fidalgo and Andrés Carofilis-Vasco and Francisco Jáñez-Martino and Pablo Blanco-Medina},
url = {https://ui.adsabs.harvard.edu/abs/2020arXiv200510086F/abstract},
eprint = {2005.10086},
eprinttype = {arXiv},
year = {2020},
date = {2020-01-01},
journal = {arXiv preprint arXiv:2005.10086},
abstract = {This paper proposes Semantic Attention Keypoint Filtering (SAKF) to classify Tor Darknet images by focusing on significant features related to criminal activities. By combining saliency maps with Bag of Visual Words (BoVW), SAKF outperforms CNN approaches (MobileNet v1, ResNet50) and BoVW with dense SIFT descriptors, achieving 87.98% accuracy.},
keywords = {Computer vision, Criminal Activity Detection, Darknet Analysis, Image classification},
pubstate = {published},
tppubtype = {article}
}
Fidalgo, Eduardo; Carofilis-Vasco, Andrés; Jáñez-Martino, Francisco; Blanco-Medina, Pablo
Classifying Suspicious Content in Tor Darknet Artículo de revista
En: arXiv e-prints, pp. arXiv–2005, 2020.
Resumen | Enlaces | BibTeX | Etiquetas: Computer vision, Criminal Activity Detection, Darknet Analysis, Image classification
@article{fidalgo_classifying_2020,
title = {Classifying Suspicious Content in {Tor} {Darknet}},
author = {Eduardo Fidalgo and Andrés Carofilis-Vasco and Francisco Jáñez-Martino and Pablo Blanco-Medina},
url = {https://arxiv.org/abs/2005.10086},
eprint = {2005.10086},
eprinttype = {arXiv},
year = {2020},
date = {2020-01-01},
journal = {arXiv e-prints},
pages = {arXiv–2005},
abstract = {This paper proposes Semantic Attention Keypoint Filtering (SAKF) to classify Tor Darknet images by focusing on significant features related to criminal activities. By combining saliency maps with Bag of Visual Words (BoVW), SAKF outperforms CNN approaches (MobileNet v1, ResNet50) and BoVW with dense SIFT descriptors, achieving 87.98% accuracy.},
keywords = {Computer vision, Criminal Activity Detection, Darknet Analysis, Image classification},
pubstate = {published},
tppubtype = {article}
}
2019
Fidalgo, Eduardo; Alegre, Enrique; Fernández-Robles, Laura; González-Castro, Víctor
Classifying suspicious content in tor darknet through Semantic Attention Keypoint Filtering Artículo de revista
En: Digital Investigation, vol. 30, pp. 12–22, 2019, (Publisher: Elsevier).
Resumen | Enlaces | BibTeX | Etiquetas: Computer vision, Darknet Investigation, Image classification, Semantic Attention
@article{fidalgo_classifying_2019,
title = {Classifying suspicious content in tor darknet through {Semantic Attention Keypoint Filtering}},
author = {Eduardo Fidalgo and Enrique Alegre and Laura Fernández-Robles and Víctor González-Castro},
url = {https://www.sciencedirect.com/science/article/pii/S1742287619300027},
year = {2019},
date = {2019-01-01},
journal = {Digital Investigation},
volume = {30},
pages = {12--22},
abstract = {This paper presents Semantic Attention Keypoint Filtering (SAKF) for automatically classifying relevant parts of images from the Tor Darknet, focusing on salient features while removing non-significant background. By combining saliency maps with Bag of Visual Words (BoVW), SAKF outperforms dense SIFT descriptors and deep CNN features (MobileNet, ResNet50), achieving significantly higher accuracies across multiple datasets. The approach shows promise for aiding law enforcement investigations in the Darknet.},
note = {Publisher: Elsevier},
keywords = {Computer vision, Darknet Investigation, Image classification, Semantic Attention},
pubstate = {published},
tppubtype = {article}
}
Fidalgo, Eduardo; Alegre, Enrique; Fernández-Robles, Laura; González-Castro, Víctor
Early fusion of multi-level saliency descriptors for image classification Artículo de revista
En: Revista Iberoamericana de Automática e Informática industrial, vol. 16, no 3, pp. 358–368, 2019, (Publisher: UNIV POLITECNICA VALENCIA, EDITORIAL UPV CAMINO VERA SN, VALENCIA, 46022, SPAIN).
Resumen | Enlaces | BibTeX | Etiquetas: Bag of Visual Words, BoVW, Feature Filtering, Image classification, Saliency Maps, SIFT Descriptors
@article{fidalgo_early_2019,
title = {Early fusion of multi-level saliency descriptors for image classification},
author = {Eduardo Fidalgo and Enrique Alegre and Laura Fernández-Robles and Víctor González-Castro},
url = {https://scholar.google.es/citations?view_op=view_citation&hl=es&user=opCbArQAAAAJ&cstart=20&pagesize=80&sortby=title&citation_for_view=opCbArQAAAAJ:BrmTIyaxlBUC},
year = {2019},
date = {2019-01-01},
journal = {Revista Iberoamericana de Automática e Informática industrial},
volume = {16},
number = {3},
pages = {358--368},
abstract = {This paper proposes an improved image classification method by enhancing Bag of Visual Words (BoVW) coding through saliency maps. By treating saliency maps as topographic maps and filtering background features, classification accuracy is improved. Six saliency algorithms were evaluated, selecting GBVS and SIM for retaining object information. SIFT descriptors from the background were filtered using binary images at different saliency levels, and early fusion of these descriptors was tested across five datasets.},
note = {Publisher: UNIV POLITECNICA VALENCIA, EDITORIAL UPV CAMINO VERA SN, VALENCIA, 46022, SPAIN},
keywords = {Bag of Visual Words, BoVW, Feature Filtering, Image classification, Saliency Maps, SIFT Descriptors},
pubstate = {published},
tppubtype = {article}
}
2018
Fidalgo, Eduardo; Alegre, Enrique; González-Castro, Víctor; Fernández-Robles, Laura
Boosting image classification through semantic attention filtering strategies Artículo de revista
En: Pattern Recognition Letters, vol. 112, pp. 176–183, 2018, (Publisher: North-Holland).
Resumen | Enlaces | BibTeX | Etiquetas: Bag of Words, Image classification, Mean Shift, Saliency Map, support vector machine
@article{fidalgo_boosting_2018,
title = {Boosting image classification through semantic attention filtering strategies},
author = {Eduardo Fidalgo and Enrique Alegre and Víctor González-Castro and Laura Fernández-Robles},
url = {https://www.sciencedirect.com/science/article/pii/S0167865518302757},
year = {2018},
date = {2018-01-01},
journal = {Pattern Recognition Letters},
volume = {112},
pages = {176--183},
abstract = {This paper presents three attention filtering methods based on saliency maps to enhance image classification using BoVW, SPM, and CNN features. The proposed strategies include AutoBlur for selecting the image signature's blurring factor and two SARF variants: one using Mean Shift segmentation and the other with a key point voting system. Experiments show that these methods improve classification performance in five datasets, outperforming baseline methods with BoVW, and achieving competitive results with SPM and CNN.},
note = {Publisher: North-Holland},
keywords = {Bag of Words, Image classification, Mean Shift, Saliency Map, support vector machine},
pubstate = {published},
tppubtype = {article}
}
Matilla, David; González-Castro, Víctor; Fernández-Robles, Laura; Fidalgo, Eduardo; Al-Nabki, Wesam
Color SIFT descriptors to categorize illegal activities in images of onion domains Artículo de revista
En: Actas de las XXXIX Jornadas de Automática, Badajoz, 5-7 de Septiembre de 2018, 2018, (Publisher: Universidad de Extremadura).
Resumen | Enlaces | BibTeX | Etiquetas: Dark Web, Image classification, TOR
@article{matilla_color_2018,
title = {Color {SIFT} descriptors to categorize illegal activities in images of onion domains},
author = {David Matilla and Víctor González-Castro and Laura Fernández-Robles and Eduardo Fidalgo and Wesam Al-Nabki},
url = {https://scholar.google.es/citations?view_op=view_citation&hl=en&user=4jZgNVkAAAAJ&cstart=20&pagesize=80&sortby=title&citation_for_view=4jZgNVkAAAAJ:RHpTSmoSYBkC},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
journal = {Actas de las XXXIX Jornadas de Automática, Badajoz, 5-7 de Septiembre de 2018},
abstract = {This paper explores identifying illegal domains on the Tor darknet based on their visual content. After crawling 500 hidden services and categorizing their images into five illegal categories, a classifier was trained using the Bag of Visual Words (BoVW) model with SIFT descriptors. Since SIFT only works with grayscale images, color-SIFT variants (HSV-SIFT, RGB-SIFT) were tested. The results show that color-SIFT descriptors, particularly HSV-SIFT, outperform traditional SIFT, achieving an accuracy of 59.44%, compared to SIFT's 57.52%.},
note = {Publisher: Universidad de Extremadura},
keywords = {Dark Web, Image classification, TOR},
pubstate = {published},
tppubtype = {article}
}
Fidalgo, Eduardo; Alegre, Enrique; González-Castro, Victor; Fernández-Robles, Laura
Illegal activity categorisation in DarkNet based on image classification using CREIC method Artículo de revista
En: International Joint Conference SOCO’17-CISIS’17-ICEUTE’17 León, Spain, September 6–8, 2017, Proceeding 12, pp. 600–609, 2018, (Publisher: Springer International Publishing).
Resumen | Enlaces | BibTeX | Etiquetas: Bag of Visual Words, Edge-SIFT descriptors, Image classification, support vector machine, TOR
@article{fidalgo_illegal_2018,
title = {Illegal activity categorisation in {DarkNet} based on image classification using {CREIC} method},
author = {Eduardo Fidalgo and Enrique Alegre and Victor González-Castro and Laura Fernández-Robles},
url = {https://link.springer.com/chapter/10.1007/978-3-319-67180-2_58},
year = {2018},
date = {2018-01-01},
journal = {International Joint Conference SOCO’17-CISIS’17-ICEUTE’17 León, Spain, September 6–8, 2017, Proceeding 12},
pages = {600--609},
abstract = {This paper introduces TOIC (TOr Image Categories), a dataset of illegal images from the TOR network, and presents a method to classify them using a combination of Edge-SIFT and dense SIFT descriptors. These features are extracted from edge images created with the Compass Operator. The method employs a Bag of Visual Words model that fuses these descriptors early in the process to effectively detect and categorize illegal content. By selecting the optimal radius before calculating Edge-SIFT, the approach improves classification performance, achieving an accuracy of 92.49% on the TOIC dataset, and showing increased accuracy in tests on both TOIC and the Butterflies dataset. The method offers an efficient tool for identifying illegal content in the TOR network.},
note = {Publisher: Springer International Publishing},
keywords = {Bag of Visual Words, Edge-SIFT descriptors, Image classification, support vector machine, TOR},
pubstate = {published},
tppubtype = {article}
}
2017
Biswas, Rubel; Fidalgo, Eduardo; Alegre, Enrique
Recognition of service domains on TOR dark net using perceptual hashing and image classification techniques Artículo de revista
En: 8th International Conference on Imaging for Crime Detection and Prevention (ICDP 2017), pp. 7–12, 2017, (Publisher: IET).
Resumen | Enlaces | BibTeX | Etiquetas: Darknet Detection, Image classification, perceptual hashing, TOR
@article{biswas_recognition_2017,
title = {Recognition of service domains on {TOR} dark net using perceptual hashing and image classification techniques},
author = {Rubel Biswas and Eduardo Fidalgo and Enrique Alegre},
url = {https://ieeexplore.ieee.org/abstract/document/8372164},
year = {2017},
date = {2017-01-01},
journal = {8th International Conference on Imaging for Crime Detection and Prevention (ICDP 2017)},
pages = {7--12},
abstract = {This paper presents a framework for identifying services on the TOR network, leveraging image content to categorize various activities such as file-sharing, ransomware, and counterfeit goods. The authors introduce the DUSI (Darknet Usage Service Images) dataset, which includes snapshots from active TOR domains across six service categories. Two pipelines were evaluated: one using Perceptual Hashing and another using Bag of Visual Words (BoVW) with SVM classifiers. The Perceptual Hashing approach achieved the highest accuracy of 99.38%, making it the recommended method for detecting TOR services based on image snapshots.},
note = {Publisher: IET},
keywords = {Darknet Detection, Image classification, perceptual hashing, TOR},
pubstate = {published},
tppubtype = {article}
}
Gangwar, Abhishek; Fidalgo, Eduardo; Alegre, Enrique; González-Castro, Víctor
Pornography and child sexual abuse detection in image and video: A comparative evaluation Artículo de revista
En: 2017, (Publisher: IET Digital Library).
Resumen | Enlaces | BibTeX | Etiquetas: CSA, deep learning, Image classification, pornography detection
@article{gangwar_pornography_2017,
title = {Pornography and child sexual abuse detection in image and video: A comparative evaluation},
author = {Abhishek Gangwar and Eduardo Fidalgo and Enrique Alegre and Víctor González-Castro},
url = {https://digital-library.theiet.org/doi/10.1049/ic.2017.0046},
doi = {10.1049/ic.2017.0046},
year = {2017},
date = {2017-01-01},
abstract = {This paper reviews automatic detection methods for pornography and Child Sex Abuse (CSA) material, particularly in sensitive environments like educational or work settings. It evaluates five pornography detection approaches, including traditional skin detection and modern deep learning techniques, using two publicly available pornographic databases. The study finds that methods utilizing multiple features perform better than those relying on single features and that deep learning-based methods outperform traditional approaches, achieving state-of-the-art results. Additionally, the methods were tested on real-world CSA material provided by the Spanish Police.},
note = {Publisher: IET Digital Library},
keywords = {CSA, deep learning, Image classification, pornography detection},
pubstate = {published},
tppubtype = {article}
}
Fidalgo, Eduardo
Selection of relevant information to improve Image Classification using Bag of Visual Words Artículo de revista
En: 2017.
Resumen | Enlaces | BibTeX | Etiquetas: Computer vision, feature selection, Fine-grained Recognition, Image classification
@article{fidalgo_selection_2017-2,
title = {Selection of relevant information to improve {Image Classification} using {Bag of Visual Words}},
author = {Eduardo Fidalgo},
url = {https://www.raco.cat/index.php/ELCVIA/article/view/v16-n2-fidalgo},
year = {2017},
date = {2017-01-01},
journal = {ELCVIA: electronic letters on computer vision and image analysis},
abstract = {This PhD thesis tackles a major challenge in computer vision: image classification. With the rapid increase in the number of images, it is essential to classify them accurately and efficiently. The typical classification pipeline involves extracting image features, encoding them into vectors, and classifying them with a pre-trained model. The Bag of Words model and its variants, such as pyramid matching and weighted schemes, have proven to be effective. However, errors can occur at any stage, causing performance issues, especially when dealing with multiple objects, small or thin items, incorrect annotations, or fine-grained recognition. The thesis highlights the importance of good feature selection to enhance classification performance, showing that high-quality features can lead to improved fine-grained classification without requiring extensive training datasets.},
keywords = {Computer vision, feature selection, Fine-grained Recognition, Image classification},
pubstate = {published},
tppubtype = {article}
}
Fidalgo, Eduardo
Selection of relevant information to improve Image Classification using Bag of Visual Words Artículo de revista
En: ELCVIA: electronic letters on computer vision and image analysis, vol. 16, no 2, pp. 5–8, 2017.
Resumen | Enlaces | BibTeX | Etiquetas: Computer vision, feature selection, Fine-grained Recognition, Image classification
@article{fidalgo_selection_2017,
title = {Selection of relevant information to improve {Image Classification} using {Bag of Visual Words}},
author = {Eduardo Fidalgo},
url = {https://recercat.cat/handle/2072/428567},
year = {2017},
date = {2017-01-01},
journal = {ELCVIA: electronic letters on computer vision and image analysis},
volume = {16},
number = {2},
pages = {5--8},
abstract = {This PhD thesis addresses one of the main challenges in computer vision: image classification. With the rapid growth in the number of images, reliable classification has become increasingly important. The conventional classification pipeline involves extracting local image features, encoding them into a feature vector, and classifying them using a pre-trained model. The Bag of Words model and its extensions, such as pyramid matching and weighted schemes, have shown strong results. However, errors can occur in any step of the process, which may lead to a decline in classification performance. These errors can stem from multiple objects in an image, thin or small objects, incorrect annotations, or fine-grained recognition tasks. The thesis demonstrates that selecting high-quality features can significantly improve fine-grained classification, showing that a large training dataset is not always necessary to achieve good results.},
keywords = {Computer vision, feature selection, Fine-grained Recognition, Image classification},
pubstate = {published},
tppubtype = {article}
}
2016
Azzopardi, George; Fernández-Robles, Laura; Alegre, Enrique; Petkov, Nicolai
Increased generalization capability of trainable cosfire filters with application to machine vision Artículo de revista
En: 2016 23rd International Conference on Pattern Recognition (ICPR), pp. 3356–3361, 2016, (Publisher: IEEE).
Resumen | Enlaces | BibTeX | Etiquetas: COSFIRE Filters, Genetic Algorithm, Image classification, Pattern Recognition
@article{azzopardi_increased_2016,
title = {Increased generalization capability of trainable {COSFIRE} filters with application to machine vision},
author = {George Azzopardi and Laura Fernández-Robles and Enrique Alegre and Nicolai Petkov},
url = {https://ieeexplore.ieee.org/abstract/document/7900152},
year = {2016},
date = {2016-01-01},
journal = {2016 23rd International Conference on Pattern Recognition (ICPR)},
pages = {3356--3361},
abstract = {This paper proposes an optimization for COSFIRE filters using a genetic algorithm to select the best contour parts for pattern recognition. The optimized COSFIRE filters, tested on a dataset of milling machine images, showed improved precision (P = 91.99%) and recall (R = 96.22%) compared to the original version, enhancing efficiency and generalization while maintaining selectivity.},
note = {Publisher: IEEE},
keywords = {COSFIRE Filters, Genetic Algorithm, Image classification, Pattern Recognition},
pubstate = {published},
tppubtype = {article}
}
Fidalgo, Eduardo; Alegre, Enrique; González-Castro, Víctor; Fernández-Robles, Laura
Compass radius estimation for improved image classification using Edge-SIFT Artículo de revista
En: Neurocomputing, vol. 197, pp. 119–135, 2016, (Publisher: Elsevier).
Resumen | Enlaces | BibTeX | Etiquetas: Accuracy Improvement, Compass operator, Edge-SIFT, Image classification, SIFT
@article{fidalgo_compass_2016,
title = {Compass radius estimation for improved image classification using {Edge-SIFT}},
author = {Eduardo Fidalgo and Enrique Alegre and Víctor González-Castro and Laura Fernández-Robles},
url = {https://www.sciencedirect.com/science/article/pii/S0925231216002824},
year = {2016},
date = {2016-01-01},
journal = {Neurocomputing},
volume = {197},
pages = {119--135},
abstract = {Combining SIFT with Edge-SIFT enhances image classification. This study evaluates how different radii of the compass operator impact performance and shows that the commonly used radius of 4.00 is not optimal. By selecting the best radius for each image, accuracy can exceed 95%. A new method is proposed to determine the optimal radius, leading to accuracy improvements across several datasets, with gains up to 24.4%.},
note = {Publisher: Elsevier},
keywords = {Accuracy Improvement, Compass operator, Edge-SIFT, Image classification, SIFT},
pubstate = {published},
tppubtype = {article}
}
2014
García-Ordás, María Teresa; Alegre, Enrique; González-Castro, Víctor; García-Ordás, Diego
aZIBO: a new descriptor based in shape moments and rotational invariant features Artículo de revista
En: 2014 22nd International Conference on Pattern Recognition, pp. 2395–2400, 2014, (Publisher: IEEE).
Resumen | Enlaces | BibTeX | Etiquetas: EGCM, Image classification, machine learning, shape descriptors, zernike moments
@article{garcia-ordas_azibo_2014,
title = {{aZIBO}: a new descriptor based in shape moments and rotational invariant features},
author = {María Teresa García-Ordás and Enrique Alegre and Víctor González-Castro and Diego García-Ordás},
url = {https://ieeexplore.ieee.org/abstract/document/6977127},
year = {2014},
date = {2014-01-01},
journal = {2014 22nd International Conference on Pattern Recognition},
pages = {2395--2400},
abstract = {This work introduces a new shape descriptor called ZIBO (absolute Zernike moments with Invariant Boundary Orientation), combining global Zernike moments and a rotationally invariant version of the Edge Gradient Co-occurrence Matrix (EGCM). The descriptors were applied to three datasets (Kimia99, MPEG2, MPEG7) and evaluated using kNN with City block and Chi-square distance metrics. The combination of global and local descriptors achieved better results than the baseline ZMEG method. Specifically, the ZIBO descriptor obtained success rates of 78.29% on MPEG7 and 81.00% on MPEG2, outperforming ZMEG by 2.43% and 3.75%, respectively.},
note = {Publisher: IEEE},
keywords = {EGCM, Image classification, machine learning, shape descriptors, zernike moments},
pubstate = {published},
tppubtype = {article}
}
2012
García-Ordás, Maite; Fernández-Robles, Laura; Olivera, Óscar García-Olalla; García-Ordás, Diego; Alegre, Enrique
Boar spermatozoa classification using local invariant features and bag of words Artículo de revista
En: Actas de las XXXIII Jornadas de Automática: Vigo, 5 al 7 de Septiembre de 2012, pp. 124, 2012, (Publisher: Universidade de Vigo).
Resumen | Enlaces | BibTeX | Etiquetas: Bag of Words, Image classification, Invariant Local Features, SVM
@article{garcia-ordas_boar_2012,
title = {Boar spermatozoa classification using local invariant features and bag of words},
author = {Maite García-Ordás and Laura Fernández-Robles and Óscar García-Olalla Olivera and Diego García-Ordás and Enrique Alegre},
url = {https://d1wqtxts1xzle7.cloudfront.net/44449820/Boar_spermatozoa_classification_using_lo20160405-12762-rpptl6-libre.pdf?1459894371=&response-content-disposition=inline%3B+filename%3DBoar_spermatozoa_classification_using_lo.pdf&Expires=1739810149&Signature=TVTQuev93pbuKg4OlXk4suOi~Coac8HAB8rlkx~gQU1hgQGzVLSHM-qPjGgmrebUZtRI6cO92VmqX5nLYwJZXXqabj7XL~MZdxEyfZFsXefB2yEW47E37QamibGNRwNQOYYXsLMkBcV4yjY0~fk4eEh3muwznGtFmBzYynLuFUsE6eDRmhg3caXHnwOE3ulYfilE-VRBGlORpq-q7c6UMS8rsvmj9L1PvOjwno77xYKcgu5Hf0wOgFkKgIZx-XLJ39pzh2pxzdiiLz7Ghnwmre5XjiqAR6wheQfEey~tzH31B5aewIWwQfYy4FAniapRIv2t~8i9uanYGwwG0ZMu4w__&Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA},
year = {2012},
date = {2012-01-01},
journal = {Actas de las XXXIII Jornadas de Automática: Vigo, 5 al 7 de Septiembre de 2012},
pages = {124},
abstract = {In this work, different descriptors and classifiers were compared to classify boar spermatozoa acrosome as intact or damaged using the Bag of Words (BOW) method. This approach models images using a dictionary-based technique, where each image is described by local points from the dictionary without considering spatial information. The method was tested with SVM, kNN, QDA, and LDA classifiers. The dictionary was created using two approaches: k-means and fuzzy clustering. Better results were obtained with the k-means algorithm and SVM classification. Two local invariant descriptors were tested: SIFT with a success rate of 64.88% and SURF with a success rate of 71.75%.},
note = {Publisher: Universidade de Vigo},
keywords = {Bag of Words, Image classification, Invariant Local Features, SVM},
pubstate = {published},
tppubtype = {article}
}
2011
Alegre, Enrique; Olivera, Óscar García-Olalla; González-Castro, Víctor; Joshi, Swapna
Boar spermatozoa classification using longitudinal and transversal profiles (LTP) descriptor in digital images Artículo de revista
En: Combinatorial Image Analysis: 14th International Workshop, IWCIA 2011, Madrid, Spain, May 23-25, 2011. Proceedings 14, pp. 410–419, 2011, (Publisher: Springer Berlin Heidelberg).
Resumen | Enlaces | BibTeX | Etiquetas: Image classification, kNN, LTP Descriptor, Neural Network, Spermatozoa Classification, texture descriptors
@article{alegre_boar_2011,
title = {Boar spermatozoa classification using longitudinal and transversal profiles ({LTP}) descriptor in digital images},
author = {Enrique Alegre and Óscar García-Olalla Olivera and Víctor González-Castro and Swapna Joshi},
url = {https://link.springer.com/chapter/10.1007/978-3-642-21073-0_36},
year = {2011},
date = {2011-01-01},
journal = {Combinatorial Image Analysis: 14th International Workshop, IWCIA 2011, Madrid, Spain, May 23-25, 2011. Proceedings 14},
pages = {410--419},
abstract = {A new textural descriptor called Longitudinal and Transversal Profiles (LTP) has been proposed to classify images of dead and alive spermatozoa heads. The dataset consists of 376 dead spermatozoa head images and 472 alive ones. The performance of LTP was compared to other descriptors like Pattern Spectrum, Flusser, Hu, and a histogram-based statistical descriptor. The feature vectors were classified using both a back-propagation Neural Network and the kNN algorithm. The LTP descriptor achieved a classification error of 30.58%, outperforming the other descriptors. Additionally, the Area Under the ROC Curve (AUC) confirmed that LTP provided better performance than the other texture descriptors.},
note = {Publisher: Springer Berlin Heidelberg},
keywords = {Image classification, kNN, LTP Descriptor, Neural Network, Spermatozoa Classification, texture descriptors},
pubstate = {published},
tppubtype = {article}
}
2009
Alegre, Enrique; González-Castro, Víctor; Castejón-Limas, Manuel
Comparison of supervised and unsupervised methods to classify boar acrosomes using texture descriptors Artículo de revista
En: 2009 International Symposium ELMAR, pp. 65–70, 2009, (Publisher: IEEE).
Resumen | Enlaces | BibTeX | Etiquetas: Image classification, Sperm Analysis, Supervised vs. Unsupervised Learning, texture descriptors
@inproceedings{alegre_comparison_2009,
  title     = {Comparison of supervised and unsupervised methods to classify boar acrosomes using texture descriptors},
  author    = {Enrique Alegre and Víctor González-Castro and Manuel Castejón-Limas},
  url       = {https://ieeexplore.ieee.org/abstract/document/5342859},
  year      = {2009},
  date      = {2009-01-01},
  urldate   = {2009-01-01},
  booktitle = {2009 International Symposium {ELMAR}},
  pages     = {65--70},
  publisher = {IEEE},
  abstract  = {This work compares supervised and unsupervised methods for classifying boar sperm head images based on membrane integrity. Five texture descriptors were tested, and classification was performed using LDA, QDA, k-NN, and Neural Networks. Results indicate that unsupervised methods outperform supervised ones, achieving a lower error rate of 6.11% compared to 9%.},
  keywords  = {Image classification, Sperm Analysis, Supervised vs. Unsupervised Learning, texture descriptors},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2007
Castejón-Limas, Manuel; Alegre, Enrique; Barreiro, Joaquín; Hernández, LK
On-line tool wear monitoring using geometric descriptors from digital images Artículo de revista
En: International Journal of Machine Tools and Manufacture, vol. 47, no 12-13, pp. 1847–1853, 2007, (Publisher: Pergamon).
Resumen | Enlaces | BibTeX | Etiquetas: Computer vision, Image classification, Monitoring, Tool wear
@article{castejon-limas_-line_2007,
  title     = {On-line tool wear monitoring using geometric descriptors from digital images},
  author    = {Manuel Castejón-Limas and Enrique Alegre and Joaquín Barreiro and L. K. Hernández},
  url       = {https://www.sciencedirect.com/science/article/pii/S0890695507000892},
  year      = {2007},
  date      = {2007-01-01},
  journal   = {International Journal of Machine Tools and Manufacture},
  volume    = {47},
  number    = {12--13},
  pages     = {1847--1853},
  abstract  = {A computer vision and statistical learning system is proposed to estimate wear levels in cutting inserts and determine the optimal replacement time. Using a CNC lathe and vision system, 1383 flank images were processed, extracting nine geometrical descriptors. Linear Discriminant Analysis identified three key descriptors—eccentricity, extent, and solidity—capturing 98.63% of relevant information. A finite mixture model classified wear into three levels: low, medium, and high. The monitoring approach tracks tool wear evolution, ensuring replacement before reaching high wear, optimizing performance and preventing failures.},
  note      = {Publisher: Pergamon},
  keywords  = {Computer vision, Image classification, Monitoring, Tool wear},
  pubstate  = {published},
  tppubtype = {article}
}
2005
Sánchez-González, Lidia; Petkov, Nicolai; Alegre, Enrique
Classification of boar spermatozoid head images using a model intracellular density distribution Artículo de revista
En: Progress in Pattern Recognition, Image Analysis and Applications: 10th Iberoamerican Congress on Pattern Recognition, CIARP 2005, Havana, Cuba, November 15-18, 2005. Proceedings 10, pp. 154–160, 2005, (Publisher: Springer Berlin Heidelberg).
Resumen | Enlaces | BibTeX | Etiquetas: Biomedical Imaging, Image classification, machine learning, Pattern Recognition, Sperm Analysis
@inproceedings{sanchez-gonzalez_classification_2005,
  title     = {Classification of boar spermatozoid head images using a model intracellular density distribution},
  author    = {Lidia Sánchez-González and Nicolai Petkov and Enrique Alegre},
  url       = {https://link.springer.com/chapter/10.1007/11578079_17},
  doi       = {10.1007/11578079_17},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Progress in Pattern Recognition, Image Analysis and Applications: 10th Iberoamerican Congress on Pattern Recognition, {CIARP} 2005, Havana, Cuba, November 15--18, 2005. Proceedings 10},
  pages     = {154--160},
  publisher = {Springer Berlin Heidelberg},
  abstract  = {A novel method is proposed to classify boar spermatozoid heads based on intracellular intensity distribution. A model distribution is created from normal samples, and deviations are used for classification. The decision criterion minimizes classification errors, achieving a global error of 20.40%.},
  keywords  = {Biomedical Imaging, Image classification, machine learning, Pattern Recognition, Sperm Analysis},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
0000
Fernández-Robles, Laura; Olivera, Óscar García-Olalla; García-Ordás, María Teresa; García-Ordás, Diego; Alegre, Enrique
SVM APPROACH TO CLASSIFY BOAR ACROSOME INTEGRITY OF A MULTI-FEATURES SURF Artículo de revista
En: 0000.
Resumen | Enlaces | BibTeX | Etiquetas: Image classification, Image Recognition, Invariant Local Features, support vector machine, SURF
@article{fernandez-robles_svm_nodate,
  title     = {{SVM} approach to classify boar acrosome integrity of a multi-features {SURF}},
  author    = {Laura Fernández-Robles and Óscar García-Olalla Olivera and María Teresa García-Ordás and Diego García-Ordás and Enrique Alegre},
  url       = {https://d1wqtxts1xzle7.cloudfront.net/44449818/SVM_approach_to_classify_boar_acrosome_i20160405-28158-qszl8d-libre.pdf?1459894369=&response-content-disposition=inline%3B+filename%3DSVM_Approach_to_Classify_Boar_Acrosome_I.pdf&Expires=1739795517&Signature=Tgnu3YoKmzyQiRloeYT95Z4ufJAMUtL~2z~sVtWh4x0OwtjsDwwxq7cUYjl-q5NxrhAJJNz3b7f7YchGOHb6p7lf48EUqtmL1Cjm1mI6YY59k3-ds8J53mCRa0SdXtjjZa0MvchGa2Aqbqx3pt5Ep6v5To7Trx3aKfElmzjdaSP7yKZxPa~b92YaH02HFDTQkx8UFEf6TuCoitK-mz4On4xw-6-RfHwdh37FtKePaXdKxv~sHwmvwVWlOn~yaNIPTO1sl3X8LT9zuUU~8yHltm8xUlFuOzWXwgAe8bMmYMWr6HwY-GG7ExpJQj43FmIa6XXflt7MlJRIuAzSgSo~Lw__&Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA},
  abstract  = {This paper presents an approach to improve the classification of invariant local feature descriptors in images of boar spermatozoa heads using Support Vector Machine (SVM). The method involves detecting interest points with SURF and classifying the acrosome as intact or damaged. The approach focuses on classifying the whole head rather than individual points, leveraging the fact that a head typically has more distinctive points of its own class than doubtful ones. The results show a hit rate of 90.91%, indicating that this method could be an effective alternative for classifying invariant local features.},
  keywords  = {Image classification, Image Recognition, Invariant Local Features, support vector machine, SURF},
  pubstate  = {published},
  tppubtype = {article},
  internal-note = {NOTE(review): url is a signed CloudFront link with an Expires parameter and will rot; replace with a stable link. Required year and venue fields are missing -- confirm publication data.}
}
Carofilis-Vasco, Andrés; Blanco-Medina, Pablo; Jáñez-Martino, Francisco; Bennabhaktula, Guru Swaroop; Fidalgo, Eduardo; Prieto-Castro, Alejandro; Fidalgo-Villar, Víctor
Classifying Screenshots of Industrial Control System Using Transfer Learning and Fine-Tuning Artículo de revista
En: 0000.
Resumen | Enlaces | BibTeX | Etiquetas: deep learning, Fine-tuning, Image classification, Industrial Control Systems, Transfer Learning
@article{carofilis-vasco_classifying_nodate,
  title     = {Classifying Screenshots of Industrial Control System Using Transfer Learning and Fine-Tuning},
  author    = {Andrés Carofilis-Vasco and Pablo Blanco-Medina and Francisco Jáñez-Martino and Guru Swaroop Bennabhaktula and Eduardo Fidalgo and Alejandro Prieto-Castro and Víctor Fidalgo-Villar},
  url       = {https://buleria.unileon.es/handle/10612/20274},
  abstract  = {This paper proposes a deep learning pipeline to classify industrial control panel screenshots into IT, OT, and other categories. Using transfer learning on nine pre-trained CNNs, the model is tested on the CRINF-300 dataset. Inception-ResNet-V2 achieves the best F1-score (98.32%), while MobileNet-V1 offers the best speed-performance balance.},
  keywords  = {deep learning, Fine-tuning, Image classification, Industrial Control Systems, Transfer Learning},
  pubstate  = {published},
  tppubtype = {article},
  internal-note = {NOTE(review): required year and venue fields are missing (rendered as "0000" in the listing) -- confirm publication data against the repository record.}
}