journals.bib

@article{FraLamRig21-ML-IJ,
  author    = {Fraccaroli, Michele and Lamma, Evelina and Riguzzi, Fabrizio},
  title     = {Symbolic {DNN-Tuner}},
  journal   = {Machine Learning},
  publisher = {Springer},
  copyright = {Springer},
  year      = {2021},
  doi       = {10.1007/s10994-021-06097-1},
  issn      = {1573-0565},
  keywords  = {Deep Learning, Hyper-Parameter Optimization, Probabilistic Logic Programming},
  abstract  = {Hyper-Parameter Optimization (HPO) occupies a fundamental role in Deep Learning systems due to the number of hyper-parameters (HPs) to be set. The state-of-the-art of HPO methods are Grid Search, Random Search and Bayesian Optimization. The first two methods try all possible combinations and random combination of the HPs values, respectively. This is performed in a blind manner, without any information for choosing the new set of HPs values. Bayesian Optimization (BO), instead, keeps track of past results and uses them to build a probabilistic model mapping HPs into a probability density of the objective function. Bayesian Optimization builds a surrogate probabilistic model of the objective function, finds the HPs values that perform best on the surrogate model and updates it with new results. In this paper, we improve BO applied to Deep Neural Network (DNN) by adding an analysis of the results of the network on training and validation sets. This analysis is performed by exploiting rule-based programming, and in particular by using Probabilistic Logic Programming. The resulting system, called Symbolic DNN-Tuner, logically evaluates the results obtained from the training and the validation phase and, by applying symbolic tuning rules, fixes the network architecture, and its HPs, therefore improving performance. We also show the effectiveness of the proposed approach, by an experimental evaluation on literature and real-life datasets.}
}
@article{FraLamRig2022-SwX-IJ,
  author   = {Michele Fraccaroli and Evelina Lamma and Fabrizio Riguzzi},
  title    = {Symbolic {DNN-Tuner}: A {Python} and {ProbLog}-based system for optimizing Deep Neural Networks hyperparameters},
  journal  = {SoftwareX},
  volume   = {17},
  pages    = {100957},
  year     = {2022},
  issn     = {2352-7110},
  doi      = {10.1016/j.softx.2021.100957},
  url      = {https://www.sciencedirect.com/science/article/pii/S2352711021001825},
  keywords = {Deep learning, Probabilistic Logic Programming, Hyper-parameters tuning, Neural-symbolic integration},
  abstract = {The application of deep learning models to increasingly complex contexts has led to a rise in the complexity of the models themselves. Due to this, there is an increase in the number of hyper-parameters (HPs) to be set and Hyper-Parameter Optimization (HPO) algorithms occupy a fundamental role in deep learning. Bayesian Optimization (BO) is the state-of-the-art of HPO for deep learning models. BO keeps track of past results and uses them to build a probabilistic model, building a probability density of HPs. This work aims to improve BO applied to Deep Neural Networks (DNNs) by an analysis of the results of the network on training and validation sets. This analysis is obtained by applying symbolic tuning rules, implemented in Probabilistic Logic Programming (PLP). The resulting system, called Symbolic DNN-Tuner, logically evaluates the results obtained from the training and the validation phase and, by applying symbolic tuning rules, fixes the network architecture, and its HPs, leading to improved performance. In this paper, we present the general system and its implementation. We also show its graphical interface and a simple example of execution.}
}
@article{NguFraBizLam2022-MBEC-IJ,
  author   = {Fadja, Arnaud Nguembang and Fraccaroli, Michele and Bizzarri, Alice and Mazzuchelli, Giulia and Lamma, Evelina},
  title    = {Neural-Symbolic Ensemble Learning for early-stage prediction of critical state of {Covid-19} patients},
  journal  = {Medical \& Biological Engineering \& Computing},
  year     = {2022},
  issn     = {1741-0444},
  doi      = {10.1007/s11517-022-02674-1},
  abstract = {Recently, Artificial Intelligence (AI) and Machine Learning (ML) have been successfully applied to many domains of interest including medical diagnosis. Due to the availability of a large quantity of data, it is possible to build reliable AI systems that assist humans in making decisions. The recent Covid-19 pandemic quickly spread over the world causing serious health problems and severe economic and social damage. Computer scientists are actively working together with doctors on different ML models to diagnose Covid-19 patients using Computed Tomography (CT) scans and clinical data. In this work, we propose a neural-symbolic system that predicts if a Covid-19 patient arriving at the hospital will end in a critical condition. The proposed system relies on Deep 3D Convolutional Neural Networks (3D-CNNs) for analyzing lung CT scans of Covid-19 patients, Decision Trees (DTs) for predicting if a Covid-19 patient will eventually pass away by analyzing its clinical data, and a neural system that integrates the previous ones using Hierarchical Probabilistic Logic Programs (HPLPs). Predicting if a Covid-19 patient will end in a critical condition is useful for managing the limited number of intensive care at the hospital. Moreover, knowing early that a Covid-19 patient could end in serious conditions allows doctors to gain early knowledge on patients and provide special treatment to those predicted to finish in critical conditions. The proposed system, entitled Neural HPLP, obtains good performance in terms of area under the receiver operating characteristic and precision curves with values of about 0.96 for both metrics. Therefore, with Neural HPLP, it is possible not only to efficiently predict if Covid-19 patients will end in severe conditions but also possible to provide an explanation of the prediction. This makes Neural HPLP explainable, interpretable, and reliable.}
}
@article{FerFraLam23-IJIS-IJ,
  author    = {Ferrari, Niccol{\`o} and Fraccaroli, Michele and Lamma, Evelina},
  title     = {{GRD-Net}: Generative-Reconstructive-Discriminative Anomaly Detection with Region of Interest Attention Module},
  journal   = {International Journal of Intelligent Systems},
  publisher = {Hindawi},
  year      = {2023},
  month     = sep,
  day       = {02},
  volume    = {2023},
  pages     = {7773481},
  issn      = {0884-8173},
  doi       = {10.1155/2023/7773481},
  abstract  = {Anomaly detection is nowadays increasingly used in industrial applications and processes. One of the main fields of the appliance is the visual inspection for surface anomaly detection, which aims to spot regions that deviate from regularity and consequently identify abnormal products. Defect localization is a key task that is usually achieved using a basic comparison between generated image and the original one, implementing some blob analysis or image-editing algorithms in the postprocessing step, which is very biased towards the source dataset, and they are unable to generalize. Furthermore, in industrial applications, the totality of the image is not always interesting but could be one or some regions of interest (ROIs), where only in those areas there are relevant anomalies to be spotted. For these reasons, we propose a new architecture composed by two blocks. The first block is a generative adversarial network (GAN), based on a residual autoencoder (ResAE), to perform reconstruction and denoising processes, while the second block produces image segmentation, spotting defects. This method learns from a dataset composed of good products and generated synthetic defects. The discriminative network is trained using a ROI for each image contained in the training dataset. The network will learn in which area anomalies are relevant. This approach guarantees the reduction of using preprocessing algorithms, formerly developed with blob analysis and image-editing procedures. To test our model, we used challenging MVTec anomaly detection datasets and an industrial large dataset of pharmaceutical BFS strips of vials. This set constitutes a more realistic use case of the aforementioned network.}
}
@article{FraCasBizLam23-AI-IJ,
  author   = {Fraccaroli, Michele and Bizzarri, Alice and Casellati, Paolo and Lamma, Evelina},
  title    = {Exploiting {CNN}'s visual explanations to drive anomaly detection},
  journal  = {Applied Intelligence},
  year     = {2023},
  issn     = {1573-7497},
  doi      = {10.1007/s10489-023-05177-0},
  abstract = {Nowadays, deep learning is a key technology for many applications in the industrial area such as anomaly detection. The role of Machine Learning (ML) in this field relies on the ability of training a network to learn to inspect images to determine the presence or not of anomalies. Frequently, in Industry 4.0 w.r.t. the anomaly detection task, the images to be analyzed are not optimal, since they contain edges or areas, that are not of interest which could lead the network astray. Thus, this study aims at identifying a systematic way to train a neural network to make it able to focus only on the area of interest. The study is based on the definition of a loss to be applied in the training phase of the network that, using masks, gives higher weight to the anomalies identified within the area of interest. The idea is to add an Overlap Coefficient to the standard cross-entropy. In this way, the more the identified anomaly is outside the Area of Interest (AOI) the greater is the loss. We call the resulting loss Cross-Entropy Overlap Distance (CEOD). The advantage of adding the masks in the training phase is that the network is forced to learn and recognize defects only in the area circumscribed by the mask. The added benefit is that, during inference, these masks will no longer be needed. Therefore, there is no difference, in terms of execution times, between a standard Convolutional Neural Network (CNN) and a network trained with this loss. In some applications, the masks themselves are determined at run-time through a trained segmentation network, as we have done for instance in the "Machine learning for visual inspection and quality control" project, funded by the MISE Competence Center Bi-REX.}
}
@article{BizFraLam24-FAI-IJ,
  author   = {Bizzarri, Alice and Fraccaroli, Michele and Lamma, Evelina and Riguzzi, Fabrizio},
  title    = {Integration between constrained optimization and deep networks: a survey},
  journal  = {Frontiers in Artificial Intelligence},
  volume   = {7},
  year     = {2024},
  issn     = {2624-8212},
  doi      = {10.3389/frai.2024.1414707},
  url      = {https://www.frontiersin.org/articles/10.3389/frai.2024.1414707},
  abstract = {Integration between constrained optimization and deep networks has garnered significant interest from both research and industrial laboratories. Optimization techniques can be employed to optimize the choice of network structure based not only on loss and accuracy but also on physical constraints. Additionally, constraints can be imposed during training to enhance the performance of networks in specific contexts. This study surveys the literature on the integration of constrained optimization with deep networks. Specifically, we examine the integration of hyper-parameter tuning with physical constraints, such as the number of FLOPS (FLoating point Operations Per Second), a measure of computational capacity, latency, and other factors. This study also considers the use of context-specific knowledge constraints to improve network performance. We discuss the integration of constraints in neural architecture search (NAS), considering the problem as both a multi-objective optimization (MOO) challenge and through the imposition of penalties in the loss function. Furthermore, we explore various approaches that integrate logic with deep neural networks (DNNs). In particular, we examine logic-neural integration through constrained optimization applied during the training of NNs and the use of semantic loss, which employs the probabilistic output of the networks to enforce constraints on the output.}
}
@misc{FerZanFraBizLam24-SSRN-IJ,
  author        = {Ferrari, Niccol{\`o} and Zanarini, Nicola and Fraccaroli, Michele and Bizzarri, Alice and Lamma, Evelina},
  title         = {Integration of Deep Generative Anomaly Detection Algorithm in High-Speed Industrial Line},
  year          = {2024},
  howpublished  = {{SSRN} preprint},
  url           = {https://papers.ssrn.com/sol3/papers.cfm?abstract_id=485866},
  internal-note = {NOTE(review): the SSRN abstract_id (485866, six digits) looks truncated for a 2024 paper -- verify the URL against the live SSRN page}
}
@article{FulFraFranFabBalCiaGhi24-MDPI-IJ,
  author  = {Fullin, Nicola and Fraccaroli, Michele and Francioni, Mirko and Fabbri, Stefano and Ballaera, Angelo and Ciavola, Paolo and Ghirotti, Monica},
  title   = {Detection of Cliff Top Erosion Drivers through Machine Learning Algorithms between {Portonovo} and {Trave} Cliffs ({Ancona}, {Italy})},
  journal = {Remote Sensing},
  volume  = {16},
  number  = {14},
  pages   = {2604},
  year    = {2024},
  issn    = {2072-4292},
  doi     = {10.3390/rs16142604},
  url     = {https://www.mdpi.com/2072-4292/16/14/2604}
}

This file was generated by bibtex2html 1.98.