@comment{2023.bib}

@inproceedings{GenBizAzz23-ILP-IC,
  author    = {Gentili, Elisabetta and Bizzarri, Alice and Azzolini, Damiano and Zese, Riccardo and Riguzzi, Fabrizio},
  editor    = {Bellodi, Elena and Lisi, Francesca Alessandra and Zese, Riccardo},
  title     = {Regularization in {Probabilistic Inductive Logic Programming}},
  booktitle = {Inductive Logic Programming - {ILP} 2023},
  series    = {Lecture Notes in Computer Science},
  volume    = {14363},
  pages     = {16--29},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  year      = {2023},
  isbn      = {978-3-031-49299-0},
  doi       = {10.1007/978-3-031-49299-0_2}
}
@article{FraCasBizLam23-AI-IJ,
  author   = {Fraccaroli, Michele and Bizzarri, Alice and Casellati, Paolo and Lamma, Evelina},
  title    = {Exploiting {CNN}'s visual explanations to drive anomaly detection},
  journal  = {Applied Intelligence},
  year     = {2023},
  issn     = {1573-7497},
  doi      = {10.1007/s10489-023-05177-0},
  abstract = {Nowadays, deep learning is a key technology for many applications in the industrial area such as anomaly detection. The role of Machine Learning (ML) in this field relies on the ability of training a network to learn to inspect images to determine the presence or not of anomalies. Frequently, in Industry 4.0 w.r.t. the anomaly detection task, the images to be analyzed are not optimal, since they contain edges or areas, that are not of interest which could lead the network astray. Thus, this study aims at identifying a systematic way to train a neural network to make it able to focus only on the area of interest. The study is based on the definition of a loss to be applied in the training phase of the network that, using masks, gives higher weight to the anomalies identified within the area of interest. The idea is to add an Overlap Coefficient to the standard cross-entropy. In this way, the more the identified anomaly is outside the Area of Interest (AOI) the greater is the loss. We call the resulting loss Cross-Entropy Overlap Distance (CEOD). The advantage of adding the masks in the training phase is that the network is forced to learn and recognize defects only in the area circumscribed by the mask. The added benefit is that, during inference, these masks will no longer be needed. Therefore, there is no difference, in terms of execution times, between a standard Convolutional Neural Network (CNN) and a network trained with this loss. In some applications, the masks themselves are determined at run-time through a trained segmentation network, as we have done for instance in the "Machine learning for visual inspection and quality control" project, funded by the MISE Competence Center Bi-REX.}
}
@inproceedings{BelBerBizFavFraZes23-MCSoC-IC,
  author    = {Bellodi, Elena and Bertozzi, Davide and Bizzarri, Alice and Favalli, Michele and Fraccaroli, Michele and Zese, Riccardo},
  title     = {Efficient Resource-Aware Neural Architecture Search with a Neuro-Symbolic Approach},
  booktitle = {2023 {IEEE} 16th International Symposium on Embedded Multicore/Many-core Systems-on-Chip ({MCSoC})},
  year      = {2023},
  pages     = {171--178},
  keywords  = {Deep learning;Performance evaluation;Costs;Terminology;Computational modeling;Search problems;Probabilistic logic;neural networks;neural network hardware accelerators;architecture search;probabilistic logic programming},
  doi       = {10.1109/MCSoC60832.2023.00034}
}

@comment{This file was generated by bibtex2html 1.98.}