2022.bib

@inproceedings{AzzRigLam22-hybridSummary-IW,
  title = {Semantics for Hybrid Probabilistic Logic Programs with Function Symbols: Technical Summary},
  author = {Azzolini, Damiano and Riguzzi, Fabrizio and Lamma, Evelina},
  year = {2022},
  editor = {Joaquín Arias and Roberta Calegari and Luke Dickens and Wolfgang Faber and Jorge Fandinno and Gopal Gupta and Markus Hecher and Daniela Inclezan and Emily LeBlanc and Michael Morak and Elmer Salazar and Jessica Zangari},
  booktitle = {Proceedings of the International Conference on Logic Programming 2022 Workshops co-located with the 38th International Conference on Logic Programming (ICLP 2022)},
  series = {CEUR Workshop Proceedings},
  publisher = {Sun {SITE} Central Europe},
  address = {Aachen, Germany},
  issn = {1613-0073},
  venue = {Haifa, Israel},
  volume = {3193},
  pages = {1--5},
  pdf = {http://ceur-ws.org/Vol-3193/short1PLP.pdf}
}
@inproceedings{AlbZesRig2022-Iterative-IC,
  author = {Alberti, Marco and Zese, Riccardo and Riguzzi, Fabrizio and Lamma, Evelina},
  year = {2022},
  title = {{An Iterative Fixpoint Semantics for MKNF Hybrid Knowledge Bases with Function Symbols}},
  editor = {Lierler, Yuliya and Morales, Jose F. and Dodaro, Carmine and Dahl, Veronica and Gebser, Martin and Tekle, Tuncay},
  booktitle = {Proceedings of the 38th International Conference on Logic Programming (Technical Communications)},
  series = {Electronic Proceedings in Theoretical Computer Science},
  issn = {2075-2180},
  volume = {364},
  publisher = {Open Publishing Association},
  address = {Waterloo, Australia},
  pages = {65--78},
  doi = {10.4204/EPTCS.364.7},
  url = {https://eptcs.web.cse.unsw.edu.au/paper.cgi?ICLP2022.7},
  pdf = {https://eptcs.web.cse.unsw.edu.au/paper.cgi?ICLP2022.7.pdf}
}
@inproceedings{FraLamRig22-recently-IC,
  title = {Exploiting Parameters Learning for Hyper-parameters Optimization in Deep Neural Networks},
  booktitle = {Proceedings of the 38th International Conference on Logic Programming (Technical Communications), Recently Published Research track},
  issn = {2075-2180},
  doi = {10.4204/EPTCS.364},
  volume = {364},
  pages = {142--144},
  series = {Electronic Proceedings in Theoretical Computer Science},
  publisher = {Open Publishing Association},
  address = {Waterloo, Australia},
  editor = {Yuliya Lierler and Jose F. Morales and Carmine Dodaro and Veronica Dahl and Martin Gebser and Tuncay Tekle},
  year = {2022},
  author = {Michele Fraccaroli and Fabrizio Riguzzi and Evelina Lamma},
  url = {https://arxiv.org/html/2208.02685v1/#EPTCS364.17}
}
@article{FraLamRig2022-SwX-IJ,
  title = {Symbolic {DNN-Tuner}: A {Python} and {ProbLog}-based system for optimizing Deep Neural Networks hyperparameters},
  journal = {SoftwareX},
  volume = {17},
  pages = {100957},
  year = {2022},
  issn = {2352-7110},
  doi = {10.1016/j.softx.2021.100957},
  url = {https://www.sciencedirect.com/science/article/pii/S2352711021001825},
  author = {Michele Fraccaroli and Evelina Lamma and Fabrizio Riguzzi},
  keywords = {Deep learning, Probabilistic Logic Programming, Hyper-parameters tuning, Neural-symbolic integration},
  abstract = {The application of deep learning models to increasingly complex contexts has led to a rise in the complexity of the models themselves. Due to this, there is an increase in the number of hyper-parameters (HPs) to be set and Hyper-Parameter Optimization (HPO) algorithms occupy a fundamental role in deep learning. Bayesian Optimization (BO) is the state-of-the-art of HPO for deep learning models. BO keeps track of past results and uses them to build a probabilistic model, building a probability density of HPs. This work aims to improve BO applied to Deep Neural Networks (DNNs) by an analysis of the results of the network on training and validation sets. This analysis is obtained by applying symbolic tuning rules, implemented in Probabilistic Logic Programming (PLP). The resulting system, called Symbolic DNN-Tuner, logically evaluates the results obtained from the training and the validation phase and, by applying symbolic tuning rules, fixes the network architecture, and its HPs, leading to improved performance. In this paper, we present the general system and its implementation. We also show its graphical interface and a simple example of execution.}
}
@unpublished{FraBizCasLam2022-ITAL_IA-NW,
  author = {Michele Fraccaroli and Alice Bizzarri and Paolo Casellati and Evelina Lamma},
  title = {Cross Entropy Overlap Distance},
  note = {Accepted and Presented at ITAL-IA 2022, workshop on AI for Industry},
  month = {feb},
  year = {2022},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/FraBizCasLam-ITAL_IA22.pdf}
}
@inproceedings{AzzRigBelLam22-BSCT-IW,
  title = {A Probabilistic Logic Model of Lightning Network},
  author = {Azzolini, Damiano and Riguzzi, Fabrizio and Bellodi, Elena and Lamma, Evelina},
  booktitle = {Business Information Systems Workshops},
  year = {2022},
  editor = {Abramowicz, Witold and Auer, S{\"o}ren and Str{\'o}{\.{z}}yna, Milena},
  pages = {321--333},
  series = {Lecture Notes in Business Information Processing (LNBIP)},
  publisher = {Springer International Publishing},
  address = {Cham, Switzerland},
  eventdate = {June 14-17, 2021},
  doi = {10.1007/978-3-031-04216-4_28},
  url = {https://link.springer.com/chapter/10.1007/978-3-031-04216-4_28},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/AzzRigBelLam22-BSCT-IW.pdf}
}
@incollection{ZesBelFraRigLam22-MLNVM-BC,
  author = {Zese, Riccardo and Bellodi, Elena and Fraccaroli, Michele and Riguzzi, Fabrizio and Lamma, Evelina},
  editor = {Micheloni, Rino and Zambelli, Cristian},
  title = {Neural Networks and Deep Learning Fundamentals},
  booktitle = {Machine Learning and Non-volatile Memories},
  year = {2022},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {23--42},
  abstract = {In the last decade, Neural Networks (NNs) have come to the fore as one of the most powerful and versatile approaches to many machine learning tasks. Deep Learning (DL), the latest incarnation of NNs, is nowadays applied in every scenario that needs models able to predict or classify data. From computer vision to speech-to-text, DL techniques are able to achieve super-human performance in many cases. This chapter is devoted to giving a (not comprehensive) introduction to the field, describing its main branches and model architectures, in order to give the reader a roadmap of the area.},
  isbn = {978-3-031-03841-9},
  doi = {10.1007/978-3-031-03841-9_2},
  url = {https://doi.org/10.1007/978-3-031-03841-9_2}
}
@article{NguFraBizLam2022-MBEC-IJ,
  abstract = {Recently, Artificial Intelligence (AI) and Machine Learning (ML) have been successfully applied to many domains of interest including medical diagnosis. Due to the availability of a large quantity of data, it is possible to build reliable AI systems that assist humans in making decisions. The recent Covid-19 pandemic quickly spread over the world, causing serious health problems and severe economic and social damage. Computer scientists are actively working together with doctors on different ML models to diagnose Covid-19 patients using Computed Tomography (CT) scans and clinical data. In this work, we propose a neural-symbolic system that predicts if a Covid-19 patient arriving at the hospital will end in a critical condition. The proposed system relies on Deep 3D Convolutional Neural Networks (3D-CNNs) for analyzing lung CT scans of Covid-19 patients, Decision Trees (DTs) for predicting if a Covid-19 patient will eventually pass away by analyzing their clinical data, and a neural system that integrates the previous ones using Hierarchical Probabilistic Logic Programs (HPLPs). Predicting if a Covid-19 patient will end in a critical condition is useful for managing the hospital's limited intensive care capacity. Moreover, knowing early that a Covid-19 patient could end in serious condition allows doctors to gain early knowledge of patients and provide special treatment to those predicted to end in critical condition. The proposed system, entitled Neural HPLP, obtains good performance in terms of area under the receiver operating characteristic and precision curves, with values of about 0.96 for both metrics. Therefore, with Neural HPLP, it is possible not only to efficiently predict if Covid-19 patients will end in severe conditions but also to provide an explanation of the prediction. This makes Neural HPLP explainable, interpretable, and reliable.},
  author = {Nguembang Fadja, Arnaud and Fraccaroli, Michele and Bizzarri, Alice and Mazzuchelli, Giulia and Lamma, Evelina},
  doi = {10.1007/s11517-022-02674-1},
  issn = {1741-0444},
  journal = {Medical \& Biological Engineering \& Computing},
  title = {Neural-Symbolic Ensemble Learning for early-stage prediction of critical state of Covid-19 patients},
  url = {https://doi.org/10.1007/s11517-022-02674-1},
  year = {2022},
}

This file was generated by bibtex2html 1.98.