latest.bib

@article{AzzBellFer2022-IJAR-IJ,
  author   = {Damiano Azzolini and Elena Bellodi and Stefano Ferilli and Fabrizio Riguzzi and Riccardo Zese},
  title    = {Abduction with probabilistic logic programming under the distribution semantics},
  journal  = {International Journal of Approximate Reasoning},
  volume   = {142},
  pages    = {41--63},
  year     = {2022},
  issn     = {0888-613X},
  doi      = {10.1016/j.ijar.2021.11.003},
  url      = {https://www.sciencedirect.com/science/article/pii/S0888613X2100181X},
  keywords = {Abduction, Distribution semantics, Probabilistic logic programming, Statistical relational artificial intelligence},
  abstract = {In Probabilistic Abductive Logic Programming we are given a probabilistic logic program, a set of abducible facts, and a set of constraints. Inference in probabilistic abductive logic programs aims to find a subset of the abducible facts that is compatible with the constraints and that maximizes the joint probability of the query and the constraints. In this paper, we extend the PITA reasoner with an algorithm to perform abduction on probabilistic abductive logic programs exploiting Binary Decision Diagrams. Tests on several synthetic datasets show the effectiveness of our approach.},
  scopus   = {2-s2.0-85119493622}
}
@inbook{ZesBelFraRigLam22-MLNVM-BC,
  author    = {Zese, Riccardo and Bellodi, Elena and Fraccaroli, Michele and Riguzzi, Fabrizio and Lamma, Evelina},
  editor    = {Micheloni, Rino and Zambelli, Cristian},
  title     = {Neural Networks and Deep Learning Fundamentals},
  booktitle = {Machine Learning and Non-volatile Memories},
  year      = {2022},
  publisher = {Springer International Publishing},
  address   = {Cham},
  pages     = {23--42},
  abstract  = {In the last decade, Neural Networks (NNs) have come to the fore as one of the most powerful and versatile approaches to many machine learning tasks. Deep Learning (DL), the latest incarnation of NNs, is nowadays applied in every scenario that needs models able to predict or classify data. From computer vision to speech-to-text, DL techniques are able to achieve super-human performance in many cases. This chapter is devoted to give a (not comprehensive) introduction to the field, describing the main branches and model architectures, in order to try to give a roadmap of this area to the reader.},
  isbn      = {978-3-031-03841-9},
  doi       = {10.1007/978-3-031-03841-9_2},
  url       = {https://doi.org/10.1007/978-3-031-03841-9_2}
}
@article{RigBelZesAlbLam21-ML-IJ,
  author    = {Riguzzi, Fabrizio and Bellodi, Elena and Zese, Riccardo and Alberti, Marco and Lamma, Evelina},
  title     = {Probabilistic inductive constraint logic},
  journal   = {Machine Learning},
  year      = {2021},
  volume    = {110},
  number    = {4},
  pages     = {723--754},
  doi       = {10.1007/s10994-020-05911-6},
  pdf       = {https://link.springer.com/content/pdf/10.1007/s10994-020-05911-6.pdf},
  publisher = {Springer},
  issn      = {0885-6125},
  abstract  = {Probabilistic logical models deal effectively with uncertain relations and entities typical of many real world domains. In the field of probabilistic logic programming usually the aim is to learn these kinds of models to predict specific atoms or predicates of the domain, called target atoms/predicates. However, it might also be useful to learn classifiers for interpretations as a whole: to this end, we consider the models produced by the inductive constraint logic system, represented by sets of integrity constraints, and we propose a probabilistic version of them. Each integrity constraint is annotated with a probability, and the resulting probabilistic logical constraint model assigns a probability of being positive to interpretations. To learn both the structure and the parameters of such probabilistic models we propose the system PASCAL for ``probabilistic inductive constraint logic''. Parameter learning can be performed using gradient descent or L-BFGS. PASCAL has been tested on 11 datasets and compared with a few statistical relational systems and a system that builds relational decision trees (TILDE): we demonstrate that this system achieves better or comparable results in terms of area under the precision--recall and receiver operating characteristic curves, in a comparable execution time.}
}
@article{BelAlbRig21-TPLP-IJ,
  author    = {Elena Bellodi and Marco Gavanelli and Riccardo Zese and Evelina Lamma and Fabrizio Riguzzi},
  title     = {Nonground Abductive Logic Programming with Probabilistic Integrity Constraints},
  journal   = {Theory and Practice of Logic Programming},
  year      = {2021},
  volume    = {21},
  number    = {5},
  pages     = {557--574},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  doi       = {10.1017/S1471068421000417},
  url       = {https://arxiv.org/abs/2108.03033},
  pdf       = {https://arxiv.org/pdf/2108.03033.pdf}
}
@inproceedings{BelZesBer21-LOD-IC,
  author    = {Elena Bellodi and Riccardo Zese and Francesco Bertasi},
  title     = {Machine Learning in a Policy Support System for Smart Tourism Management},
  booktitle = {Proceedings of the 7th International Online \& Onsite Conference on Machine Learning, Optimization, and Data Science - LOD, October 4--8, 2021 - Grasmere, Lake District, England - UK},
  year      = {2021},
  publisher = {Springer Nature},
  address   = {Heidelberg, Germany},
  series    = {Lecture Notes in Computer Science},
  venue     = {Online and Grasmere, Lake District, UK},
  eventdate = {2021-10-04/2021-10-08},
  copyright = {Springer},
  note      = {In press}
}
@article{ZesBelLucAlv21-IEEE-IJ,
  author    = {Riccardo Zese and Elena Bellodi and Chiara Luciani and Stefano Alvisi},
  title     = {Neural Network Techniques for Detecting Intra-Domestic Water Leaks of Different Magnitude},
  journal   = {IEEE Access},
  publisher = {IEEE},
  year      = {2021},
  url       = {https://ieeexplore.ieee.org/document/9530653},
  volume    = {9},
  doi       = {10.1109/ACCESS.2021.3111113},
  pages     = {126135--126147},
  issn      = {2169-3536}
}
@article{ZesCot21-JWS-IJ,
  author   = {Riccardo Zese and Giuseppe Cota},
  title    = {Optimizing a tableau reasoner and its implementation in {Prolog}},
  journal  = {Journal of Web Semantics},
  volume   = {71},
  number   = {100677},
  pages    = {1--22},
  year     = {2021},
  issn     = {1570-8268},
  doi      = {10.1016/j.websem.2021.100677},
  url      = {https://www.sciencedirect.com/science/article/pii/S1570826821000524},
  keywords = {Reasoner, Axiom pinpointing, Tableau algorithm, (Probabilistic) description logic, Prolog},
  abstract = {One of the foremost reasoning services for knowledge bases is finding all the justifications for a query. This is useful for debugging purpose and for coping with uncertainty. Among Description Logics (DLs) reasoners, the tableau algorithm is one of the most used. However, in order to collect the justifications, the reasoners must manage the non-determinism of the tableau method. For these reasons, a Prolog implementation can facilitate the management of such non-determinism. The TRILL framework contains three probabilistic reasoners written in Prolog: TRILL, TRILLP and TORNADO. Since they are all part of the same framework, the choice about which to use can be done easily via the framework settings. Each one of them uses different approaches for probabilistic inference and handles different DLs flavors. Our previous work showed that they can sometimes achieve better results than state-of-the-art (non-)probabilistic reasoners. In this paper we present two optimizations that improve the performances of the TRILL reasoners. The first one consists into identifying the fragment of the KB that allows to perform inference without losing the completeness. The second one modifies which tableau rule to apply and their order of application, in order to reduce the number of operations. Experimental results show the effectiveness of the introduced optimizations.}
}

This file was generated by bibtex2html 1.98.