@article{RigBelZesAlbLam21-ML-IJ,
  author    = {Riguzzi, Fabrizio and Bellodi, Elena and Zese, Riccardo and Alberti, Marco and Lamma, Evelina},
  title     = {Probabilistic inductive constraint logic},
  journal   = {Machine Learning},
  year      = {2021},
  volume    = {110},
  number    = {4},
  pages     = {723--754},
  doi       = {10.1007/s10994-020-05911-6},
  pdf       = {https://link.springer.com/content/pdf/10.1007/s10994-020-05911-6.pdf},
  publisher = {Springer},
  issn      = {0885-6125},
  abstract  = {Probabilistic logical models deal effectively with the uncertain relations and entities typical of many real-world domains. In the field of probabilistic logic programming, the aim is usually to learn such models to predict specific atoms or predicates of the domain, called target atoms/predicates. However, it might also be useful to learn classifiers for interpretations as a whole: to this end, we consider the models produced by the inductive constraint logic system, represented by sets of integrity constraints, and we propose a probabilistic version of them. Each integrity constraint is annotated with a probability, and the resulting probabilistic logical constraint model assigns a probability of being positive to interpretations. To learn both the structure and the parameters of such probabilistic models, we propose the system PASCAL for “probabilistic inductive constraint logic”. Parameter learning can be performed using gradient descent or L-BFGS. PASCAL has been tested on 11 datasets and compared with a few statistical relational systems and a system that builds relational decision trees (TILDE): we demonstrate that PASCAL achieves better or comparable results in terms of the areas under the precision–recall and receiver operating characteristic curves, in comparable execution time.}
}