@article{BelRig12-IA-IJ, author = {Elena Bellodi and Fabrizio Riguzzi}, title = { Experimentation of an Expectation Maximization Algorithm for Probabilistic Logic Programs}, year = {2012}, journal = {Intelligenza Artificiale}, publisher = {IOS Press}, copyright = {IOS Press}, pdf = {http://ml.unife.it/wp-content/uploads/Papers/BelRig12-IA-IJ.pdf}, abstract = {Statistical Relational Learning and Probabilistic Inductive Logic Programming are two emerging fields that use representation languages able to combine logic and probability. In the field of Logic Programming, the distribution semantics is one of the prominent approaches for representing uncertainty and underlies many languages such as ICL, PRISM, ProbLog and LPADs. Learning the parameters for such languages requires an Expectation Maximization algorithm since their equivalent Bayesian networks contain hidden variables. EMBLEM (EM over BDDs for probabilistic Logic programs Efficient Mining) is an EM algorithm for languages following the distribution semantics that computes expectations directly on the Binary Decision Diagrams that are built for inference. In this paper we present experiments comparing EMBLEM with LeProbLog, Alchemy, CEM, RIB and LFI-ProbLog on six real world datasets. The results show that EMBLEM is able to solve problems on which the other systems fail and it often achieves significantly higher areas under the Precision Recall and the ROC curves in a similar time.}, keywords = {Statistical Relational Learning, Probabilistic Inductive Logic Programming, Probabilistic Logic Programming, Expectation Maximization, Binary Decision Diagrams, Logic Programs with Annotated Disjunctions }, volume = {8}, number = {1}, pages = {3-18}, doi = {10.3233/IA-2012-0027} }
@article{BelRig13-IDA-IJ, author = {Elena Bellodi and Fabrizio Riguzzi}, title = { Expectation {Maximization} over Binary Decision Diagrams for Probabilistic Logic Programs}, year = {2013}, volume = {17}, number = {2}, journal = {Intelligent Data Analysis}, publisher = {IOS Press}, copyright = {IOS Press}, pages = {343-363}, doi = {10.3233/IDA-130582}, pdf = {http://ml.unife.it/wp-content/uploads/Papers/BelRig13-IDA-IJ.pdf}, abstract = {Recently much work in Machine Learning has concentrated on using expressive representation languages that combine aspects of logic and probability. A whole field has emerged, called Statistical Relational Learning, rich of successful applications in a variety of domains. In this paper we present a Machine Learning technique targeted to Probabilistic Logic Programs, a family of formalisms where uncertainty is represented using Logic Programming tools. Among various proposals for Probabilistic Logic Programming, the one based on the distribution semantics is gaining popularity and is the basis for languages such as ICL, PRISM, ProbLog and Logic Programs with Annotated Disjunctions. This paper proposes a technique for learning parameters of these languages. Since their equivalent Bayesian networks contain hidden variables, an Expectation Maximization (EM) algorithm is adopted. In order to speed the computation up, expectations are computed directly on the Binary Decision Diagrams that are built for inference. The resulting system, called EMBLEM for ``EM over Bdds for probabilistic Logic programs Efficient Mining'', has been applied to a number of datasets and showed good performances both in terms of speed and memory usage. In particular its speed allows the execution of a high number of restarts, resulting in good quality of the solutions.}, keywords = {Statistical Relational Learning, Probabilistic Inductive Logic Programming, Probabilistic Logic Programs, Logic Programs with Annotated Disjunctions, Expectation Maximization, Binary Decision Diagrams } }
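The programs targeted by EMBLEM in the two entries above follow the distribution semantics. Below is a minimal, hedged sketch of such a program and of exact inference over it, using the ProbLog 2 Python API (ProbLog is one of the languages cited in the abstracts; the program and its probabilities are invented for illustration, and this is not the EMBLEM system itself):

from problog.program import PrologString
from problog import get_evaluatable

# A toy distribution-semantics program: each grounding of the probabilistic
# clause is an independent random variable that holds with probability 0.6.
model = PrologString("""
0.6::heads(C) :- toss(C).
toss(coin1).
toss(coin2).
two_heads :- heads(coin1), heads(coin2).
query(two_heads).
""")

# Knowledge compilation followed by weighted model counting, as in standard ProbLog inference.
result = get_evaluatable().create_from(model).evaluate()
for query, probability in result.items():
    print(query, probability)   # expected: two_heads 0.36 (= 0.6 * 0.6)

Parameter learning as performed by EMBLEM would estimate the 0.6 annotations from data instead of fixing them by hand.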
@article{RigBelZes14-FAI-IJ, author = {Riguzzi, Fabrizio and Bellodi, Elena and Zese, Riccardo}, title = {A History of Probabilistic Inductive Logic Programming}, journal = {Frontiers in Robotics and AI}, volume = {1}, year = {2014}, number = {6}, url = {http://www.frontiersin.org/computational_intelligence/10.3389/frobt.2014.00006/abstract}, doi = {10.3389/frobt.2014.00006}, issn = {2296-9144}, abstract = {The field of Probabilistic Logic Programming (PLP) has seen significant advances in the last 20 years, with many proposals for languages that combine probability with logic programming. Since the start, the problem of learning probabilistic logic programs has been the focus of much attention. Learning these programs represents a whole subfield of Inductive Logic Programming (ILP). In Probabilistic ILP (PILP), two problems are considered: learning the parameters of a program given the structure (the rules) and learning both the structure and the parameters. Usually, structure learning systems use parameter learning as a subroutine. In this article, we present an overview of PILP and discuss the main results.}, pages = {1-5}, keywords = {logic programming, probabilistic programming, inductive logic programming, probabilistic logic programming, statistical relational learning}, copyright = {by the authors} }
@article{BelLamRig14-ICLP-IJ, author = { Elena Bellodi and Evelina Lamma and Fabrizio Riguzzi and Santos Costa, Vitor and Riccardo Zese}, title = {Lifted Variable Elimination for Probabilistic Logic Programming}, journal = {Theory and Practice of Logic Programming}, publisher = {Cambridge University Press}, copyright = {Cambridge University Press}, number = {Special issue 4-5 - ICLP 2014}, volume = {14}, year = {2014}, pages = {681-695}, doi = {10.1017/S1471068414000283}, pdf = {http://arxiv.org/abs/1405.3218}, keywords = {Probabilistic Logic Programming, Lifted Inference, Variable Elimination, Distribution Semantics, ProbLog, Statistical Relational Artificial Intelligence}, abstract = {Lifted inference has been proposed for various probabilistic logical frameworks in order to compute the probability of queries in a time that depends on the size of the domains of the random variables rather than the number of instances. Even if various authors have underlined its importance for probabilistic logic programming (PLP), lifted inference has been applied up to now only to relational languages outside of logic programming. In this paper we adapt Generalized Counting First Order Variable Elimination (GC-FOVE) to the problem of computing the probability of queries to probabilistic logic programs under the distribution semantics. In particular, we extend the Prolog Factor Language (PFL) to include two new types of factors that are needed for representing ProbLog programs. These factors take into account the existing causal independence relationships among random variables and are managed by the extension to variable elimination proposed by Zhang and Poole for dealing with convergent variables and heterogeneous factors. Two new operators are added to GC-FOVE for treating heterogeneous factors. The resulting algorithm, called LP2 for Lifted Probabilistic Logic Programming, has been implemented by modifying the PFL implementation of GC-FOVE and tested on three benchmarks for lifted inference. A comparison with PITA and ProbLog2 shows the potential of the approach.}, isi = {000343203200019}, scopus = {84904624147} }
@article{DiMBelRig15-ML-IJ, author = {Di Mauro, Nicola and Elena Bellodi and Fabrizio Riguzzi}, title = {Bandit-Based {Monte-Carlo} Structure Learning of Probabilistic Logic Programs}, journal = {Machine Learning}, publisher = {Springer International Publishing}, copyright = {Springer International Publishing}, year = {2015}, volume = {100}, number = {1}, pages = {127-156}, month = {July}, doi = {10.1007/s10994-015-5510-3}, url = {http://ml.unife.it/wp-content/uploads/Papers/DiMBelRig-ML15.pdf}, keywords = {probabilistic inductive logic programming, statistical relational learning, structure learning, distribution semantics, logic programs with annotated disjunction}, abstract = {Probabilistic Logic Programming can be used to model domains with complex and uncertain relationships among entities. While the problem of learning the parameters of such programs has been considered by various authors, the problem of learning the structure is yet to be explored in depth. In this work we present an approximate search method based on a one-player game approach, called LEMUR. It sees the problem of learning the structure of a probabilistic logic program as a multiarmed bandit problem, relying on the Monte-Carlo tree search UCT algorithm that combines the precision of tree search with the generality of random sampling. LEMUR works by modifying the UCT algorithm in a fashion similar to FUSE, that considers a finite unknown horizon and deals with the problem of having a huge branching factor. The proposed system has been tested on various real-world datasets and has shown good performance with respect to other state of the art statistical relational learning approaches in terms of classification abilities.}, note = {The original publication is available at \url{http://link.springer.com}} }
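The UCT selection rule underlying LEMUR's Monte-Carlo tree search, mentioned in the entry above, is stated here in its standard textbook form (the exploration constant and LEMUR's FUSE-style modifications are described in the paper): at each tree node, pick the child $j$ maximizing
\[ \bar{X}_j + C \sqrt{\frac{2 \ln n}{n_j}}, \]
where $\bar{X}_j$ is the average reward of child $j$, $n_j$ its visit count, and $n$ the visit count of the parent node.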
@article{RigBelLamZes15-SW-IJ, author = {Fabrizio Riguzzi and Elena Bellodi and Evelina Lamma and Riccardo Zese}, title = {Probabilistic Description Logics under the Distribution Semantics}, journal = {Semantic Web - Interoperability, Usability, Applicability}, volume = {6}, number = {5}, pages = {447-501}, pdf = {http://ml.unife.it/wp-content/uploads/Papers/RigBelLamZes-SW14.pdf}, year = {2015}, doi = {10.3233/SW-140154}, abstract = { Representing uncertain information is crucial for modeling real world domains. In this paper we present a technique for the integration of probabilistic information in Description Logics (DLs) that is based on the distribution semantics for probabilistic logic programs. In the resulting approach, that we called DISPONTE, the axioms of a probabilistic knowledge base (KB) can be annotated with a real number between 0 and 1. A probabilistic knowledge base then defines a probability distribution over regular KBs called worlds and the probability of a given query can be obtained from the joint distribution of the worlds and the query by marginalization. We present the algorithm BUNDLE for computing the probability of queries from DISPONTE KBs. The algorithm exploits an underlying DL reasoner, such as Pellet, that is able to return explanations for queries. The explanations are encoded in a Binary Decision Diagram from which the probability of the query is computed. The experimentation of BUNDLE shows that it can handle probabilistic KBs of realistic size. }, keywords = { Probabilistic Ontologies, Probabilistic Description Logics, OWL, Probabilistic Logic Programming, Distribution Semantics} }
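A small worked example of the DISPONTE semantics sketched in the abstract above (axioms and probabilities are invented for illustration): annotate two DL axioms as $0.4 :: \mathit{Cat} \sqsubseteq \mathit{Pet}$ and $0.3 :: \mathit{Pet} \sqsubseteq \mathit{Animal}$. The four worlds are obtained by independently including or excluding each annotated axiom, and the query $Q = \mathit{Cat} \sqsubseteq \mathit{Animal}$ is entailed only by the world containing both, so
\[ P(Q) = \sum_{w \models Q} P(w) = 0.4 \times 0.3 = 0.12. \]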
@article{BelRig15-TPLP-IJ, author = {Elena Bellodi and Fabrizio Riguzzi}, title = {Structure Learning of Probabilistic Logic Programs by Searching the Clause Space}, journal = {Theory and Practice of Logic Programming}, publisher = {Cambridge University Press}, copyright = {Cambridge University Press}, year = {2015}, volume = {15}, number = {2}, pages = {169-212}, pdf = {http://arxiv.org/abs/1309.2080}, url = {http://journals.cambridge.org/abstract_S1471068413000689}, doi = {10.1017/S1471068413000689}, keywords = {probabilistic inductive logic programming, statistical relational learning, structure learning, distribution semantics, logic programs with annotated disjunction, CP-logic}, abstract = {Learning probabilistic logic programming languages is receiving an increasing attention, and systems are available for learning the parameters (PRISM, LeProbLog, LFI-ProbLog and EMBLEM) or both structure and parameters (SEM-CP-logic and SLIPCASE) of these languages. In this paper we present the algorithm SLIPCOVER for "Structure LearnIng of Probabilistic logic programs by searChing OVER the clause space." It performs a beam search in the space of probabilistic clauses and a greedy search in the space of theories using the log likelihood of the data as the guiding heuristics. To estimate the log likelihood, SLIPCOVER performs Expectation Maximization with EMBLEM. The algorithm has been tested on five real world datasets and compared with SLIPCASE, SEM-CP-logic, Aleph and two algorithms for learning Markov Logic Networks (Learning using Structural Motifs (LSM) and ALEPH++ExactL1). SLIPCOVER achieves higher areas under the precision-recall and receiver operating characteristic curves in most cases.} }
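The guiding heuristic mentioned in the entry above can be written generically (schematic notation, not lifted from the paper): for a candidate theory $T$ and training examples $e_1,\dots,e_M$, the score is the log-likelihood
\[ \mathit{LL}(T) = \sum_{i=1}^{M} \log P(e_i \mid T), \]
whose parameters SLIPCOVER estimates by running EMBLEM on each candidate refinement.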
@article{RigBelLam16-SPE-IJ, author = {Fabrizio Riguzzi and Elena Bellodi and Evelina Lamma and Riccardo Zese and Giuseppe Cota}, title = {Probabilistic Logic Programming on the Web}, journal = {Software: Practice and Experience}, publisher = {Wiley}, copyright = {Wiley}, year = {2016}, issn = {1097-024X}, url = {http://ml.unife.it/wp-content/uploads/Papers/RigBelLam-SPE16.pdf}, abstract = { We present the web application "cplint on SWISH", that allows the user to write probabilistic logic programs and compute the probability of queries with just a web browser. The application is based on SWISH, a recently proposed web framework for logic programming. SWISH is based on various features and packages of SWI-Prolog, in particular its web server and its Pengine library, that allow to create remote Prolog engines and to pose queries to them. In order to develop the web application, we started from the PITA system which is included in cplint, a suite of programs for reasoning on Logic Programs with Annotated Disjunctions, by porting PITA to SWI-Prolog. Moreover, we modified the PITA library so that it can be executed in a multi-threading environment. Developing "cplint on SWISH" also required modification of the JavaScript SWISH code that creates and queries Pengines. "cplint on SWISH" includes a number of examples that cover a wide range of domains and provide interesting applications of Probabilistic Logic Programming (PLP). By providing a web interface to cplint we allow users to experiment with PLP without the need to install a system, a procedure which is often complex, error prone and limited mainly to the Linux platform. In this way, we aim to reach out to a wider audience and popularize PLP.}, keywords = { Logic Programming, Probabilistic Logic Programming, Distribution Semantics, Logic Programs with Annotated Disjunctions, Web Applications }, doi = {10.1002/spe.2386}, volume = {46}, number = {10}, pages = {1381-1396}, month = {October}, wos = {WOS:000383624900005}, scopus = {2-s2.0-84951829971} }
@article{BelRigLam16-IDA-IJ, author = {Elena Bellodi and Fabrizio Riguzzi and Evelina Lamma}, title = {Statistical Relational Learning for Workflow Mining}, journal = {Intelligent Data Analysis}, publisher = {IOS Press}, copyright = {IOS Press}, year = {2016}, doi = {10.3233/IDA-160818}, month = {April}, volume = {20}, number = {3}, pages = {515-541}, url = {http://ml.unife.it/wp-content/uploads/Papers/BelRigLam-IDA15.pdf}, keywords = {Workflow Mining, Process Mining, Knowledge-based Process Models, Inductive Logic Programming, Statistical Relational Learning, Business Process Management }, abstract = { The management of business processes can support efficiency improvements in organizations. One of the most interesting problems is the mining and representation of process models in a declarative language. Various recently proposed knowledge-based languages showed advantages over graph-based procedural notations. Moreover, rapid changes of the environment require organizations to check how compliant are new process instances with the deployed models. We present a Statistical Relational Learning approach to Workflow Mining that takes into account both flexibility and uncertainty in real environments. It performs automatic discovery of process models expressed in a probabilistic logic. It uses the existing DPML algorithm for extracting first-order logic constraints from process logs. The constraints are then translated into Markov Logic to learn their weights. Inference on the resulting Markov Logic model allows a probabilistic classification of test traces, by assigning them the probability of being compliant to the model. We applied this approach to three datasets and compared it with DPML alone, five Petri net- and EPC-based process mining algorithms and Tilde. The technique is able to better classify new execution traces, showing higher accuracy and areas under the PR/ROC curves in most cases. }, scopus = {2-s2.0-84969808336}, wos = {WOS:000375005000004} }
@article{RigCotBel17-IJAR-IJ, author = {Fabrizio Riguzzi and Giuseppe Cota and Elena Bellodi and Riccardo Zese }, title = {Causal Inference in {cplint}}, journal = {International Journal of Approximate Reasoning}, year = {2017}, publisher = {Elsevier}, address = {Amsterdam}, copyright = {Elsevier}, doi = {10.1016/j.ijar.2017.09.007}, pdf = {http://ml.unife.it/wp-content/uploads/Papers/RigCotBel-IJAR17.pdf}, abstract = { cplint is a suite of programs for reasoning and learning with Probabilistic Logic Programming languages that follow the distribution semantics. In this paper we describe how we have extended cplint to perform causal reasoning. In particular, we consider Pearl's do calculus for models where all the variables are measured. The two cplint modules for inference, PITA and MCINTYRE, have been extended for computing the effect of actions/interventions on these models. We also executed experiments comparing exact and approximate inference with conditional and causal queries, showing that causal inference is often cheaper than conditional inference. }, keywords = { Probabilistic Logic Programming, Distribution Semantics, Logic Programs with Annotated Disjunctions, ProbLog, Causal Inference, Statistical Relational Artificial Intelligence }, volume = {91}, pages = {216-232}, month = {December}, number = {Supplement C}, issn = {0888-613X}, scopus = {2-s2.0-84992199737}, wos = {WOS:000391080100020} }
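The do-calculus machinery mentioned in the abstract above rests on standard identities for fully observed (Markovian) models; stated generically (this is Pearl's truncated factorization, not a cplint-specific formula):
\[ P(\mathbf{v} \mid do(X = x)) = \prod_{i : V_i \neq X} P(v_i \mid \mathit{pa}_i) \quad \text{for assignments } \mathbf{v} \text{ consistent with } X = x, \]
which, for a single outcome $Y$ and adjustment set $Z$, yields the familiar adjustment formula $P(y \mid do(x)) = \sum_{z} P(y \mid x, z)\, P(z)$.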
@article{AlbBelCot17-IA-IJ, author = {Marco Alberti and Elena Bellodi and Giuseppe Cota and Fabrizio Riguzzi and Riccardo Zese}, title = {\texttt{cplint} on {SWISH}: Probabilistic Logical Inference with a Web Browser}, journal = {Intelligenza Artificiale}, publisher = {IOS Press}, copyright = {IOS Press}, year = {2017}, issn-print = {1724-8035}, issn-online = {2211-0097}, url = {http://ml.unife.it/wp-content/uploads/Papers/AlbBelCot-IA17.pdf}, abstract = { \texttt{cplint} on SWISH is a web application that allows users to perform reasoning tasks on probabilistic logic programs. Both inference and learning systems can be performed: conditional probabilities with exact, rejection sampling and Metropolis-Hasting methods. Moreover, the system now allows hybrid programs, i.e., programs where some of the random variables are continuous. To perform inference on such programs likelihood weighting and particle filtering are used. \texttt{cplint} on SWISH is also able to sample goals' arguments and to graph the results. This paper reports on advances and new features of \texttt{cplint} on SWISH, including the capability of drawing the binary decision diagrams created during the inference processes. }, keywords = { Logic Programming, Probabilistic Logic Programming, Distribution Semantics, Logic Programs with Annotated Disjunctions, Web Applications }, volume = {11}, number = {1}, doi = {10.3233/IA-170106}, pages = {47--64}, wos = {WOS:000399736500004} }
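For the hybrid programs mentioned in the entry above, conditional queries are estimated by likelihood weighting; a generic form of the estimator (standard importance-sampling notation, not the cplint API):
\[ \hat{P}(q \mid e) = \frac{\sum_{i=1}^{N} w_i \, \mathbb{1}[q \text{ holds in sample } i]}{\sum_{i=1}^{N} w_i}, \]
where each sample is drawn forward from the program with the evidence $e$ held fixed, and $w_i$ is the likelihood (weight) of $e$ under sample $i$.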
@article{BelLamRig17-SPE-IJ, author = {Elena Bellodi and Evelina Lamma and Fabrizio Riguzzi and Riccardo Zese and Giuseppe Cota}, title = {A web system for reasoning with probabilistic {OWL}}, journal = {Software: Practice and Experience}, publisher = {Wiley}, copyright = {Wiley}, year = {2017}, doi = {10.1002/spe.2410}, issn = {1097-024X}, month = {January}, pages = {125--142}, volume = {47}, number = {1}, scopus = {2-s2.0-84992412060}, url = {http://ml.unife.it/wp-content/uploads/Papers/BelLamRig-SPE16.pdf}, abstract = { We present the web application TRILL on SWISH, which allows the user to write probabilistic Description Logic (DL) theories and compute the probability of queries with just a web browser. Various probabilistic extensions of DLs have been proposed in the recent past, since uncertainty is a fundamental component of the Semantic Web. We consider probabilistic DL theories following our DISPONTE semantics. Axioms of a DISPONTE Knowledge Base (KB) can be annotated with a probability and the probability of queries can be computed with inference algorithms. TRILL is a probabilistic reasoner for DISPONTE KBs that is implemented in Prolog and exploits its backtracking facilities for handling the non-determinism of the tableau algorithm. TRILL on SWISH is based on SWISH, a recently proposed web framework for logic programming, based on various features and packages of SWI-Prolog (e.g., a web server and a library for creating remote Prolog engines and posing queries to them). TRILL on SWISH also allows users to cooperate in writing a probabilistic DL theory. It is free, open, and accessible on the Web at the url: \trillurl; it includes a number of examples that cover a wide range of domains and provide interesting Probabilistic Semantic Web applications. By building a web-based system, we allow users to experiment with Probabilistic DLs without the need to install a complex software stack. In this way we aim to reach out to a wider audience and popularize the Probabilistic Semantic Web. }, keywords = { Semantic Web, Web Applications, Description Logics, Probabilistic Description Logics, SWI-Prolog, Logic Programming } }
@article{RigBelZes17-IJAR-IJ, author = {Fabrizio Riguzzi and Elena Bellodi and Riccardo Zese and Giuseppe Cota and Evelina Lamma }, title = {A Survey of Lifted Inference Approaches for Probabilistic Logic Programming under the Distribution Semantics}, journal = {International Journal of Approximate Reasoning}, year = {2017}, publisher = {Elsevier}, address = {Amsterdam}, copyright = {Elsevier}, doi = {10.1016/j.ijar.2016.10.002}, url = {http://ml.unife.it/wp-content/uploads/Papers/RigBelZes-IJAR17.pdf}, volume = {80}, number = {Supplement C}, issn = {0888-613X}, pages = {313--333}, month = {January}, abstract = { Lifted inference aims at answering queries from statistical relational models by reasoning on populations of individuals as a whole instead of considering each individual singularly. Since the initial proposal by David Poole in 2003, many lifted inference techniques have appeared, by lifting different algorithms or using approximation involving different kinds of models, including parfactor graphs and Markov Logic Networks. Very recently lifted inference was applied to Probabilistic Logic Programming (PLP) under the distribution semantics, with proposals such as LP2 and Weighted First-Order Model Counting (WFOMC). Moreover, techniques for dealing with aggregation parfactors can be directly applied to PLP. In this paper we survey these approaches and present an experimental comparison on five models. The results show that WFOMC outperforms the other approaches, being able to exploit more symmetries. }, keywords = {Probabilistic Logic Programming, Lifted Inference, Variable Elimination, Distribution Semantics, ProbLog, Statistical Relational Artificial Intelligence }, scopus = {2-s2.0-84992199737}, wos = {WOS:000391080100020} }
@article{ZesBelRig18-AMAI-IJ, author = {Riccardo Zese and Elena Bellodi and Fabrizio Riguzzi and Giuseppe Cota and Evelina Lamma }, title = {Tableau Reasoning for Description Logics and its Extension to Probabilities}, journal = {Annals of Mathematics and Artificial Intelligence}, publisher = {Springer}, copyright = {Springer}, year = {2018}, issn-print = {1012-2443}, issn-online = {1573-7470}, url = {http://ml.unife.it/wp-content/uploads/Papers/ZesBelRig-AMAI16.pdf}, pdf = {http://rdcu.be/kONG}, month = {March}, day = {01}, volume = {82}, number = {1}, pages = {101--130}, doi = {10.1007/s10472-016-9529-3}, abstract = { The increasing popularity of the Semantic Web drove to a widespread adoption of Description Logics (DLs) for modeling real world domains. To help the diffusion of DLs, a large number of reasoning algorithms have been developed. Usually these algorithms are implemented in procedural languages such as Java or C++. Most of the reasoners exploit the tableau algorithm which features non-determinism, that is not easily handled by those languages. Prolog directly manages non-determinism, thus is a good candidate for dealing with the tableau's non-deterministic expansion rules. We present TRILL, for "Tableau Reasoner for descrIption Logics in proLog", that implements a tableau algorithm and is able to return explanations for queries and their corresponding probability, and TRILLP, for "TRILL powered by Pinpointing formulas", which is able to compute a Boolean formula representing the set of explanations for a query. Reasoning on real world domains also requires the capability of managing probabilistic and uncertain information. We show how TRILL and TRILLP can be used to compute the probability of queries to knowledge bases following DISPONTE semantics. Experiments comparing these with other systems show the feasibility of the approach.}, keywords = { Description Logics, Tableau, Prolog, Semantic Web}, scopus = {2-s2.0-84990986085} }
@article{GavLam18-FI-IJ, author = {Gavanelli, Marco and Lamma, Evelina and Riguzzi, Fabrizio and Bellodi, Elena and Zese, Riccardo and Cota, Giuseppe}, title = {Reasoning on Datalog+/- Ontologies with Abductive Logic Programming}, year = {2018}, journal = {Fundamenta Informaticae}, copyright = {IOS Press}, volume = {159}, doi = {10.3233/FI-2018-1658}, pages = {65--93}, pdf = {http://ml.unife.it/wp-content/uploads/Papers/GavLam-FI18.pdf}, scopus = {2-s2.0-85043572529} }
@article{ZesBelCot19-TPLP-IJ, title = {Probabilistic {DL} Reasoning with Pinpointing Formulas: A Prolog-based Approach}, doi = {10.1017/S1471068418000480}, journal = {Theory and Practice of Logic Programming}, publisher = {Cambridge University Press}, copyright = {Cambridge University Press}, author = {Zese, Riccardo and Cota, Giuseppe and Lamma, Evelina and Bellodi, Elena and Riguzzi, Fabrizio}, pages = {449--476}, year = {2019}, volume = {19}, number = {3}, pdf = {https://arxiv.org/pdf/1809.06180.pdf}, scopus = {2-s2.0-85060024345} }
@article{BelAlbRig20-TPLP-IJ, author = {Elena Bellodi and Marco Alberti and Fabrizio Riguzzi and Riccardo Zese}, title = {{MAP} Inference for Probabilistic Logic Programming}, journal = {Theory and Practice of Logic Programming}, publisher = {Cambridge University Press}, copyright = {Cambridge University Press}, year = {2020}, url = {https://arxiv.org/abs/2008.01394}, volume = {20}, doi = {10.1017/S1471068420000174}, pdf = {https://arxiv.org/pdf/2008.01394.pdf}, number = {5}, pages = {641--655} }
@article{RigBelZesAlbLam21-ML-IJ, author = {Riguzzi, Fabrizio and Bellodi, Elena and Zese, Riccardo and Alberti, Marco and Lamma, Evelina}, title = {Probabilistic inductive constraint logic}, journal = {Machine Learning}, year = {2021}, volume = {110}, issue = {4}, pages = {723-754}, doi = {10.1007/s10994-020-05911-6}, pdf = {https://link.springer.com/content/pdf/10.1007/s10994-020-05911-6.pdf}, publisher = {Springer}, issn = {08856125}, abstract = {Probabilistic logical models deal effectively with uncertain relations and entities typical of many real world domains. In the field of probabilistic logic programming usually the aim is to learn these kinds of models to predict specific atoms or predicates of the domain, called target atoms/predicates. However, it might also be useful to learn classifiers for interpretations as a whole: to this end, we consider the models produced by the inductive constraint logic system, represented by sets of integrity constraints, and we propose a probabilistic version of them. Each integrity constraint is annotated with a probability, and the resulting probabilistic logical constraint model assigns a probability of being positive to interpretations. To learn both the structure and the parameters of such probabilistic models we propose the system PASCAL for “probabilistic inductive constraint logic”. Parameter learning can be performed using gradient descent or L-BFGS. PASCAL has been tested on 11 datasets and compared with a few statistical relational systems and a system that builds relational decision trees (TILDE): we demonstrate that this system achieves better or comparable results in terms of area under the precision–recall and receiver operating characteristic curves, in a comparable execution time.} }
@article{BelAlbRig21-TPLP-IJ, author = {Elena Bellodi and Marco Gavanelli and Riccardo Zese and Evelina Lamma and Fabrizio Riguzzi}, title = {Nonground Abductive Logic Programming with Probabilistic Integrity Constraints}, journal = {Theory and Practice of Logic Programming}, publisher = {Cambridge University Press}, copyright = {Cambridge University Press}, year = {2021}, url = {https://arxiv.org/abs/2108.03033}, volume = {21}, doi = {10.1017/S1471068421000417}, pdf = {https://arxiv.org/pdf/2108.03033.pdf}, number = {5}, pages = {557--574} }
@article{ZesBelLucAlv21-IEEE-IJ, author = {Riccardo Zese and Elena Bellodi and Chiara Luciani and Stefano Alvisi}, title = {Neural Network Techniques for Detecting Intra-Domestic Water Leaks of Different Magnitude}, journal = {IEEE Access}, publisher = {IEEE}, year = {2021}, url = {https://ieeexplore.ieee.org/document/9530653}, volume = {9}, doi = {10.1109/ACCESS.2021.3111113}, pages = {126135--126147}, issn-online = {2169-3536} }
@article{AzzBellFer2022-IJAR-IJ, title = {Abduction with probabilistic logic programming under the distribution semantics}, journal = {International Journal of Approximate Reasoning}, volume = {142}, pages = {41-63}, year = {2022}, issn = {0888-613X}, doi = {10.1016/j.ijar.2021.11.003}, url = {https://www.sciencedirect.com/science/article/pii/S0888613X2100181X}, author = {Damiano Azzolini and Elena Bellodi and Stefano Ferilli and Fabrizio Riguzzi and Riccardo Zese}, keywords = {Abduction, Distribution semantics, Probabilistic logic programming, Statistical relational artificial intelligence}, abstract = {In Probabilistic Abductive Logic Programming we are given a probabilistic logic program, a set of abducible facts, and a set of constraints. Inference in probabilistic abductive logic programs aims to find a subset of the abducible facts that is compatible with the constraints and that maximizes the joint probability of the query and the constraints. In this paper, we extend the PITA reasoner with an algorithm to perform abduction on probabilistic abductive logic programs exploiting Binary Decision Diagrams. Tests on several synthetic datasets show the effectiveness of our approach.}, scopus = {2-s2.0-85119493622} }
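The task described in the abstract above can be written schematically as follows (notation ours, not from the paper): given a probabilistic logic program, a set of abducibles $A$, integrity constraints $IC$, and a query $q$, find
\[ \Delta^{*} = \operatorname*{arg\,max}_{\substack{\Delta \subseteq A \\ \Delta \text{ consistent with } IC}} P_{\Delta}(q, IC), \]
where $P_{\Delta}$ denotes the distribution defined by the program extended with the abduced facts $\Delta$.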
@article{ZesBell-SPE-IJ, author = {Zese, Riccardo and Bellodi, Elena}, title = {A web application for reasoning on probabilistic description logics knowledge bases}, journal = {Software: Practice and Experience}, pages = {1--22}, volume = {In Press}, keywords = {description logics, inference, probabilistic description logics, semantic web, web applications}, doi = {10.1002/spe.3212}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/spe.3212}, year = {2023} }
@article{AzzBelKieRig24-TPLP-IJ, title = {Solving Decision Theory Problems with Probabilistic Answer Set Programming}, author = {Damiano Azzolini and Elena Bellodi and Rafael Kiesel and Fabrizio Riguzzi}, year = {2024}, journal = {Theory and Practice of Logic Programming}, publisher = {Cambridge University Press}, pdf = {https://arxiv.org/pdf/2408.11371}, url = {https://arxiv.org/abs/2408.11371} }