author = {Esposito, Floriana and Ferilli, Stefano and Lamma, Evelina and
          Mello, Paola and Milano, Michela and Riguzzi, Fabrizio and
          Semeraro, Giovanni},
  title = {Cooperation of Abduction and Induction in Logic Programming},
  booktitle = {Abduction and Induction: Essays on their Relation and Integration},
  editor = {Flach, Peter A. and Kakas, Antonis C.},
  publisher = {Kluwer Academic Publishers},
  address = {Dordrecht, \TheNetherlands},
  year = {2000},
  month = apr,
  pages = {233--252},
  abstract = {We propose an integration of abduction and induction where the two
inference processes cooperate in order to perform more powerful inferences.
We assume the definitions of abduction and induction as given in Abductive
Logic Programming and Inductive Logic Programming.
Abduction helps
induction by generating atomic hypotheses that can be used as new examples
or for completing an incomplete background knowledge.
Induction helps
abduction by generalizing explanations.

We present a learning algorithm that integrates abduction and induction.
The algorithm solves a new learning problem where both the background and the
target theory are abductive theories and abductive derivability is used as the
example coverage relation.

We then show how the algorithm can be applied to learning from incomplete
knowledge and learning exceptions.},
  keywords = {Abduction, Negation, Integrity_Constraints},
  http = {},
  pdf = {},
  copyright = {Kluwer Academic Publishers},
  ebook-isbn = {978-94-017-0606-3},
  doi = {10.1007/978-94-017-0606-3},
  hardcover-isbn = {978-0-7923-6250-0},
  softcover-isbn = {978-90-481-5433-3},
  series = {Applied Logic Series},
  issn = {1386-2790}
}
  author = {Evelina Lamma AND Fabrizio Riguzzi AND Sergio Storari},
  title = {Improving the K2 Algorithm Using
Association Rule Parameters},
  booktitle = {Modern Information Processing: From Theory to
  editor = {Bernadette Bouchon-Meunier and Giulianella Coletti and Ronald
  publisher = {Elsevier},
  address = {Amsterdam, \TheNetherlands},
  isbn = {0-444-52075-9},
  year = {2006},
  pages = {207--217},
  doi = {10.1016/B978-044452075-3/50018-2},
  pdf = {},
  url = {},
  abstract = {
A Bayesian network is an appropriate tool to work with the
uncertainty  that is typical of real-life applications. Bayesian
network arcs represent statistical dependence between different
variables and can be automatically elicited from database by
Bayesian network learning algorithms such as K2. In the data
mining field, association rules can also be interpreted  as
expressing statistical dependence relations. In this paper we
present an extension of K2 called K2-rules that exploits a
parameter normally defined in relation to association rules for
learning Bayesian networks. We compare K2-rules with K2 and TPDA
on the problems of learning four Bayesian networks. The
experiments show that K2-rules improves both K2 and TPDA with
respect to the quality of the learned network and K2 with respect
to the execution time},
  keywords = {
Bayesian Networks, Machine Learning, Association
  author = {Massimiliano Cattafi and Evelina Lamma and Fabrizio Riguzzi and Sergio Storari},
  title = {Incremental Declarative Process Mining},
  booktitle = {Smart Information and Knowledge Management: Advances, Challenges,
       and Critical Issues},
  year = {2010},
  editor = {Ngoc Thanh Nguyen and Edward Szczerbicki},
  publisher = {Springer},
  address = {Heidelberg, \Germany},
  series = {Studies in Computational Intelligence},
  issn = {1860-949X},
  isbn = {978-3-642-04583-7},
  doi = {10.1007/978-3-642-04584-4_5},
  volume = {260},
  pages = {103--127},
  abstract = {Business organizations achieve their mission by performing a number 
of processes. These span from simple sequences of actions to complex 
structured sets of activities with complex interrelation among them. The 
field of Business Processes Management studies how to describe, analyze, 
preserve and improve processes. In particular the subfield of Process 
Mining aims at  inferring a model of the processes from logs (i.e. the 
collected records of performed activities).
Moreover,  processes can change over time to reflect mutated conditions, therefore 
it is often necessary to update the model. We call this activity Incremental 
Process Mining. To solve this problem, we modify the process mining system 
DPML  to obtain IPM (Incremental Process Miner), which employs a subset of the 
SCIFF language to represent models and adopts techniques developed in 
Inductive Logic Programming to perform theory revision. The experimental 
results show that is more convenient to revise a theory rather than 
learning a new one from scratch.
  keywords = {Business Processes, Process Mining, Theory Revision},
  url = {},
  pdf = {},
  copyright = {Springer},
  scopus = {2-s2.0-74049114164}
  editor = { Ting Yu and Nitesh Chawla and Simeon Simoff},
  author = {Marco Gavanelli and Fabrizio Riguzzi and Michela Milano and Paolo Cagnoli},
  title = {Constraint and Optimization techniques for supporting  Policy Making},
  booktitle = {Computational Intelligent Data Analysis for Sustainable Development},
  year = {2013},
  series = {Data Mining and Knowledge Discovery Series},
  publisher = {Chapman \& Hall/CRC},
  chapter = {12},
  pages = {361-382},
  abstract = {Public institutions develop policies and plans in order to achieve
economic and social development while preserving the environment. This
is a difficult task where computational intelligence data analysis
techniques can provide an important contribution. The policy maker has
to take decisions by optimizing a set of often conflicting objectives
and satisfying a set of constraints. The aim is to reduce negative
impacts and enhance positive impacts of plan decisions on the
environment, society and economy, exploiting all the data that is
available on the territory that is targeted.
Up to now, only agent-based simulation models have been proposed in
the literature for policy making. In these models, agents represent
the parties involved in the decision making and implementation process
and simulation is used in order to evaluate the impacts of the policy.
Agent-based simulation models provide ``individual level models'': we
claim that the policy planning activity needs also a global
perspective that faces the problem at a global level while tightly
interacting with the individual level model.
We thus propose a mathematical optimization model that can be applied to
regional planning. In the model, decision variables represent
political decisions (for instance the magnitude of a given activity in
the regional plan), potential outcomes are associated with each
decision by considering the available data,  constraints limit possible
combination of assignments of decision variables, and objectives
can be used either to evaluate alternative solutions, or translated
into additional constraints. The model has been solved with Constraint
Programming techniques.
The model has been tested on the Emilia-Romagna regional energy plan.
The results have been validated with an expert in policy making and
impact assessment to evaluate the accuracy of the results.},
  url = {},
  doi = {10.1201/b14799-18},
  pdf = {},
  isbn = {978-1-43-989594-8},
  isbn = {978-1-4398-9595-5},
  address = {Abingdon, UK}
  year = {2014},
  isbn = {978-3-319-13412-3},
  booktitle = {Uncertainty Reasoning for the Semantic Web III},
  series = {Lecture Notes in Computer Science},
  editor = {Bobillo, Fernando and Carvalho, Rommel N. and Costa, Paulo C.G. and d'Amato, Claudia and Fanizzi, Nicola and Laskey, Kathryn B. and Laskey, Kenneth J. and Lukasiewicz, Thomas and Nickles, Matthias and Pool, Michael},
  doi = {10.1007/978-3-319-13413-0_4},
  title = {Learning Probabilistic Description Logics},
  publisher = {Springer International Publishing},
  copyright = {Springer International Publishing},
  author = {Riguzzi, Fabrizio and Bellodi, Elena and Lamma, Evelina and Zese, Riccardo and Cota, Giuseppe},
  pages = {63-78},
  pdf = {},
  language = {English},
  volume = {8816},
  note = {The original publication is available at
  year = {2014},
  isbn = {978-3-319-13412-3},
  booktitle = {Uncertainty Reasoning for the Semantic Web III},
  series = {Lecture Notes in Computer Science},
  editor = {Bobillo, Fernando and Carvalho, Rommel N. and Costa, Paulo C.G. and d'Amato, Claudia and Fanizzi, Nicola and Laskey, Kathryn B. and Laskey, Kenneth J. and Lukasiewicz, Thomas and Nickles, Matthias and Pool, Michael},
  doi = {10.1007/978-3-319-13413-0_5},
  title = {Semantics and Inference for Probabilistic Description Logics},
  publisher = {Springer International Publishing},
  copyright = {Springer International Publishing},
  author = {Zese, Riccardo and Bellodi, Elena and Lamma, Evelina and Riguzzi, Fabrizio and Aguiari, Fabiano},
  pages = {79-99},
  language = {English},
  volume = {8816},
  url = {},
  note = {The original publication is available at
  author = {Arnaud {Nguembang Fadja} and Fabrizio Riguzzi},
  title = {Probabilistic Logic Programming in Action},
  booktitle = {Towards Integrative Machine Learning and Knowledge Extraction: BIRS Workshop, Banff, AB, Canada, July 24-26, 2015, Revised Selected Papers},
  year = {2017},
  editor = {Andreas Holzinger and Randy Goebel and Massimo Ferri and Vasile Palade},
  publisher = {Springer},
  address = {Heidelberg, \Germany},
  series = {Lecture Notes in Computer Science},
  volume = {10344},
  copyright = {Springer},
  doi = {10.1007/978-3-319-69775-8_5},
  pdf = {},
  abstract = {Probabilistic Programming (PP) has  recently emerged as an
effective approach  for building complex probabilistic models. Until recently PP was mostly
focused on
 functional programming  while now Probabilistic Logic Programming (PLP)
 forms a significant subfield.
In this paper we aim at presenting a quick overview of the features of current languages and systems
We first present the basic
semantics  for probabilistic logic programs and then consider extensions for
dealing with infinite structures and continuous random variables.
To show the modeling features of PLP in action, we present several examples:
 a simple generator of random 2D tile maps,
an encoding of Markov Logic Networks, the  truel game,
the coupon collector problem, the one-dimensional random walk, latent Dirichlet allocation and the Indian GPA problem.
These examples show the maturity of PLP.
  pages = {89--116},
  keywords = {Probabilistic Logic Programming, Probabilistic Logical Inference, Hybrid programs},
  scopus = {2-s2.0-85033590324},
  issn = {1860-949X},
  isbn-print = {978-3-319-69774-1},
  isbn-online = {978-3-319-69775-8},
  note = {The final publication is available at Springer via
  title = {A Survey of Probabilistic Logic Programming},
  author = {Fabrizio Riguzzi and Theresa Swift},
  year = {2018},
  editor = {Michael Kifer and Yanhong A. Liu},
  booktitle = {Declarative Logic Programming: Theory, Systems, and Applications},
  publisher = {Association for Computing Machinery and Morgan \& Claypool},
  pdf = {},
  doi = {10.1145/3191315.3191319},
  isbn = {978-1-97000-199-0},
  pages = {185-228},
  address = {New York, NY, USA},
  abstract = {The combination of logic programming and probability has proven useful for modeling domains with complex and uncertain relationships among elements. Many probabilistic logic programming (PLP) semantics have been proposed; among these, the distribution semantics has recently gained increased attention and has been adopted by many languages such as the Independent Choice Logic, PRISM, Logic Programs with Annotated Disjunctions, ProbLog, and P-log.

This chapter reviews the distribution semantics, beginning with the simplest case with stratified Datalog programs, and showing how the definition is extended to programs that include function symbols and non-stratified negation. The languages that adopt the distribution semantics are also discussed and compared both to one another and to Bayesian networks.We then survey existing approaches for inference in PLP languages that follow the distribution semantics. We concentrate on the PRISM, ProbLog, and PITA systems. The PRISM system was one of the first and can be applied when certain restrictions on the program hold. ProbLog introduced the use of Binary Decision Diagrams that provide a computational basis for removing these restrictions and so performing inference over more general classes of logic programs. PITA speeds up inference by using tabling and answer subsumption. It supports general probabilistic programs, but can easily be optimized for simpler settings and even possibilistic uncertain reasoning. The chapter also discusses the computational complexity of the various approaches together with techniques for limiting it by resorting to approximation.},
  keywords = {Probabilistic Logic Programming, Distribution Semantics, Statistical Relational Artificial
  title = {A Framework for Reasoning on Probabilistic Description Logics},
  author = {Cota, Giuseppe and Zese, Riccardo and Bellodi, Elena and Lamma, Evelina and Riguzzi, Fabrizio},
  booktitle = {Applications and Practices in Ontology Design, Extraction, and Reasoning},
  series = {Studies on the Semantic Web},
  volume = {49},
  editor = {Cota, Giuseppe and Daquino, Marilena and Pozzato, Gian Luca},
  isbn = {978-1-64368-142-9},
  doi = {10.3233/SSW200040},
  language = {English},
  pages = {127-144},
  year = {2020},
  publisher = {{IOS} Press},
  abstract = {While there exist several reasoners for Description Logics, very few of them can cope with uncertainty. BUNDLE is an inference framework that can exploit several OWL (non-probabilistic) reasoners to perform inference over Probabilistic Description Logics.
	In this chapter, we report the latest advances implemented in BUNDLE. In particular, BUNDLE can now interface with the reasoners of the TRILL system, thus providing a uniform method to execute probabilistic queries using different settings. BUNDLE can be easily extended and can be used either as a standalone desktop application or as a library in OWL API-based applications that need to reason over Probabilistic Description Logics.
	The reasoning performance heavily depends on the reasoner and method used to compute the probability. We provide a comparison of the different reasoning settings on several datasets.
  copyright = {Akademische Verlagsgesellschaft AKA GmbH, Berlin}
  author = {Zese, Riccardo and Bellodi, Elena and Fraccaroli, Michele and Riguzzi, Fabrizio and Lamma, Evelina},
  editor = {Micheloni, Rino and Zambelli, Cristian},
  title = {Neural Networks and Deep Learning Fundamentals},
  booktitle = {Machine Learning and Non-volatile Memories},
  year = {2022},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {23--42},
  abstract = {In the last decade, Neural Networks (NNs) have come to the fore as one of the most powerful and versatile approaches to many machine learning tasks. Deep Learning (DL)Deep Learning (DL), the latest incarnation of NNs, is nowadays applied in every scenario that needs models able to predict or classify data. From computer vision to speech-to-text, DLDeep Learning (DL) techniques are able to achieve super-human performance in many cases. This chapter is devoted to give a (not comprehensive) introduction to the field, describing the main branches and model architectures, in order to try to give a roadmap of this area to the reader.},
  isbn = {978-3-031-03841-9},
  doi = {10.1007/978-3-031-03841-9_2},
  url = {}

This file was generated by bibtex2html 1.98.