journals.bib

@article{LamMelMil99-InfSciences99-IJ,
  author = {Evelina Lamma and Paola Mello and Michela Milano and Fabrizio Riguzzi},
  title = {Integrating Induction and Abduction in Logic
Programming},
  journal = {Information Sciences},
  pages = {25--54},
  volume = {116},
  number = {1},
  year = 1999,
  month = may,
  abstract = {We propose an approach for the integration of abduction and induction in
Logic Programming. We define an Abductive Learning Problem as an
extended Inductive Logic Programming problem where both the
background and target theories are abductive theories and where
abductive derivability is used as the coverage relation instead
of deductive derivability. The two main benefits of this
integration are the possibility of learning in the presence of
incomplete knowledge and the increased expressive power of the
background and target theories.  We present the system LAP
(Learning Abductive Programs) that is able to solve this extended
learning problem and we describe, by means of examples, four
different learning tasks that can be performed by the system:
learning from incomplete knowledge, learning rules with
exceptions, learning from integrity constraints and learning
recursive predicates.},
  keywords = {Abduction, Negation, Integrity Constraints},
  publisher = {Elsevier Science},
  address = {Amsterdam, \TheNetherlands},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/LamMelMil-IS99.pdf},
  doi = {10.1016/S0020-0255(98)10092-0},
  url = {http://www.sciencedirect.com/science/article/pii/S0020025598100920},
  copyright = {Elsevier}
}
@article{LamPerRig00-ML-IJ,
  author = {Evelina Lamma and Fabrizio Riguzzi and Lu\'\i{}s Moniz Pereira},
  title = {Strategies in Combined Learning via Logic Programs},
  journal = {Machine Learning},
  volume = {38},
  number = {1/2},
  year = {2000},
  month = {January/February},
  pages = {63--87},
  keywords = {ILP Implementation,ILP Theory,Knowledge Representation,Negation},
  abstract = {We discuss the adoption of a three-valued setting for
inductive concept learning. Distinguishing between what is true, what
is false and what is unknown can be useful in situations where decisions
have to be taken on the basis of scarce, ambiguous, or downright contradictory
information. In a three-valued setting, we learn a definition for both
the target concept and its opposite, considering positive and negative
examples as instances of two disjoint classes. To this end, we
adopt Extended Logic Programs (ELP) under a Well-Founded Semantics
with explicit negation (WFSX) as the representation formalism for learning,
and show how ELPs can be used to specify combinations of strategies
in a declarative way, while also coping with contradiction and exceptions.
Explicit negation is used to represent the opposite concept, while
default negation is used to ensure consistency and to handle exceptions
to general rules. Exceptions are represented by examples covered by
the definition for a concept that belong to the training set for the
opposite concept.

Standard Inductive Logic Programming techniques are employed to learn
the concept and its opposite. Depending on the adopted technique, we
can learn the most general or the least general definition. Thus, four
epistemological varieties occur, resulting from the combination of
most general and least general solutions for the positive and negative
concept. We discuss the factors that should be taken into account when
choosing and strategically combining the generality levels for positive
and negative concepts.

In the paper, we also handle the issue of strategic combination of
possibly contradictory learnt definitions of a predicate and its explicit
negation.

All in all, we show that extended logic programs under well-founded
semantics with explicit negation add expressivity to learning tasks,
and allow the tackling of a number of representation and strategic
issues in a principled way.

Our techniques have been implemented and examples run on a state-of-the-art
logic programming system with tabling which implements WFSX.},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/LamRigPer-ML00.pdf},
  publisher = {Springer Netherlands},
  address = {Dordrecht, \TheNetherlands},
  doi = {10.1023/A:1007681906490},
  http = {http://link.springer.com/article/10.1023%2FA%3A1007681906490},
  copyright = {Springer Netherlands},
  note = {The original publication is available at \url{http://www.springerlink.com}}
}
@article{KakRig00-NGC-IJ,
  author = {Antonis C. Kakas and Fabrizio Riguzzi},
  title = {Abductive Concept Learning},
  journal = {New Generation Computing},
  volume = {18},
  number = {3},
  year = {2000},
  pages = {243--294},
  keywords = {Abduction, Integrity Constraints, Multiple Predicate Learning},
  address = {Tokyo, \Japan},
  month = may,
  publisher = {Ohmsha, Ltd. and Springer},
  abstract = {We investigate how abduction and induction can be integrated into a common
learning framework. In particular, we consider an extension of Inductive
Logic Programming (ILP) for the case in which both the background and the
target theories are abductive logic programs and where an abductive notion
of entailment is used as the basic coverage relation for learning. This extended
learning framework has been called Abductive Concept Learning (ACL). In
this framework, it is possible to learn with incomplete background
information about the training examples by exploiting the hypothetical
reasoning of abduction. We also study how the ACL framework can be
used as a basis for multiple predicate learning.

An algorithm for ACL is developed by suitably extending the top-down ILP
method: the deductive proof procedure of Logic Programming is replaced by
an abductive proof procedure for Abductive Logic Programming. This
algorithm also incorporates a phase for learning integrity constraints by
suitably employing a system that learns from interpretations, such as ICL. The
framework of ACL thus integrates the two ILP settings of explanatory
(predictive) learning and confirmatory (descriptive) learning. The above
algorithm has been implemented in a system also called ACL.\footnote{The learning systems developed in this work, together
with sample experimental data, can be found at the following
address: {\tt http://www-lia.deis.unibo.it/Software/ACL/}} Several
experiments have been performed that show the effectiveness of the ACL
framework in learning from incomplete data and its appropriate use for
multiple predicate learning.},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/KakRIg-NGC00.pdf},
  http = {http://link.springer.com/article/10.1007%2FBF03037531},
  doi = {10.1007/BF03037531},
  copyright = {Ohmsha, Ltd. and Springer}
}
@article{CucMelPic01-IDA-IJ,
  author = {Rita Cucchiara and Paola Mello and Massimo Piccardi and Fabrizio Riguzzi},
  title = {An Application of Machine Learning and Statistics to Defect Detection},
  journal = {Intelligent Data Analysis},
  year = {2001},
  volume = {5},
  number = {2},
  pages = {151--164},
  publisher = {{IOS} Press},
  address = {Amsterdam, \TheNetherlands},
  month = {March/April},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/CucMelPic-IDA01.pdf},
  doi = {10.3233/IDA-2001-5205},
  copyright = {Rita Cucchiara, Paola Mello, Massimo Piccardi, Fabrizio Riguzzi, exclusively licensed to {IOS} Press}
}
@article{LamMaeMel01-ENTCS-IJ,
  author = {Evelina Lamma and Leonardo Maestrami and Paola Mello and Fabrizio Riguzzi
    and Sergio Storari},
  title = {Rule-based Programming for Building Expert Systems: a Comparison in the Microbiological Data Validation and Surveillance Domain},
  journal = {Electronic Notes in Theoretical Computer Science},
  volume = {59},
  number = {4},
  publisher = {Elsevier Science Publishers},
  editor = {Mark van den Brand and Rakesh Verma},
  year = {2001},
  address = {Amsterdam, \TheNetherlands},
  doi = {10.1016/S1571-0661(04)00299-3},
  url = {http://www.sciencedirect.com/science/article/pii/S1571066104002993},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/LamMaeMel-ENTCS01.pdf},
  month = sep,
  abstract = {In this work, we compare three rule-based programming tools used for building an
expert system for microbiological laboratory data validation and bacterial infection
monitoring. The first prototype of the system was implemented in KAPPA-PC. We
report on the implementation and performance by comparing KAPPA-PC with two
other more recent tools, namely JESS and ILOG JRULES. In order to test each
tool, we built three simple test applications capable of performing some of the
tasks that are peculiar to our expert system.},
  keywords = {Expert Systems, Knowledge-based Systems, Microbiology}
}
@article{LamMelRig03-NGC-IJ,
  author = {Evelina Lamma and Fabrizio Riguzzi and Sergio Storari and Paola Mello and
   Annamaria Nanetti},
  title = {Discovering Validation Rules from Micro-biological Data},
  journal = {New Generation Computing},
  year = {2003},
  volume = {21},
  number = {2},
  pages = {123--134},
  publisher = {Ohmsha, Ltd. and Springer},
  address = {Tokyo, \Japan},
  month = feb,
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/LamRigStoMelNan-NGC03.pdf},
  http = {http://www.springerlink.com/content/b816tm18j5715810},
  doi = {10.1007/BF03037630},
  copyright = {Ohmsha, Ltd. and Springer},
  abstract = {A huge amount of data is collected daily from clinical
microbiology laboratories. These data concern the resistance or
susceptibility of bacteria to tested antibiotics. Almost all microbiology
laboratories follow standard antibiotic testing guidelines which suggest
antibiotic test execution methods and result interpretation and validation
(among them, those annually published by NCCLS). Guidelines basically
specify, for each species, the antibiotics to be tested, how to interpret
the results of tests and a list of exceptions regarding particular
antibiotic test results. Even if these standards are well established,
they do not consider peculiar features of a given hospital laboratory,
which possibly influence the antimicrobial test results and the further
validation process.
In order to improve and better tailor the validation process, we have
applied knowledge discovery techniques, and data mining in particular,
to microbiological data with the purpose of discovering new validation
rules, not yet included in NCCLS guidelines, but considered plausible and
correct by interviewed experts. In particular, we applied the knowledge
discovery process in order to find (association) rules relating to each other
the susceptibility or resistance of a bacterium to different antibiotics.
This approach is not antithetic, but complementary to that based on
NCCLS rules: it proved very effective in validating some of them, and
also in extending that compendium. In this respect, the newly discovered
knowledge has led microbiologists to become aware of new correlations among
some antimicrobial test results which were previously unnoticed. Last
but not least, the newly discovered rules, taking into account the history
of the considered laboratory, are better tailored to the hospital situation,
and this is very important since some resistances to antibiotics are specific
to particular, local hospital environments.},
  keywords = {Knowledge Discovery and Data mining, Microbiology, Knowledge Based Systems, Knowledge Elicitation}
}
@article{LamRigPer03-NGC-IJ,
  author = {Evelina Lamma and Fabrizio Riguzzi and Lu\'\i{}s Moniz Pereira},
  title = {Belief Revision via {L}amarckian Evolution},
  journal = {New Generation Computing},
  abstract = {We present a system for performing belief revision in a
multi-agent environment.  The system is called GBR (Genetic
Belief Revisor) and it is based on a genetic algorithm. In this
setting, different individuals are exposed to different
experiences. This may happen because the world surrounding an
agent changes over time or because we allow agents to explore
different parts of the world. The algorithm permits the exchange
of chromosomes between different agents and combines two different
evolution strategies, one based on Darwin's and the other on
Lamarck's evolutionary theory. The algorithm therefore also includes
a Lamarckian operator that changes the memes of an agent in
order to improve their fitness. The operator is implemented by
means of a belief revision procedure that, by tracing logical
derivations, identifies the memes leading to contradiction.
Moreover, the algorithm comprises a special crossover mechanism
for memes in which a meme can be acquired from another agent only
if the other agent has ``accessed'' the meme, i.e. if an
application of the Lamarckian operator has read or modified the
meme.


Experiments have been performed on the $n$-queen problem and on a
problem of digital circuit diagnosis. In the case of the
$n$-queen problem, the addition of the Lamarckian operator in the
single agent case improves the fitness of the best solution. In
both cases the experiments show that the distribution of
constraints, even if it may lead to a reduction of the fitness of
the best solution, does not produce a significant reduction.},
  publisher = {Ohmsha, Ltd. and Springer},
  address = {Tokyo, \Japan},
  keywords = {Genetic_Algorithms,Theory_Revision},
  year = {2003},
  volume = {21},
  number = {3},
  month = aug,
  pages = {247--275},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/LamRigPer-NGC03.pdf},
  http = {http://www.springerlink.com/content/063764w6n3847825/},
  doi = {10.1007/BF03037475},
  copyright = {Ohmsha, Ltd. and Springer}
}
@article{LamMelRig04-CJ-IJ,
  author = {Evelina  Lamma and Paola Mello and Fabrizio Riguzzi},
  title = {A System for Measuring Function Points from an {ER}-{DFD} Specification},
  journal = {The Computer Journal},
  abstract = {We present a tool for measuring the Function Point
software metric from the specification of a software system
expressed in the form of an Entity Relationship diagram plus a
Data Flow Diagram (ER-DFD).  First, the informal and general
Function Point counting rules are translated into rigorous rules
expressing properties of the ER-DFD.  Then, the rigorous rules
are translated into Prolog.  The measures given by the system on
a number of case studies are in accordance with those of human
experts.},
  publisher = {Oxford University Press},
  address = {Oxford, \UK},
  keywords = {Software Engineering, Software Metrics, Function Points},
  year = {2004},
  volume = {47},
  number = {3},
  pages = {358--372},
  month = may,
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/fun.pdf},
  issn = {0010-4620},
  doi = {10.1093/comjnl/47.3.358},
  copyright = {Evelina  Lamma, Paola Mello and Fabrizio Riguzzi, licensed exclusively to The British Computer Society}
}
@article{LamMelNan06-TITB-IJ,
  author = {Evelina Lamma and Paola Mello and Annamaria Nanetti and
  Fabrizio Riguzzi and Sergio Storari and Gianfranco Valastro},
  title = {Artificial Intelligence Techniques for Monitoring Dangerous Infections},
  journal = {IEEE Transactions on Information Technology in Biomedicine},
  year = {2006},
  publisher = {IEEE Computer Society Press},
  address = {Washington, DC, \USA},
  volume = {10},
  number = {1},
  pages = {143--155},
  month = jan,
  issn = {1089-7771},
  doi = {10.1109/TITB.2005.855537},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/LamMelNanRigStoVal-TITB06.pdf},
  abstract = {
The monitoring and detection of nosocomial infections is a very
important problem arising in hospitals. A hospital-acquired or
nosocomial infection is a disease that develops after the
admission into the hospital and it is the consequence of a
treatment, not necessarily a surgical one, performed by the
medical staff. Nosocomial infections are dangerous because they
are caused by bacteria which have dangerous (critical) resistance
to antibiotics. This problem is very serious all over the world.
In Italy, actually almost 5-8\% of the patients admitted into
hospitals develop this kind of infection. In order to reduce this
figure, policies for controlling infections should be adopted by
medical practitioners. In order to support them in this complex
task, we have developed a system, called MERCURIO, capable of
managing different aspects of the problem. The objectives of this
system are the validation of microbiological data and the
creation of a real time epidemiological information system. The
system is useful for laboratory physicians, because it supports
them in the execution of the microbiological analyses; for
clinicians, because it supports them in the definition of the
prophylaxis, of the most suitable antibiotic therapy and in the
monitoring of patients' infections, and for epidemiologists,
because it allows them to identify outbreaks and to study
infection dynamics. In order to achieve these objectives we have
adopted expert system and data mining techniques. We have also
integrated a statistical module that monitors the diffusion of
nosocomial infections over time in the hospital and that strictly
interacts with the knowledge based module. Data mining techniques
have been used for improving the system knowledge base. The
knowledge discovery process is not antithetic, but complementary
to the one based on manual knowledge elicitation. In order to
verify the reliability of the tasks performed by MERCURIO and the
usefulness of the knowledge discovery approach, we performed a
test based on a dataset of real infection events. In the
validation task MERCURIO achieved an accuracy of 98.5\%, a
sensitivity of 98.5\% and a specificity of 99\%. In the therapy
suggestion task MERCURIO achieved very high accuracy and
specificity as well. The executed test also provided many insights to
the experts (we discovered some of their mistakes). The knowledge
discovery approach was very effective in validating part of
MERCURIO knowledge base and also in extending it with new
validation rules, confirmed by  interviewed microbiologists and
peculiar to the hospital laboratory under consideration.},
  keywords = {Microbiology,  Knowledge Based Systems, Decision Support Systems,
Data Mining, Classification},
  copyright = {IEEE}
}
@article{Rig08-ML-IJ,
  author = {Fabrizio Riguzzi},
  title = {{ALLPAD}: Approximate Learning of Logic Programs with Annotated Disjunctions},
  journal = {Machine Learning},
  note = {The original publication is available at \url{http://www.springerlink.com}},
  year = {2008},
  volume = {70},
  number = {2--3},
  month = mar,
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/Rig-ML07.pdf},
  doi = {10.1007/s10994-007-5032-8},
  abstract = {Logic Programs with Annotated Disjunctions (LPADs) provide a simple 
and elegant framework for representing probabilistic knowledge in logic programming. 
In this paper we consider the problem of learning ground LPADs starting from a set of 
interpretations annotated with their probability. We present the system ALLPAD for 
solving this problem. ALLPAD modifies the previous system LLPAD in order to tackle 
real world learning problems more effectively. This is achieved by looking for an 
approximate solution rather than a perfect one. A number of experiments have been 
performed on real and artificial data for evaluating ALLPAD, showing the feasibility 
of the approach.},
  keywords = {Inductive logic programming, Probabilistic logic programming, Statistical relational learning,
 Logic programs with annotated disjunctions},
  pages = {207--223},
  publisher = {Springer},
  address = {Heidelberg, \Germany},
  copyright = {Springer}
}
@article{Rig09-LJIGPL-IJ,
  author = {Fabrizio Riguzzi},
  title = {Extended Semantics and Inference for the {Independent Choice Logic}},
  journal = {Logic Journal of the IGPL},
  publisher = {Oxford University Press},
  volume = {17},
  number = {6},
  pages = {589--629},
  address = {Oxford, \UK},
  year = {2009},
  abstract = {The Independent Choice Logic (ICL) is a  language for expressing 
probabilistic information in logic programming that adopts a distribution 
semantics: an ICL theory defines a distribution over a set of possible worlds 
that are normal logic programs. The probability of a query is then given by the 
sum of the probabilities of worlds where the query is true.

The ICL semantics requires the theories to be acyclic. This is a strong 
limitation that rules out many interesting programs.
In this paper we present an extension of the ICL semantics that allows theories 
to be modularly acyclic.

Inference with ICL can be performed with the Cilog2 system that  computes 
explanations to queries and then  makes them mutually incompatible by means of 
an iterative algorithm.

We propose the system PICL (for Probabilistic Inference with ICL), which computes
the explanations to queries by means of a modification of SLDNF-resolution
and then makes them mutually incompatible by means of Binary Decision Diagrams.

PICL and Cilog2 are compared on problems that involve computing the probability 
of a connection between two nodes in biological graphs and social networks. 
PICL turned out to be more efficient, handling larger networks and more complex queries
in a shorter time than Cilog2. This is true both for marginal and for 
conditional queries.
},
  doi = {10.1093/jigpal/jzp025},
  keywords = {Probabilistic Logic Programming, Independent Choice Logic, Modularly acyclic programs, SLDNF-Resolution},
  copyright = {Fabrizio Riguzzi, exclusively licensed to Oxford University Press},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/Rig09-LJIGPL-IJ.pdf}
}
@article{CheLamMel09-TOPNOC-IJ,
  author = {Federico Chesani and Evelina Lamma and
Paola Mello and Marco Montali   and Fabrizio Riguzzi and Sergio
Storari},
  title = {Exploiting Inductive Logic Programming Techniques for Declarative 
Process Mining},
  journal = {LNCS Transactions on Petri Nets and Other Models of Concurrency, 
{ToPNoC} {II}},
  year = {2009},
  publisher = {Springer},
  address = {Heidelberg, \Germany},
  note = {The original publication is available at \url{http://www.springerlink.com}},
  series = {Lecture Notes in Computer Science},
  volume = {5460},
  pages = {278--295},
  doi = {10.1007/978-3-642-00899-3_16},
  issn = {1867-7193},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/CheLamMel-TOPNOC09.pdf},
  url = {http://www.springerlink.com/content/c4j2k38675588759/},
  abstract = {In the last few years, there has been a growing interest in the
adoption of declarative paradigms for modeling and verifying process
models. These paradigms provide an abstract and human understandable
way of specifying constraints that must hold among activity
executions rather than focusing on a specific procedural solution.
Mining such declarative descriptions is still an open challenge. In
this paper, we present a logic-based approach for tackling this
problem.  It relies on Inductive Logic Programming techniques and,
in particular, on a modified version of the Inductive Constraint
Logic algorithm. We investigate how, by properly tuning the learning
algorithm, the approach can be adopted to mine models expressed in
the ConDec notation, a graphical language for the declarative
specification of business processes. Then, we sketch how such a
mining framework has been concretely implemented as a ProM plug-in
called DecMiner. We finally discuss the effectiveness of the
approach by means of an example which shows the ability of the
language to model concurrent activities and of DecMiner to learn
such a model.},
  keywords = {Process Mining, Inductive Logic Programming, Declarative Process Languages},
  copyright = {Springer}
}
@article{StoRigLam09-IDA-IJ,
  author = {Sergio Storari and Fabrizio Riguzzi and Evelina Lamma},
  title = {Exploiting Association and Correlation Rules Parameters for Learning 
Bayesian Networks},
  journal = {Intelligent Data Analysis},
  year = {2009},
  pages = {689--701},
  publisher = {{IOS} Press},
  volume = {13},
  number = {5},
  address = {Amsterdam, \TheNetherlands},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/StoRigLam-IDA09.pdf},
  doi = {10.3233/IDA-2009-0388},
  abstract = { In data mining, association and correlation rules
are inferred from data in order to highlight  statistical dependencies among 
attributes. The metrics defined for evaluating these rules can be exploited to 
score relationships between attributes in Bayesian network learning. In this 
paper, we propose two novel methods for learning Bayesian networks from data 
that are
based on the K2 learning algorithm and that improve it by exploiting parameters
normally defined for association and correlation rules.
In particular, we propose the algorithms K2-Lift and K2-$X^{2}$, which exploit
the lift metric and the $X^2$ metric, respectively. We compare
K2-Lift and K2-$X^{2}$ with K2 on artificial data and on
three test Bayesian networks. The experiments show that both our algorithms
improve K2 with respect to the quality of the
learned network. Moreover, a comparison of K2-Lift and K2-$X^{2}$ with a
genetic algorithm approach on two benchmark networks shows superior results on
one network and comparable results on the other.},
  keywords = {Bayesian Networks Learning, K2, Association Rules,  Correlation
  Rules},
  copyright = {Sergio Storari, Fabrizio Riguzzi and Evelina Lamma, exclusively licensed to {IOS} Press}
}
@article{GavRigMilCag10-ICLP10-IJ,
  author = {Marco Gavanelli and Fabrizio Riguzzi and Michela Milano and Paolo Cagnoli},
  title = {{L}ogic-{B}ased {D}ecision {S}upport for {S}trategic {E}nvironmental {A}ssessment},
  year = {2010},
  editor = {M.~Hermenegildo and T.~Schaub},
  month = jul,
  journal = {Theory and Practice of Logic Programming, 26th Int'l.
Conference on Logic Programming (ICLP'10) Special Issue},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/GavRigMilCag-ICLP10.pdf},
  volume = {10},
  number = {4--6},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  abstract = {Strategic Environmental Assessment is a procedure aimed at
introducing systematic assessment of the environmental effects of
plans and programs. This procedure is based on the so-called
coaxial matrices that define dependencies between plan activities
(infrastructures, plants, resource extractions, buildings, etc.)
and positive and negative environmental impacts, and dependencies
between these impacts and environmental receptors. Up to now, this
procedure has been manually implemented by environmental experts for
checking the environmental effects of a given plan or program, but
it has never been applied during plan/program construction. A
decision support system, based on a clear logic semantics, would
be an invaluable tool not only in assessing a single, already
defined plan, but also during the planning process in order to
produce an optimized, environmentally assessed plan and to study
possible alternative scenarios. We propose two logic-based
approaches to the problem, one based on Constraint Logic
Programming and one on Probabilistic Logic Programming that could
be, in the future, conveniently merged to exploit the advantages
of both. We test the proposed approaches on a real energy plan and
we discuss their limitations and advantages.},
  keywords = {Strategic Environmental Assessment, Regional Planning, Constraint
Logic Programming, Probabilistic Logic Programming, Causality},
  doi = {10.1017/S1471068410000335},
  pages = {643--658},
  arxiv = {1007.3159}
}
@article{Rig10-FI-IJ,
  author = {Fabrizio Riguzzi},
  title = {{SLGAD} Resolution for Inference on {Logic Programs with Annotated 
Disjunctions}},
  journal = {Fundamenta Informaticae},
  abstract = {Logic Programs with Annotated Disjunctions (LPADs) allow one to express
probabilistic information in logic programming. The semantics of an LPAD is
given in terms of the well-founded models of the normal logic programs obtained
by selecting one disjunct from each ground LPAD clause.

Inference on LPADs can be performed using either the system Ailog2, which was
developed for the Independent Choice Logic, or SLDNFAD, an algorithm based on
SLDNF.  However, both of these algorithms run the risk of going into infinite 
loops and of performing redundant computations.

In order to avoid these problems, we present SLGAD resolution, which computes
the (conditional) probability of a ground query from a range-restricted LPAD
and is based on SLG resolution for normal logic programs. Like SLG, it uses
tabling to avoid some infinite loops and redundant computations.

The performance of SLGAD is evaluated on classical benchmarks for normal logic
programs under the well-founded semantics, namely a 2-person game and the
ancestor relation, and on a game of dice.

SLGAD is compared with Ailog2 and SLDNFAD on the problems in which they do
not go into infinite loops, namely those that are described by a  modularly 
acyclic program.

On the 2-person game and the ancestor relation, SLGAD is more expensive than
SLDNFAD on problems where SLDNFAD succeeds, but is faster than Ailog2 when the
query is true in an exponential number of instances.

If the program requires the repeated computation of similar goals, as for the 
dice game, then SLGAD outperforms both Ailog2 and SLDNFAD.},
  keywords = {Probabilistic Logic Programming, Well-Founded Semantics, Logic Programs with Annotated Disjunctions, SLG Resolution},
  month = oct,
  volume = {102},
  number = {3--4},
  year = {2010},
  pages = {429--466},
  doi = {10.3233/FI-2010-392},
  publisher = {{IOS} Press},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/Rig10-FI-IJ.pdf},
  scopus = {2-s2.0-78650327867},
  isi = {WOS:000284311600008}
}
@article{RigSwi11-ICLP11-IJ,
  author = {Fabrizio Riguzzi and Terrance Swift},
  title = {The {PITA} System: Tabling and Answer Subsumption for Reasoning under Uncertainty},
  year = {2011},
  journal = {Theory and Practice of Logic Programming, 27th International
Conference on Logic Programming (ICLP'11) Special Issue, Lexington, Kentucky
6-10 July 2011},
  editor = {John Gallagher and Michael Gelfond},
  volume = {11},
  number = {4--5},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  abstract = {Many real world domains require the representation of a measure of
uncertainty.  The most common such representation is probability, and
the combination of probability with logic programs has given rise to
the field of Probabilistic Logic Programming (PLP), leading to
languages such as the Independent Choice Logic, Logic Programs with
Annotated Disjunctions (LPADs), ProbLog, PRISM and others. These languages
share a similar distribution semantics, and methods have been devised
to translate programs between these languages. 
The complexity of computing the probability of queries to these
general PLP programs is very high due to the need to combine the
probabilities of explanations that may not be exclusive.  As one
alternative, the PRISM system reduces the complexity of query
answering by restricting the form of programs it can evaluate.  As an
entirely different alternative, Possibilistic Logic Programs adopt a
simpler metric of uncertainty than probability.

Each of these approaches -- general PLP, restricted PLP, and
Possibilistic Logic Programming -- can be useful in different domains
depending on the form of uncertainty to be represented, on the form of
programs needed to model problems, and on the scale of the problems to
be solved.  In this paper, we show how the PITA system, which
originally supported the general PLP language of LPADs, can also
efficiently support restricted PLP and Possibilistic Logic Programs.
PITA relies on tabling with answer subsumption and consists of a
transformation along with an API for library functions that interface
with answer subsumption.  We show that, by adapting its transformation
and library functions, PITA can be parameterized to PITA(IND,EXC) 
which supports the restricted PLP of PRISM, including optimizations
that reduce non-discriminating arguments and the computation of
Viterbi paths.  Furthermore, we show PITA to be competitive with PRISM
for complex queries to Hidden Markov Model examples, and sometimes
much faster.
We further show how PITA can be parameterized to PITA(COUNT) which
computes the number of different explanations for a subgoal, and to
PITA(POSS) which scalably implements Possibilistic Logic Programming.
PITA is a supported package in version 3.3 of XSB.
},
  keywords = {Probabilistic Logic Programming, Possibilistic Logic Programming, Tabling, Answer Subsumption, Program Transformation},
  pages = {433--449},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/RigSwi-ICLP11.pdf},
  doi = {10.1017/S147106841100010X},
  url = {http://arxiv.org/pdf/1107.4747v1},
  arxiv = {1107.4747}
}
@article{AlbGavLam11-IA-IJ,
  author = {Marco Alberti and Marco Gavanelli and Evelina Lamma and Fabrizio Riguzzi and Sergio Storari},
  title = {Learning specifications of interaction protocols and business processes and proving their properties},
  journal = {Intelligenza Artificiale},
  year = 2011,
  volume = 5,
  number = 1,
  pages = {71--75},
  month = feb,
  doi = {10.3233/IA-2011-0006},
  issn = {1724-8035},
  abstract = {In this paper, we overview our recent research
  activity concerning the induction of Logic Programming
  specifications, and the proof of their properties via Abductive
  Logic Programming. Both the inductive and abductive tools here
  briefly described have been applied to respectively learn and verify
  (properties of) interaction protocols in multi-agent systems, Web
  service choreographies, careflows and business processes.},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/AlbGavLam-IA08.pdf}
}
@article{BelRig12-IA-IJ,
  author = {Elena Bellodi and Fabrizio Riguzzi},
  title = {Experimentation of an Expectation Maximization Algorithm for Probabilistic Logic Programs},
  year = {2012},
  journal = {Intelligenza Artificiale},
  publisher = {IOS Press},
  copyright = {IOS Press},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/BelRig12-IA-IJ.pdf},
  abstract = {Statistical Relational Learning and Probabilistic Inductive Logic Programming are two emerging fields that use representation languages able to combine logic and probability. In the field of Logic Programming, the distribution semantics is one of the prominent approaches for representing uncertainty and underlies many languages such as ICL, PRISM, ProbLog and LPADs.
Learning the parameters for such languages requires an Expectation Maximization algorithm since their equivalent Bayesian networks contain hidden variables.
EMBLEM (EM over BDDs for probabilistic Logic programs Efficient Mining) is an EM algorithm for languages following the distribution semantics that computes expectations directly on the Binary Decision Diagrams that are built for inference.
In this paper we present experiments comparing EMBLEM with LeProbLog, Alchemy, CEM, RIB and LFI-ProbLog on six real-world datasets. The results show that EMBLEM is able to solve problems on which the other systems fail, and it often achieves significantly higher areas under the Precision-Recall and ROC curves in a similar time.},
  keywords = {Statistical Relational Learning, Probabilistic Inductive Logic Programming, Probabilistic Logic Programming,  Expectation Maximization, Binary Decision Diagrams,
Logic Programs with Annotated Disjunctions
},
  volume = {8},
  number = {1},
  pages = {3--18},
  doi = {10.3233/IA-2012-0027}
}
@article{RigDiM12-ML-IJ,
  author = {Fabrizio Riguzzi and Di Mauro, Nicola},
  title = {Applying the Information Bottleneck to Statistical Relational Learning},
  year = {2012},
  journal = {Machine Learning},
  volume = {86},
  number = {1},
  pages = {89--114},
  note = {The original publication is available at \url{http://www.springerlink.com}},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/RigDiM11-ML-IJ.pdf},
  doi = {10.1007/s10994-011-5247-6},
  publisher = {Springer},
  copyright = {Springer},
  address = {Heidelberg, Germany},
  abstract = {In this paper we propose to apply the Information Bottleneck (IB) approach to the sub-class of
Statistical Relational Learning (SRL) languages that are reducible to Bayesian networks. When the
resulting networks involve hidden variables, learning these languages requires the use of techniques
for learning from incomplete data, such as the Expectation Maximization (EM) algorithm.
Recently, the IB approach was shown to be able to avoid some of the local maxima in which EM can get
trapped when learning with hidden variables. Here we present the algorithm Relational Information Bottleneck (RIB),
which learns the parameters of SRL languages reducible to Bayesian networks.
In particular, we present the specialization of RIB to a language belonging to the family of languages based on the distribution semantics, Logic Programs with Annotated Disjunctions (LPADs). This language is prototypical for such a family and its equivalent Bayesian networks contain hidden variables. RIB is evaluated on the IMDB, Cora and artificial datasets and compared with LeProbLog, EM, Alchemy and PRISM.
The experimental results show that RIB has good performance, especially when some logical atoms are unobserved.
Moreover, it is particularly suitable when learning from interpretations that share the same Herbrand base.},
  keywords = {Statistical Relational Learning, Probabilistic Inductive Logic Programming, Probabilistic Logic Programming,  Information Bottleneck,
Logic Programs with Annotated Disjunctions
}
}
@article{DiMFraEta13-IA-IJ,
  author = {Nicola Di Mauro and
               Paolo Frasconi and
               Fabrizio Angiulli and
               Davide Bacciu and
               Marco de Gemmis and
               Floriana Esposito and
               Nicola Fanizzi and
               Stefano Ferilli and
               Marco Gori and
               Francesca A. Lisi and
               Pasquale Lops and
               Donato Malerba and
               Alessio Micheli and
               Marcello Pelillo and
               Francesco Ricci and
               Fabrizio Riguzzi and
               Lorenza Saitta and
               Giovanni Semeraro},
  title = {Italian Machine Learning and Data Mining research: The last
               years},
  journal = {Intelligenza Artificiale},
  volume = {7},
  number = {2},
  year = {2013},
  pages = {77--89},
  doi = {10.3233/IA-130050},
  copyright = {{IOS} Press},
  publisher = {{IOS} Press},
  abstract = {With the increasing amount of information in electronic form the fields of Machine Learning and Data Mining continue to grow by providing new advances in theory, applications and systems. The aim of this paper is to consider some recent theoretical aspects and approaches to ML and DM with an emphasis on the Italian research.}
}
@article{Rig13-FI-IJ,
  author = {Fabrizio Riguzzi},
  title = {{MCINTYRE}: A {Monte Carlo} System for Probabilistic Logic Programming},
  journal = {Fundamenta Informaticae},
  abstract = {Probabilistic Logic Programming is receiving increasing attention for its ability to model domains with complex and uncertain relations among entities.
In this paper we concentrate on the problem of approximate inference in probabilistic logic programming languages based on the distribution semantics.
A successful approximate approach is based on Monte Carlo sampling, which consists in verifying the truth of the query in a normal program sampled from the probabilistic program.
The ProbLog system includes such an algorithm and so does the cplint suite.
In this paper we propose an approach for Monte Carlo inference that is based on a program transformation that translates a probabilistic program into a normal program to which the query can be posed.  The current sample is stored in the internal database of the Yap Prolog engine.
The resulting system, called MCINTYRE for Monte Carlo INference wiTh Yap REcord, is evaluated on various problems: biological networks, artificial datasets and a hidden Markov model. MCINTYRE is compared with the Monte Carlo algorithms of ProbLog and with the exact inference of the PITA system. The results show that MCINTYRE is faster than the other Monte Carlo systems.},
  keywords = {Probabilistic Logic Programming,
Monte Carlo Methods,
Logic Programs with Annotated Disjunctions,
ProbLog},
  year = {2013},
  publisher = {{IOS} Press},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/Rig13-FI-IJ.pdf},
  doi = {10.3233/FI-2013-847},
  volume = {124},
  number = {4},
  pages = {521--541},
  copyright = {IOS Press}
}
@article{BelRig13-IDA-IJ,
  author = {Elena Bellodi and Fabrizio Riguzzi},
  title = {Expectation {Maximization} over Binary Decision Diagrams for Probabilistic Logic Programs},
  year = {2013},
  volume = {17},
  number = {2},
  journal = {Intelligent Data Analysis},
  publisher = {IOS Press},
  copyright = {IOS Press},
  pages = {343--363},
  doi = {10.3233/IDA-130582},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/BelRig13-IDA-IJ.pdf},
  abstract = {Recently much work in Machine Learning has concentrated on using expressive representation languages that combine aspects of logic and probability. A whole field has emerged, called Statistical Relational Learning, rich of successful applications in a variety of domains.
In this paper we present a Machine Learning technique targeted to Probabilistic Logic Programs, a family of formalisms where uncertainty is represented using Logic Programming tools.
Among various proposals for Probabilistic Logic Programming, the one based on the distribution semantics is gaining popularity and is the basis for languages such as ICL, PRISM, ProbLog and Logic Programs with Annotated Disjunctions.
This paper proposes a technique for learning parameters of these languages. Since their equivalent Bayesian networks contain hidden variables, an Expectation Maximization (EM) algorithm is adopted.
In order to speed the computation up, expectations are computed directly on the Binary Decision Diagrams that are built for inference.
The resulting system, called EMBLEM for ``EM over BDDs for probabilistic Logic programs Efficient Mining'', has been applied to a number of datasets and showed good performance in terms of both speed and memory usage. In particular, its speed allows the execution of a high number of restarts, resulting in good quality of the solutions.},
  keywords = {Statistical Relational Learning, Probabilistic Inductive Logic Programming, Probabilistic Logic Programs, Logic Programs with Annotated Disjunctions, Expectation Maximization, Binary Decision Diagrams
}
}
@article{RigSwi13-TPLP-IJ,
  author = {Fabrizio Riguzzi and Terrance Swift},
  title = {Well-Definedness and Efficient Inference for Probabilistic Logic Programming under the Distribution Semantics},
  year = {2013},
  month = {March},
  journal = {Theory and Practice of Logic Programming},
  editor = { Wolfgang Faber and Nicola Leone},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  abstract = {The distribution semantics is one of the most prominent approaches for the combination of logic programming and probability theory. Many languages follow this semantics, such as Independent Choice Logic, PRISM, pD, Logic Programs with Annotated Disjunctions (LPADs)  and ProbLog. 

When a program contains function symbols, the distribution semantics
is well-defined only if the set of explanations for a query is
finite and so is each explanation. Well-definedness is usually
either explicitly imposed or is achieved by severely limiting the
class of allowed programs.
In this paper we identify a larger class of programs for which the
semantics is well\--defined together with an efficient procedure for
computing the probability of queries.
Since LPADs offer the most general syntax, we present our results for
them, but our results are applicable to all languages under the
distribution semantics.

We present the algorithm ``Probabilistic Inference with Tabling and
Answer subsumption'' (PITA) that computes the probability of
queries by transforming a probabilistic program into a normal program
and then applying SLG resolution with answer subsumption.
PITA has been implemented in XSB and tested on six domains: two
with function symbols and four without.  The execution times are
compared with those of ProbLog, cplint and
CVE. PITA was almost always able to solve larger problems in a
shorter time, on domains with and without function symbols.},
  keywords = {Probabilistic Logic Programming, Tabling, Answer Subsumption, Logic Programs with Annotated Disjunction, Program Transformation},
  doi = {10.1017/S1471068411000664},
  arxiv = {1110.0631},
  pages = {279--302},
  volume = {13},
  number = {Special Issue 02 - 25th Annual GULP Conference},
  scopus = {84874625061},
  isi = {000315867300007},
  url = {http://arxiv.org/pdf/1110.0631v1}
}
@article{RigBelZes14-FAI-IJ,
  author = {Riguzzi, Fabrizio  and  Bellodi, Elena  and  Zese, Riccardo},
  title = {A History of Probabilistic Inductive Logic Programming},
  journal = {Frontiers in Robotics and AI},
  volume = {1},
  year = {2014},
  number = {6},
  url = {http://www.frontiersin.org/computational_intelligence/10.3389/frobt.2014.00006/abstract},
  doi = {10.3389/frobt.2014.00006},
  issn = {2296-9144},
  abstract = {The field of Probabilistic Logic Programming (PLP) has seen significant advances in the last 20 years, with many proposals for languages that combine probability with logic programming. Since the start, the problem of learning probabilistic logic programs has been the focus of much attention. Learning these programs represents a whole subfield of Inductive Logic Programming (ILP). In Probabilistic ILP (PILP), two problems are considered: learning the parameters of a program given the structure (the rules) and learning both the structure and the parameters. Usually, structure learning systems use parameter learning as a subroutine. In this article, we present an overview of PILP and discuss the main results.},
  pages = {1--5},
  keywords = {logic programming, probabilistic programming, inductive logic programming, probabilistic logic
programming, statistical relational learning},
  copyright = {by the authors}
}
@article{RigSwi14-TOCL-IJ,
  author = { Fabrizio Riguzzi and Terrance Swift},
  title = {Terminating Evaluation of Logic Programs with Finite Three-Valued
  Models},
  journal = {ACM Transactions on Computational Logic},
  publisher = {ACM},
  copyright = {ACM},
  volume = {15},
  number = {4},
  year = {2014},
  doi = {10.1145/2629337},
  abstract = {
As evaluation methods for logic programs have become more sophisticated, the classes of programs for which
termination can be guaranteed have expanded. From the perspective of answer set programs that include
function symbols, recent work has identified classes for which grounding routines can terminate either on
the entire program [Calimeri et al. 2008] or on suitable queries [Baselice et al. 2009]. From the perspective
of tabling, it has long been known that a tabling technique called subgoal abstraction provides good
termination properties for definite programs [Tamaki and Sato 1986], and this result was recently extended
to stratified programs via the class of bounded term-size programs [Riguzzi and Swift 2013]. In this paper
we provide a formal definition of tabling with subgoal abstraction, resulting in the SLG$_{SA}$ algorithm.
Moreover, we discuss a declarative characterization of the queries and programs for which SLG$_{SA}$
terminates. We call this class strongly bounded term-size programs and show its equivalence to programs
with finite well-founded models. For normal programs, the class of strongly bounded term-size programs
strictly includes the finitely ground programs of [Calimeri et al. 2008]. SLG$_{SA}$ has an asymptotic
complexity on strongly bounded term-size programs equal to the best known and produces a residual
program that can be sent to an answer set programming system. Finally, we describe the implementation
of subgoal abstraction within the SLG-WAM of XSB and provide performance results.},
  keywords = {Logic Programming, Tabled Logic Programming, Termination},
  http = {http://dl.acm.org/authorize?N05388},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/RigSwi14-TOCL.pdf},
  pages = {32:1--32:38},
  month = {September},
  issn = {1529-3785},
  address = {New York, NY, USA}
}
@article{BelLamRig14-ICLP-IJ,
  author = {Elena Bellodi and Evelina Lamma and Fabrizio Riguzzi and Santos Costa, Vitor and Riccardo Zese},
  title = {Lifted Variable Elimination for Probabilistic Logic Programming},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  number = {Special issue 4-5 - ICLP 2014},
  volume = {14},
  year = {2014},
  pages = {681--695},
  doi = {10.1017/S1471068414000283},
  pdf = {http://arxiv.org/abs/1405.3218},
  keywords = {Probabilistic Logic Programming, Lifted Inference,
  Variable Elimination, Distribution Semantics, ProbLog,
  Statistical Relational Artificial Intelligence},
  abstract = {Lifted inference has been proposed for various probabilistic logical
  frameworks in order to compute the probability of queries in a time that
  depends on the size of the domains of the random variables rather than the
  number of instances. Even if various authors have underlined its importance
  for probabilistic logic programming (PLP), lifted inference has been applied
  up to now only to relational languages outside of logic programming. In this
  paper we adapt Generalized Counting First Order Variable Elimination (GC-FOVE)
  to the problem of computing the probability of queries to probabilistic logic
  programs under the distribution semantics. In particular, we extend the Prolog
  Factor Language (PFL) to include two new types of factors that are needed for
  representing ProbLog programs. These factors take into account the existing
  causal independence relationships among random variables and are managed by
  the extension to variable elimination proposed by Zhang and Poole for dealing
  with convergent variables and heterogeneous factors. Two new operators are
  added to GC-FOVE for treating heterogeneous factors. The resulting algorithm,
  called LP$^2$ for Lifted Probabilistic Logic Programming, has been implemented
  by modifying the PFL implementation of GC-FOVE and tested on three benchmarks
  for lifted inference. A comparison with PITA and ProbLog2 shows the potential
  of the approach.},
  isi = {000343203200019},
  scopus = {84904624147}
}
@article{Rig14-CJ-IJ,
  author = {Fabrizio Riguzzi},
  title = {Speeding Up Inference for Probabilistic Logic Programs},
  journal = {The Computer Journal},
  publisher = {Oxford University Press},
  copyright = {Oxford University Press},
  year = {2014},
  volume = {57},
  number = {3},
  pages = {347--363},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/Rig-CJ13.pdf},
  doi = {10.1093/comjnl/bxt096},
  abstract = {Probabilistic Logic Programming (PLP) allows one to represent domains containing many entities connected by uncertain relations and has many applications, in particular in Machine Learning.
PITA is a PLP algorithm for computing the probability of queries that exploits tabling, answer subsumption and Binary Decision Diagrams (BDDs). PITA does not impose any restriction on the programs. Other algorithms, such as PRISM, reduce computation time by imposing restrictions on the program, namely that subgoals are independent and that clause bodies are mutually exclusive. Another assumption that simplifies inference is that clause bodies are independent. In this paper we  present the algorithms PITA(IND,IND) and PITA(OPT). PITA(IND,IND) assumes that subgoals and clause bodies are independent. PITA(OPT) instead first checks whether these assumptions hold for subprograms and subgoals: if they do, PITA(OPT) uses a simplified calculation, otherwise it resorts to BDDs. Experiments on a number of benchmark datasets show that PITA(IND,IND) is the fastest on datasets respecting the assumptions while PITA(OPT) is a good option when nothing is known about a dataset.},
  keywords = {Logic Programming, Probabilistic Logic Programming, Distribution Semantics, Logic Programs with Annotated Disjunctions, PRISM, ProbLog}
}
@article{DiMBelRig15-ML-IJ,
  author = {Di Mauro, Nicola  and Elena Bellodi and Fabrizio Riguzzi},
  title = {Bandit-Based {Monte-Carlo} Structure Learning of
Probabilistic Logic Programs},
  journal = {Machine Learning},
  publisher = {Springer International Publishing},
  copyright = {Springer International Publishing},
  year = {2015},
  volume = {100},
  number = {1},
  pages = {127--156},
  month = {July},
  doi = {10.1007/s10994-015-5510-3},
  url = {http://ml.unife.it/wp-content/uploads/Papers/DiMBelRig-ML15.pdf},
  keywords = {probabilistic inductive logic programming, statistical relational learning, structure learning, distribution semantics, logic programs with annotated disjunction},
  abstract = {Probabilistic Logic Programming can be used to model domains with complex and uncertain relationships among entities. While the problem of learning the
parameters of such programs has been considered by various authors, the problem
of learning the structure is yet to be explored in depth. In this work we present an
approximate search method based on a one-player game approach, called LEMUR. It
sees the problem of learning the structure of a probabilistic logic program as a
multi-armed bandit problem, relying on the Monte-Carlo tree search UCT algorithm that
combines the precision of tree search with the generality of random sampling. LEMUR
works by modifying the UCT algorithm in a fashion similar to FUSE, which considers a
finite unknown horizon and deals with the problem of having a huge branching factor.
The proposed system has been tested on various real-world datasets and has shown
good performance with respect to other state-of-the-art statistical relational learning
approaches in terms of classification abilities.},
  note = {The original publication is available at
\url{http://link.springer.com}}
}
@article{RigBelLamZes15-SW-IJ,
  author = {Fabrizio Riguzzi and Elena Bellodi and Evelina Lamma and Riccardo Zese},
  title = {Probabilistic Description Logics under the Distribution Semantics},
  journal = {Semantic Web - Interoperability, Usability, Applicability},
  volume = {6},
  number = {5},
  pages = {447--501},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/RigBelLamZes-SW14.pdf},
  year = {2015},
  doi = {10.3233/SW-140154},
  abstract = {
Representing uncertain information is crucial for modeling real world domains. In this paper we present a technique for the integration of probabilistic information in Description Logics (DLs) that is based on the distribution semantics for probabilistic logic programs. In the resulting approach, which we call DISPONTE, the axioms of a probabilistic knowledge base
(KB) can be annotated with a real number between 0 and 1. A probabilistic knowledge base then defines a probability
distribution over regular KBs, called worlds, and the probability of a given query can be obtained from the joint distribution of the worlds and the query by marginalization.
We present the algorithm BUNDLE for computing the probability of queries from DISPONTE KBs. The algorithm exploits an underlying DL reasoner, such as Pellet, that is able to return explanations for queries. The explanations are encoded in a Binary Decision Diagram from which the probability of the query is computed.
Experiments with BUNDLE show that it can handle probabilistic KBs of realistic size.
},
  keywords = { Probabilistic Ontologies, Probabilistic Description Logics, OWL, Probabilistic Logic Programming, Distribution Semantics}
}
@article{BelRig15-TPLP-IJ,
  author = {Elena Bellodi and Fabrizio Riguzzi},
  title = {Structure Learning of Probabilistic Logic Programs by Searching the Clause Space},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  year = {2015},
  volume = {15},
  number = {2},
  pages = {169--212},
  pdf = {http://arxiv.org/abs/1309.2080},
  url = {http://journals.cambridge.org/abstract_S1471068413000689},
  doi = {10.1017/S1471068413000689},
  keywords = {probabilistic inductive logic programming, statistical relational learning, structure learning, distribution semantics, logic programs with annotated disjunction, CP-logic},
  abstract = {Learning probabilistic logic programming languages is receiving increasing attention,
and systems are available for learning the parameters (PRISM, LeProbLog, LFI-ProbLog
and EMBLEM) or both structure and parameters (SEM-CP-logic and SLIPCASE) of these
languages. In this paper we present the algorithm SLIPCOVER for "Structure LearnIng
of Probabilistic logic programs by searChing OVER the clause space." It performs a beam
search in the space of probabilistic clauses and a greedy search in the space of theories
using the log likelihood of the data as the guiding heuristics. To estimate the log likelihood,
SLIPCOVER performs Expectation Maximization with EMBLEM. The algorithm has been
tested on five real world datasets and compared with SLIPCASE, SEM-CP-logic, Aleph and
two algorithms for learning Markov Logic Networks (Learning using Structural Motifs (LSM)
and ALEPH++ExactL1). SLIPCOVER achieves higher areas under the precision-recall and
receiver operating characteristic curves in most cases.}
}
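
A schematic view of the clause search described in the SLIPCOVER abstract above: candidates are repeatedly refined, scored, and pruned to the best few (the beam). In the sketch below, refine and score are toy stand-ins for SLIPCOVER's clause refinement operator and the log likelihood estimated by EMBLEM; only the beam-search control flow is faithful.

import heapq

def refine(clause):
    # Toy refinement: extend a tuple of "literals" drawn from a fixed pool.
    pool = ["a", "b", "c"]
    return [clause + (lit,) for lit in pool if lit not in clause]

def score(clause):
    # Toy score standing in for the log likelihood of the data given the clause.
    target = ("a", "c")
    return -abs(len(clause) - len(target)) + sum(l in target for l in clause)

def beam_search(start=(), beam_size=2, steps=3):
    best = (score(start), start)
    beam = [start]
    for _ in range(steps):
        candidates = [c for clause in beam for c in refine(clause)]
        if not candidates:
            break
        beam = heapq.nlargest(beam_size, candidates, key=score)
        best = max(best, (score(beam[0]), beam[0]), key=lambda t: t[0])
    return best

print(beam_search())  # finds the toy "clause" ('a', 'c')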
@article{RigBelLam16-SPE-IJ,
  author = {Fabrizio Riguzzi and Elena Bellodi and Evelina Lamma and
  Riccardo Zese and Giuseppe Cota},
  title = {Probabilistic Logic Programming on the Web},
  journal = {Software: Practice and Experience},
  publisher = {Wiley},
  copyright = {Wiley},
  year = {2016},
  issn = {1097-024X},
  url = {http://ml.unife.it/wp-content/uploads/Papers/RigBelLam-SPE16.pdf},
  abstract = {
We present the web application "cplint on SWISH", that allows the user
to write probabilistic logic programs and compute the probability of queries
with just a web browser. The application is based on SWISH, a recently
proposed web framework for logic programming. SWISH is based on various
features and packages of SWI-Prolog, in particular its web server and
its Pengine library, which make it possible to create remote Prolog engines and to pose
queries to them. In order to develop the web application, we started from
the PITA system which is included in cplint, a suite of programs for reasoning
on Logic Programs with Annotated Disjunctions, by porting PITA
to SWI-Prolog. Moreover, we modified the PITA library so that it can be
executed in a multi-threading environment. Developing "cplint on SWISH"
also required modification of the JavaScript SWISH code that creates and
queries Pengines. "cplint on SWISH" includes a number of examples that
cover a wide range of domains and provide interesting applications of Probabilistic
Logic Programming (PLP). By providing a web interface to cplint
we allow users to experiment with PLP without the need to install a system,
a procedure which is often complex, error-prone and limited mainly to the
Linux platform. In this way, we aim to reach out to a wider audience and
popularize PLP.},
  keywords = { Logic Programming, Probabilistic Logic Programming,
Distribution Semantics, Logic Programs with Annotated Disjunctions, Web
Applications
},
  doi = {10.1002/spe.2386},
  volume = {46},
  number = {10},
  pages = {1381--1396},
  month = {October},
  wos = {WOS:000383624900005},
  scopus = {2-s2.0-84951829971}
}
@article{Rig16-IJAR-IJ,
  author = {Fabrizio Riguzzi},
  title = {The Distribution Semantics for Normal Programs with Function Symbols},
  journal = {International Journal of Approximate Reasoning},
  year = {2016},
  publisher = {Elsevier},
  address = {Amsterdam},
  doi = {10.1016/j.ijar.2016.05.005},
  volume = {77},
  number = {Supplement C},
  pages = {1--19},
  issn = {0888-613X},
  month = {October},
  url = {http://ml.unife.it/wp-content/uploads/Papers/Rig-IJAR16.pdf},
  copyright = {Elsevier},
  abstract = {The distribution semantics integrates logic programming and probability theory using a possible worlds approach.
Its intuitiveness and simplicity have made it the most
widely used semantics for probabilistic logic programming,
with successful applications in many domains.
When the program has function symbols, the semantics was defined for special cases: either the program has to be definite or the queries must have a finite number of finite explanations.
In this paper we show that it is possible to define the semantics for all programs. We also show that this definition coincides with that of Sato and Kameya on positive programs.
Moreover, we highlight possible approaches for inference, both exact and
approximate.
},
  keywords = {Distribution Semantics, Function Symbols,
ProbLog,
Probabilistic Logic Programming
},
  wos = {WOS:000381164500001},
  scopus = {2-s2.0-84973659532}
}
@article{BelRigLam16-IDA-IJ,
  author = {Elena Bellodi and Fabrizio Riguzzi and Evelina Lamma},
  title = {Statistical Relational Learning for Workflow Mining},
  journal = {Intelligent Data Analysis},
  publisher = {IOS Press},
  copyright = {IOS Press},
  year = {2016},
  doi = {10.3233/IDA-160818},
  month = {April},
  volume = {20},
  number = {3},
  pages = {515--541},
  url = {http://ml.unife.it/wp-content/uploads/Papers/BelRigLam-IDA15.pdf},
  keywords = {Workflow Mining, Process Mining, Knowledge-based Process Models, Inductive Logic Programming, Statistical Relational Learning,
Business Process Management
},
  abstract = {
The management of business processes can support  efficiency improvements in organizations. One of the most interesting problems is the mining and representation of process models in a declarative language.
Various recently proposed knowledge-based languages showed advantages over graph-based procedural notations.
Moreover, rapid changes in the environment require organizations to check how compliant new process instances are with the deployed models.
We present a Statistical Relational Learning approach to Workflow Mining that takes into account both flexibility and uncertainty in real environments.
It performs  automatic discovery of  process models expressed in a probabilistic logic.
It  uses the existing DPML  algorithm  for extracting  first-order logic constraints from process logs. The constraints are then translated into Markov Logic to learn their weights.
Inference on the resulting Markov Logic  model allows a probabilistic classification of test traces, by assigning them the probability of being compliant to the model.
We applied this approach to three datasets and compared it with DPML alone, five Petri net- and EPC-based process mining algorithms and Tilde.
The technique is able to better classify new execution traces, showing higher  accuracy and areas under the PR/ROC curves in most cases.
},
  scopus = {2-s2.0-84969808336},
  wos = {WOS:000375005000004}
}
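
The classification step described above can be imitated with a log-linear score over learned constraints. Everything in this sketch is invented (the constraints, their weights, and the logistic normalization); the paper instead learns the weights in Markov Logic and classifies traces by MLN inference.

import math

weights = {"starts_with_register": 1.5, "pay_before_ship": 2.0, "single_billing": 0.8}

def satisfied(trace, constraint):
    checks = {
        "starts_with_register": trace[0] == "register",
        "pay_before_ship": "ship" not in trace
                           or ("pay" in trace and trace.index("pay") < trace.index("ship")),
        "single_billing": trace.count("bill") <= 1,
    }
    return checks[constraint]

def p_compliant(trace):
    # Log-linear score of the satisfied constraints, squashed to a probability.
    score = sum(w for c, w in weights.items() if satisfied(trace, c))
    midpoint = sum(weights.values()) / 2  # invented decision threshold
    return 1 / (1 + math.exp(-(score - midpoint)))

print(p_compliant(["register", "bill", "pay", "ship"]))  # high: all constraints hold
print(p_compliant(["ship", "pay"]))                      # low: two violations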
@article{RigCotBel17-IJAR-IJ,
  author = {Fabrizio Riguzzi and Giuseppe Cota and
        Elena Bellodi and Riccardo Zese  },
  title = {Causal Inference in {cplint}},
  journal = {International Journal of Approximate Reasoning},
  year = {2017},
  publisher = {Elsevier},
  address = {Amsterdam},
  copyright = {Elsevier},
  doi = {10.1016/j.ijar.2017.09.007},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/RigCotBel-IJAR17.pdf},
  abstract = {
cplint is a suite of programs for reasoning and learning with Probabilistic Logic
Programming languages that follow the distribution semantics.
In this paper we describe how we have extended cplint to perform causal reasoning.
In particular, we consider Pearl's do calculus for models where all
the variables are measured.
The two cplint  modules for inference, PITA and MCINTYRE, have been extended for
computing the effect of actions/interventions on these models.
We also executed experiments comparing exact and approximate inference with
conditional and causal queries, showing that causal inference is often cheaper than conditional inference.
},
  keywords = {
Probabilistic Logic Programming, Distribution Semantics, Logic Programs with Annotated Disjunctions, ProbLog, Causal Inference, Statistical Relational Artificial Intelligence
},
  volume = {91},
  pages = {216--232},
  month = {December},
  number = {Supplement C},
  issn = {0888-613X},
  scopus = {2-s2.0-84992199737},
  wos = {WOS:000391080100020}
}
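
The gap between conditional and causal queries mentioned above can be reproduced by hand on a tiny fully measured model. In the sketch below (structure and probabilities invented), Z confounds treatment X and outcome Y; conditioning renormalizes the observational joint, while do(X=1) cuts the edge into X, which here amounts to adjusting over Z.

pZ = 0.5
pX_given_Z = {0: 0.2, 1: 0.8}             # P(X=1 | Z)
pY_given_XZ = {(0, 0): 0.1, (0, 1): 0.4,  # P(Y=1 | X, Z)
               (1, 0): 0.5, (1, 1): 0.9}

def joint(z, x, y):
    pz = pZ if z else 1 - pZ
    px = pX_given_Z[z] if x else 1 - pX_given_Z[z]
    py = pY_given_XZ[(x, z)] if y else 1 - pY_given_XZ[(x, z)]
    return pz * px * py

# Conditional query P(Y=1 | X=1): renormalize the observational joint.
num = sum(joint(z, 1, 1) for z in (0, 1))
den = sum(joint(z, 1, y) for z in (0, 1) for y in (0, 1))
print("P(Y=1 | X=1)     =", num / den)   # 0.82

# Causal query P(Y=1 | do(X=1)): truncated product, i.e. adjust over Z.
p_do = sum((pZ if z else 1 - pZ) * pY_given_XZ[(1, z)] for z in (0, 1))
print("P(Y=1 | do(X=1)) =", p_do)        # 0.70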
@article{AlbBelCot17-IA-IJ,
  author = {Marco Alberti and Elena Bellodi and Giuseppe Cota and
  Fabrizio Riguzzi and Riccardo Zese},
  title = {\texttt{cplint} on {SWISH}: Probabilistic Logical Inference with a Web Browser},
  journal = {Intelligenza Artificiale},
  publisher = {IOS Press},
  copyright = {IOS Press},
  year = {2017},
  issn-print = {1724-8035},
  issn-online = {2211-0097},
  url = {http://ml.unife.it/wp-content/uploads/Papers/AlbBelCot-IA17.pdf},
  abstract = {
\texttt{cplint} on SWISH is a web application that allows users to
perform reasoning tasks on probabilistic logic programs.
Both inference and learning tasks can be performed: conditional probabilities can be computed with exact,
rejection sampling and Metropolis-Hastings methods. Moreover, the system now allows hybrid programs,
i.e., programs where some of the random variables are continuous. To perform inference on such programs, likelihood weighting and particle filtering are used.
\texttt{cplint} on SWISH is also able to sample goals' arguments and
to graph the results. This paper reports on advances and new features
of \texttt{cplint} on SWISH, including the capability of drawing the
binary decision diagrams created during the inference processes.
},
  keywords = { Logic Programming, Probabilistic Logic Programming,
Distribution Semantics, Logic Programs with Annotated Disjunctions, Web
Applications
},
  volume = {11},
  number = {1},
  doi = {10.3233/IA-170106},
  pages = {47--64},
  wos = {WOS:000399736500004}
}
@article{BelLamRig17-SPE-IJ,
  author = {Elena Bellodi and Evelina Lamma and Fabrizio Riguzzi and
  Riccardo Zese and Giuseppe Cota},
  title = {A web system for reasoning with probabilistic {OWL}},
  journal = {Software: Practice and Experience},
  publisher = {Wiley},
  copyright = {Wiley},
  year = {2017},
  doi = {10.1002/spe.2410},
  issn = {1097-024X},
  month = {January},
  pages = {125--142},
  volume = {47},
  number = {1},
  scopus = {2-s2.0-84992412060},
  url = {http://ml.unife.it/wp-content/uploads/Papers/BelLamRig-SPE16.pdf},
  abstract = {
We present the web application TRILL on SWISH, which allows the user to write probabilistic Description Logic (DL) theories and compute the probability of queries with just a web browser.
Various probabilistic extensions of DLs have been proposed  in the recent past, since uncertainty is a fundamental component of the Semantic Web.
We consider probabilistic DL theories following our DISPONTE semantics.  Axioms of a DISPONTE Knowledge Base (KB) can be annotated with a probability and the probability of queries can be computed with inference algorithms.
TRILL is a probabilistic reasoner for DISPONTE KBs that is implemented in Prolog  and exploits its backtracking facilities for handling the non-determinism of the tableau algorithm.
TRILL on SWISH is based on SWISH, a recently proposed web framework for logic programming, based on various features and packages of SWI-Prolog (e.g., a web server and a library for creating remote Prolog engines and  posing queries to them).  TRILL on SWISH also allows users to cooperate in writing a probabilistic DL theory.
It is free, open, and accessible on the Web at the url: \trillurl; it includes a number of examples that cover a wide range of domains and provide interesting Probabilistic Semantic Web applications.
By building a web-based system, we allow users to experiment with Probabilistic DLs without the need to install a complex software stack. In this way we aim to reach out to a wider audience and popularize the Probabilistic Semantic Web.
},
  keywords = { Semantic Web, Web Applications, Description Logics, Probabilistic Description Logics, SWI-Prolog, Logic Programming
}
}
@article{RigBelZes17-IJAR-IJ,
  author = {Fabrizio Riguzzi and
        Elena Bellodi and Riccardo Zese and
        Giuseppe Cota and
        Evelina Lamma },
  title = {A Survey of Lifted Inference Approaches for Probabilistic
Logic Programming under the Distribution Semantics},
  journal = {International Journal of Approximate Reasoning},
  year = {2017},
  publisher = {Elsevier},
  address = {Amsterdam},
  copyright = {Elsevier},
  doi = {10.1016/j.ijar.2016.10.002},
  url = {http://ml.unife.it/wp-content/uploads/Papers/RigBelZes-IJAR17.pdf},
  volume = {80},
  number = {Supplement C},
  issn = {0888-613X},
  pages = {313--333},
  month = {January},
  abstract = {
Lifted inference aims at answering queries from statistical relational models by reasoning on populations of individuals as a
whole instead of considering each individual separately.
Since the initial proposal by David Poole in 2003, many lifted inference techniques have appeared, lifting different algorithms or using approximations involving different kinds of models, including parfactor graphs and Markov Logic Networks.
Very recently lifted inference was applied to Probabilistic Logic Programming (PLP) under the distribution semantics, with proposals such as LP2 and Weighted First-Order Model Counting
(WFOMC). Moreover, techniques for dealing with aggregation parfactors can be directly applied to PLP.
In this paper we survey these approaches and present an
experimental comparison on five models.
The results show that  WFOMC outperforms the other approaches, being able to exploit more symmetries.
},
  keywords = {Probabilistic Logic Programming, Lifted Inference, Variable Elimination, Distribution Semantics, ProbLog, Statistical Relational Artificial Intelligence
},
  scopus = {2-s2.0-84992199737},
  wos = {WOS:000391080100020}
}
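
The gain that lifted inference offers is easy to see on a toy symmetric model (not one of the five models compared in the paper): if n interchangeable individuals each make the query true independently with probability p, grounded inference enumerates 2^n worlds, while the lifted computation is one closed-form expression over the population.

from itertools import product

n, p = 10, 0.3

# Grounded: enumerate all 2^n worlds and marginalize (exponential in n).
grounded = sum(
    p ** sum(w) * (1 - p) ** (n - sum(w))
    for w in product([0, 1], repeat=n)
    if any(w)
)

# Lifted: exploit the symmetry of the population, no grounding needed.
lifted = 1 - (1 - p) ** n

print(grounded, lifted)  # both equal 1 - 0.7**10, about 0.9718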
@article{ZesBelRig18-AMAI-IJ,
  author = {Riccardo Zese and
        Elena Bellodi  and
        Fabrizio Riguzzi and
        Giuseppe Cota and
        Evelina Lamma },
  title = {Tableau Reasoning for Description Logics and its Extension to Probabilities},
  journal = {Annals of Mathematics and Artificial Intelligence},
  publisher = {Springer},
  copyright = {Springer},
  year = {2018},
  issn-print = {1012-2443},
  issn-online = {1573-7470},
  url = {http://ml.unife.it/wp-content/uploads/Papers/ZesBelRig-AMAI16.pdf},
  pdf = {http://rdcu.be/kONG},
  month = {March},
  day = {01},
  volume = {82},
  number = {1},
  pages = {101--130},
  doi = {10.1007/s10472-016-9529-3},
  abstract = {
The increasing popularity of the Semantic Web has led to the widespread
adoption of Description Logics (DLs) for modeling real world domains.
To help the diffusion of DLs, a large number of reasoning algorithms have been
developed. Usually these algorithms are implemented in procedural languages
such as Java or C++. Most of the reasoners exploit the tableau algorithm,
which features non-determinism that is not easily handled by those languages.
Prolog directly manages non-determinism and is thus a good candidate for dealing
with the tableau's non-deterministic expansion rules.
We present TRILL, for "Tableau Reasoner for descrIption Logics in proLog",
which implements a tableau algorithm and is able to return explanations
for queries and their corresponding probability, and TRILLP, for "TRILL
powered by Pinpointing formulas", which is able to compute a Boolean formula
representing the set of explanations for a query. Reasoning on real world
domains also requires the capability of managing probabilistic and uncertain
information. We show how TRILL and TRILLP can be used to compute the
probability of queries to knowledge bases following the DISPONTE semantics.
Experiments comparing these with other systems show the feasibility of the
approach.},
  keywords = { Description Logics, Tableau, Prolog, Semantic Web},
  scopus = {2-s2.0-84990986085}
}
@article{GavLam18-FI-IJ,
  author = {Gavanelli, Marco and Lamma, Evelina and Riguzzi, Fabrizio and Bellodi, Elena and Zese, Riccardo and Cota, Giuseppe},
  title = {Reasoning on Datalog+- Ontologies with Abductive Logic Programming},
  year = {2018},
  journal = {Fundamenta Informaticae},
  copyright = {IOS Press},
  volume = {159},
  doi = {10.3233/FI-2018-1658},
  pages = {65--93},
  pdf = {http://ml.unife.it/wp-content/uploads/Papers/GavLam-FI18.pdf},
  scopus = {2-s2.0-85043572529}
}
@article{AzzRigLam19-Info-IJ,
  author = {Damiano Azzolini  and Fabrizio Riguzzi and Evelina Lamma},
  title = {Studying Transaction Fees in the Bitcoin Blockchain with Probabilistic Logic Programming},
  journal = {Information},
  publisher = {MDPI},
  copyright = {CCBY},
  year = {2019},
  pdf = {https://www.mdpi.com/2078-2489/10/11/335/pdf},
  doi = {10.3390/info10110335},
  abstract = {
In Bitcoin, if a miner is able to solve a computationally hard problem called proof of work, it will receive an amount of bitcoin as a reward which is the sum of the fees for the transactions included in a block plus an amount inversely proportional to the number of blocks discovered so far. At the moment of writing, the block reward is several orders of magnitude greater than the sum of transaction fees. Usually, miners try to collect the largest reward by including transactions associated with high fees. The main purpose of transaction fees is to prevent network spamming. However, they are also used to prioritize transactions. In order to use the minimum amount of fees, users usually have to find a compromise between fees and the urgency of a transaction. In this paper, we develop a probabilistic logic model to experimentally analyze how fees affect confirmation time and miners' revenue and to predict whether an increase in average fees will generate a situation in which a miner gets more reward by not following the protocol.},
  keywords = { bitcoin, blockchain, probabilistic logic programming
},
  address = {Basel, Switzerland},
  volume = {10},
  number = {11},
  pages = {335}
}
@article{NguRig19-ML-IJ,
  author = {Nguembang Fadja, Arnaud  and Fabrizio Riguzzi},
  title = {Lifted Discriminative Learning of Probabilistic Logic Programs},
  journal = {Machine Learning},
  publisher = {Springer},
  copyright = {Springer},
  year = {2019},
  doi = {10.1007/s10994-018-5750-0},
  abstract = {
Probabilistic logic programming (PLP) provides a powerful tool for reasoning with uncertain relational models. However, learning probabilistic logic programs is expensive due to the high cost of inference. Among the proposals to overcome this problem, one of the most promising is lifted inference. In this paper we consider PLP models that are amenable to lifted inference and present an algorithm for performing parameter and structure learning of these models from positive and negative examples. We discuss parameter learning with EM and LBFGS and structure learning with LIFTCOVER, an algorithm similar to SLIPCOVER. The results of the comparison of LIFTCOVER with SLIPCOVER on 12 datasets show that it can achieve solutions of similar or better quality in a fraction of the time.
},
  keywords = { Statistical Relational Learning, Probabilistic Inductive Logic Programming, Probabilistic Logic Programming, Lifted Inference, Expectation Maximization
},
  scopus = {2-s2.0-85052570852},
  volume = {108},
  number = {7},
  pages = {1111--1135}
}
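
A toy version of the lifted parameter-learning task described above, assuming a single probabilistic clause: a positive example covered by m groundings of the clause has probability 1 - (1 - p)^m, so the likelihood can be written and optimized without grounding the whole program. The data are invented, and plain gradient ascent with a numerical gradient stands in for the EM and LBFGS procedures used by LIFTCOVER.

import math

# (m, label) pairs: m groundings cover the example, label 1 = positive.
data = [(3, 1), (1, 1), (2, 0), (5, 1), (1, 0), (4, 1)]

def log_lik(p):
    ll = 0.0
    for m, y in data:
        q = 1 - (1 - p) ** m        # P(example is positive)
        ll += math.log(q) if y else math.log(1 - q)
    return ll

p, lr, eps = 0.5, 0.01, 1e-6
for _ in range(2000):
    grad = (log_lik(p + eps) - log_lik(p - eps)) / (2 * eps)
    p = min(max(p + lr * grad, 1e-6), 1 - 1e-6)  # keep p in (0, 1)
print(p, log_lik(p))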
@article{ZesBelCot19-TPLP-IJ,
  title = {Probabilistic {DL} Reasoning with Pinpointing Formulas: A Prolog-based Approach},
  doi = {10.1017/S1471068418000480},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  author = {Zese, Riccardo and Cota, Giuseppe and Lamma, Evelina and Bellodi, Elena and Riguzzi, Fabrizio},
  pages = {449--476},
  year = {2019},
  volume = {19},
  number = {3},
  pdf = {https://arxiv.org/pdf/1809.06180.pdf},
  scopus = {2-s2.0-85060024345}
}
@article{WieRigKow19-TPLP-IJ,
  author = {
Jan Wielemaker and Fabrizio Riguzzi and Bob Kowalski and Torbj\"orn Lager and Fariba Sadri and Miguel Calejo },
  title = {Using {SWISH} to realise interactive web based tutorials for logic based languages },
  journal = {Theory and Practice of Logic Programming},
  year = {2019},
  volume = {19},
  doi = {10.1017/S1471068418000522},
  number = {2},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  pages = {229--261},
  pdf = {https://arxiv.org/pdf/1808.08042.pdf},
  abstract = {Programming environments have evolved from purely text-based to using graphical user interfaces, 
  and now we see a move toward web-based interfaces, such as Jupyter. Web-based interfaces allow for the 
  creation of interactive documents that consist of text and programs, as well as their output. The output 
  can be rendered using web technology as, for example, text, tables, charts, or graphs. This approach 
  is particularly suitable for capturing data analysis workflows and creating interactive educational 
  material. This article describes SWISH, a web front-end for Prolog that consists of a web server 
  implemented in SWI-Prolog and a client web application written in JavaScript. SWISH provides a 
  web server where multiple users can manipulate and run the same material, and it can be adapted 
  to support Prolog extensions. In this article we describe the architecture of SWISH, and describe 
  two case studies of extensions of Prolog, namely Probabilistic Logic Programming and Logic Production 
  System, which have used SWISH to provide tutorial sites.},
  keywords = {Prolog, logic programming system, notebook interface, web},
  scopus = {2-s2.0-85061599946}
}
@article{AlbGavLam20-FI-IJ,
  author = {Marco Alberti and
               Marco Gavanelli and
               Evelina Lamma and
               Fabrizio Riguzzi and
               Ken Satoh and
               Riccardo Zese},
  title = {Dischargeable Obligations in the {SCIFF} Framework},
  journal = {Fundamenta Informaticae},
  volume = {176},
  number = {3-4},
  pages = {321--348},
  year = {2020},
  doi = {10.3233/FI-2020-1976},
  publisher = {IOS Press}
}
@article{CheCotGavLamMelRig20-EAAI-IJ,
  author = {Federico Chesani and
               Giuseppe Cota and
               Marco Gavanelli and
               Evelina Lamma and
               Paola Mello and
               Fabrizio Riguzzi},
  title = {Declarative and Mathematical Programming approaches to Decision Support
               Systems for food recycling},
  journal = {Engineering Applications of Artificial Intelligence},
  volume = {95},
  pages = {103861},
  year = {2020},
  doi = {10.1016/j.engappai.2020.103861},
  scopus = {2-s2.0-85089188550}
}
@article{BelAlbRig20-TPLP-IJ,
  author = {Elena Bellodi and Marco Alberti and Fabrizio Riguzzi and Riccardo Zese},
  title = {{MAP} Inference for Probabilistic Logic Programming},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  year = {2020},
  url = {https://arxiv.org/abs/2008.01394},
  volume = {20},
  doi = {10.1017/S1471068420000174},
  pdf = {https://arxiv.org/pdf/2008.01394.pdf},
  number = {5},
  pages = {641--655}
}
@article{NguRigBerTru2021-BioDM-IJ,
  abstract = {With the increase in the size of genomic datasets describing variability in populations, extracting relevant information becomes increasingly useful as well as complex. Recently, computational methodologies such as Supervised Machine Learning and specifically Convolutional Neural Networks have been proposed to make inferences on demographic and adaptive processes using genomic data. Even though it was already shown to be powerful and efficient in different fields of investigation, Supervised Machine Learning has still to be explored as to unfold its enormous potential in evolutionary genomics.},
  author = {Nguembang Fadja, Arnaud and Riguzzi, Fabrizio and Bertorelle, Giorgio and Trucchi, Emiliano},
  doi = {10.1186/s13040-021-00280-9},
  issn = {1756-0381},
  journal = {BioData Mining},
  number = {1},
  pages = {51},
  title = {Identification of natural selection in genomic data with deep convolutional neural network},
  volume = {14},
  year = {2021}
}
@article{FraLamRig21-ML-IJ,
  title = {Symbolic {DNN-Tuner}},
  author = {Michele Fraccaroli and
               Evelina Lamma and
               Fabrizio Riguzzi},
  journal = {Machine Learning},
  publisher = {Springer},
  copyright = {Springer},
  year = {2021},
  abstract = {Hyper-Parameter Optimization (HPO) occupies a fundamental role
in Deep Learning systems due to the number of hyper-parameters (HPs) to be
set. The state-of-the-art HPO methods are Grid Search, Random Search and
Bayesian Optimization. The first two methods try all possible combinations
and random combinations of the HP values, respectively. This is performed in
a blind manner, without any information for choosing the new set of HP
values. Bayesian Optimization (BO), instead, keeps track of past results and uses
them to build a probabilistic model mapping HPs into a probability density of
the objective function. Bayesian Optimization builds a surrogate probabilistic
model of the objective function, finds the HP values that perform best on the
surrogate model and updates it with new results. In this paper, we improve BO
applied to Deep Neural Networks (DNNs) by adding an analysis of the results
of the network on training and validation sets. This analysis is performed by
exploiting rule-based programming, and in particular by using Probabilistic
Logic Programming. The resulting system, called Symbolic DNN-Tuner, logically
evaluates the results obtained from the training and the validation phase
and, by applying symbolic tuning rules, fixes the network architecture and its
HPs, therefore improving performance. We also show the effectiveness of the
proposed approach by an experimental evaluation on literature and real-life
datasets.},
  keywords = {Deep Learning, Hyper-Parameter Optimization, Probabilistic Logic Programming},
  doi = {10.1007/s10994-021-06097-1},
  issn = {1573-0565}
}
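
The analysis layer added on top of Bayesian Optimization can be illustrated with plain if-then rules over training and validation metrics. The thresholds and suggested fixes below are invented for the sketch; Symbolic DNN-Tuner expresses such tuning rules in Probabilistic Logic Programming and attaches probabilities to them.

def diagnose(train_loss, val_loss, train_acc, val_acc):
    # Rule-based reading of the learning curves (thresholds are illustrative).
    actions = []
    if val_loss - train_loss > 0.3 or train_acc - val_acc > 0.15:
        actions.append("overfitting: increase dropout or add regularization")
    if train_acc < 0.7:
        actions.append("underfitting: increase capacity or train longer")
    if not actions:
        actions.append("metrics look healthy: keep current hyper-parameters")
    return actions

for action in diagnose(train_loss=0.25, val_loss=0.80, train_acc=0.93, val_acc=0.71):
    print(action)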
@article{NguRigLam21-ML-IJ,
  author = {Nguembang Fadja, Arnaud  and Fabrizio Riguzzi and Evelina Lamma},
  title = {Learning Hierarchical Probabilistic Logic Programs},
  journal = {Machine Learning},
  publisher = {Springer},
  copyright = {Springer},
  year = {2021},
  doi = {10.1007/s10994-021-06016-4},
  url = {https://link.springer.com/content/pdf/10.1007/s10994-021-06016-4.pdf},
  abstract = {
Probabilistic logic programming (PLP) combines logic programs and probabilities. Due to its expressiveness and simplicity, it has been considered as a powerful tool for learning and reasoning in relational domains characterized by uncertainty. Still, learning the parameters and the structure of general PLP is computationally expensive due to the inference cost. We have recently proposed a restriction of the general PLP language called hierarchical PLP (HPLP) in which clauses and predicates are hierarchically organized. HPLPs can be converted into arithmetic circuits or deep neural networks and inference is much cheaper than for general PLP. In this paper we present algorithms for learning both the parameters and the structure of HPLPs from data. We first present an algorithm, called parameter learning for hierarchical probabilistic logic programs (PHIL), which performs parameter estimation of HPLPs using gradient descent and expectation maximization. We also propose structure learning of hierarchical probabilistic logic programming (SLEAHP), which learns both the structure and the parameters of HPLPs from data. Experiments were performed comparing PHIL and SLEAHP with state-of-the-art PLP and Markov Logic Networks systems for parameter and structure learning respectively. PHIL was compared with EMBLEM, ProbLog2 and Tuffy, and SLEAHP with SLIPCOVER, PROBFOIL+, MLN-BC, MLN-BT and RDN-B. The experiments on five well known datasets show that our algorithms achieve similar and often better accuracies but in a shorter time.
},
  keywords = {Probabilistic Logic Programming, Distribution Semantics, Arithmetic Circuits, Gradient Descent, Back-propagation},
  address = {Berlin, Germany},
  scopus = {2-s2.0-85107994928},
  volume = {110},
  number = {7},
  pages = {1637--1693},
  issn = {1573-0565}
}
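
The conversion of HPLPs into arithmetic circuits mentioned above reduces inference to two node types: the literals in a clause body are combined by product, and multiple clauses for the same head by probabilistic sum, 1 - (1 - p)(1 - q). A minimal sketch on an invented three-clause program:

# Toy circuit for:  query <- a (0.3), b.   b <- c (0.8).   b <- d (0.5).
# (Invented program; the clause syntax in this comment is informal.)

def AND(*ps):
    out = 1.0
    for p in ps:
        out *= p                       # product node: conjunction
    return out

def OR(*ps):
    out = 0.0
    for p in ps:
        out = 1 - (1 - out) * (1 - p)  # probabilistic-sum node: disjunction
    return out

print(AND(0.3, OR(0.8, 0.5)))  # 0.3 * (1 - 0.2 * 0.5) = 0.27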
@article{AzzRig21-ICLP-IJ,
  title = {Optimizing Probabilities in Probabilistic Logic Programs},
  doi = {10.1017/S1471068421000260},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  author = {Azzolini, Damiano and Riguzzi, Fabrizio},
  year = {2021},
  volume = {21},
  number = {5},
  pages = {543--556},
  url = {https://arxiv.org/pdf/2108.03095},
  pdf = {https://arxiv.org/pdf/2108.03095.pdf}
}
@article{AzzRigLam21-AIJ-IJ,
  title = {A Semantics for Hybrid Probabilistic Logic Programs with Function Symbols},
  author = {Azzolini, Damiano and Riguzzi, Fabrizio and Lamma, Evelina},
  journal = {Artificial Intelligence},
  year = {2021},
  copyright = {Elsevier},
  issn = {0004-3702},
  url = {http://ml.unife.it/wp-content/uploads/Papers/AzzRigLam21-AIJ-IJ.pdf},
  doi = {10.1016/j.artint.2021.103452},
  note = {The final publication is available at Elsevier via \url{https://doi.org/10.1016/j.artint.2021.103452} },
  volume = {294},
  pages = {103452}
}
@article{LosVen21-JEGTP-IJ,
  author = {Losi, Enzo and Venturini, Mauro and Manservigi, Lucrezia and Ceschini, Giuseppe Fabio and Bechini, Giovanni and Cota, Giuseppe and Riguzzi, Fabrizio},
  title = {Structured Methodology for Clustering Gas Turbine Transients by means of Multi-variate Time Series},
  year = {2021},
  publisher = {ASME},
  journal = {Journal of Engineering for Gas Turbines and Power},
  volume = {143},
  number = {3},
  pages = {031014-1 (13 pages)},
  doi = {10.1115/1.4049503}
}
@article{RigBelZesAlbLam21-ML-IJ,
  author = {Riguzzi, Fabrizio and Bellodi, Elena and Zese, Riccardo and Alberti, Marco and Lamma, Evelina},
  title = {Probabilistic inductive constraint logic},
  journal = {Machine Learning},
  year = {2021},
  volume = {110},
  number = {4},
  pages = {723--754},
  doi = {10.1007/s10994-020-05911-6},
  pdf = {https://link.springer.com/content/pdf/10.1007/s10994-020-05911-6.pdf},
  publisher = {Springer},
  issn = {0885-6125},
  abstract = {Probabilistic logical models deal effectively with uncertain relations and entities typical of many real world domains. In the field of probabilistic logic programming usually the aim is to learn these kinds of models to predict specific atoms or predicates of the domain, called target atoms/predicates. However, it might also be useful to learn classifiers for interpretations as a whole: to this end, we consider the models produced by the inductive constraint logic system, represented by sets of integrity constraints, and we propose a probabilistic version of them. Each integrity constraint is annotated with a probability, and the resulting probabilistic logical constraint model assigns a probability of being positive to interpretations. To learn both the structure and the parameters of such probabilistic models we propose the system PASCAL for “probabilistic inductive constraint logic”. Parameter learning can be performed using gradient descent or L-BFGS. PASCAL has been tested on 11 datasets and compared with a few statistical relational systems and a system that builds relational decision trees (TILDE): we demonstrate that PASCAL achieves better or comparable results in terms of area under the precision–recall and receiver operating characteristic curves, in a comparable execution time.}
}
@article{BelAlbRig21-TPLP-IJ,
  author = {Elena Bellodi and
               Marco Gavanelli and
               Riccardo Zese and
               Evelina Lamma and
               Fabrizio Riguzzi},
  title = {Nonground Abductive Logic Programming with Probabilistic Integrity Constraints},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  year = {2021},
  url = {https://arxiv.org/abs/2108.03033},
  volume = {21},
  doi = {10.1017/S1471068421000417},
  pdf = {https://arxiv.org/pdf/2108.03033.pdf},
  number = {5},
  pages = {557--574}
}
@article{RocChiNal2022-APPSCI-IJ,
  author = {Rocchi, Alessandro and Chiozzi, Andrea and Nale, Marco and Nikolic, Zeljana and Riguzzi, Fabrizio and Mantovan, Luana and Gilli, Alessandro and Benvenuti, Elena},
  title = {A Machine Learning Framework for Multi-Hazard Risk Assessment at the Regional Scale in Earthquake and Flood-Prone Areas},
  journal = {Applied Sciences},
  volume = {12},
  year = {2022},
  number = {2},
  article-number = {583},
  url = {https://www.mdpi.com/2076-3417/12/2/583},
  issn = {2076-3417},
  abstract = {Communities are confronted with the rapidly growing impact of disasters, due to many factors that cause an increase in the vulnerability of society combined with an increase in hazardous events such as earthquakes and floods. The possible impacts of such events are large, even in developed countries, and governments and stakeholders must adopt risk reduction strategies at different levels of management stages of the communities. This study is aimed at proposing a sound qualitative multi-hazard risk analysis methodology for the assessment of combined seismic and hydraulic risk at the regional scale, which can assist governments and stakeholders in decision making and prioritization of interventions. The method is based on the use of machine learning techniques to aggregate large datasets made of many variables of different nature, each of which carries information related to specific risk components, and to cluster observations. The framework is applied to the case study of the Emilia Romagna region, for which the different municipalities are grouped into four homogeneous clusters ranked in terms of relative levels of combined risk. The proposed approach proves to be robust and delivers a very useful tool for hazard management and disaster mitigation, particularly for multi-hazard modeling at the regional scale.},
  doi = {10.3390/app12020583}
}
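
A schematic version of the clustering step described above, with synthetic data standing in for the seismic and hydraulic indicators of the municipalities; the number of variables, the ranking heuristic, and the data are all invented.

import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(1)
X = rng.normal(size=(330, 6))               # ~330 municipalities, 6 risk indicators
X_std = StandardScaler().fit_transform(X)   # variables differ in nature and scale
labels = KMeans(n_clusters=4, n_init=10, random_state=0).fit_predict(X_std)

# Crude combined-risk ordering: rank clusters by their mean standardized value.
order = np.argsort([X_std[labels == k].mean() for k in range(4)])
print("clusters ranked low to high combined risk:", list(order))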
@article{AzzBellFer2022-IJAR-IJ,
  title = {Abduction with probabilistic logic programming under the distribution semantics},
  journal = {International Journal of Approximate Reasoning},
  volume = {142},
  pages = {41--63},
  year = {2022},
  issn = {0888-613X},
  doi = {10.1016/j.ijar.2021.11.003},
  url = {https://www.sciencedirect.com/science/article/pii/S0888613X2100181X},
  author = {Damiano Azzolini and Elena Bellodi and Stefano Ferilli and Fabrizio Riguzzi and Riccardo Zese},
  keywords = {Abduction, Distribution semantics, Probabilistic logic programming, Statistical relational artificial intelligence},
  abstract = {In Probabilistic Abductive Logic Programming we are given a probabilistic logic program, a set of abducible facts, and a set of constraints. Inference in probabilistic abductive logic programs aims to find a subset of the abducible facts that is compatible with the constraints and that maximizes the joint probability of the query and the constraints. In this paper, we extend the PITA reasoner with an algorithm to perform abduction on probabilistic abductive logic programs exploiting Binary Decision Diagrams. Tests on several synthetic datasets show the effectiveness of our approach.},
  scopus = {2-s2.0-85119493622}
}
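
The inference task defined above can be prototyped by brute force: enumerate subsets of the abducibles, discard those that violate the integrity constraints or fail to entail the query, and keep the most probable one. Everything in the sketch is invented (facts, probabilities, the entailment and constraint checks), the score is just the product of the chosen facts' probabilities as a coarse stand-in for the joint probability, and the extended PITA replaces this enumeration with Binary Decision Diagrams.

from itertools import combinations

abducibles = {"f1": 0.9, "f2": 0.6, "f3": 0.4}

def entails_query(s):
    return "f1" in s or ("f2" in s and "f3" in s)  # toy entailment relation

def satisfies_constraints(s):
    return not ("f1" in s and "f2" in s)           # toy constraint: f1, f2 exclusive

best = None
for r in range(len(abducibles) + 1):
    for subset in combinations(abducibles, r):
        s = set(subset)
        if entails_query(s) and satisfies_constraints(s):
            prob = 1.0
            for f in s:
                prob *= abducibles[f]
            if best is None or prob > best[0]:
                best = (prob, s)

print(best)  # (0.9, {'f1'})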
@article{FraLamRig2022-SwX-IJ,
  title = {Symbolic {DNN-Tuner}: A {Python} and {ProbLog}-based system for optimizing Deep Neural Networks hyperparameters},
  journal = {SoftwareX},
  volume = {17},
  pages = {100957},
  year = {2022},
  issn = {2352-7110},
  doi = {10.1016/j.softx.2021.100957},
  url = {https://www.sciencedirect.com/science/article/pii/S2352711021001825},
  author = {Michele Fraccaroli and Evelina Lamma and Fabrizio Riguzzi},
  keywords = {Deep learning, Probabilistic Logic Programming, Hyper-parameters tuning, Neural-symbolic integration},
  abstract = {The application of deep learning models to increasingly complex contexts has led to a rise in the complexity of the models themselves. Due to this, there is an increase in the number of hyper-parameters (HPs) to be set and Hyper-Parameter Optimization (HPO) algorithms occupy a fundamental role in deep learning. Bayesian Optimization (BO) is the state-of-the-art of HPO for deep learning models. BO keeps track of past results and uses them to build a probabilistic model, building a probability density of HPs. This work aims to improve BO applied to Deep Neural Networks (DNNs) by an analysis of the results of the network on training and validation sets. This analysis is obtained by applying symbolic tuning rules, implemented in Probabilistic Logic Programming (PLP). The resulting system, called Symbolic DNN-Tuner, logically evaluates the results obtained from the training and the validation phase and, by applying symbolic tuning rules, fixes the network architecture, and its HPs, leading to improved performance. In this paper, we present the general system and its implementation. We also show its graphical interface and a simple example of execution.}
}
@article{LosVen22-JEGTP-IJ,
  author = {Losi, Enzo and Venturini, Mauro and Manservigi, Lucrezia and Ceschini, Giuseppe Fabio and Bechini, Giovanni and Cota, Giuseppe and Riguzzi, Fabrizio},
  title = {Prediction of Gas Turbine Trip: A Novel Methodology Based on Random Forest Models},
  journal = {Journal of Engineering for Gas Turbines and Power},
  volume = {144},
  number = {3},
  year = {2022},
  issn = {0742-4795},
  doi = {10.1115/1.4053194},
  publisher = {ASME},
  note = {{GTP-21-1324}}
}
@article{AzzRig2022-CRYPT-IJ,
  author = {Azzolini, Damiano and Riguzzi, Fabrizio},
  title = {Probabilistic Logic Models for the Lightning Network},
  journal = {Cryptography},
  volume = {6},
  year = {2022},
  number = {2},
  article-number = {29},
  url = {https://www.mdpi.com/2410-387X/6/2/29},
  pdf = {https://www.mdpi.com/2410-387X/6/2/29/pdf?version=1655360685},
  issn = {2410-387X},
  doi = {10.3390/cryptography6020029}
}
@article{AzzRig23-IJAR-IJ,
  title = {Lifted Inference for Statistical Statements in Probabilistic Answer Set Programming},
  author = {Damiano Azzolini and Fabrizio Riguzzi},
  journal = {International Journal of Approximate Reasoning},
  year = {2023},
  doi = {10.1016/j.ijar.2023.109040},
  pages = {109040},
  volume = {163},
  issn = {0888-613X},
  url = {https://www.sciencedirect.com/science/article/pii/S0888613X23001718},
  keywords = {Statistical statements, Probabilistic answer set programming, Lifted inference},
  abstract = {In 1990, Halpern proposed the distinction between Type 1 and Type 2 statements: the former express statistical information about a domain of interest while the latter define a degree of belief. An example of Type 1 statement is “30% of the elements of a domain share the same property” while an example of Type 2 statement is “the element x has the property y with probability p”. Recently, Type 1 statements were given an interpretation in terms of probabilistic answer set programs under the credal semantics in the PASTA framework. The algorithm proposed for inference requires the enumeration of all the answer sets of a given program, and so it is impractical for domains of not trivial size. The field of lifted inference aims to identify programs where inference can be computed without grounding the program. In this paper, we identify some classes of PASTA programs for which we apply lifted inference and develop compact formulas to compute the probability bounds of a query without the need to generate all the possible answer sets.},
  scopus = {2-s2.0-85174067981}
}
@article{SchVDBRig23-TPLP-IJ,
  title = {Automatic Differentiation in Prolog},
  doi = {10.1017/S1471068423000145},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  author = {Schrijvers, Tom and Van Den Berg, Birthe and Riguzzi, Fabrizio},
  year = {2023},
  pages = {900--917},
  volume = {23},
  number = {4},
  pdf = {https://arxiv.org/pdf/2305.07878.pdf}
}
@article{GreSalFab23-Biomed-IJ,
  author = {Greco, Salvatore and Salatiello, Alessandro and Fabbri, Nicolò and Riguzzi, Fabrizio and Locorotondo, Emanuele and Spaggiari, Riccardo and De Giorgi, Alfredo and Passaro, Angelina},
  title = {Rapid Assessment of {COVID-19} Mortality Risk with {GASS} Classifiers},
  journal = {Biomedicines},
  volume = {11},
  year = {2023},
  number = {3},
  article-number = {831},
  url = {https://www.mdpi.com/2227-9059/11/3/831},
  issn = {2227-9059},
  abstract = {Risk prediction models are fundamental to effectively triage incoming COVID-19 patients. However, current triaging methods often have poor predictive performance, are based on variables that are expensive to measure, and often lead to hard-to-interpret decisions. We introduce two new classification methods that can predict COVID-19 mortality risk from the automatic analysis of routine clinical variables with high accuracy and interpretability. SVM22-GASS and Clinical-GASS classifiers leverage machine learning methods and clinical expertise, respectively. Both were developed using a derivation cohort of 499 patients from the first wave of the pandemic and were validated with an independent validation cohort of 250 patients from the second pandemic phase. The Clinical-GASS classifier is a threshold-based classifier that leverages the General Assessment of SARS-CoV-2 Severity (GASS) score, a COVID-19-specific clinical score that recently showed its effectiveness in predicting the COVID-19 mortality risk. The SVM22-GASS model is a binary classifier that non-linearly processes clinical data using a Support Vector Machine (SVM). In this study, we show that SMV22-GASS was able to predict the mortality risk of the validation cohort with an AUC of 0.87 and an accuracy of 0.88, better than most scores previously developed. Similarly, the Clinical-GASS classifier predicted the mortality risk of the validation cohort with an AUC of 0.77 and an accuracy of 0.78, on par with other established and emerging machine-learning-based methods. Our results demonstrate the feasibility of accurate COVID-19 mortality risk prediction using only routine clinical variables, readily collected in the early stages of hospital admission.},
  doi = {10.3390/biomedicines11030831}
}
@article{AzzBelKieRig24-TPLP-IJ,
  title = {Solving Decision Theory Problems with Probabilistic Answer Set Programming},
  author = {Damiano Azzolini and Elena Bellodi and Rafael Kiesel and Fabrizio Riguzzi},
  year = {2024},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  pdf = {https://arxiv.org/pdf/2408.11371},
  url = {https://arxiv.org/abs/2408.11371}
}
@article{Rig24-QMI-IJ,
  title = {Quantum Algorithms for Weighted Constrained Sampling and Weighted Model Counting},
  author = {Fabrizio Riguzzi},
  year = {2024},
  journal = {Quantum Machine Intelligence},
  volume = {6},
  number = {2},
  pages = {73},
  doi = {10.1007/s42484-024-00209-5}
}
@article{AzzRig24-TPLP-IJ,
  title = {Probabilistic Answer Set Programming with Discrete and Continuous Random Variables},
  doi = {10.1017/S1471068424000437},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  author = {Azzolini, Damiano and Riguzzi, Fabrizio},
  year = {2024},
  pages = {1--32},
  url = {https://www.cambridge.org/core/journals/theory-and-practice-of-logic-programming/article/probabilistic-answer-set-programming-with-discrete-and-continuous-random-variables/2BE5A5EAEFFA47D29CA57C2969FF1B9E}
}
@article{AzzRig24-ICLP-IJ,
  title = {Fast Inference for Probabilistic Answer Set Programs via the Residual Program},
  author = {Damiano Azzolini and Fabrizio Riguzzi},
  year = {2024},
  url = {https://arxiv.org/abs/2408.07524},
  pdf = {https://arxiv.org/pdf/2408.07524},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press}
}
@article{AzzGenRig24-ICLP-IJ,
  title = {Symbolic Parameter Learning in Probabilistic Answer Set Programming},
  author = {Damiano Azzolini and Elisabetta Gentili and Fabrizio Riguzzi},
  year = {2024},
  pdf = {https://arxiv.org/pdf/2408.08732},
  url = {https://arxiv.org/abs/2408.08732},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press}
}
@article{BizFraLam24-FAI-IJ,
  author = {Bizzarri, Alice and Fraccaroli, Michele and Lamma, Evelina and Riguzzi, Fabrizio},
  title = {Integration between constrained optimization and deep networks: a survey},
  journal = {Frontiers in Artificial Intelligence},
  volume = {7},
  year = {2024},
  url = {https://www.frontiersin.org/articles/10.3389/frai.2024.1414707},
  doi = {10.3389/frai.2024.1414707},
  issn = {2624-8212},
  abstract = {Integration between constrained optimization and deep networks has garnered significant interest from both research and industrial laboratories. Optimization techniques can be employed to optimize the choice of network structure based not only on loss and accuracy but also on physical constraints. Additionally, constraints can be imposed during training to enhance the performance of networks in specific contexts. This study surveys the literature on the integration of constrained optimization with deep networks. Specifically, we examine the integration of hyper-parameter tuning with physical constraints, such as the number of FLOPS (FLoating point Operations Per Second), a measure of computational capacity, latency, and other factors. This study also considers the use of context-specific knowledge constraints to improve network performance. We discuss the integration of constraints in neural architecture search (NAS), considering the problem as both a multi-objective optimization (MOO) challenge and through the imposition of penalties in the loss function. Furthermore, we explore various approaches that integrate logic with deep neural networks (DNNs). In particular, we examine logic-neural integration through constrained optimization applied during the training of NNs and the use of semantic loss, which employs the probabilistic output of the networks to enforce constraints on the output.}
}
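
The semantic-loss route surveyed above fits in a few lines: for a constraint such as "exactly one of two Boolean outputs is true", the loss is the negative log-probability that the network's (assumed independent) outputs satisfy the constraint, which is differentiable and can simply be added to the training objective. A minimal PyTorch sketch:

import torch

def semantic_loss_exactly_one(p):
    # P(exactly one of the two outputs is true) under independent outputs.
    sat = p[0] * (1 - p[1]) + (1 - p[0]) * p[1]
    return -torch.log(sat)

p = torch.tensor([0.9, 0.2], requires_grad=True)
loss = semantic_loss_exactly_one(p)
loss.backward()  # gradients push the outputs toward satisfying the constraint
print(loss.item(), p.grad)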
@article{Rig24-JCS-IJ,
  article_type = {journal},
  title = {Machine Learning Approaches for the Prediction of Gas Turbine Transients},
  author = {Fadja, Arnaud Nguembang and Cota, Giuseppe and Bertasi, Francesco and Riguzzi, Fabrizio and Losi, Enzo and Manservigi, Lucrezia and Venturini, Mauro and Bechini, Giovanni},
  volume = {20},
  number = {5},
  year = {2024},
  month = {Feb},
  pages = {495--510},
  doi = {10.3844/jcssp.2024.495.510},
  url = {https://thescipub.com/abstract/jcssp.2024.495.510},
  abstract = {Gas Turbine (GT) emergency shutdowns can lead to energy production interruption and may also reduce the lifespan of a turbine. In order to remain competitive in the market, it is necessary to improve the reliability and availability of GTs by developing predictive maintenance systems that are able to predict future conditions of GTs within a certain time. Predicting such situations not only helps to take corrective measures to avoid service unavailability but also eases the process of maintenance and considerably reduces maintenance costs. Huge amounts of sensor data are collected from GTs, making monitoring impossible for human operators even with the help of computers. Machine learning techniques could provide support for handling large amounts of sensor data and building decision models for predicting GT future conditions. The paper presents an application of machine learning based on decision trees and k-nearest neighbors for predicting the rotational speed of gas turbines. The aim is to distinguish steady states (e.g., GT operation at normal conditions) from transients (e.g., GT trip or shutdown). The different steps of a machine learning pipeline, starting from data extraction to model testing, are implemented and analyzed. Experiments are performed by applying decision trees, extremely randomized trees, and k-nearest neighbors to sensor data collected from GTs located in different countries. The trained models were able to predict steady state and transient with more than 93% accuracy. This research advances predictive maintenance methods and suggests exploring advanced machine learning algorithms, real-time data integration, and explainable AI techniques to enhance gas turbine behavior understanding and develop more adaptable maintenance systems for industrial applications.},
  journal = {Journal of Computer Science},
  publisher = {Science Publications}
}
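
A schematic reconstruction of the pipeline described above, on synthetic data: the feature construction, thresholds, and labels are invented, whereas the study uses real multi-country GT sensor logs and also evaluates extremely randomized trees.

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

rng = np.random.default_rng(0)
n = 1000
# Simulated rotational-speed features: mean level and short-term slope.
speed_mean = np.where(rng.random(n) < 0.5, 3000, 1500) + rng.normal(0, 50, n)
speed_slope = np.where(speed_mean > 2000, 0.0, -5.0) + rng.normal(0, 1, n)
X = np.column_stack([speed_mean, speed_slope])
y = (speed_mean < 2000).astype(int)  # 1 = transient (trip/shutdown), 0 = steady state

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
for model in (DecisionTreeClassifier(max_depth=5), KNeighborsClassifier(n_neighbors=5)):
    model.fit(X_tr, y_tr)
    print(type(model).__name__, accuracy_score(y_te, model.predict(X_te)))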

This file was generated by bibtex2html 1.98.