@article{AzzRig24-ICLP-IJ, title = {Fast Inference for Probabilistic Answer Set Programs via the Residual Program}, author = {Damiano Azzolini and Fabrizio Riguzzi}, year = {2024}, url = {https://arxiv.org/pdf/2408.07524}, journal = {Theory and Practice of Logic Programming}, publisher = {Cambridge University Press} }

@article{AzzGenRig24-ICLP-IJ, title = {Symbolic Parameter Learning in Probabilistic Answer Set Programming}, author = {Damiano Azzolini and Elisabetta Gentili and Fabrizio Riguzzi}, year = {2024}, url = {https://arxiv.org/pdf/2408.08732}, journal = {Theory and Practice of Logic Programming}, publisher = {Cambridge University Press} }

@inproceedings{AzzBonGenRig24-CILC-NC, title = {Logic Programming for Knowledge Graph Completion}, author = {Damiano Azzolini and Matteo Bonato and Elisabetta Gentili and Fabrizio Riguzzi}, year = {2024}, editor = {Emanuele {De Angelis} and Maurizio Proietti}, booktitle = {Proceedings of the 39th Italian Conference on Computational Logic (CILC2024)}, series = {CEUR Workshop Proceedings}, publisher = {Sun {SITE} Central Europe}, address = {Aachen, Germany}, issn = {1613-0073}, venue = {Rome, Italy}, volume = {3733}, pages = {1--14}, pdf = {https://ceur-ws.org/Vol-3733/paper4.pdf} }

@article{Rig24-arXiv-TR, title = {Quantum Algorithms for Weighted Constrained Sampling and Weighted Model Counting}, author = {Fabrizio Riguzzi}, year = {2024}, journal = {arXiv}, eprint = {2407.12816}, archiveprefix = {arXiv}, primaryclass = {quant-ph}, url = {https://arxiv.org/abs/2407.12816}, pdf = {https://arxiv.org/pdf/2407.12816} }

@article{BizFraLam24-FAI-IJ, author = {Bizzarri, Alice and Fraccaroli, Michele and Lamma, Evelina and Riguzzi, Fabrizio}, title = {Integration between constrained optimization and deep networks: a survey}, journal = {Frontiers in Artificial Intelligence}, volume = {7}, year = {2024}, url = {https://www.frontiersin.org/articles/10.3389/frai.2024.1414707}, doi = {10.3389/frai.2024.1414707}, issn = {2624-8212}, abstract = {Integration between constrained optimization and deep networks has garnered significant interest from both research and industrial laboratories. Optimization techniques can be employed to optimize the choice of network structure based not only on loss and accuracy but also on physical constraints. Additionally, constraints can be imposed during training to enhance the performance of networks in specific contexts. This study surveys the literature on the integration of constrained optimization with deep networks. Specifically, we examine the integration of hyper-parameter tuning with physical constraints, such as the number of FLOPS (FLoating point Operations Per Second), a measure of computational capacity, latency, and other factors. This study also considers the use of context-specific knowledge constraints to improve network performance. We discuss the integration of constraints in neural architecture search (NAS), considering the problem as both a multi-objective optimization (MOO) challenge and through the imposition of penalties in the loss function. Furthermore, we explore various approaches that integrate logic with deep neural networks (DNNs). In particular, we examine logic-neural integration through constrained optimization applied during the training of NNs and the use of semantic loss, which employs the probabilistic output of the networks to enforce constraints on the output.} }

@inproceedings{AzzRig24-UAI-IC, title = {Inference in Probabilistic Answer Set Programs with Imprecise Probabilities via Optimization}, author = {Damiano Azzolini and Fabrizio Riguzzi}, booktitle = {The 40th Conference on Uncertainty in Artificial Intelligence}, year = {2024}, url = {https://openreview.net/forum?id=h5VFqO681Y} }

@article{Rig23-JCS-IJ, article_type = {journal}, title = {Machine Learning Approaches for the Prediction of Gas Turbine Transients}, author = {Fadja, Arnaud Nguembang and Cota, Giuseppe and Bertasi, Francesco and Riguzzi, Fabrizio and Losi, Enzo and Manservigi, Lucrezia and Venturini, Mauro and Bechini, Giovanni}, volume = {20}, number = {5}, year = {2024}, month = feb, pages = {495--510}, doi = {10.3844/jcssp.2024.495.510}, url = {https://thescipub.com/abstract/jcssp.2024.495.510}, abstract = {Gas Turbine (GT) emergency shutdowns can lead to energy production interruption and may also reduce the lifespan of a turbine. In order to remain competitive in the market, it is necessary to improve the reliability and availability of GTs by developing predictive maintenance systems that are able to predict future conditions of GTs within a certain time. Predicting such situations not only helps to take corrective measures to avoid service unavailability but also eases the process of maintenance and considerably reduces maintenance costs. Huge amounts of sensor data are collected from (GTs) making monitoring impossible for human operators even with the help of computers. Machine learning techniques could provide support for handling large amounts of sensor data and building decision models for predicting GT future conditions. The paper presents an application of machine learning based on decision trees and k-nearest neighbors for predicting the rotational speed of gas turbines. The aim is to distinguish steady states (e.g., GT operation at normal conditions) from transients (e.g., GT trip or shutdown). The different steps of a machine learning pipeline, starting from data extraction to model testing are implemented and analyzed. Experiments are performed by applying decision trees, extremely randomized trees, and k-nearest neighbors to sensor data collected from GTs located in different countries. 
The trained models were able to predict steady state and transient with more than 93% accuracy. This research advances predictive maintenance methods and suggests exploring advanced machine learning algorithms, real-time data integration, and explainable AI techniques to enhance gas turbine behavior understanding and develop more adaptable maintenance systems for industrial applications.}, journal = {Journal of Computer Science}, publisher = {Science Publications} }

@inproceedings{AzzBelRig24-ILP-IC, author = {Azzolini, Damiano and Bellodi, Elena and Riguzzi, Fabrizio}, editor = {Muggleton, Stephen H. and Tamaddoni-Nezhad, Alireza}, title = {Learning the Parameters of Probabilistic Answer Set Programs}, booktitle = {Inductive Logic Programming - ILP 2022}, year = {2024}, publisher = {Springer Nature Switzerland}, address = {Cham}, pages = {1--14}, isbn = {978-3-031-55630-2}, series = {Lecture Notes in Computer Science}, volume = {14461}, doi = {10.1007/978-3-031-55630-2_1}, url = {https://link.springer.com/chapter/10.1007/978-3-031-55630-2_1} }

@inproceedings{GenBizAzz23-ILP-IC, author = {Gentili, Elisabetta and Bizzarri, Alice and Azzolini, Damiano and Zese, Riccardo and Riguzzi, Fabrizio}, editor = {Bellodi, Elena and Lisi, Francesca Alessandra and Zese, Riccardo}, title = {Regularization in Probabilistic Inductive Logic Programming}, booktitle = {Inductive Logic Programming - ILP 2023}, year = {2023}, publisher = {Springer Nature Switzerland}, address = {Cham}, pages = {16--29}, isbn = {978-3-031-49299-0}, doi = {10.1007/978-3-031-49299-0_2}, series = {Lecture Notes in Computer Science}, volume = {14363}, url = {https://link.springer.com/chapter/10.1007/978-3-031-49299-0_2} }

@inproceedings{AzzBelRig2023-summary-statements-IC, author = {Azzolini, Damiano and Bellodi, Elena and Riguzzi, Fabrizio}, title = {Summary of Statistical Statements in Probabilistic Logic Programming}, year = {2023}, booktitle = {Electronic Proceedings in Theoretical Computer Science, EPTCS}, volume = {385}, pages = {384--385}, doi = {10.4204/EPTCS.385.41}, url = {https://cgi.cse.unsw.edu.au/~eptcs/content.cgi?ICLP2023#EPTCS385.41} }

@inproceedings{AzzBelRig2023-towardsdt-IC, author = {Azzolini, Damiano and Bellodi, Elena and Riguzzi, Fabrizio}, title = {Towards a Representation of Decision Theory Problems with Probabilistic Answer Set Programs}, year = {2023}, booktitle = {Electronic Proceedings in Theoretical Computer Science, EPTCS}, volume = {385}, pages = {190--191}, doi = {10.4204/EPTCS.385.19}, url = {https://cgi.cse.unsw.edu.au/~eptcs/content.cgi?ICLP2023#EPTCS385.19} }

@inproceedings{AzzRig2023-AIXIA-IC, title = {Inference in Probabilistic Answer Set Programming under the Credal Semantics}, author = {Damiano Azzolini and Fabrizio Riguzzi}, booktitle = {AIxIA 2023 - Advances in Artificial Intelligence}, year = {2023}, editor = {Roberto Basili and Domenico Lembo and Carla Limongelli and Andrea Orlandini}, publisher = {Springer}, volume = {14318}, address = {Heidelberg, Germany}, series = {Lecture Notes in Artificial Intelligence}, venue = {Roma, Italy}, eventdate = {November 6--9, 2023}, doi = {10.1007/978-3-031-47546-7_25}, url = {https://link.springer.com/chapter/10.1007/978-3-031-47546-7_25}, pages = {367--380} }

@article{AzzRig23-IJAR-IJ, title = {Lifted Inference for Statistical Statements in Probabilistic Answer Set Programming}, author = {Damiano Azzolini and Fabrizio Riguzzi}, journal = {International Journal of Approximate Reasoning}, year = {2023}, doi = {10.1016/j.ijar.2023.109040}, pages = {109040}, volume = {163}, issn = {0888-613X}, url = {https://www.sciencedirect.com/science/article/pii/S0888613X23001718}, keywords = {Statistical statements, Probabilistic answer set programming, Lifted inference}, abstract = {In 1990, Halpern proposed the distinction between Type 1 and Type 2 statements: the former express statistical information about a domain of interest while the latter define a degree of belief. An example of Type 1 statement is “30% of the elements of a domain share the same property” while an example of Type 2 statement is “the element x has the property y with probability p”. Recently, Type 1 statements were given an interpretation in terms of probabilistic answer set programs under the credal semantics in the PASTA framework. The algorithm proposed for inference requires the enumeration of all the answer sets of a given program, and so it is impractical for domains of not trivial size. The field of lifted inference aims to identify programs where inference can be computed without grounding the program. In this paper, we identify some classes of PASTA programs for which we apply lifted inference and develop compact formulas to compute the probability bounds of a query without the need to generate all the possible answer sets.}, scopus = {2-s2.0-85174067981} }

@article{SchVDBRig23-TPLP-IJ, title = {Automatic Differentiation in Prolog}, doi = {10.1017/S1471068423000145}, journal = {Theory and Practice of Logic Programming}, publisher = {Cambridge University Press}, author = {Schrijvers, Tom and Van Den Berg, Birthe and Riguzzi, Fabrizio}, year = {2023}, pages = {900--917}, volume = {23}, number = {4}, pdf = {https://arxiv.org/pdf/2305.07878.pdf} }

@inproceedings{AzzGenRigPLP2023-IW, author = {Azzolini, Damiano and Gentili, Elisabetta and Riguzzi, Fabrizio}, title = {Link Prediction in Knowledge Graphs with Probabilistic Logic Programming: Work in Progress}, series = {{CEUR} Workshop Proceedings}, booktitle = {Proceedings of the International Conference on Logic Programming 2023 Workshops co-located with the 39th International Conference on Logic Programming ({ICLP} 2023)}, editor = {Arias, Joaquín and Batsakis, Sotiris and Faber, Wolfgang and Gupta, Gopal and Pacenza, Francesco and Papadakis, Emmanuel and Robaldo, Livio and Ruckschloss, Kilian and Salazar, Elmer and Saribatur, Zeynep G. and Tachmazidis, Ilias and Weitkamper, Felix and Wyner, Adam}, volume = {3437}, pages = {1--4}, publisher = {CEUR-WS.org}, year = {2023}, url = {https://ceur-ws.org/Vol-3437/short5PLP.pdf} }

@inproceedings{AzzBelRigMAP-AIXIA-IC, author = {Azzolini, Damiano and Bellodi, Elena and Riguzzi, Fabrizio}, editor = {Dovier, Agostino and Montanari, Angelo and Orlandini, Andrea}, title = {{MAP} Inference in Probabilistic Answer Set Programs}, booktitle = {AIxIA 2022 -- Advances in Artificial Intelligence}, year = {2023}, publisher = {Springer International Publishing}, address = {Cham}, pages = {413--426}, abstract = {Reasoning with uncertain data is a central task in artificial intelligence. In some cases, the goal is to find the most likely assignment to a subset of random variables, named query variables, while some other variables are observed. This task is called Maximum a Posteriori (MAP). When the set of query variables is the complement of the observed variables, the task goes under the name of Most Probable Explanation (MPE). In this paper, we introduce the definitions of cautious and brave MAP and MPE tasks in the context of Probabilistic Answer Set Programming under the credal semantics and provide an algorithm to solve them. Empirical results show that the brave version of both tasks is usually faster to compute. On the brave MPE task, the adoption of a state-of-the-art ASP solver makes the computation much faster than a naive approach based on the enumeration of all the worlds.}, isbn = {978-3-031-27181-6}, url = {https://link.springer.com/chapter/10.1007/978-3-031-27181-6_29}, doi = {10.1007/978-3-031-27181-6_29} }

@inproceedings{AzzBelRigApprox-AIXIA-IC, author = {Azzolini, Damiano and Bellodi, Elena and Riguzzi, Fabrizio}, editor = {Dovier, Agostino and Montanari, Angelo and Orlandini, Andrea}, title = {Approximate Inference in Probabilistic Answer Set Programming for Statistical Probabilities}, booktitle = {AIxIA 2022 -- Advances in Artificial Intelligence}, year = {2023}, publisher = {Springer International Publishing}, address = {Cham}, pages = {33--46}, abstract = {``Type 1'' statements were introduced by Halpern in 1990 with the goal to represent statistical information about a domain of interest. These are of the form ``x{\%} of the elements share the same property''. The recently proposed language PASTA (Probabilistic Answer set programming for STAtistical probabilities) extends Probabilistic Logic Programs under the Distribution Semantics and allows the definition of this type of statements. To perform exact inference, PASTA programs are converted into probabilistic answer set programs under the Credal Semantics. However, this algorithm is infeasible for scenarios when more than a few random variables are involved. Here, we propose several algorithms to perform both conditional and unconditional approximate inference in PASTA programs and test them on different benchmarks. The results show that approximate algorithms scale to hundreds of variables and thus can manage real world domains.}, isbn = {978-3-031-27181-6}, url = {https://link.springer.com/chapter/10.1007/978-3-031-27181-6_3}, doi = {10.1007/978-3-031-27181-6_3} }

@article{GreSalFab23-Biomed-IJ, author = {Greco, Salvatore and Salatiello, Alessandro and Fabbri, Nicolò and Riguzzi, Fabrizio and Locorotondo, Emanuele and Spaggiari, Riccardo and De Giorgi, Alfredo and Passaro, Angelina}, title = {Rapid Assessment of {COVID-19} Mortality Risk with {GASS} Classifiers}, journal = {Biomedicines}, volume = {11}, year = {2023}, number = {3}, article-number = {831}, url = {https://www.mdpi.com/2227-9059/11/3/831}, issn = {2227-9059}, abstract = {Risk prediction models are fundamental to effectively triage incoming COVID-19 patients. However, current triaging methods often have poor predictive performance, are based on variables that are expensive to measure, and often lead to hard-to-interpret decisions. We introduce two new classification methods that can predict COVID-19 mortality risk from the automatic analysis of routine clinical variables with high accuracy and interpretability. SVM22-GASS and Clinical-GASS classifiers leverage machine learning methods and clinical expertise, respectively. Both were developed using a derivation cohort of 499 patients from the first wave of the pandemic and were validated with an independent validation cohort of 250 patients from the second pandemic phase. The Clinical-GASS classifier is a threshold-based classifier that leverages the General Assessment of SARS-CoV-2 Severity (GASS) score, a COVID-19-specific clinical score that recently showed its effectiveness in predicting the COVID-19 mortality risk. The SVM22-GASS model is a binary classifier that non-linearly processes clinical data using a Support Vector Machine (SVM). In this study, we show that SMV22-GASS was able to predict the mortality risk of the validation cohort with an AUC of 0.87 and an accuracy of 0.88, better than most scores previously developed. 
Similarly, the Clinical-GASS classifier predicted the mortality risk of the validation cohort with an AUC of 0.77 and an accuracy of 0.78, on par with other established and emerging machine-learning-based methods. Our results demonstrate the feasibility of accurate COVID-19 mortality risk prediction using only routine clinical variables, readily collected in the early stages of hospital admission.}, doi = {10.3390/biomedicines11030831} }

*This file was generated by
bibtex2html 1.98.*