@COMMENT This bibtex publication entry came from Jeff Bilmes's publication pages at
@COMMENT http://melodi.ee.washington.edu/~bilmes
@COMMENT The complete bibfile can be found at:
@COMMENT http://melodi.ee.washington.edu/~bilmes/bib/bilmes.bib
@COMMENT -
@InProceedings{narang2024uai-online-sub-sup,
  author    = {Adhyyan Narang and Omid Sadeghi and Lillian J. Ratliff and Maryam Fazel and Jeff Bilmes},
  title     = {Efficient Interactive Maximization of {BP} and Weakly Submodular Objectives},
  booktitle = {Uncertainty in Artificial Intelligence (UAI)},
  year      = {2024},
  address   = {Barcelona, Spain},
  month     = {July},
  publisher = {AUAI},
  abstract  = {In the context of online interactive machine learning with combinatorial objectives, we extend purely submodular prior work to more general non-submodular objectives. These include: (1) objectives that are additively decomposable into a sum of two terms (a monotone submodular and a monotone supermodular term, known as a BP decomposition); and (2) objectives that are only weakly submodular. In both cases, this allows representing not only competitive (submodular) but also complementary (supermodular) relationships between objects, extending this setting to a broader range of applications (e.g., movie recommendations and medical treatments) where such relationships are beneficial. Moreover, in the two-term case, we study not only the more typical monolithic feedback approach but also a novel framework where feedback is available separately for each term. With real-world practicality and scalability in mind, we integrate Nystr{\"o}m sketching techniques to significantly reduce the computational complexity, including in the purely submodular case. In the Gaussian process contextual bandits setting, we prove sublinear regret bounds in all cases, and we empirically demonstrate good applicability to recommendation systems and data subset selection.},
}
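@COMMENT For reference, a minimal sketch of the "BP decomposition" and weak
@COMMENT submodularity named in the abstract, in standard submodularity-literature
@COMMENT notation (the symbols f, g, h, V, and \gamma below are illustrative and
@COMMENT not taken from this entry):
@COMMENT   f(A) = g(A) + h(A) over a ground set V, where g is monotone submodular,
@COMMENT     g(A \cup \{v\}) - g(A) \ge g(B \cup \{v\}) - g(B)
@COMMENT     for all A \subseteq B \subseteq V and v \in V \setminus B,
@COMMENT   and h is monotone supermodular (h satisfies the reverse inequality).
@COMMENT   A monotone f is \gamma-weakly submodular, \gamma \in (0, 1], if
@COMMENT     \sum_{v \in B} [ f(A \cup \{v\}) - f(A) ] \ge \gamma [ f(A \cup B) - f(A) ]
@COMMENT   for all disjoint A, B \subseteq V; \gamma = 1 recovers submodularity.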