People

Kavya Gupta
Saarland Informatics Campus
Building E1.1, Room 2.26
About me
I am a Postdoctoral researcher with Prof. Isabel Valera at Saarland University since March 2024. I received my Ph.D. from University Paris-Saclay in 2023 under the supervision of Prof. Jean-Christophe Pesquet. I am an alumna of IIITD, India. I am currently working on Society-Aware Machine Learning.
My research focuses on various aspects of understanding vulnerabilities in neural networks. I study the robustness of neural networks: understanding their behavior and providing formal guarantees on the robustness using Lipschitz bounds for making AI systems work in their safe set throughout their life cycle. I am focused on making AI systems secure and trustworthy with scalable and efficient methods. My research interests include robust machine learning, optimization, computer vision.
I am interested in research which leads to innovative as well as practical and real outcomes. Feel free to reach out on twitter or via email.
Publications
2026
Vo, Huyen Thuc Khanh; Valera, Isabel
Hellinger Multimodal Variational Autoencoders Proceedings Article Spotlight
In: The 29th International Conference on Artificial Intelligence and Statistics, 2026.
Abstract | Links | BibTeX | Tags: huyen, isabel, spotlight
@inproceedings{vo2026hellinger,
title = {{Hellinger} Multimodal Variational Autoencoders},
author = {Huyen Thuc Khanh Vo and Isabel Valera},
url = {https://openreview.net/forum?id=mxHyYltMUa},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
booktitle = {The 29th International Conference on Artificial Intelligence and Statistics},
abstract = {Multimodal variational autoencoders (VAEs) are widely used for weakly supervised generative learning with multiple modalities. Predominant methods aggregate unimodal inference distributions using either a product of experts (PoE), a mixture of experts (MoE), or their combinations to approximate the joint posterior. In this work, we revisit multimodal inference through the lens of probabilistic opinion pooling, an optimization-based approach. We start from Hölder pooling with α=0.5, which corresponds to the unique symmetric member of the α-divergence family, and derive a moment-matching approximation, termed Hellinger. We then leverage such an approximation to propose HELVAE, a multimodal VAE that avoids sub-sampling, yielding an efficient yet effective model that: (i) learns more expressive latent representations as additional modalities are observed; and (ii) empirically achieves better trade-offs between generative coherence and quality, outperforming state-of-the-art multimodal VAE models.},
keywords = {huyen, isabel, spotlight},
pubstate = {published},
tppubtype = {inproceedings}
}
2025
Almodóvar, Alejandro; Javaloy, Adrián; Parras, Juan; Zazo, Santiago; Valera, Isabel
DeCaFlow: A Deconfounding Causal Generative Model Journal Article Spotlight
In: CoRR, vol. abs/2503.15114, 2025.
Abstract | Links | BibTeX | Tags: adrian, isabel, spotlight
@article{DBLP:journals/corr/abs-2503-15114,
title = {{DeCaFlow}: A Deconfounding Causal Generative Model},
author = {Alejandro Almodóvar and Adrián Javaloy and Juan Parras and Santiago Zazo and Isabel Valera},
url = {https://doi.org/10.48550/arXiv.2503.15114},
doi = {10.48550/arXiv.2503.15114},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {CoRR},
volume = {abs/2503.15114},
abstract = {We introduce DeCaFlow, a deconfounding causal generative model. Training once per dataset using just observational data and the underlying causal graph, DeCaFlow enables accurate causal inference on continuous variables under the presence of hidden confounders. Specifically, we extend previous results on causal estimation under hidden confounding to show that a single instance of DeCaFlow provides correct estimates for all causal queries identifiable with do-calculus, leveraging proxy variables to adjust for the causal effects when do-calculus alone is insufficient. Moreover, we show that counterfactual queries are identifiable as long as their interventional counterparts are identifiable, and thus are also correctly estimated by DeCaFlow. Our empirical results on diverse settings (including the Ecoli70 dataset, with 3 independent hidden confounders, tens of observed variables and hundreds of causal queries) show that DeCaFlow outperforms existing approaches, while demonstrating its out-of-the-box applicability to any given causal graph},
keywords = {adrian, isabel, spotlight},
pubstate = {published},
tppubtype = {article}
}
2022
Javaloy, Adrián; Meghdadi, Maryam; Valera, Isabel
Mitigating Modality Collapse in Multimodal VAEs via Impartial Optimization Journal Article Spotlight
In: CoRR, vol. abs/2206.04496, 2022.
Abstract | Links | BibTeX | Tags: adrian, isabel, maryam, project-robustgenerative, spotlight, variational autoencoder
@article{DBLP:journals/corr/abs-2206-04496,
title = {Mitigating Modality Collapse in Multimodal {VAEs} via Impartial Optimization},
author = {Adrián Javaloy and Maryam Meghdadi and Isabel Valera},
url = {https://doi.org/10.48550/arXiv.2206.04496},
doi = {10.48550/arXiv.2206.04496},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.04496},
abstract = {A number of variational autoencoders (VAEs) have recently emerged with the aim of modeling multimodal data, e.g., to jointly model images and their corresponding captions. Still, multimodal VAEs tend to focus solely on a subset of the modalities, e.g., by fitting the image while neglecting the caption. We refer to this limitation as modality collapse. In this work, we argue that this effect is a consequence of conflicting gradients during multimodal VAE training. We show how to detect the sub-graphs in the computational graphs where gradients conflict (impartiality blocks), as well as how to leverage existing gradient-conflict solutions from multitask learning to mitigate modality collapse. That is, to ensure impartial optimization across modalities. We apply our training framework to several multimodal VAE models, losses and datasets from the literature, and empirically show that our framework significantly improves the reconstruction performance, conditional generation, and coherence of the latent space across modalities.},
keywords = {adrian, isabel, maryam, project-robustgenerative, spotlight, variational autoencoder},
pubstate = {published},
tppubtype = {article}
}
Javaloy, Adrián; Valera, Isabel
RotoGrad: Gradient Homogenization in Multitask Learning Proceedings Article Spotlight
In: The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25-29, 2022, OpenReview.net, 2022.
Abstract | Links | BibTeX | Tags: adrian, isabel, spotlight
@inproceedings{DBLP:conf/iclr/JavaloyV22,
title = {{RotoGrad}: Gradient Homogenization in Multitask Learning},
author = {Adrián Javaloy and Isabel Valera},
url = {https://openreview.net/forum?id=T8wHz4rnuGL},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {The Tenth International Conference on Learning Representations, {ICLR} 2022, Virtual Event, April 25-29, 2022},
publisher = {OpenReview.net},
abstract = {Multitask learning is being increasingly adopted in applications domains like computer vision and reinforcement learning. However, optimally exploiting its advantages remains a major challenge due to the effect of negative transfer. Previous works have tracked down this issue to the disparities in gradient magnitudes and directions across tasks, when optimizing the shared network parameters. While recent work has acknowledged that negative transfer is a two-fold problem, existing approaches fall short as they only focus on either homogenizing the gradient magnitude across tasks; or greedily change the gradient directions, overlooking future conflicts. In this work, we introduce RotoGrad, an algorithm that tackles negative transfer as a whole: it jointly homogenizes gradient magnitudes and directions, while ensuring training convergence. We show that RotoGrad outperforms competing methods in complex problems, including multi-label classification in CelebA and computer vision tasks in the NYUv2 dataset. A Pytorch implementation can be found in https://github.com/adrianjav/rotograd.},
keywords = {adrian, isabel, spotlight},
pubstate = {published},
tppubtype = {inproceedings}
}
