@article{alfaro2026pdpr,
title = {PDPR: Panoramic-depth place recognition through the fusion of visual and geometric-aware features},
journal = {Neurocomputing},
volume = {677},
pages = {133112},
year = {2026},
issn = {0925-2312},
  doi = {10.1016/j.neucom.2026.133112},
url = {https://www.sciencedirect.com/science/article/pii/S0925231226005096},
author = {Marcos Alfaro and Juan José Cabrera and Arturo Gil and Oscar Reinoso and Luis Payá},
keywords = {Place recognition, Panoramic images, Monocular depth estimation, Data fusion},
  abstract = {Omnidirectional cameras are a suitable and cost-effective choice for Visual Place Recognition (VPR), as they provide comprehensive information from the scene regardless of the robot's orientation. However, vision sensors are vulnerable to changes in environmental appearance (e.g., illumination, weather, season or moving objects). While multi-modal sensing approaches can overcome these challenges, they introduce significant cost and system complexity. This paper introduces PDPR (Panoramic-Depth Place Recognition), a novel fusion framework that enhances the robustness of VPR methods by integrating visual data with geometric features derived from monocular depth estimation techniques, while using a single-camera setup. In the ablation study, both early and late fusion strategies are evaluated to optimally combine appearance-based and depth-derived features. The extensive evaluation on challenging indoor and outdoor datasets demonstrates that PDPR consistently boosts retrieval performance across multiple state-of-the-art VPR models. Furthermore, this improvement is achieved without requiring any fine-tuning, allowing our method to function as a pluggable module for pretrained models. Consequently, this work presents a powerful, practical and low-cost solution for robust VPR, with high potential to scale as monocular depth estimation and VPR models continue to improve. The project website can be found at https://marcosalfaro.github.io/projects-PDPR/.}
}