@inproceedings{12432,
  abstract     = {We present CertifyHAM, a deterministic algorithm that takes a graph G as input and either finds a Hamilton cycle of G or outputs that such a cycle does not exist. If G ∼ G(n, p) and p ≥ 100 log n/n then the expected running time of CertifyHAM is O(n/p), which is best possible. This improves upon previous results due to Gurevich and Shelah, Thomason, and Alon and Krivelevich, who proved analogous results for p being constant, p ≥ 12n^{-1/3} and p ≥ 70n^{-1/2}, respectively.},
  author       = {Anastos, Michael},
  booktitle    = {63rd Annual IEEE Symposium on Foundations of Computer Science},
  isbn         = {9781665455190},
  issn         = {0272-5428},
  location     = {Denver, CO, United States},
  pages        = {919--930},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Solving the Hamilton cycle problem fast on average}},
  doi          = {10.1109/FOCS54457.2022.00091},
  volume       = {2022-October},
  year         = {2022},
}

@inproceedings{12452,
  abstract     = {Portrait viewpoint and illumination editing is an important problem with several applications in VR/AR, movies, and photography. Comprehensive knowledge of geometry and illumination is critical for obtaining photorealistic results. Current methods are unable to explicitly model in 3D while handling both viewpoint and illumination editing from a single image. In this paper, we propose VoRF, a novel approach that can take even a single portrait image as input and relight human heads under novel illuminations that can be viewed from arbitrary viewpoints. VoRF represents a human head as a continuous volumetric field and learns a prior model of human heads using a coordinate-based MLP with separate latent spaces for identity and illumination. The prior model is learnt in an auto-decoder manner over a diverse class of head shapes and appearances, allowing VoRF to generalize to novel test identities from a single input image. Additionally, VoRF has a reflectance MLP that uses the intermediate features of the prior model for rendering One-Light-at-A-Time (OLAT) images under novel views. We synthesize novel illuminations by combining these OLAT images with target environment maps. Qualitative and quantitative evaluations demonstrate the effectiveness of VoRF for relighting and novel view synthesis even when applied to unseen subjects under uncontrolled illuminations.},
  author       = {Rao, Pramod and B R, Mallikarjun and Fox, Gereon and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Tewari, Ayush and Theobalt, Christian and Elgharib, Mohamed},
  booktitle    = {33rd British Machine Vision Conference},
  location     = {London, United Kingdom},
  publisher    = {British Machine Vision Association and Society for Pattern Recognition},
  title        = {{VoRF: Volumetric Relightable Faces}},
  year         = {2022},
}

@article{12480,
  abstract     = {We consider the problem of estimating a signal from measurements obtained via a generalized linear model. We focus on estimators based on approximate message passing (AMP), a family of iterative algorithms with many appealing features: the performance of AMP in the high-dimensional limit can be succinctly characterized under suitable model assumptions; AMP can also be tailored to the empirical distribution of the signal entries, and for a wide class of estimation problems, AMP is conjectured to be optimal among all polynomial-time algorithms. However, a major issue of AMP is that in many models (such as phase retrieval), it requires an initialization correlated with the ground-truth signal and independent of the measurement matrix. Assuming that such an initialization is available is typically not realistic. In this paper, we solve this problem by proposing an AMP algorithm initialized with a spectral estimator. With such an initialization, the standard AMP analysis fails since the spectral estimator depends in a complicated way on the design matrix. Our main contribution is a rigorous characterization of the performance of AMP with spectral initialization in the high-dimensional limit. The key technical idea is to define and analyze a two-phase artificial AMP algorithm that first produces the spectral estimator, and then closely approximates the iterates of the true AMP. We also provide numerical results that demonstrate the validity of the proposed approach.},
  author       = {Mondelli, Marco and Venkataramanan, Ramji},
  issn         = {1742-5468},
  journal      = {Journal of Statistical Mechanics: Theory and Experiment},
  keywords     = {Statistics, Probability and Uncertainty, Statistics and Probability, Statistical and Nonlinear Physics},
  number       = {11},
  publisher    = {IOP Publishing},
  title        = {{Approximate message passing with spectral initialization for generalized linear models}},
  doi          = {10.1088/1742-5468/ac9828},
  volume       = {2022},
  year         = {2022},
}

@article{12495,
  abstract     = {Fairness-aware learning aims at constructing classifiers that not only make accurate predictions, but also do not discriminate against specific groups. It is a fast-growing area of machine learning with far-reaching societal impact. However, existing fair learning methods are vulnerable to accidental or malicious artifacts in the training data, which can cause them to unknowingly produce unfair classifiers. In this work we address the problem of fair learning from unreliable training data in the robust multisource setting, where the available training data comes from multiple sources, a fraction of which might not be representative of the true data distribution. We introduce FLEA, a filtering-based algorithm that identifies and suppresses those data sources that would have a negative impact on fairness or accuracy if they were used for training. As such, FLEA is not a replacement for prior fairness-aware learning methods but rather an augmentation that makes any of them robust against unreliable training data. We demonstrate the effectiveness of our approach through a diverse range of experiments on multiple datasets. Additionally, we prove formally that, given enough data, FLEA protects the learner against corruptions as long as the fraction of affected data sources is less than half. Our source code and documentation are available at https://github.com/ISTAustria-CVML/FLEA.},
  author       = {Iofinova, Eugenia B and Konstantinov, Nikola H and Lampert, Christoph},
  issn         = {2835-8856},
  journal      = {Transactions on Machine Learning Research},
  publisher    = {ML Research Press},
  title        = {{FLEA: Provably robust fair multisource learning from unreliable training data}},
  year         = {2022},
}

@inproceedings{12508,
  abstract     = {We explore the notion of history-determinism in the context of timed automata (TA). History-deterministic automata are those in which nondeterminism can be resolved on the fly, based on the run constructed thus far. History-determinism is a robust property that admits different game-based characterisations, and history-deterministic specifications allow for game-based verification without an expensive determinization step.
We show yet another characterisation of history-determinism in terms of fair simulation, at the general level of labelled transition systems: a system is history-deterministic if and only if it fairly simulates all language-smaller systems.
For timed automata over infinite timed words it is known that universality is undecidable for Büchi TA. We show that for history-deterministic TA with arbitrary parity acceptance, timed universality, inclusion, and synthesis all remain decidable and are ExpTime-complete.
For the subclass of TA with safety or reachability acceptance, we show that checking whether such an automaton is history-deterministic is decidable (in ExpTime), and history-deterministic TA with safety acceptance are effectively determinizable without introducing new automata states.},
  author       = {Henzinger, Thomas A and Lehtinen, Karoliina and Totzke, Patrick},
  booktitle    = {33rd International Conference on Concurrency Theory},
  isbn         = {9783959772464},
  issn         = {1868-8969},
  location     = {Warsaw, Poland},
  pages        = {14:1--14:21},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{History-deterministic timed automata}},
  doi          = {10.4230/LIPIcs.CONCUR.2022.14},
  volume       = {243},
  year         = {2022},
}

@inproceedings{12509,
  abstract     = {A graph game is a two-player zero-sum game in which the players move a token throughout a graph to produce an infinite path, which determines the winner or payoff of the game. In bidding games, both players have budgets, and in each turn, we hold an "auction" (bidding) to determine which player moves the token. In this survey, we consider several bidding mechanisms and their effect on the properties of the game. Specifically, bidding games, and in particular bidding games of infinite duration, have an intriguing equivalence with random-turn games in which in each turn, the player who moves is chosen randomly. We summarize how minor changes in the bidding mechanism lead to unexpected differences in the equivalence with random-turn games.},
  author       = {Avni, Guy and Henzinger, Thomas A},
  booktitle    = {47th International Symposium on Mathematical Foundations of Computer Science},
  isbn         = {9783959772563},
  issn         = {1868-8969},
  location     = {Vienna, Austria},
  pages        = {3:1--3:6},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{An updated survey of bidding games on graphs}},
  doi          = {10.4230/LIPIcs.MFCS.2022.3},
  volume       = {241},
  year         = {2022},
}

@article{12510,
  abstract     = {We introduce a new statistical verification algorithm that formally quantifies the behavioral robustness of any time-continuous process formulated as a continuous-depth model. Our algorithm solves a set of global optimization (Go) problems over a given time horizon to construct a tight enclosure (Tube) of the set of all process executions starting from a ball of initial states. We call our algorithm GoTube. Through its construction, GoTube ensures that the bounding tube is conservative up to a desired probability and up to a desired tightness.
 GoTube is implemented in JAX and optimized to scale to complex continuous-depth neural network models. Compared to advanced reachability analysis tools for time-continuous neural networks, GoTube does not accumulate overapproximation errors between time steps and avoids the infamous wrapping effect inherent in symbolic techniques. We show that GoTube substantially outperforms state-of-the-art verification tools in terms of the size of the initial ball, speed, time-horizon, task completion, and scalability on a large set of experiments.
 GoTube is stable and sets the state of the art in terms of its ability to scale to time horizons well beyond what has been previously possible.},
  author       = {Gruenbacher, Sophie A. and Lechner, Mathias and Hasani, Ramin and Rus, Daniela and Henzinger, Thomas A and Smolka, Scott A. and Grosu, Radu},
  isbn         = {9781577358350},
  issn         = {2374-3468},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  number       = {6},
  pages        = {6755--6764},
  publisher    = {Association for the Advancement of Artificial Intelligence},
  title        = {{GoTube: Scalable statistical verification of continuous-depth models}},
  doi          = {10.1609/aaai.v36i6.20631},
  volume       = {36},
  year         = {2022},
}

@inproceedings{12516,
  abstract     = {The homogeneous continuous LWE (hCLWE) problem is to distinguish samples of a specific high-dimensional Gaussian mixture from standard normal samples. It was shown to be at least as hard as Learning with Errors, but no reduction in the other direction is currently known.
We present four new public-key encryption schemes based on the hardness of hCLWE, with varying tradeoffs between decryption and security errors, and different discretization techniques. Our schemes yield a polynomial-time algorithm for solving hCLWE using a Statistical Zero-Knowledge oracle.},
  author       = {Bogdanov, Andrej and Cueto Noval, Miguel and Hoffmann, Charlotte and Rosen, Alon},
  booktitle    = {Theory of Cryptography},
  isbn         = {9783031223648},
  issn         = {1611-3349},
  location     = {Chicago, IL, United States},
  pages        = {565--592},
  publisher    = {Springer Nature},
  title        = {{Public-Key Encryption from Homogeneous CLWE}},
  doi          = {10.1007/978-3-031-22365-5_20},
  volume       = {13748},
  year         = {2022},
}

@inproceedings{12529,
  abstract     = {We consider turn-based stochastic 2-player games on graphs with ω-regular winning conditions. We provide a direct symbolic algorithm for solving such games when the winning condition is formulated as a Rabin condition. For a stochastic Rabin game with k pairs over a game graph with n vertices, our algorithm runs in O(n^{k+2} k!) symbolic steps, which improves the state of the art.
We have implemented our symbolic algorithm, along with performance optimizations including parallelization and acceleration, in a BDD-based synthesis tool called Fairsyn. We demonstrate the superiority of Fairsyn compared to the state of the art on a set of synthetic benchmarks derived from the VLTS benchmark suite and on a control system benchmark from the literature. In our experiments, Fairsyn performed significantly faster with up to two orders of magnitude improvement in computation time.},
  author       = {Banerjee, Tamajit and Majumdar, Rupak and Mallik, Kaushik and Schmuck, Anne-Kathrin and Soudjani, Sadegh},
  booktitle    = {28th International Conference on Tools and Algorithms for the Construction and Analysis of Systems},
  location     = {Munich, Germany},
  pages        = {81--98},
  publisher    = {Springer Nature},
  title        = {{A direct symbolic algorithm for solving stochastic Rabin games}},
  doi          = {10.1007/978-3-030-99527-0_5},
  volume       = {13244},
  year         = {2022},
}

@inproceedings{12530,
  abstract     = {We present BOCoSy, a tool for Bounded symbolic Output-feedback Controller Synthesis. Given a specification, BOCoSy synthesizes symbolic output-feedback controllers which interact with a given plant via a pre-defined finite symbolic interface. BOCoSy solves this problem by a new lazy abstraction-refinement technique which starts with a very coarse abstraction of the external trace semantics of the given plant and iteratively removes non-admissible behavior from this abstract model until a controller is found. BOCoSy steers the search for controllers towards small and concise state space representations by utilizing ideas from bounded synthesis. As a result, BOCoSy returns small and explainable controllers that are still powerful enough to solve the given synthesis problem. We show that BOCoSy is able to synthesize small, human-readable symbolic controllers quickly on a set of benchmarks.},
  author       = {Finkbeiner, Bernd and Mallik, Kaushik and Passing, Noemi and Schledjewski, Malte and Schmuck, Anne-Kathrin},
  booktitle    = {25th ACM International Conference on Hybrid Systems: Computation and Control},
  isbn         = {9781450391962},
  location     = {Milan, Italy},
  pages        = {24:1--24:11},
  publisher    = {ACM},
  title        = {{BOCoSy: Small but powerful symbolic output-feedback control}},
  doi          = {10.1145/3501710.3519535},
  year         = {2022},
}

@inproceedings{12536,
  abstract     = {We consider the problem of estimating a rank-1 signal corrupted by structured rotationally invariant noise, and address the following question: how well do inference algorithms perform when the noise statistics are unknown and hence Gaussian noise is assumed? While the matched Bayes-optimal setting with unstructured noise is well understood, the analysis of this mismatched problem is still in its early stages. In this paper, we take a step towards understanding the effect of a strong source of mismatch: the noise statistics. Our main technical contribution is the rigorous analysis of a Bayes estimator and of an approximate message passing (AMP) algorithm, both of which incorrectly assume a Gaussian setup. The first result exploits the theory of spherical integrals and of low-rank matrix perturbations; the idea behind the second one is to design and analyze an artificial AMP which, by taking advantage of the flexibility in the denoisers, is able to "correct" the mismatch. Armed with these sharp asymptotic characterizations, we unveil a rich and often unexpected phenomenology. For example, even though AMP is in principle designed to efficiently compute the Bayes estimator, the former is outperformed by the latter in terms of mean-square error. We show that this performance gap is due to an incorrect estimation of the signal norm. In fact, when the SNR is large enough, the overlaps of the AMP and the Bayes estimator coincide, and they even match those of optimal estimators taking into account the structure of the noise.},
  author       = {Barbier, Jean and Hou, TianQi and Mondelli, Marco and Saenz, Manuel},
  booktitle    = {36th Conference on Neural Information Processing Systems},
  isbn         = {9781713871088},
  location     = {New Orleans, LA, United States},
  title        = {{The price of ignorance: How much does it cost to forget noise structure in low-rank matrix estimation?}},
  volume       = {35},
  year         = {2022},
}

@inproceedings{12537,
  abstract     = {The Neural Tangent Kernel (NTK) has emerged as a powerful tool to provide memorization, optimization and generalization guarantees in deep neural networks. A line of work has studied the NTK spectrum for two-layer and deep networks with at least one layer with Ω(N) neurons, N being the number of training samples. Furthermore, there is increasing evidence suggesting that deep networks with sub-linear layer widths are powerful memorizers and optimizers, as long as the number of parameters exceeds the number of samples. Thus, a natural open question is whether the NTK is well conditioned in such a challenging sub-linear setup. In this paper, we answer this question in the affirmative. Our key technical contribution is a lower bound on the smallest NTK eigenvalue for deep networks with the minimum possible over-parameterization: the number of parameters is roughly Ω(N) and, hence, the number of neurons is as few as Ω(√N). To showcase the applicability of our NTK bounds, we provide two results concerning memorization capacity and optimization guarantees for gradient descent training.},
  author       = {Bombari, Simone and Amani, Mohammad Hossein and Mondelli, Marco},
  booktitle    = {36th Conference on Neural Information Processing Systems},
  isbn         = {9781713871088},
  issn         = {1049-5258},
  location     = {New Orleans, LA, United States},
  pages        = {7628--7640},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Memorization and optimization in deep neural networks with minimum over-parameterization}},
  volume       = {35},
  year         = {2022},
}

@article{12538,
  abstract     = {In this paper, we study the compression of a target two-layer neural network with N nodes into a compressed network with M<N nodes. More precisely, we consider the setting in which the weights of the target network are i.i.d. sub-Gaussian, and we minimize the population L_2 loss between the outputs of the target and of the compressed network, under the assumption of Gaussian inputs. By using tools from high-dimensional probability, we show that this non-convex problem can be simplified when the target network is sufficiently over-parameterized, and provide the error rate of this approximation as a function of the input dimension and N. In this mean-field limit, the simplified objective, as well as the optimal weights of the compressed network, does not depend on the realization of the target network, but only on expected scaling factors. Furthermore, for networks with ReLU activation, we conjecture that the optimum of the simplified optimization problem is achieved by taking weights on the Equiangular Tight Frame (ETF), while the scaling of the weights and the orientation of the ETF depend on the parameters of the target network. Numerical evidence is provided to support this conjecture.},
  author       = {Amani, Mohammad Hossein and Bombari, Simone and Mondelli, Marco and Pukdee, Rattana and Rini, Stefano},
  isbn         = {9781665483414},
  journal      = {IEEE Information Theory Workshop},
  location     = {Mumbai, India},
  pages        = {588--593},
  publisher    = {IEEE},
  title        = {{Sharp asymptotics on the compression of two-layer neural networks}},
  doi          = {10.1109/ITW54588.2022.9965870},
  year         = {2022},
}

@inproceedings{12540,
  abstract     = {We consider the problem of signal estimation in generalized linear models defined via rotationally invariant design matrices. Since these matrices can have an arbitrary spectral distribution, this model is well suited for capturing complex correlation structures which often arise in applications. We propose a novel family of approximate message passing (AMP) algorithms for signal estimation, and rigorously characterize their performance in the high-dimensional limit via a state evolution recursion. Our rotationally invariant AMP has complexity of the same order as the existing AMP derived under the restrictive assumption of a Gaussian design; our algorithm also recovers this existing AMP as a special case. Numerical results showcase a performance close to Vector AMP (which is conjectured to be Bayes-optimal in some settings), but obtained with a much lower complexity, as the proposed algorithm does not require a computationally expensive singular value decomposition.},
  author       = {Venkataramanan, Ramji and Kögler, Kevin and Mondelli, Marco},
  booktitle    = {Proceedings of the 39th International Conference on Machine Learning},
  location     = {Baltimore, MD, United States},
  publisher    = {ML Research Press},
  title        = {{Estimation in rotationally invariant generalized linear models via approximate message passing}},
  volume       = {162},
  year         = {2022},
}

@inproceedings{12568,
  abstract     = {We treat the problem of risk-aware control for stochastic shortest path (SSP) on Markov decision processes (MDPs). Typically, expectation is considered for SSP, which, however, is oblivious to the incurred risk. We present an alternative view, instead optimizing conditional value-at-risk (CVaR), an established risk measure. We treat both Markov chains and MDPs and introduce, through novel insights, two algorithms, based on linear programming and value iteration, respectively. Both algorithms offer precise and provably correct solutions. Evaluation of our prototype implementation shows that risk-aware control is feasible on several moderately sized models.},
  author       = {Meggendorfer, Tobias},
  booktitle    = {Proceedings of the 36th AAAI Conference on Artificial Intelligence, AAAI 2022},
  isbn         = {9781577358763},
  issn         = {2374-3468},
  location     = {Virtual},
  number       = {9},
  pages        = {9858--9867},
  publisher    = {Association for the Advancement of Artificial Intelligence},
  title        = {{Risk-aware stochastic shortest path}},
  doi          = {10.1609/aaai.v36i9.21222},
  volume       = {36},
  year         = {2022},
}

@article{12573,
  abstract     = {Supraglacial debris strongly modulates glacier melt rates and can be decisive for ice dynamics and mountain hydrology. It is ubiquitous in High-Mountain Asia, yet because its thickness and supply rate from local topography are poorly known, our ability to forecast regional glacier change and streamflow is limited. Here we combined remote sensing and numerical modelling to resolve supraglacial debris thickness by altitude for 4689 glaciers in High-Mountain Asia, and debris-supply rate to 4141 of those glaciers. Our results reveal extensively thin supraglacial debris and high spatial variability in both debris thickness and supply rate. Debris-supply rate increases with the temperature and slope of debris-supply slopes regionally, and debris thickness increases as ice flow decreases locally. Our centennial-scale estimates of debris-supply rate are typically an order of magnitude or more lower than millennial-scale estimates of headwall-erosion rate from Beryllium-10 cosmogenic nuclides, potentially reflecting episodic debris supply to the region’s glaciers.},
  author       = {McCarthy, Michael and Miles, Evan and Kneib, Marin and Buri, Pascal and Fugger, Stefan and Pellicciotti, Francesca},
  issn         = {2662-4435},
  journal      = {Communications Earth & Environment},
  keywords     = {General Earth and Planetary Sciences, General Environmental Science},
  publisher    = {Springer Nature},
  title        = {{Supraglacial debris thickness and supply rate in High-Mountain Asia}},
  doi          = {10.1038/s43247-022-00588-2},
  volume       = {3},
  year         = {2022},
}

@article{12574,
  abstract     = {Melt from supraglacial ice cliffs is an important contributor to the mass loss of debris-covered glaciers. However, ice cliff contribution is difficult to quantify as they are highly dynamic features, and the paucity of observations of melt rates and their variability leads to large modelling uncertainties. We quantify monsoon season melt and 3D evolution of four ice cliffs over two debris-covered glaciers in High Mountain Asia (Langtang Glacier, Nepal, and 24K Glacier, China) at very high resolution using terrestrial photogrammetry applied to imagery captured from time-lapse cameras installed on lateral moraines. We derive weekly flow-corrected digital elevation models (DEMs) of the glacier surface with a maximum vertical bias of ±0.2 m for Langtang Glacier and ±0.05 m for 24K Glacier and use change detection to determine distributed melt rates at the surfaces of the ice cliffs throughout the study period. We compare the measured melt patterns with those derived from a 3D energy balance model to derive the contribution of the main energy fluxes. We find that ice cliff melt varies considerably throughout the melt season, with maximum melt rates of 5 to 8 cm d⁻¹, and their average melt rates are 11–14 (Langtang) and 4.5 (24K) times higher than those of the surrounding debris-covered ice. Our results highlight the influence of redistributed supraglacial debris on cliff melt. At both sites, ice cliff albedo is influenced by the presence of thin debris at the ice cliff surface, which is largely controlled on 24K Glacier by liquid precipitation events that wash away this debris. Slightly thicker or patchy debris reduces melt by 1–3 cm d⁻¹ at all sites. Ultimately, our observations show a strong spatio-temporal variability in cliff area at each site, which is controlled by supraglacial streams and ponds and englacial cavities that promote debris slope destabilisation and the lateral expansion of the cliffs. These findings highlight the need to better represent processes of debris redistribution in ice cliff models, to in turn improve estimates of ice cliff contribution to glacier melt and the long-term geomorphological evolution of debris-covered glacier surfaces.},
  author       = {Kneib, Marin and Miles, Evan S. and Buri, Pascal and Fugger, Stefan and McCarthy, Michael and Shaw, Thomas E. and Chuanxi, Zhao and Truffer, Martin and Westoby, Matthew J. and Yang, Wei and Pellicciotti, Francesca},
  issn         = {1994-0424},
  journal      = {The Cryosphere},
  keywords     = {Earth-Surface Processes, Water Science and Technology},
  number       = {11},
  pages        = {4701--4725},
  publisher    = {Copernicus Publications},
  title        = {{Sub-seasonal variability of supraglacial ice cliff melt rates and associated processes from time-lapse photogrammetry}},
  doi          = {10.5194/tc-16-4701-2022},
  volume       = {16},
  year         = {2022},
}

@article{12575,
  abstract     = {The current Chilean megadrought has led to acute water shortages in central Chile since 2010. Glaciers have provided vital fresh water to the region's rivers, but the quantity, timing and sustainability of that provision remain unclear. Here we combine in-situ, remote sensing and climate reanalysis data to show that from 2010 to 2018 during the megadrought, unsustainable imbalance ablation of glaciers (ablation not balanced by new snowfall) strongly buffered the late-summer discharge of the Maipo River, a primary source of water to Santiago. If there had been no glaciers, water availability would have been reduced from December through May, with a 31 ± 19% decrease during March. Our results indicate that while the annual contributions of imbalance ablation to river discharge during the megadrought have been small compared to those from precipitation and sustainable balance ablation, they have nevertheless been a substantial input to a hydrological system that was already experiencing high water stress. The water-equivalent volume of imbalance ablation generated in the Maipo Basin between 2010 and 2018 was 740 × 10⁶ m³ (19 ± 12 mm yr⁻¹), approximately 3.4 times the capacity of the basin's El Yeso Reservoir. This is equivalent to 14% of Santiago's potable water use in that time, while total glacier ablation was equivalent to 59%. We show that glacier retreat will exacerbate river discharge deficits and further jeopardize water availability in central Chile if precipitation deficits endure, and conjecture that these effects will be amplified by climatic warming.},
  author       = {McCarthy, Michael and Meier, Fabienne and Fatichi, Simone and Stocker, Benjamin D. and Shaw, Thomas E. and Miles, Evan and Dussaillant, Inés and Pellicciotti, Francesca},
  issn         = {2328-4277},
  journal      = {Earth's Future},
  keywords     = {Earth and Planetary Sciences (miscellaneous), General Environmental Science},
  number       = {10},
  publisher    = {American Geophysical Union},
  title        = {{Glacier contributions to river discharge during the current Chilean megadrought}},
  doi          = {10.1029/2022ef002852},
  volume       = {10},
  year         = {2022},
}

@article{12576,
  abstract     = {Glacier health across High Mountain Asia (HMA) is highly heterogeneous and strongly governed by regional climate, which is variably influenced by monsoon dynamics and the westerlies. We explore four decades of glacier energy and mass balance at three climatically distinct sites across HMA by utilising a detailed land surface model driven by bias-corrected Weather Research and Forecasting meteorological forcing. All three glaciers have experienced long-term mass losses (ranging from −0.04 ± 0.09 to −0.59 ± 0.20 m w.e. a⁻¹) consistent with widespread warming across the region. However, complex and contrasting responses of glacier energy and mass balance to the patterns of the Indian Summer Monsoon were evident, largely driven by the role of snowfall timing, amount and phase. A later monsoon onset generates less total snowfall to the glacier in the southeastern Tibetan Plateau during May–June, augmenting net shortwave radiation and affecting annual mass balance (−0.5 m w.e. on average compared to early onset years). Conversely, timing of the monsoon's arrival has limited impact for the Nepalese Himalaya, which is more strongly governed by the temperature and snowfall amount during the core monsoon season. In the arid central Tibetan Plateau, a later monsoon arrival results in a 40 mm (58%) increase of May–June snowfall on average compared to early onset years, likely driven by the greater interaction of westerly storm events. Meanwhile, a late monsoon cessation at this site sees an average 200 mm (192%) increase in late summer precipitation due to monsoonal storms. A trend towards weaker intensity monsoon conditions in recent decades, combined with long-term warming patterns, has produced predominantly negative glacier mass balances for all sites (up to 1 m w.e. more mass loss in the Nepalese Himalaya compared to strong monsoon intensity years) but sub-regional variability in monsoon timing can additionally complicate this response.},
  author       = {Shaw, T E and Miles, E S and Chen, D and Jouberton, A and Kneib, M and Fugger, S and Ou, T and Lai, H-W and Fujita, K and Yang, W and Fatichi, S and Pellicciotti, Francesca},
  issn         = {1748-9326},
  journal      = {Environmental Research Letters},
  keywords     = {Public Health, Environmental and Occupational Health, General Environmental Science, Renewable Energy, Sustainability and the Environment},
  number       = {10},
  publisher    = {IOP Publishing},
  title        = {{Multi-decadal monsoon characteristics and glacier response in High Mountain Asia}},
  doi          = {10.1088/1748-9326/ac9008},
  volume       = {17},
  year         = {2022},
}

@article{12577,
  abstract     = {Glaciers are key components of the mountain water towers of Asia and are vital for downstream domestic, agricultural, and industrial uses. The glacier mass loss rate over the southeastern Tibetan Plateau is among the highest in Asia and has accelerated in recent decades. This acceleration has been attributed to increased warming, but the mechanisms behind these glaciers' high sensitivity to warming remain unclear, while the influence of changes in precipitation over the past decades is poorly quantified. Here, we reconstruct glacier mass changes and catchment runoff since 1975 at a benchmark glacier, Parlung No. 4, to shed light on the drivers of recent mass losses for the monsoonal, spring-accumulation glaciers of the Tibetan Plateau. Our modeling demonstrates how a temperature increase (mean of 0.39 °C dec⁻¹ since 1990) has accelerated mass loss rates by altering both the ablation and accumulation regimes in a complex manner. The majority of the post-2000 mass loss occurred during the monsoon months, caused by simultaneous decreases in the solid precipitation ratio (from 0.70 to 0.56) and precipitation amount (−10%), leading to reduced monsoon accumulation (−26%). Higher solid precipitation in spring (+18%) during the last two decades was increasingly important in mitigating glacier mass loss by providing mass to the glacier and protecting it from melting in the early monsoon. With bare ice exposed to warmer temperatures for longer periods, ice melt and catchment discharge have unsustainably intensified since the start of the 21st century, raising concerns for long-term water supply and hazard occurrence in the region.},
  author       = {Jouberton, Achille and Shaw, Thomas E. and Miles, Evan and McCarthy, Michael and Fugger, Stefan and Ren, Shaoting and Dehecq, Amaury and Yang, Wei and Pellicciotti, Francesca},
  issn         = {1091-6490},
  journal      = {Proceedings of the National Academy of Sciences},
  keywords     = {Multidisciplinary},
  number       = {37},
  publisher    = {National Academy of Sciences},
  title        = {{Warming-induced monsoon precipitation phase change intensifies glacier mass loss in the southeastern Tibetan Plateau}},
  doi          = {10.1073/pnas.2109796119},
  volume       = {119},
  year         = {2022},
}

