@article{9675,
  abstract     = {The visualization of data is indispensable in scientific research, from the early stages when human insight forms to the final step of communicating results. In computational physics, chemistry and materials science, it can be as simple as making a scatter plot or as straightforward as looking through the snapshots of atomic positions manually. However, as a result of the "big data" revolution, these conventional approaches are often inadequate. The widespread adoption of high-throughput computation for materials discovery and the associated community-wide repositories have given rise to data sets that contain an enormous number of compounds and atomic configurations. A typical data set contains thousands to millions of atomic structures, along with a diverse range of properties such as formation energies, band gaps, or bioactivities. It would thus be desirable to have a data-driven and automated framework for visualizing and analyzing such structural data sets. The key idea is to construct a low-dimensional representation of the data, which facilitates navigation, reveals underlying patterns, and helps to identify data points with unusual attributes. Such data-intensive maps, often employing machine learning methods, are appearing more and more frequently in the literature. However, to the wider community, it is not always transparent how these maps are made and how they should be interpreted. Furthermore, while these maps undoubtedly serve a decorative purpose in academic publications, it is not always apparent what extra information can be garnered from reading or making them. This Account attempts to answer such questions. We start with a concise summary of the theory of representing chemical environments, followed by the introduction of a simple yet practical conceptual approach for generating structure maps in a generic and automated manner. 
Such analysis and mapping is made nearly effortless by employing the newly developed software tool ASAP. To showcase the applicability to a wide variety of systems in chemistry and materials science, we provide several illustrative examples, including crystalline and amorphous materials, interfaces, and organic molecules. In these examples, the maps not only help to sift through large data sets but also reveal hidden patterns that could be easily missed using conventional analyses. The explosion in the amount of computed information in chemistry and materials science has made visualization into a science in itself. Not only have we benefited from exploiting these visualization methods in previous works, we also believe that the automated mapping of data sets will in turn stimulate further creativity and exploration, as well as ultimately feed back into future advances in the respective fields.},
  author       = {Cheng, Bingqing and Griffiths, Ryan-Rhys and Wengert, Simon and Kunkel, Christian and Stenczel, Tamás and Zhu, Bonan and Deringer, Volker L. and Bernstein, Noam and Margraf, Johannes T. and Reuter, Karsten and Csányi, Gábor},
  issn         = {1520-4898},
  journal      = {Accounts of Chemical Research},
  number       = {9},
  pages        = {1981--1991},
  publisher    = {American Chemical Society},
  title        = {{Mapping materials and molecules}},
  doi          = {10.1021/acs.accounts.0c00403},
  volume       = {53},
  year         = {2020},
}

@article{9685,
  abstract     = {Hydrogen, the simplest and most abundant element in the Universe, develops a remarkably complex behaviour upon compression^1. Since Wigner predicted the dissociation and metallization of solid hydrogen at megabar pressures almost a century ago^2, several efforts have been made to explain the many unusual properties of dense hydrogen, including a rich and poorly understood solid polymorphism^1,3-5, an anomalous melting line^6 and the possible transition to a superconducting state^7. Experiments at such extreme conditions are challenging and often lead to hard-to-interpret and controversial observations, whereas theoretical investigations are constrained by the huge computational cost of sufficiently accurate quantum mechanical calculations. Here we present a theoretical study of the phase diagram of dense hydrogen that uses machine learning to 'learn' potential-energy surfaces and interatomic forces from reference calculations and then predict them at low computational cost, overcoming length- and timescale limitations. We reproduce both the re-entrant melting behaviour and the polymorphism of the solid phase. Simulations using our machine-learning-based potentials provide evidence for a continuous molecular-to-atomic transition in the liquid, with no first-order transition observed above the melting line. This suggests a smooth transition between insulating and metallic layers in giant gas planets, and reconciles existing discrepancies between experiments as a manifestation of supercritical behaviour.},
  author       = {Cheng, Bingqing and Mazzola, Guglielmo and Pickard, Chris J. and Ceriotti, Michele},
  issn         = {1476-4687},
  journal      = {Nature},
  number       = {7824},
  pages        = {217--220},
  publisher    = {Springer Nature},
  title        = {{Evidence for supercritical behaviour of high-pressure liquid hydrogen}},
  doi          = {10.1038/s41586-020-2677-y},
  volume       = {585},
  year         = {2020},
}

@unpublished{9699,
  abstract     = {We investigate the structural similarities between liquid water and 53 ices, including 20 known crystalline phases. We base such similarity comparison on the local environments that consist of atoms within a certain cutoff radius of a central atom. We reveal that liquid water explores the local environments of the diverse ice phases, by directly comparing the environments in these phases using general atomic descriptors, and also by demonstrating that a machine-learning potential trained on liquid water alone can predict the densities, the lattice energies, and vibrational properties of the
ices. The finding that the local environments characterising the different ice phases are found in water sheds light on water phase behaviors, and rationalizes the transferability of water models between different phases.},
  author       = {Monserrat, Bartomeu and Brandenburg, Jan Gerit and Engel, Edgar A. and Cheng, Bingqing},
  note         = {arXiv preprint arXiv:2006.13316},
  title        = {{Extracting ice phases from liquid water: Why a machine-learning water model generalizes so well}},
  doi          = {10.48550/arXiv.2006.13316},
  year         = {2020},
}

@misc{9708,
  abstract     = {This research data supports 'Hard antinodal gap revealed by quantum oscillations in the pseudogap regime of underdoped high-Tc superconductors'. A Readme file for plotting each figure is provided.},
  author       = {Hartstein, Mate and Hsu, Yu-Te and Modic, Kimberly A and Porras, Juan and Loew, Toshinao and Le Tacon, Matthieu and Zuo, Huakun and Wang, Jinhua and Zhu, Zengwei and Chan, Mun and McDonald, Ross and Lonzarich, Gilbert and Keimer, Bernhard and Sebastian, Suchitra and Harrison, Neil},
  publisher    = {Apollo - University of Cambridge},
  title        = {{Accompanying dataset for 'Hard antinodal gap revealed by quantum oscillations in the pseudogap regime of underdoped high-Tc superconductors'}},
  doi          = {10.17863/cam.50169},
  year         = {2020},
}

@misc{9713,
  abstract     = {Additional analyses of the trajectories},
  author       = {Gupta, Chitrak and Khaniya, Umesh and Chan, Chun Kit and Dehez, Francois and Shekhar, Mrinal and Gunner, M.R. and Sazanov, Leonid A and Chipot, Christophe and Singharoy, Abhishek},
  publisher    = {American Chemical Society},
  title        = {{Supporting information}},
  doi          = {10.1021/jacs.9b13450.s001},
  year         = {2020},
}

@misc{9776,
  author       = {Grah, Rok and Friedlander, Tamar},
  publisher    = {Public Library of Science},
  title        = {{Supporting information}},
  doi          = {10.1371/journal.pcbi.1007642.s001},
  year         = {2020},
}

@misc{9777,
  author       = {Grah, Rok and Friedlander, Tamar},
  publisher    = {Public Library of Science},
  title        = {{Maximizing crosstalk}},
  doi          = {10.1371/journal.pcbi.1007642.s002},
  year         = {2020},
}

@misc{9779,
  author       = {Grah, Rok and Friedlander, Tamar},
  publisher    = {Public Library of Science},
  title        = {{Distribution of crosstalk values}},
  doi          = {10.1371/journal.pcbi.1007642.s003},
  year         = {2020},
}

@misc{9780,
  abstract     = {PADREV : 4,4'-dimethoxy[1,1'-biphenyl]-2,2',5,5'-tetrol
Space Group: C 2 (5), Cell: a 24.488(16)Å b 5.981(4)Å c 3.911(3)Å, α 90° β 91.47(3)° γ 90°},
  author       = {Schlemmer, Werner and Nothdurft, Philipp and Petzold, Alina and Riess, Gisbert and Frühwirt, Philipp and Schmallegger, Max and Gescheidt-Demner, Georg and Fischer, Roland and Freunberger, Stefan Alexander and Kern, Wolfgang and Spirk, Stefan},
  publisher    = {CCDC},
  title        = {{CCDC 1991959: Experimental Crystal Structure Determination}},
  doi          = {10.5517/ccdc.csd.cc24vsrk},
  year         = {2020},
}

@misc{9798,
  abstract     = {Fitness interactions between mutations can influence a population’s evolution in many different ways. While epistatic effects are difficult to measure precisely, important information is captured by the mean and variance of log fitnesses for individuals carrying different numbers of mutations. We derive predictions for these quantities from a class of simple fitness landscapes, based on models of optimizing selection on quantitative traits. We also explore extensions to the models, including modular pleiotropy, variable effect sizes, mutational bias and maladaptation of the wild type. We illustrate our approach by reanalysing a large dataset of mutant effects in a yeast snoRNA. Though characterized by some large epistatic effects, these data give a good overall fit to the non-epistatic null model, suggesting that epistasis might have limited influence on the evolutionary dynamics in this system. We also show how the amount of epistasis depends on both the underlying fitness landscape and the distribution of mutations, and so is expected to vary in consistent ways between new mutations, standing variation and fixed mutations.},
  author       = {Fraisse, Christelle and Welch, John J.},
  publisher    = {Royal Society of London},
  title        = {{Simulation code for Fig S2 from the distribution of epistasis on simple fitness landscapes}},
  doi          = {10.6084/m9.figshare.7957472.v1},
  year         = {2020},
}

@misc{9799,
  abstract     = {Fitness interactions between mutations can influence a population’s evolution in many different ways. While epistatic effects are difficult to measure precisely, important information is captured by the mean and variance of log fitnesses for individuals carrying different numbers of mutations. We derive predictions for these quantities from a class of simple fitness landscapes, based on models of optimizing selection on quantitative traits. We also explore extensions to the models, including modular pleiotropy, variable effect sizes, mutational bias and maladaptation of the wild type. We illustrate our approach by reanalysing a large dataset of mutant effects in a yeast snoRNA. Though characterized by some large epistatic effects, these data give a good overall fit to the non-epistatic null model, suggesting that epistasis might have limited influence on the evolutionary dynamics in this system. We also show how the amount of epistasis depends on both the underlying fitness landscape and the distribution of mutations, and so is expected to vary in consistent ways between new mutations, standing variation and fixed mutations.},
  author       = {Fraisse, Christelle and Welch, John J.},
  publisher    = {Royal Society of London},
  title        = {{Simulation code for Fig S1 from the distribution of epistasis on simple fitness landscapes}},
  doi          = {10.6084/m9.figshare.7957469.v1},
  year         = {2020},
}

@misc{9814,
  abstract     = {Data and mathematica notebooks for plotting figures from Language learning with communication between learners},
  author       = {Ibsen-Jensen, Rasmus and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin},
  publisher    = {Royal Society},
  title        = {{Data and mathematica notebooks for plotting figures from Language acquisition with communication between learners}},
  doi          = {10.6084/m9.figshare.5973013.v1},
  year         = {2020},
}

@article{5681,
  abstract     = {We introduce dynamically warping grids for adaptive liquid simulation. Our primary contributions are a strategy for dynamically deforming regular grids over the course of a simulation and a method for efficiently utilizing these deforming grids for liquid simulation. Prior work has shown that unstructured grids are very effective for adaptive fluid simulations. However, unstructured grids often lead to complicated implementations and a poor cache hit rate due to inconsistent memory access. Regular grids, on the other hand, provide a fast, fixed memory access pattern and straightforward implementation. Our method combines the advantages of both: we leverage the simplicity of regular grids while still achieving practical and controllable spatial adaptivity. We demonstrate that our method enables adaptive simulations that are fast, flexible, and robust to null-space issues. At the same time, our method is simple to implement and takes advantage of existing highly-tuned algorithms.},
  author       = {Ibayashi, Hikaru and Wojtan, Christopher J and Thuerey, Nils and Igarashi, Takeo and Ando, Ryoichi},
  issn         = {1941-0506},
  journal      = {IEEE Transactions on Visualization and Computer Graphics},
  number       = {6},
  pages        = {2288--2302},
  publisher    = {IEEE},
  title        = {{Simulating liquids on dynamically warping grids}},
  doi          = {10.1109/TVCG.2018.2883628},
  volume       = {26},
  year         = {2020},
}

@article{6358,
  abstract     = {We study dynamical optimal transport metrics between density matrices associated to symmetric Dirichlet forms on finite-dimensional C∗-algebras. Our setting covers arbitrary skew-derivations and it provides a unified framework that simultaneously generalizes recently constructed transport metrics for Markov chains, Lindblad equations, and the Fermi Ornstein–Uhlenbeck semigroup. We develop a non-commutative differential calculus that allows us to obtain non-commutative Ricci curvature bounds, logarithmic Sobolev inequalities, transport-entropy inequalities, and spectral gap estimates.},
  author       = {Carlen, Eric A. and Maas, Jan},
  issn         = {1572-9613},
  journal      = {Journal of Statistical Physics},
  number       = {2},
  pages        = {319--378},
  publisher    = {Springer Nature},
  title        = {{Non-commutative calculus, optimal transport and functional inequalities in dissipative quantum systems}},
  doi          = {10.1007/s10955-019-02434-w},
  volume       = {178},
  year         = {2020},
}

@article{6359,
  abstract     = {The strong rate of convergence of the Euler-Maruyama scheme for nondegenerate SDEs with irregular drift coefficients is considered. In the case of α-Hölder drift in the recent literature the rate α/2 was proved in many related situations. By exploiting the regularising effect of the noise more efficiently, we show that the rate is in fact arbitrarily close to 1/2 for all α>0. The result extends to Dini continuous coefficients, while in d=1 also to all bounded measurable coefficients.},
  author       = {Dareiotis, Konstantinos and Gerencsér, Máté},
  issn         = {1083-6489},
  journal      = {Electronic Journal of Probability},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{On the regularisation of the noise for the Euler-Maruyama scheme with irregular drift}},
  doi          = {10.1214/20-EJP479},
  volume       = {25},
  year         = {2020},
}

@article{6488,
  abstract     = {We prove a central limit theorem for the difference of linear eigenvalue statistics of a sample covariance matrix $\widetilde{W}$ and its minor $W$. We find that the fluctuation of this difference is much smaller than those of the individual linear statistics, as a consequence of the strong correlation between the eigenvalues of $\widetilde{W}$ and $W$. Our result identifies the fluctuation of the spatial derivative of the approximate Gaussian field in the recent paper by Dumitriu and Paquette. Unlike in a similar result for Wigner matrices, for sample covariance matrices, the fluctuation may entirely vanish.},
  author       = {Cipolloni, Giorgio and Erdős, László},
  issn         = {2010-3271},
  journal      = {Random Matrices: Theory and Application},
  number       = {3},
  publisher    = {World Scientific Publishing},
  title        = {{Fluctuations for differences of linear eigenvalue statistics for sample covariance matrices}},
  doi          = {10.1142/S2010326320500069},
  volume       = {9},
  year         = {2020},
}

@article{6563,
  abstract     = {This paper presents two algorithms. The first decides the existence of a pointed homotopy between given simplicial maps $f, g: X \to Y$, and the second computes the group $[\Sigma X, Y]_*$ of pointed homotopy classes of maps from a suspension; in both cases, the target $Y$ is assumed simply connected. More generally, these algorithms work relative to $A \subseteq X$.},
  author       = {Filakovský, Marek and Vokřínek, Lukáš},
  issn         = {1615-3383},
  journal      = {Foundations of Computational Mathematics},
  pages        = {311--330},
  publisher    = {Springer Nature},
  title        = {{Are two given maps homotopic? An algorithmic viewpoint}},
  doi          = {10.1007/s10208-019-09419-x},
  volume       = {20},
  year         = {2020},
}

@article{6593,
  abstract     = {We consider the monotone variational inequality problem in a Hilbert space and describe a projection-type method with inertial terms under the following properties: (a) The method generates a strongly convergent iteration sequence; (b) The method requires, at each iteration, only one projection onto the feasible set and two evaluations of the operator; (c) The method is designed for variational inequality for which the underline operator is monotone and uniformly continuous; (d) The method includes an inertial term. The latter is also shown to speed up the convergence in our numerical results. A comparison with some related methods is given and indicates that the new method is promising.},
  author       = {Shehu, Yekini and Li, Xiao-Huan and Dong, Qiao-Li},
  issn         = {1572-9265},
  journal      = {Numerical Algorithms},
  pages        = {365--388},
  publisher    = {Springer Nature},
  title        = {{An efficient projection-type method for monotone variational inequalities in Hilbert spaces}},
  doi          = {10.1007/s11075-019-00758-y},
  volume       = {84},
  year         = {2020},
}

@article{6649,
  abstract     = {While Hartree–Fock theory is well established as a fundamental approximation for interacting fermions, it has been unclear how to describe corrections to it due to many-body correlations. In this paper we start from the Hartree–Fock state given by plane waves and introduce collective particle–hole pair excitations. These pairs can be approximately described by a bosonic quadratic Hamiltonian. We use Bogoliubov theory to construct a trial state yielding a rigorous Gell-Mann–Brueckner–type upper bound to the ground state energy. Our result justifies the random-phase approximation in the mean-field scaling regime, for repulsive, regular interaction potentials.
},
  author       = {Benedikter, Niels P and Nam, Phan Thành and Porta, Marcello and Schlein, Benjamin and Seiringer, Robert},
  issn         = {1432-0916},
  journal      = {Communications in Mathematical Physics},
  pages        = {2097--2150},
  publisher    = {Springer Nature},
  title        = {{Optimal upper bound for the correlation energy of a Fermi gas in the mean-field regime}},
  doi          = {10.1007/s00220-019-03505-5},
  volume       = {374},
  year         = {2020},
}

@article{6748,
  abstract     = {Fitting a function by using linear combinations of a large number N of `simple' components is one of the most fruitful ideas in statistical learning. This idea lies at the core of a variety of methods, from two-layer neural networks to kernel regression, to boosting. In general, the resulting risk minimization problem is non-convex and is solved by gradient descent or its variants. Unfortunately, little is known about global convergence properties of these approaches.
Here we consider the problem of learning a concave function f on a compact convex domain Ω⊆ℝ^d, using linear combinations of `bump-like' components (neurons). The parameters to be fitted are the centers of N bumps, and the resulting empirical risk minimization problem is highly non-convex. We prove that, in the limit in which the number of neurons diverges, the evolution of gradient descent converges to a Wasserstein gradient flow in the space of probability distributions over Ω. Further, when the bump width δ tends to 0, this gradient flow has a limit which is a viscous porous medium equation. Remarkably, the cost function optimized by this gradient flow exhibits a special property known as displacement convexity, which implies exponential convergence rates for N→∞, δ→0. Surprisingly, this asymptotic theory appears to capture well the behavior for moderate values of δ,N. Explaining this phenomenon, and understanding the dependence on δ,N in a quantitative manner remains an outstanding challenge.},
  author       = {Javanmard, Adel and Mondelli, Marco and Montanari, Andrea},
  issn         = {1941-7330},
  journal      = {Annals of Statistics},
  number       = {6},
  pages        = {3619--3642},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{Analysis of a two-layer neural network via displacement convexity}},
  doi          = {10.1214/20-AOS1945},
  volume       = {48},
  year         = {2020},
}

