@article{11573,
  abstract     = {We present dynamical measurements from the KMOS (K-band multi-object spectrograph) Deep Survey (KDS), which comprises 77 typical star-forming galaxies at z ≃ 3.5 in the mass range 9.0 < log (M⋆/M⊙) < 10.5. These measurements constrain the internal dynamics, the intrinsic velocity dispersions (σint) and rotation velocities (VC) of galaxies in the high-redshift Universe. The mean velocity dispersion of the galaxies in our sample is σint=70.8+3.3−3.1kms−1⁠, revealing that the increasing average σint with increasing redshift, reported for z ≲ 2, continues out to z ≃ 3.5. Only 36 ± 8 per cent of our galaxies are rotation-dominated (VC/σint > 1), with the sample average VC/σint value much smaller than at lower redshift. After carefully selecting comparable star-forming samples at multiple epochs, we find that the rotation-dominated fraction evolves with redshift with a z−0.2 dependence. The rotation-dominated KDS galaxies show no clear offset from the local rotation velocity–stellar mass (i.e. VC–M⋆) relation, although a smaller fraction of the galaxies are on the relation due to the increase in the dispersion-dominated fraction. These observations are consistent with a simple equilibrium model picture, in which random motions are boosted in high-redshift galaxies by a combination of the increasing gas fractions, accretion efficiency, specific star formation rate and stellar feedback and which may provide significant pressure support against gravity on the galactic disc scale.},
  author       = {Turner, O. J. and Cirasuolo, M. and Harrison, C. M. and McLure, R. J. and Dunlop, J. S. and Swinbank, A. M. and Johnson, H. L. and Sobral, D. and Matthee, Jorryt J and Sharples, R. M.},
  issn         = {1365-2966},
  journal      = {Monthly Notices of the Royal Astronomical Society},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: high-redshift, galaxies: kinematics and dynamics},
  number       = {2},
  pages        = {1280--1320},
  publisher    = {Oxford University Press},
  title        = {{The KMOS Deep Survey (KDS) – I. Dynamical measurements of typical star-forming galaxies at z ≃ 3.5}},
  doi          = {10.1093/mnras/stx1366},
  volume       = {471},
  year         = {2017},
}

@article{1159,
  abstract     = {Auxin steers numerous physiological processes in plants, making the tight control of its endogenous levels and spatiotemporal distribution a necessity. This regulation is achieved by different mechanisms, including auxin biosynthesis, metabolic conversions, degradation, and transport. Here, we introduce cis-cinnamic acid (c-CA) as a novel and unique addition to a small group of endogenous molecules affecting in planta auxin concentrations. c-CA is the photo-isomerization product of the phenylpropanoid pathway intermediate trans-CA (t-CA). When grown on c-CA-containing medium, an evolutionary diverse set of plant species were shown to exhibit phenotypes characteristic for high auxin levels, including inhibition of primary root growth, induction of root hairs, and promotion of adventitious and lateral rooting. By molecular docking and receptor binding assays, we showed that c-CA itself is neither an auxin nor an anti-auxin, and auxin profiling data revealed that c-CA does not significantly interfere with auxin biosynthesis. Single cell-based auxin accumulation assays showed that c-CA, and not t-CA, is a potent inhibitor of auxin efflux. Auxin signaling reporters detected changes in spatiotemporal distribution of the auxin response along the root of c-CA-treated plants, and long-distance auxin transport assays showed no inhibition of rootward auxin transport. Overall, these results suggest that the phenotypes of c-CA-treated plants are the consequence of a local change in auxin accumulation, induced by the inhibition of auxin efflux. This work reveals a novel mechanism how plants may regulate auxin levels and adds a novel, naturally occurring molecule to the chemical toolbox for the studies of auxin homeostasis.},
  author       = {Steenackers, Ward and Klíma, Petr and Quareshy, Mussa and Cesarino, Igor and Kumpf, Robert and Corneillie, Sander and Araújo, Pedro and Viaene, Tom and Goeminne, Geert and Nowack, Moritz and Ljung, Karin and Friml, Jirí and Blakeslee, Joshua and Novák, Ondřej and Zažímalová, Eva and Napier, Richard and Boerjan, Wout and Vanholme, Bartel},
  issn         = {0032-0889},
  journal      = {Plant Physiology},
  number       = {1},
  pages        = {552--565},
  publisher    = {American Society of Plant Biologists},
  title        = {{Cis-cinnamic acid is a novel natural auxin efflux inhibitor that promotes lateral root formation}},
  doi          = {10.1104/pp.16.00943},
  volume       = {173},
  year         = {2017},
}

@article{1160,
  abstract     = {We investigate fundamental nonlinear dynamics of ferrofluidic Taylor-Couette flow - flow confined be-tween two concentric independently rotating cylinders - consider small aspect ratio by solving the ferro-hydrodynamical equations, carrying out systematic bifurcation analysis. Without magnetic field, we find steady flow patterns, previously observed with a simple fluid, such as those containing normal one- or two vortex cells, as well as anomalous one-cell and twin-cell flow states. However, when a symmetry-breaking transverse magnetic field is present, all flow states exhibit stimulated, finite two-fold mode. Various bifurcations between steady and unsteady states can occur, corresponding to the transitions between the two-cell and one-cell states. While unsteady, axially oscillating flow states can arise, we also detect the emergence of new unsteady flow states. In particular, we uncover two new states: one contains only the azimuthally oscillating solution in the configuration of the twin-cell flow state, and an-other a rotating flow state. Topologically, these flow states are a limit cycle and a quasiperiodic solution on a two-torus, respectively. Emergence of new flow states in addition to observed ones with classical fluid, indicates that richer but potentially more controllable dynamics in ferrofluidic flows, as such flow states depend on the external magnetic field.},
  author       = {Altmeyer, Sebastian and Do, Younghae and Lai, Ying},
  issn         = {2045-2322},
  journal      = {Scientific Reports},
  publisher    = {Nature Publishing Group},
  title        = {{Dynamics of ferrofluidic flow in the Taylor-Couette system with a small aspect ratio}},
  doi          = {10.1038/srep40012},
  volume       = {7},
  year         = {2017},
}

@article{1161,
  abstract     = {Coordinated changes of cell shape are often the result of the excitable, wave-like dynamics of the actin cytoskeleton. New work shows that, in migrating cells, protrusion waves arise from mechanochemical crosstalk between adhesion sites, membrane tension and the actin protrusive machinery.},
  author       = {Müller, Jan and Sixt, Michael K},
  issn         = {0960-9822},
  journal      = {Current Biology},
  number       = {1},
  pages        = {R24--R25},
  publisher    = {Cell Press},
  title        = {{Cell migration: Making the waves}},
  doi          = {10.1016/j.cub.2016.11.035},
  volume       = {27},
  year         = {2017},
}

@article{1162,
  abstract     = {Selected universal experimental properties of high-temperature superconducting (HTS) cuprates have been singled out in the last decade. One of the pivotal challenges in this field is the designation of a consistent interpretation framework within which we can describe quantitatively the universal features of those systems. Here we analyze in a detailed manner the principal experimental data and compare them quantitatively with the approach based on a single-band model of strongly correlated electrons supplemented with strong antiferromagnetic (super)exchange interaction (the so-called t−J−U model). The model rationale is provided by estimating its microscopic parameters on the basis of the three-band approach for the Cu-O plane. We use our original full Gutzwiller wave-function solution by going beyond the renormalized mean-field theory (RMFT) in a systematic manner. Our approach reproduces very well the observed hole doping (δ) dependence of the kinetic-energy gain in the superconducting phase, one of the principal non-Bardeen-Cooper-Schrieffer features of the cuprates. The calculated Fermi velocity in the nodal direction is practically δ-independent and its universal value agrees very well with that determined experimentally. Also, a weak doping dependence of the Fermi wave vector leads to an almost constant value of the effective mass in a pure superconducting phase which is both observed in experiment and reproduced within our approach. An assessment of the currently used models (t−J, Hubbard) is carried out and the results of the canonical RMFT as a zeroth-order solution are provided for comparison to illustrate the necessity of the introduced higher-order contributions.},
  author       = {Spałek, Jozef and Zegrodnik, Michał and Kaczmarczyk, Jan},
  issn         = {2469-9950},
  journal      = {Physical Review B - Condensed Matter and Materials Physics},
  number       = {2},
  publisher    = {American Physical Society},
  title        = {{Universal properties of high temperature superconductors from real space pairing t-J-U model and its quantitative comparison with experiment}},
  doi          = {10.1103/PhysRevB.95.024506},
  volume       = {95},
  year         = {2017},
}

@article{1163,
  abstract     = {We investigate the effect of the electron-hole (e-h) symmetry breaking on d-wave superconductivity induced by non-local effects of correlations in the generalized Hubbard model. The symmetry breaking is introduced in a two-fold manner: by the next-to-nearest neighbor hopping of electrons and by the charge-bond interaction - the off-diagonal term of the Coulomb potential. Both terms lead to a pronounced asymmetry of the superconducting order parameter. The next-to-nearest neighbor hopping enhances superconductivity for h-doping, while diminishes it for e-doping. The charge-bond interaction alone leads to the opposite effect and, additionally, to the kinetic-energy gain upon condensation in the underdoped regime. With both terms included, with similar amplitudes, the height of the superconducting dome and the critical doping remain in favor of h-doping. The influence of the charge-bond interaction on deviations from symmetry of the shape of the gap at the Fermi surface in the momentum space is briefly discussed.},
  author       = {Wysokiński, Marcin and Kaczmarczyk, Jan},
  issn         = {0953-8984},
  journal      = {Journal of Physics: Condensed Matter},
  number       = {8},
  publisher    = {IOP Publishing},
  title        = {{Unconventional superconductivity in generalized Hubbard model: role of electron–hole symmetry breaking terms}},
  doi          = {10.1088/1361-648X/aa532f},
  volume       = {29},
  year         = {2017},
}

@unpublished{11633,
  abstract     = {Our understanding of stars through asteroseismic data analysis is limited by our ability to take advantage of the huge amount of observed stars provided by space missions such as CoRoT, Kepler , K2, and soon TESS and PLATO. Global seismic pipelines provide global stellar parameters such as mass and radius using the mean seismic parameters, as well as the effective temperature. These pipelines are commonly used automatically on thousands of stars observed by K2 for 3 months (and soon TESS for at least ∼ 1 month). However, pipelines are not immune from misidentifying noise peaks and stellar oscillations. Therefore, new validation techniques are required to assess the quality of these results. We present a new metric called FliPer (Flicker in Power), which takes into account the average variability at all measured time scales. The proper calibration of FliPer enables us to obtain good estimations of global stellar parameters such as surface gravity that are robust against the influence of noise peaks and hence are an excellent way to find faults in asteroseismic pipelines.},
  author       = {Bugnet, Lisa Annabelle and Garcia, R. A. and Davies, G. R. and Mathur, S. and Corsaro, E.},
  booktitle    = {arXiv},
  note         = {arXiv:1711.02890},
  keywords     = {asteroseismology - methods, data analysis - stars, oscillations},
  title        = {{FliPer: Checking the reliability of global seismic parameters from automatic pipelines}},
  doi          = {10.48550/arXiv.1711.02890},
  year         = {2017},
}

@inproceedings{11651,
  abstract     = {Diffusions and related random walk procedures are of central importance in many areas of machine learning, data analysis, and applied mathematics. Because they spread mass agnostically at each step in an iterative manner, they can sometimes spread mass “too aggressively,” thereby failing to find the “right” clusters. We introduce a novel Capacity Releasing Diffusion (CRD) Process, which is both faster and stays more local than the classical spectral diffusion process. As an application, we use our CRD Process to develop an improved local algorithm for graph clustering. Our local graph clustering method can find local clusters in a model of clustering where one begins the CRD Process in a cluster whose vertices are connected better internally than externally by an O(log2n) factor, where n is the number of nodes in the cluster. Thus, our CRD Process is the first local graph clustering algorithm that is not subject to the well-known quadratic Cheeger barrier. Our result requires a certain smoothness condition, which we expect to be an artifact of our analysis. Our empirical evaluation demonstrates improved results, in particular for realistic social graphs where there are moderately good—but not very good—clusters.},
  author       = {Wang, Di and Fountoulakis, Kimon and Henzinger, Monika H and Mahoney, Michael W. and Rao, Satish},
  booktitle    = {Proceedings of the 34th International Conference on Machine Learning},
  issn         = {2640-3498},
  location     = {Sydney, Australia},
  pages        = {3598--3607},
  publisher    = {ML Research Press},
  title        = {{Capacity releasing diffusion for speed and locality}},
  volume       = {70},
  year         = {2017},
}

@article{11665,
  abstract     = {We study the problem of maintaining a breadth-first spanning tree (BFS tree) in partially dynamic distributed networks modeling a sequence of either failures or additions of communication links (but not both). We present deterministic (1+ϵ)-approximation algorithms whose amortized time (over some number of link changes) is sublinear in D, the maximum diameter of the network.

Our technique also leads to a deterministic (1+ϵ)-approximate incremental algorithm for single-source shortest paths in the sequential (usual RAM) model. Prior to our work, the state of the art was the classic exact algorithm of Even and Shiloach (1981), which is optimal under some assumptions (Roditty and Zwick 2011; Henzinger et al. 2015). Our result is the first to show that, in the incremental setting, this bound can be beaten in certain cases if some approximation is allowed.},
  author       = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon},
  issn         = {1549-6333},
  journal      = {ACM Transactions on Algorithms},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{Sublinear-time maintenance of breadth-first spanning trees in partially dynamic networks}},
  doi          = {10.1145/3146550},
  volume       = {13},
  year         = {2017},
}

@article{11676,
  abstract     = {We study the problem of maximizing a monotone submodular function with viability constraints. This problem originates from computational biology, where we are given a phylogenetic tree over a set of species and a directed graph, the so-called food web, encoding viability constraints between these species. These food webs usually have constant depth. The goal is to select a subset of k species that satisfies the viability constraints and has maximal phylogenetic diversity. As this problem is known to be NP-hard, we investigate approximation algorithms. We present the first constant factor approximation algorithm if the depth is constant. Its approximation ratio is (1−1/√e). This algorithm not only applies to phylogenetic trees with viability constraints but for arbitrary monotone submodular set functions with viability constraints. Second, we show that there is no (1−1/e+ϵ)-approximation algorithm for our problem setting (even for additive functions) and that there is no approximation algorithm for a slight extension of this setting.},
  author       = {Dvořák, Wolfgang and Henzinger, Monika H and Williamson, David P.},
  issn         = {1432-0541},
  journal      = {Algorithmica},
  keywords     = {Approximation algorithms, Submodular functions, Phylogenetic diversity, Viability constraints},
  number       = {1},
  pages        = {152--172},
  publisher    = {Springer Nature},
  title        = {{Maximizing a submodular function with viability constraints}},
  doi          = {10.1007/s00453-015-0066-y},
  volume       = {77},
  year         = {2017},
}

@article{1168,
  abstract     = {Optimum experimental design theory has recently been extended for parameter estimation in copula models. The use of these models allows one to gain in flexibility by considering the model parameter set split into marginal and dependence parameters. However, this separation also leads to the natural issue of estimating only a subset of all model parameters. In this work, we treat this problem with the application of the (Formula presented.)-optimality to copula models. First, we provide an extension of the corresponding equivalence theory. Then, we analyze a wide range of flexible copula models to highlight the usefulness of (Formula presented.)-optimality in many possible scenarios. Finally, we discuss how the usage of the introduced design criterion also relates to the more general issue of copula selection and optimal design for model discrimination.},
  author       = {Perrone, Elisa and Rappold, Andreas and Müller, Werner},
  journal      = {Statistical Methods and Applications},
  number       = {3},
  pages        = {403--418},
  publisher    = {Springer},
  title        = {{$D_s$-optimality in copula models}},
  doi          = {10.1007/s10260-016-0375-6},
  volume       = {26},
  year         = {2017},
}

@article{1169,
  abstract     = {Dispersal is a crucial factor in natural evolution, since it determines the habitat experienced by any population and defines the spatial scale of interactions between individuals. There is compelling evidence for systematic differences in dispersal characteristics within the same population, i.e., genotype-dependent dispersal. The consequences of genotype-dependent dispersal on other evolutionary phenomena, however, are poorly understood. In this article we investigate the effect of genotype-dependent dispersal on spatial gene frequency patterns, using a generalization of the classical diffusion model of selection and dispersal. Dispersal is characterized by the variance of dispersal (diffusion coefficient) and the mean displacement (directional advection term). We demonstrate that genotype-dependent dispersal may change the qualitative behavior of Fisher waves, which change from being “pulled” to being “pushed” wave fronts as the discrepancy in dispersal between genotypes increases. The speed of any wave is partitioned into components due to selection, genotype-dependent variance of dispersal, and genotype-dependent mean displacement. We apply our findings to wave fronts maintained by selection against heterozygotes. Furthermore, we identify a benefit of increased variance of dispersal, quantify its effect on the speed of the wave, and discuss the implications for the evolution of dispersal strategies.},
  author       = {Novak, Sebastian and Kollár, Richard},
  issn         = {0016-6731},
  journal      = {Genetics},
  number       = {1},
  pages        = {367--374},
  publisher    = {Genetics Society of America},
  title        = {{Spatial gene frequency waves under genotype dependent dispersal}},
  doi          = {10.1534/genetics.116.193946},
  volume       = {205},
  year         = {2017},
}

@article{1173,
  abstract     = {We introduce the Voronoi functional of a triangulation of a finite set of points in the Euclidean plane and prove that among all geometric triangulations of the point set, the Delaunay triangulation maximizes the functional. This result neither extends to topological triangulations in the plane nor to geometric triangulations in three and higher dimensions.},
  author       = {Edelsbrunner, Herbert and Glazyrin, Alexey and Musin, Oleg and Nikitenko, Anton},
  issn         = {0209-9683},
  journal      = {Combinatorica},
  number       = {5},
  pages        = {887--910},
  publisher    = {Springer},
  title        = {{The Voronoi functional is maximized by the Delaunay triangulation in the plane}},
  doi          = {10.1007/s00493-016-3308-y},
  volume       = {37},
  year         = {2017},
}

@inproceedings{1174,
  abstract     = {Security of cryptographic applications is typically defined by security games. The adversary, within certain resources, cannot win with probability much better than 0 (for unpredictability applications, like one-way functions) or much better than 1/2 (indistinguishability applications for instance encryption schemes). In so called squared-friendly applications the winning probability of the adversary, for different values of the application secret randomness, is not only close to 0 or 1/2 on average, but also concentrated in the sense that its second central moment is small. The class of squared-friendly applications, which contains all unpredictability applications and many indistinguishability applications, is particularly important for key derivation. Barak et al. observed that for square-friendly applications one can beat the ``RT-bound'', extracting secure keys with significantly smaller entropy loss. In turn Dodis and Yu showed that in squared-friendly applications one can directly use a ``weak'' key, which has only high entropy, as a secure key. In this paper we give sharp lower bounds on square security assuming security for ``weak'' keys. We show that any application which is either (a) secure with weak keys or (b) allows for entropy savings for keys derived by universal hashing, must be square-friendly. Quantitatively, our lower bounds match the positive results of Dodis and Yu and Barak et al. (TCC'13, CRYPTO'11) Hence, they can be understood as a general characterization of squared-friendly applications. While the positive results on squared-friendly applications where derived by one clever application of the Cauchy-Schwarz Inequality, for tight lower bounds we need more machinery. In our approach we use convex optimization techniques and some theory of circular matrices.},
  author       = {Skórski, Maciej},
  issn         = {1868-8969},
  location     = {Hannover, Germany},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Lower bounds on key derivation for square-friendly applications}},
  doi          = {10.4230/LIPIcs.STACS.2017.57},
  volume       = {66},
  year         = {2017},
}

@inproceedings{1175,
  abstract     = {We study space complexity and time-space trade-offs with a focus not on peak memory usage but on overall memory consumption throughout the computation.  Such a cumulative space measure was introduced for the computational model of parallel black pebbling by [Alwen and Serbinenko ’15] as a tool for obtaining results in cryptography. We consider instead the non- deterministic black-white pebble game and prove optimal cumulative space lower bounds and trade-offs, where in order to minimize pebbling time the space has to remain large during a significant fraction of the pebbling. We also initiate the study of cumulative space in proof complexity, an area where other space complexity measures have been extensively studied during the last 10–15 years. Using and extending the connection between proof complexity and pebble games in [Ben-Sasson and Nordström ’08, ’11] we obtain several strong cumulative space results for (even parallel versions of) the resolution proof system, and outline some possible future directions of study of this, in our opinion, natural and interesting space measure.},
  author       = {Alwen, Joel F and De Rezende, Susanna and Nordström, Jakob and Vinyals, Marc},
  editor       = {Papadimitriou, Christos},
  issn         = {1868-8969},
  location     = {Berkeley, CA, United States},
  pages        = {38:1--38:21},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Cumulative space in black-white pebbling and resolution}},
  doi          = {10.4230/LIPIcs.ITCS.2017.38},
  volume       = {67},
  year         = {2017},
}

@inproceedings{1176,
  abstract     = {The algorithm Argon2i-B of Biryukov, Dinu and Khovratovich is currently being considered by the IRTF (Internet Research Task Force) as a new de-facto standard for password hashing. An older version (Argon2i-A) of the same algorithm was chosen as the winner of the recent Password Hashing Competition. An important competitor to Argon2i-B is the recently introduced Balloon Hashing (BH) algorithm of Corrigan-Gibs, Boneh and Schechter. A key security desiderata for any such algorithm is that evaluating it (even using a custom device) requires a large amount of memory amortized across multiple instances. Alwen and Blocki (CRYPTO 2016) introduced a class of theoretical attacks against Argon2i-A and BH. While these attacks yield large asymptotic reductions in the amount of memory, it was not, a priori, clear if (1) they can be extended to the newer Argon2i-B, (2) the attacks are effective on any algorithm for practical parameter ranges (e.g., 1GB of memory) and (3) if they can be effectively instantiated against any algorithm under realistic hardware constrains. In this work we answer all three of these questions in the affirmative for all three algorithms. This is also the first work to analyze the security of Argon2i-B. In more detail, we extend the theoretical attacks of Alwen and Blocki (CRYPTO 2016) to the recent Argon2i-B proposal demonstrating severe asymptotic deficiencies in its security. Next we introduce several novel heuristics for improving the attack's concrete memory efficiency even when on-chip memory bandwidth is bounded. We then simulate our attacks on randomly sampled Argon2i-A, Argon2i-B and BH instances and measure the resulting memory consumption for various practical parameter ranges and for a variety of upperbounds on the amount of parallelism available to the attacker. Finally we describe, implement, and test a new heuristic for applying the Alwen-Blocki attack to functions employing a technique developed by Corrigan-Gibs et al. 
for improving concrete security of memory-hard functions. We analyze the collected data and show the effects various parameters have on the memory consumption of the attack. In particular, we can draw several interesting conclusions about the level of security provided by these functions. · For the Alwen-Blocki attack to fail against practical memory parameters, Argon2i-B must be instantiated with more than 10 passes on memory - beyond the "paranoid" parameter setting in the current IRTF proposal. · The technique of Corrigan-Gibs for improving security can also be overcome by the Alwen-Blocki attack under realistic hardware constraints. · On a positive note, both the asymptotic and concrete security of Argon2i-B seem to improve on that of Argon2i-A.},
  author       = {Alwen, Joel F and Blocki, Jeremiah},
  isbn         = {978-150905761-0},
  location     = {Paris, France},
  publisher    = {IEEE},
  title        = {{Towards practical attacks on Argon2i and balloon hashing}},
  doi          = {10.1109/EuroSP.2017.47},
  year         = {2017},
}

@inproceedings{11772,
  abstract     = {A dynamic graph algorithm is a data structure that supports operations on dynamically changing graphs.},
  author       = {Henzinger, Monika H},
  booktitle    = {44th International Conference on Current Trends in Theory and Practice of Computer Science},
  isbn         = {9783319731162},
  issn         = {0302-9743},
  location     = {Krems, Austria},
  pages        = {40--44},
  publisher    = {Springer Nature},
  title        = {{The state of the art in dynamic graph algorithms}},
  doi          = {10.1007/978-3-319-73117-9_3},
  volume       = {10706},
  year         = {2017},
}

@inproceedings{1178,
  abstract     = {For any pair (X, Z) of correlated random variables we can think of Z as a randomized function of X. If the domain of Z is small, one can make this function computationally efficient by allowing it to be only approximately correct. In folklore this problem is known as simulating auxiliary inputs. This idea of simulating auxiliary information turns out to be a very usefull tool, finding applications in complexity theory, cryptography, pseudorandomness and zero-knowledge. In this paper we revisit this problem, achieving the following results: (a) We present a novel boosting algorithm for constructing the simulator. This boosting proof is of independent interest, as it shows how to handle “negative mass” issues when constructing probability measures by shifting distinguishers in descent algorithms. Our technique essentially fixes the flaw in the TCC’14 paper “How to Fake Auxiliary Inputs”. (b) The complexity of our simulator is better than in previous works, including results derived from the uniform min-max theorem due to Vadhan and Zheng. To achieve (s,ϵ) -indistinguishability we need the complexity O(s⋅25ℓϵ−2) in time/circuit size, which improve previous bounds by a factor of ϵ−2. In particular, with we get meaningful provable security for the EUROCRYPT’09 leakage-resilient stream cipher instantiated with a standard 256-bit block cipher, like },
  author       = {Skórski, Maciej},
  pages        = {159--179},
  publisher    = {Springer},
  title        = {{Simulating auxiliary inputs, revisited}},
  doi          = {10.1007/978-3-662-53641-4_7},
  volume       = {9985},
  year         = {2017},
}

@article{1180,
  abstract     = {In this article we define an algebraic vertex of a generalized polyhedron and show that the set of algebraic vertices is the smallest set of points needed to define the polyhedron. We prove that the indicator function of a generalized polytope P is a linear combination of indicator functions of simplices whose vertices are algebraic vertices of P. We also show that the indicator function of any generalized polyhedron is a linear combination, with integer coefficients, of indicator functions of cones with apices at algebraic vertices and line-cones. The concept of an algebraic vertex is closely related to the Fourier–Laplace transform. We show that a point v is an algebraic vertex of a generalized polyhedron P if and only if the tangent cone of P, at v, has non-zero Fourier–Laplace transform.},
  author       = {Akopyan, Arseniy and Bárány, Imre and Robins, Sinai},
  issn         = {0001-8708},
  journal      = {Advances in Mathematics},
  pages        = {627--644},
  publisher    = {Academic Press},
  title        = {{Algebraic vertices of non-convex polyhedra}},
  doi          = {10.1016/j.aim.2016.12.026},
  volume       = {308},
  year         = {2017},
}

@inproceedings{11829,
  abstract     = {In recent years it has become popular to study dynamic problems in a sensitivity setting: Instead of allowing for an arbitrary sequence of updates, the sensitivity model only allows to apply batch updates of small size to the original input data. The sensitivity model is particularly appealing since recent strong conditional lower bounds ruled out fast algorithms for many dynamic problems, such as shortest paths, reachability, or subgraph connectivity.

In this paper we prove conditional lower bounds for these and additional problems in a sensitivity setting. For example, we show that under the Boolean Matrix Multiplication (BMM) conjecture combinatorial algorithms cannot compute the (4/3-\varepsilon)-approximate diameter of an undirected unweighted dense graph with truly subcubic preprocessing time and truly subquadratic update/query time. This result is surprising since in the static setting it is not clear whether a reduction from BMM to diameter is possible. We further show under the BMM conjecture that many problems, such as reachability or approximate shortest paths, cannot be solved faster than by recomputation from scratch even after only one or two edge insertions. We extend our reduction from BMM to Diameter to give a reduction from All Pairs Shortest Paths to Diameter under one deletion in weighted graphs. This is intriguing, as in the static setting it is a big open problem whether Diameter is as hard as APSP. We further get a nearly tight lower bound for shortest paths after two edge deletions based on the APSP conjecture. We give more lower bounds under the Strong Exponential Time Hypothesis. Many of our lower bounds also hold for static oracle data structures where no sensitivity is required.

Finally, we give the first algorithm for the (1+\varepsilon)-approximate radius, diameter, and eccentricity problems in directed or undirected unweighted graphs in case of single edges failures. The algorithm has a truly subcubic running time for graphs with a truly subquadratic number of edges; it is tight w.r.t. the conditional lower bounds we obtain.},
  author       = {Henzinger, Monika H and Lincoln, Andrea and Neumann, Stefan and Vassilevska Williams, Virginia},
  booktitle    = {8th Innovations in Theoretical Computer Science Conference},
  isbn         = {9783959770293},
  issn         = {1868-8969},
  location     = {Berkley, CA, United States},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Conditional hardness for sensitivity problems}},
  doi          = {10.4230/LIPICS.ITCS.2017.26},
  volume       = {67},
  year         = {2017},
}

