@article{291,
  abstract     = {Over the past decade, the edge of chaos has proven to be a fruitful starting point for investigations of shear flows when the laminar base flow is linearly stable. Numerous computational studies of shear flows demonstrated the existence of states that separate laminar and turbulent regions of the state space. In addition, some studies determined invariant solutions that reside on this edge. In this paper, we study the unstable manifold of one such solution with the aid of continuous symmetry reduction, which we formulate here for the simultaneous quotiening of axial and azimuthal symmetries. Upon our investigation of the unstable manifold, we discover a previously unknown traveling-wave solution on the laminar-turbulent boundary with a relatively complex structure. By means of low-dimensional projections, we visualize different dynamical paths that connect these solutions to the turbulence. Our numerical experiments demonstrate that the laminar-turbulent boundary exhibits qualitatively different regions whose properties are influenced by the nearby invariant solutions.},
  author       = {Budanur, Nazmi B and Hof, Björn},
  journal      = {Physical Review Fluids},
  number       = {5},
  pages        = {054401},
  publisher    = {American Physical Society},
  title        = {{Complexity of the laminar-turbulent boundary in pipe flow}},
  doi          = {10.1103/PhysRevFluids.3.054401},
  volume       = {3},
  year         = {2018},
}

@article{292,
  abstract     = {Retina is a paradigmatic system for studying sensory encoding: the transformation of light into spiking activity of ganglion cells. The inverse problem, where stimulus is reconstructed from spikes, has received less attention, especially for complex stimuli that should be reconstructed “pixel-by-pixel”. We recorded around a hundred neurons from a dense patch in a rat retina and decoded movies of multiple small randomly-moving discs. We constructed nonlinear (kernelized and neural network) decoders that improved significantly over linear results. An important contribution to this was the ability of nonlinear decoders to reliably separate between neural responses driven by locally fluctuating light signals, and responses at locally constant light driven by spontaneous-like activity. This improvement crucially depended on the precise, non-Poisson temporal structure of individual spike trains, which originated in the spike-history dependence of neural responses. We propose a general principle by which downstream circuitry could discriminate between spontaneous and stimulus-driven activity based solely on higher-order statistical structure in the incoming spike trains.},
  author       = {Botella-Soler, Vicent and Deny, Stephane and Martius, Georg S and Marre, Olivier and Tkacik, Gasper},
  journal      = {PLoS Computational Biology},
  number       = {5},
  pages        = {e1006057},
  publisher    = {Public Library of Science},
  title        = {{Nonlinear decoding of a complex movie from the mammalian retina}},
  doi          = {10.1371/journal.pcbi.1006057},
  volume       = {14},
  year         = {2018},
}

@article{293,
  abstract     = {People sometimes make their admirable deeds and accomplishments hard to spot, such as by giving anonymously or avoiding bragging. Such ‘buried’ signals are hard to reconcile with standard models of signalling or indirect reciprocity, which motivate costly pro-social behaviour by reputational gains. To explain these phenomena, we design a simple game theory model, which we call the signal-burying game. This game has the feature that senders can bury their signal by deliberately reducing the probability of the signal being observed. If the signal is observed, however, it is identified as having been buried. We show under which conditions buried signals can be maintained, using static equilibrium concepts and calculations of the evolutionary dynamics. We apply our analysis to shed light on a number of otherwise puzzling social phenomena, including modesty, anonymous donations, subtlety in art and fashion, and overeagerness.},
  author       = {Hoffman, Moshe and Hilbe, Christian and Nowak, Martin},
  journal      = {Nature Human Behaviour},
  pages        = {397--404},
  publisher    = {Nature Publishing Group},
  title        = {{The signal-burying game can explain why we obscure positive traits and good deeds}},
  doi          = {10.1038/s41562-018-0354-z},
  volume       = {2},
  year         = {2018},
}

@article{294,
  abstract     = {We developed a method to calculate two-photon processes in quantum mechanics that replaces the infinite summation over the intermediate states by a perturbation expansion. This latter consists of a series of commutators that involve position, momentum, and Hamiltonian quantum operators. We analyzed several single- and many-particle cases for which a closed-form solution to the perturbation expansion exists, as well as more complicated cases for which a solution is found by convergence. Throughout the article, Rayleigh and Raman scattering are taken as examples of two-photon processes. The present method provides a clear distinction between the Thomson scattering, regarded as classical scattering, and quantum contributions. Such a distinction lets us derive general results concerning light scattering. Finally, possible extensions to the developed formalism are discussed.},
  author       = {Fratini, Filippo and Safari, Laleh and Amaro, Pedro and Santos, José},
  journal      = {Physical Review A - Atomic, Molecular, and Optical Physics},
  number       = {4},
  pages        = {043842},
  publisher    = {American Physical Society},
  title        = {{Two-photon processes based on quantum commutators}},
  doi          = {10.1103/PhysRevA.97.043842},
  volume       = {97},
  year         = {2018},
}

@article{295,
  abstract     = {We prove upper and lower bounds on the ground-state energy of the ideal two-dimensional anyon gas. Our bounds are extensive in the particle number, as for fermions, and linear in the statistics parameter α. The lower bounds extend to Lieb–Thirring inequalities for all anyons except bosons.},
  author       = {Lundholm, Douglas and Seiringer, Robert},
  journal      = {Letters in Mathematical Physics},
  number       = {11},
  pages        = {2523--2541},
  publisher    = {Springer},
  title        = {{Fermionic behavior of ideal anyons}},
  doi          = {10.1007/s11005-018-1091-y},
  volume       = {108},
  year         = {2018},
}

@article{296,
  abstract     = {The thermodynamic description of many-particle systems rests on the assumption of ergodicity, the ability of a system to explore all allowed configurations in the phase space. Recent studies on many-body localization have revealed the existence of systems that strongly violate ergodicity in the presence of quenched disorder. Here, we demonstrate that ergodicity can be weakly broken by a different mechanism, arising from the presence of special eigenstates in the many-body spectrum that are reminiscent of quantum scars in chaotic non-interacting systems. In the single-particle case, quantum scars correspond to wavefunctions that concentrate in the vicinity of unstable periodic classical trajectories. We show that many-body scars appear in the Fibonacci chain, a model with a constrained local Hilbert space that has recently been experimentally realized in a Rydberg-atom quantum simulator. The quantum scarred eigenstates are embedded throughout the otherwise thermalizing many-body spectrum but lead to direct experimental signatures, as we show for periodic recurrences that reproduce those observed in the experiment. Our results suggest that scarred many-body bands give rise to a new universality class of quantum dynamics, opening up opportunities for the creation of novel states with long-lived coherence in systems that are now experimentally realizable.},
  author       = {Turner, Christopher and Michailidis, Alexios and Abanin, Dmitry and Serbyn, Maksym and Papić, Zlatko},
  journal      = {Nature Physics},
  pages        = {745--749},
  publisher    = {Nature Publishing Group},
  title        = {{Weak ergodicity breaking from quantum many-body scars}},
  doi          = {10.1038/s41567-018-0137-5},
  volume       = {14},
  year         = {2018},
}

@inproceedings{297,
  abstract     = {Graph games played by two players over finite-state graphs are central in many problems in computer science. In particular, graph games with ω-regular winning conditions, specified as parity objectives, which can express properties such as safety, liveness, fairness, are the basic framework for verification and synthesis of reactive systems. The decisions for a player at various states of the graph game are represented as strategies. While the algorithmic problem for solving graph games with parity objectives has been widely studied, the most prominent data-structure for strategy representation in graph games has been binary decision diagrams (BDDs). However, due to the bit-level representation, BDDs do not retain the inherent flavor of the decisions of strategies, and are notoriously hard to minimize to obtain succinct representation. In this work we propose decision trees for strategy representation in graph games. Decision trees retain the flavor of decisions of strategies and allow entropy-based minimization to obtain succinct trees. However, decision trees work in settings (e.g., probabilistic models) where errors are allowed, and overfitting of data is typically avoided. In contrast, for strategies in graph games no error is allowed, and the decision tree must represent the entire strategy. We develop new techniques to extend decision trees to overcome the above obstacles, while retaining the entropy-based techniques to obtain succinct trees. We have implemented our techniques to extend the existing decision tree solvers. We present experimental results for problems in reactive synthesis to show that decision trees provide a much more efficient data-structure for strategy representation as compared to BDDs.},
  author       = {Brázdil, Tomáš and Chatterjee, Krishnendu and Křetínský, Jan and Toman, Viktor},
  booktitle    = {Tools and Algorithms for the Construction and Analysis of Systems ({TACAS} 2018)},
  location     = {Thessaloniki, Greece},
  pages        = {385--407},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {{Strategy representation by decision trees in reactive synthesis}},
  doi          = {10.1007/978-3-319-89960-2_21},
  volume       = {10805},
  year         = {2018},
}

@inproceedings{298,
  abstract     = {Memory-hard functions (MHF) are functions whose evaluation cost is dominated by memory cost. MHFs are egalitarian, in the sense that evaluating them on dedicated hardware (like FPGAs or ASICs) is not much cheaper than on off-the-shelf hardware (like x86 CPUs). MHFs have interesting cryptographic applications, most notably to password hashing and securing blockchains. Alwen and Serbinenko [STOC’15] define the cumulative memory complexity (cmc) of a function as the sum (over all time-steps) of the amount of memory required to compute the function. They advocate that a good MHF must have high cmc. Unlike previous notions, cmc takes into account that dedicated hardware might exploit amortization and parallelism. Still, cmc has been criticized as insufficient, as it fails to capture possible time-memory trade-offs; as memory cost doesn’t scale linearly, functions with the same cmc could still have very different actual hardware cost. In this work we address this problem, and introduce the notion of sustained-memory complexity, which requires that any algorithm evaluating the function must use a large amount of memory for many steps. We construct functions (in the parallel random oracle model) whose sustained-memory complexity is almost optimal: our function can be evaluated using n steps and O(n/log(n)) memory, in each step making one query to the (fixed-input length) random oracle, while any algorithm that can make arbitrary many parallel queries to the random oracle, still needs Ω(n/log(n)) memory for Ω(n) steps. As has been done for various notions (including cmc) before, we reduce the task of constructing an MHF with high sustained-memory complexity to proving pebbling lower bounds on DAGs. Our main technical contribution is the construction of a family of DAGs on n nodes with constant indegree with high “sustained-space complexity”, meaning that any parallel black-pebbling strategy requires Ω(n/log(n)) pebbles for at least Ω(n) steps. Along the way we construct a family of maximally “depth-robust” DAGs with maximum indegree O(log(n)), improving upon the construction of Mahmoody et al. [ITCS’13] which had maximum indegree O(log²(n)⋅polylog(log(n))).},
  author       = {Alwen, Joel F and Blocki, Jeremiah and Pietrzak, Krzysztof Z},
  booktitle    = {Advances in Cryptology -- {EUROCRYPT} 2018},
  location     = {Tel Aviv, Israel},
  pages        = {99--130},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {{Sustained space complexity}},
  doi          = {10.1007/978-3-319-78375-8_4},
  volume       = {10821},
  year         = {2018},
}

@inproceedings{299,
  abstract     = {We introduce in this paper AMT 2.0, a tool for qualitative and quantitative analysis of hybrid continuous and Boolean signals that combine numerical values and discrete events. The evaluation of the signals is based on rich temporal specifications expressed in extended Signal Temporal Logic (xSTL), which integrates Timed Regular Expressions (TRE) within Signal Temporal Logic (STL). The tool features qualitative monitoring (property satisfaction checking), trace diagnostics for explaining and justifying property violations and specification-driven measurement of quantitative features of the signal.},
  author       = {Ničković, Dejan and Lebeltel, Olivier and Maler, Oded and Ferrère, Thomas and Ulus, Dogan},
  booktitle    = {Tools and Algorithms for the Construction and Analysis of Systems ({TACAS} 2018)},
  editor       = {Beyer, Dirk and Huisman, Marieke},
  location     = {Thessaloniki, Greece},
  pages        = {303--319},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {{AMT 2.0: Qualitative and quantitative trace analysis with extended signal temporal logic}},
  doi          = {10.1007/978-3-319-89963-3_18},
  volume       = {10806},
  year         = {2018},
}

@inproceedings{300,
  abstract     = {We introduce a formal quantitative notion of “bit security” for a general type of cryptographic games (capturing both decision and search problems), aimed at capturing the intuition that a cryptographic primitive with k-bit security is as hard to break as an ideal cryptographic function requiring a brute force attack on a k-bit key space. Our new definition matches the notion of bit security commonly used by cryptographers and cryptanalysts when studying search (e.g., key recovery) problems, where the use of the traditional definition is well established. However, it produces a quantitatively different metric in the case of decision (indistinguishability) problems, where the use of (a straightforward generalization of) the traditional definition is more problematic and leads to a number of paradoxical situations or mismatches between theoretical/provable security and practical/common sense intuition. Key to our new definition is to consider adversaries that may explicitly declare failure of the attack. We support and justify the new definition by proving a number of technical results, including tight reductions between several standard cryptographic problems, a new hybrid theorem that preserves bit security, and an application to the security analysis of indistinguishability primitives making use of (approximate) floating point numbers. This is the first result showing that (standard precision) 53-bit floating point numbers can be used to achieve 100-bit security in the context of cryptographic primitives with general indistinguishability-based security definitions. Previous results of this type applied only to search problems, or special types of decision problems.},
  author       = {Micciancio, Daniele and Walter, Michael},
  booktitle    = {Advances in Cryptology -- {EUROCRYPT} 2018},
  location     = {Tel Aviv, Israel},
  pages        = {3--28},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {{On the bit security of cryptographic primitives}},
  doi          = {10.1007/978-3-319-78381-9_1},
  volume       = {10820},
  year         = {2018},
}

@inproceedings{302,
  abstract     = {At ITCS 2013, Mahmoody, Moran and Vadhan [MMV13] introduce and construct publicly verifiable proofs of sequential work, which is a protocol for proving that one spent sequential computational work related to some statement. The original motivation for such proofs included non-interactive time-stamping and universally verifiable CPU benchmarks. A more recent application, and our main motivation, are blockchain designs, where proofs of sequential work can be used – in combination with proofs of space – as a more ecological and economical substitute for proofs of work which are currently used to secure Bitcoin and other cryptocurrencies. The construction proposed by [MMV13] is based on a hash function and can be proven secure in the random oracle model, or assuming inherently sequential hash-functions, which is a new standard model assumption introduced in their work. In a proof of sequential work, a prover gets a “statement” χ, a time parameter N and access to a hash-function H, which for the security proof is modelled as a random oracle. Correctness requires that an honest prover can make a verifier accept making only N queries to H, while soundness requires that any prover who makes the verifier accept must have made (almost) N sequential queries to H. Thus a solution constitutes a proof that N time passed since χ was received. Solutions must be publicly verifiable in time at most polylogarithmic in N. The construction of [MMV13] is based on “depth-robust” graphs, and as a consequence has rather poor concrete parameters. But the major drawback is that the prover needs not just N time, but also N space to compute a proof. In this work we propose a proof of sequential work which is much simpler, more efficient and achieves much better concrete bounds. Most importantly, the space required can be as small as log (N) (but we get better soundness using slightly more memory than that). An open problem stated by [MMV13] that our construction does not solve either is achieving a “unique” proof, where even a cheating prover can only generate a single accepting proof. This property would be extremely useful for applications to blockchains.},
  author       = {Cohen, Bram and Pietrzak, Krzysztof Z},
  booktitle    = {Advances in Cryptology -- {EUROCRYPT} 2018},
  location     = {Tel Aviv, Israel},
  pages        = {451--467},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {{Simple proofs of sequential work}},
  doi          = {10.1007/978-3-319-78375-8_15},
  volume       = {10821},
  year         = {2018},
}

@article{303,
  abstract     = {The theory of tropical series, that we develop here, firstly appeared in the study of the growth of pluriharmonic functions. Motivated by waves in sandpile models we introduce a dynamic on the set of tropical series, and it is experimentally observed that this dynamic obeys a power law. So, this paper serves as a compilation of results we need for other articles and also introduces several objects interesting by themselves.},
  author       = {Kalinin, Nikita and Shkolnikov, Mikhail},
  journal      = {Discrete and Continuous Dynamical Systems - Series A},
  number       = {6},
  pages        = {2827--2849},
  publisher    = {AIMS},
  title        = {{Introduction to tropical series and wave dynamic on them}},
  doi          = {10.3934/dcds.2018120},
  volume       = {38},
  year         = {2018},
}

@article{304,
  abstract     = {Additive manufacturing has recently seen drastic improvements in resolution, making it now possible to fabricate features at scales of hundreds or even dozens of nanometers, which previously required very expensive lithographic methods. As a result, additive manufacturing now seems poised for optical applications, including those relevant to computer graphics, such as material design, as well as display and imaging applications. In this work, we explore the use of additive manufacturing for generating structural colors, where the structures are designed using a fabrication-aware optimization process. This requires a combination of full-wave simulation, a feasible parameterization of the design space, and a tailored optimization procedure. Many of these components should be re-usable for the design of other optical structures at this scale. We show initial results of material samples fabricated based on our designs. While these suffer from the prototype character of state-of-the-art fabrication hardware, we believe they clearly demonstrate the potential of additive nanofabrication for structural colors and other graphics applications.},
  author       = {Auzinger, Thomas and Heidrich, Wolfgang and Bickel, Bernd},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {ACM},
  title        = {{Computational design of nanostructural color for additive manufacturing}},
  doi          = {10.1145/3197517.3201376},
  volume       = {37},
  year         = {2018},
}

@article{305,
  abstract     = {The hanging-drop network (HDN) is a technology platform based on a completely open microfluidic network at the bottom of an inverted, surface-patterned substrate. The platform is predominantly used for the formation, culturing, and interaction of self-assembled spherical microtissues (spheroids) under precisely controlled flow conditions. Here, we describe design, fabrication, and operation of microfluidic hanging-drop networks.},
  author       = {Misun, Patrick and Birchler, Axel and Lang, Moritz and Hierlemann, Andreas and Frey, Olivier},
  journal      = {Methods in Molecular Biology},
  pages        = {183--202},
  publisher    = {Springer},
  title        = {{Fabrication and operation of microfluidic hanging drop networks}},
  doi          = {10.1007/978-1-4939-7792-5_15},
  volume       = {1771},
  year         = {2018},
}

@article{306,
  abstract     = {A cornerstone of statistical inference, the maximum entropy framework is being increasingly applied to construct descriptive and predictive models of biological systems, especially complex biological networks, from large experimental data sets. Both its broad applicability and the success it obtained in different contexts hinge upon its conceptual simplicity and mathematical soundness. Here we try to concisely review the basic elements of the maximum entropy principle, starting from the notion of ‘entropy’, and describe its usefulness for the analysis of biological systems. As examples, we focus specifically on the problem of reconstructing gene interaction networks from expression data and on recent work attempting to expand our system-level understanding of bacterial metabolism. Finally, we highlight some extensions and potential limitations of the maximum entropy approach, and point to more recent developments that are likely to play a key role in the upcoming challenges of extracting structures and information from increasingly rich, high-throughput biological data.},
  author       = {De Martino, Andrea and De Martino, Daniele},
  journal      = {Heliyon},
  number       = {4},
  pages        = {e00596},
  publisher    = {Elsevier},
  title        = {{An introduction to the maximum entropy approach and its application to inference problems in biology}},
  doi          = {10.1016/j.heliyon.2018.e00596},
  volume       = {4},
  year         = {2018},
}

@article{307,
  abstract     = {Spontaneous emission spectra of two initially excited closely spaced identical atoms are very sensitive to the strength and the direction of the applied magnetic field. We consider the relevant schemes that ensure the determination of the mutual spatial orientation of the atoms and the distance between them by entirely optical means. A corresponding theoretical description is given accounting for the dipole-dipole interaction between the two atoms in the presence of a magnetic field and for polarizations of the quantum field interacting with magnetic sublevels of the two-atom system.},
  author       = {Redchenko, Elena and Makarov, Alexander and Yudson, Vladimir},
  journal      = {Physical Review A - Atomic, Molecular, and Optical Physics},
  number       = {4},
  pages        = {043812},
  publisher    = {American Physical Society},
  title        = {{Nanoscopy of pairs of atoms by fluorescence in a magnetic field}},
  doi          = {10.1103/PhysRevA.97.043812},
  volume       = {97},
  year         = {2018},
}

@article{308,
  abstract     = {Migrating cells penetrate tissue barriers during development, inflammatory responses, and tumor metastasis. We study if migration in vivo in such three-dimensionally confined environments requires changes in the mechanical properties of the surrounding cells using embryonic Drosophila melanogaster hemocytes, also called macrophages, as a model. We find that macrophage invasion into the germband through transient separation of the apposing ectoderm and mesoderm requires cell deformations and reductions in apical tension in the ectoderm. Interestingly, the genetic pathway governing these mechanical shifts acts downstream of the only known tumor necrosis factor superfamily member in Drosophila, Eiger, and its receptor, Grindelwald. Eiger-Grindelwald signaling reduces levels of active Myosin in the germband ectodermal cortex through the localization of a Crumbs complex component, Patj (Pals-1-associated tight junction protein). We therefore elucidate a distinct molecular pathway that controls tissue tension and demonstrate the importance of such regulation for invasive migration in vivo.},
  author       = {Ratheesh, Aparna and Biebl, Julia and Smutny, Michael and Veselá, Jana and Papusheva, Ekaterina and Krens, Gabriel and Kaufmann, Walter and György, Attila and Casano, Alessandra M and Siekhaus, Daria E},
  journal      = {Developmental Cell},
  number       = {3},
  pages        = {331--346},
  publisher    = {Elsevier},
  title        = {{Drosophila TNF modulates tissue tension in the embryo to facilitate macrophage invasive migration}},
  doi          = {10.1016/j.devcel.2018.04.002},
  volume       = {45},
  year         = {2018},
}

@inproceedings{309,
  abstract     = {We present an efficient algorithm for a problem in the interface between clustering and graph embeddings. An embedding φ : G → M of a graph G into a 2-manifold M maps the vertices in V(G) to distinct points and the edges in E(G) to interior-disjoint Jordan arcs between the corresponding vertices. In applications in clustering, cartography, and visualization, nearby vertices and edges are often bundled to a common node or arc, due to data compression or low resolution. This raises the computational problem of deciding whether a given map φ : G → M comes from an embedding. A map φ : G → M is a weak embedding if it can be perturbed into an embedding ψ : G → M with ‖φ − ψ‖ < ε for every ε > 0. A polynomial-time algorithm for recognizing weak embeddings was recently found by Fulek and Kynčl [14], which reduces to solving a system of linear equations over Z_2. It runs in O(n^{2ω}) ⊆ O(n^{4.75}) time, where ω < 2.373 is the matrix multiplication exponent and n is the number of vertices and edges of G. We improve the running time to O(n log n). Our algorithm is also conceptually simpler than [14]: We perform a sequence of local operations that gradually “untangles” the image φ(G) into an embedding ψ(G), or reports that φ is not a weak embedding. It generalizes a recent technique developed for the case that G is a cycle and the embedding is a simple polygon [1], and combines local constraints on the orientation of subgraphs directly, thereby eliminating the need for solving large systems of linear equations.},
  author       = {Akitaya, Hugo and Fulek, Radoslav and Tóth, Csaba},
  booktitle    = {Proceedings of the 29th Annual {ACM}-{SIAM} Symposium on Discrete Algorithms ({SODA} 2018)},
  location     = {New Orleans, LA, USA},
  pages        = {274--292},
  publisher    = {SIAM},
  title        = {{Recognizing weak embeddings of graphs}},
  doi          = {10.1137/1.9781611975031.20},
  year         = {2018},
}

@article{31,
  abstract     = {Correlations in sensory neural networks have both extrinsic and intrinsic origins. Extrinsic or stimulus correlations arise from shared inputs to the network and, thus, depend strongly on the stimulus ensemble. Intrinsic or noise correlations reflect biophysical mechanisms of interactions between neurons, which are expected to be robust to changes in the stimulus ensemble. Despite the importance of this distinction for understanding how sensory networks encode information collectively, no method exists to reliably separate intrinsic interactions from extrinsic correlations in neural activity data, limiting our ability to build predictive models of the network response. In this paper we introduce a general strategy to infer population models of interacting neurons that collectively encode stimulus information. The key to disentangling intrinsic from extrinsic correlations is to infer the couplings between neurons separately from the encoding model and to combine the two using corrections calculated in a mean-field approximation. We demonstrate the effectiveness of this approach in retinal recordings. The same coupling network is inferred from responses to radically different stimulus ensembles, showing that these couplings indeed reflect stimulus-independent interactions between neurons. The inferred model predicts accurately the collective response of retinal ganglion cell populations as a function of the stimulus.},
  author       = {Ferrari, Ulisse and Deny, Stephane and Chalk, Matthew J and Tkacik, Gasper and Marre, Olivier and Mora, Thierry},
  issn         = {2470-0045},
  journal      = {Physical Review E},
  number       = {4},
  pages        = {042410},
  publisher    = {American Physical Society},
  title        = {{Separating intrinsic interactions from extrinsic correlations in a network of sensory neurons}},
  doi          = {10.1103/PhysRevE.98.042410},
  volume       = {98},
  year         = {2018},
}

@inproceedings{310,
  abstract     = {A model of computation that is widely used in the formal analysis of reactive systems is symbolic algorithms. In this model the access to the input graph is restricted to consist of symbolic operations, which are expensive in comparison to the standard RAM operations. We give lower bounds on the number of symbolic operations for basic graph problems such as the computation of the strongly connected components and of the approximate diameter as well as for fundamental problems in model checking such as safety, liveness, and coliveness. Our lower bounds are linear in the number of vertices of the graph, even for constant-diameter graphs. For none of these problems lower bounds on the number of symbolic operations were known before. The lower bounds show an interesting separation of these problems from the reachability problem, which can be solved with O(D) symbolic operations, where D is the diameter of the graph. Additionally we present an approximation algorithm for the graph diameter which requires Õ(n/D) symbolic steps to achieve a (1+ϵ)-approximation for any constant ϵ > 0. This compares to O(n/D) symbolic steps for the (naive) exact algorithm and O(D) symbolic steps for a 2-approximation. Finally we also give a refined analysis of the strongly connected components algorithms of [15], showing that it uses an optimal number of symbolic steps that is proportional to the sum of the diameters of the strongly connected components.},
  author       = {Chatterjee, Krishnendu and Dvorák, Wolfgang and Henzinger, Monika H and Loitzenbauer, Veronika},
  booktitle    = {Proceedings of the 29th Annual {ACM}-{SIAM} Symposium on Discrete Algorithms ({SODA} 2018)},
  location     = {New Orleans, Louisiana, United States},
  pages        = {2341--2356},
  publisher    = {SIAM},
  title        = {{Lower bounds for symbolic computation on graphs: Strongly connected components, liveness, safety, and diameter}},
  doi          = {10.1137/1.9781611975031.151},
  year         = {2018},
}

