@inproceedings{6648,
  abstract     = {Various kinds of data are routinely represented as discrete probability distributions. Examples include text documents summarized by histograms of word occurrences and images represented as histograms of oriented gradients. Viewing a discrete probability distribution as a point in the standard simplex of the appropriate dimension, we can understand collections of such objects in geometric and topological terms. Importantly, instead of using the standard Euclidean distance, we look into dissimilarity measures with information-theoretic justification, and we develop the theory
needed for applying topological data analysis in this setting. In doing so, we emphasize constructions that enable the usage of existing computational topology software in this context.},
  author       = {Edelsbrunner, Herbert and Virk, Ziga and Wagner, Hubert},
  booktitle    = {35th International Symposium on Computational Geometry},
  isbn         = {9783959771047},
  location     = {Portland, OR, United States},
  pages        = {31:1--31:14},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Topological data analysis in information space}},
  doi          = {10.4230/LIPICS.SOCG.2019.31},
  volume       = {129},
  year         = {2019},
}

@article{6650,
  abstract     = {We propose a novel technique for the automatic design of molds to cast highly complex shapes. The technique generates composite, two-piece molds. Each mold piece is made up of a hard plastic shell and a flexible silicone part. Thanks to the thin, soft, and smartly shaped silicone part, which is kept in place by a hard plastic shell, we can cast objects of unprecedented complexity. An innovative algorithm based on a volumetric analysis defines the layout of the internal cuts in the silicone mold part. Our approach can robustly handle thin protruding features and intertwined topologies that have caused previous methods to fail. We compare our results with state of the art techniques, and we demonstrate the casting of shapes with extremely complex geometry.},
  author       = {Alderighi, Thomas and Malomo, Luigi and Giorgi, Daniela and Bickel, Bernd and Cignoni, Paolo and Pietroni, Nico},
  issn         = {0730-0301},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {ACM},
  title        = {{Volume-aware design of composite molds}},
  doi          = {10.1145/3306346.3322981},
  volume       = {38},
  year         = {2019},
}

@article{6657,
  abstract     = {In this article a model is described how Open Access definitions can be formed on the basis of objective criteria. The common Open Access definitions such as "gold" and "green" are not exactly defined. This becomes a problem as soon as one begins to measure Open Access, for example if the development of the Open Access share should be monitored. This was discussed in the working group on Open Access Monitoring of the AT2OA project and the present model was developed, which is based on 5 criteria with 4 characteristics: location, licence, version, embargo and conditions of the Open Access publication are taken into account. In the meantime, the model has also been tested in practice using R scripts, and the initial results are quite promising.},
  author       = {Danowski, Patrick},
  issn         = {1022-2588},
  journal      = {Mitteilungen der Vereinigung Österreichischer Bibliothekarinnen und Bibliothekare},
  number       = {1},
  pages        = {59--65},
  publisher    = {Vereinigung Österreichischer Bibliothekarinnen und Bibliothekare},
  title        = {{An Austrian proposal for the classification of Open Access Tuples (COAT) - distinguish different open access types beyond colors}},
  doi          = {10.31263/voebm.v72i1.2276},
  volume       = {72},
  year         = {2019},
}

@article{6658,
  abstract     = {New genes are a major source of novelties, and a disproportionate amount of them are known to show testis expression in later phases of male gametogenesis in different groups such as mammals and plants. Here, we propose that this enhanced expression is a consequence of haploid selection during the latter stages of male gametogenesis. Because emerging adaptive mutations will be fixed faster if their phenotypes are expressed by haploid rather than diploid genotypes, new genes with advantageous functions arising during this unique stage of development have a better chance to become fixed. To test this hypothesis, expression levels of genes of differing evolutionary age were examined at various stages of Drosophila spermatogenesis. We found, consistent with a model based on haploid selection, that new Drosophila genes are both expressed in later haploid phases of spermatogenesis and harbor a significant enrichment of adaptive mutations. Additionally, the observed overexpression of new genes in the latter phases of spermatogenesis was limited to the autosomes. Because all male cells exhibit hemizygous expression for X-linked genes (and therefore effectively haploid), there is no expectation that selection acting on late spermatogenesis will have a different effect on X-linked genes in comparison to initial diploid phases. Together, our proposed hypothesis and the analyzed data suggest that natural selection in haploid cells elucidates several aspects of the origin of new genes by explaining the general prevalence of their testis expression, and a parsimonious solution for new alleles to avoid being lost by genetic drift or pseudogenization. },
  author       = {Raices, Julia and Otto, Paulo and Vibranovski, Maria},
  issn         = {1549-5469},
  journal      = {Genome Research},
  number       = {7},
  pages        = {1115--1122},
  publisher    = {Cold Spring Harbor Laboratory Press},
  title        = {{Haploid selection drives new gene male germline expression}},
  doi          = {10.1101/gr.238824.118},
  volume       = {29},
  year         = {2019},
}

@article{6659,
  abstract     = {Chemical labeling of proteins with synthetic molecular probes offers the possibility to probe the functions of proteins of interest in living cells. However, the methods for covalently labeling targeted proteins using complementary peptide tag-probe pairs are still limited, irrespective of the versatility of such pairs in biological research. Herein, we report the new CysHis tag-Ni(II) probe pair for the specific covalent labeling of proteins. A broad-range evaluation of the reactivity profiles of the probe and the CysHis peptide tag afforded a tag-probe pair with an optimized and high labeling selectivity and reactivity. In particular, the labeling specificity of this pair was notably improved compared to the previously reported one. This pair was successfully utilized for the fluorescence imaging of membrane proteins on the surfaces of living cells, demonstrating its potential utility in biological research.},
  author       = {Zenmyo, Naoki and Tokumaru, Hiroki and Uchinomiya, Shohei and Fuchida, Hirokazu and Tabata, Shigekazu and Hamachi, Itaru and Shigemoto, Ryuichi and Ojida, Akio},
  issn         = {0009-2673},
  journal      = {Bulletin of the Chemical Society of Japan},
  number       = {5},
  pages        = {995--1000},
  publisher    = {The Chemical Society of Japan},
  title        = {{Optimized reaction pair of the CysHis tag and Ni(II)-NTA probe for highly selective chemical labeling of membrane proteins}},
  doi          = {10.1246/bcsj.20190034},
  volume       = {92},
  year         = {2019},
}

@article{6660,
  abstract     = {Commercially available full-color 3D printing allows for detailed control of material deposition in a volume, but an exact reproduction of a target surface appearance is hampered by the strong subsurface scattering that causes nontrivial volumetric cross-talk at the print surface. Previous work showed how an iterative optimization scheme based on accumulating absorptive materials at the surface can be used to find a volumetric distribution of print materials that closely approximates a given target appearance.

In this work, we first revisit the assumption that pushing the absorptive materials to the surface results in minimal volumetric cross-talk. We design a full-fledged optimization on a small domain for this task and confirm this previously reported heuristic. Then, we extend the above approach that is critically limited to color reproduction on planar surfaces, to arbitrary 3D shapes. Our method enables high-fidelity color texture reproduction on 3D prints by effectively compensating for internal light scattering within arbitrarily shaped objects. In addition, we propose a content-aware gamut mapping that significantly improves color reproduction for the pathological case of thin geometric features. Using a wide range of sample objects with complex textures and geometries, we demonstrate color reproduction whose fidelity is superior to state-of-the-art drivers for color 3D printers.},
  author       = {Sumin, Denis and Weyrich, Tim and Rittig, Tobias and Babaei, Vahid and Nindel, Thomas and Wilkie, Alexander and Didyk, Piotr and Bickel, Bernd and Křivánek, Jaroslav and Myszkowski, Karol},
  issn         = {0730-0301},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {ACM},
  title        = {{Geometry-aware scattering compensation for 3D printing}},
  doi          = {10.1145/3306346.3322992},
  volume       = {38},
  year         = {2019},
}

@article{6662,
  abstract     = {In phase retrieval, we want to recover an unknown signal 𝑥∈ℂ𝑑 from n quadratic measurements of the form 𝑦𝑖=|⟨𝑎𝑖,𝑥⟩|2+𝑤𝑖, where 𝑎𝑖∈ℂ𝑑 are known sensing vectors and 𝑤𝑖 is measurement noise. We ask the following weak recovery question: What is the minimum number of measurements n needed to produce an estimator 𝑥^(𝑦) that is positively correlated with the signal 𝑥? We consider the case of Gaussian vectors 𝑎𝑎𝑖. We prove that—in the high-dimensional limit—a sharp phase transition takes place, and we locate the threshold in the regime of vanishingly small noise. For 𝑛≤𝑑−𝑜(𝑑), no estimator can do significantly better than random and achieve a strictly positive correlation. For 𝑛≥𝑑+𝑜(𝑑), a simple spectral estimator achieves a positive correlation. Surprisingly, numerical simulations with the same spectral estimator demonstrate promising performance with realistic sensing matrices. Spectral methods are used to initialize non-convex optimization algorithms in phase retrieval, and our approach can boost the performance in this setting as well. Our impossibility result is based on classical information-theoretic arguments. The spectral algorithm computes the leading eigenvector of a weighted empirical covariance matrix. We obtain a sharp characterization of the spectral properties of this random matrix using tools from free probability and generalizing a recent result by Lu and Li. Both the upper bound and lower bound generalize beyond phase retrieval to measurements 𝑦𝑖 produced according to a generalized linear model. As a by-product of our analysis, we compare the threshold of the proposed spectral method with that of a message passing algorithm.},
  author       = {Mondelli, Marco and Montanari, Andrea},
  issn         = {1615-3383},
  journal      = {Foundations of Computational Mathematics},
  number       = {3},
  pages        = {703--773},
  publisher    = {Springer},
  title        = {{Fundamental limits of weak recovery with applications to phase retrieval}},
  doi          = {10.1007/s10208-018-9395-y},
  volume       = {19},
  year         = {2019},
}

@article{6663,
  abstract     = {Consider the problem of constructing a polar code of block length N for a given transmission channel W. Previous approaches require one to compute the reliability of the N synthetic channels and then use only those that are sufficiently reliable. However, we know from two independent works by Schürch and by Bardet et al. that the synthetic channels are partially ordered with respect to degradation. Hence, it is natural to ask whether the partial order can be exploited to reduce the computational burden of the construction problem. We show that, if we take advantage of the partial order, we can construct a polar code by computing the reliability of roughly a fraction 1/ log 3/2 N of the synthetic channels. In particular, we prove that N/ log 3/2 N is a lower bound on the number of synthetic channels to be considered and such a bound is tight up to a multiplicative factor log log N. This set of roughly N/ log 3/2 N synthetic channels is universal, in the sense that it allows one to construct polar codes for any W, and it can be identified by solving a maximum matching problem on a bipartite graph. Our proof technique consists of reducing the construction problem to the problem of computing the maximum cardinality of an antichain for a suitable partially ordered set. As such, this method is general, and it can be used to further improve the complexity of the construction problem, in case a refined partial order on the synthetic channels of polar codes is discovered.},
  author       = {Mondelli, Marco and Hassani, Hamed and Urbanke, Rudiger},
  journal      = {IEEE Transactions on Information Theory},
  number       = {5},
  pages        = {2782--2791},
  publisher    = {IEEE},
  title        = {{Construction of polar codes with sublinear complexity}},
  doi          = {10.1109/tit.2018.2889667},
  volume       = {65},
  year         = {2019},
}

@article{6671,
  abstract     = {In this paper we discuss three results. The first two concern general sets of positive reach: we first characterize the reach of a closed set by means of a bound on the metric distortion between the distance measured in the ambient Euclidean space and the shortest path distance measured in the set. Secondly, we prove that the intersection of a ball with radius less than the reach with the set is geodesically convex, meaning that the shortest path between any two points in the intersection lies itself in the intersection. For our third result we focus on manifolds with positive reach and give a bound on the angle between tangent spaces at two different points in terms of the reach and the distance between the two points.},
  author       = {Boissonnat, Jean-Daniel and Lieutier, André and Wintraecken, Mathijs},
  issn         = {2367-1734},
  journal      = {Journal of Applied and Computational Topology},
  number       = {1-2},
  pages        = {29--58},
  publisher    = {Springer Nature},
  title        = {{The reach, metric distortion, geodesic convexity and the variation of tangent spaces}},
  doi          = {10.1007/s41468-019-00029-8},
  volume       = {3},
  year         = {2019},
}

@article{6672,
  abstract     = {The construction of anisotropic triangulations is desirable for various applications, such as the numerical solving of partial differential equations and the representation of surfaces in graphics. To solve this notoriously difficult problem in a practical way, we introduce the discrete Riemannian Voronoi diagram, a discrete structure that approximates the Riemannian Voronoi diagram. This structure has been implemented and was shown to lead to good triangulations in $\mathbb{R}^2$ and on surfaces embedded in $\mathbb{R}^3$ as detailed in our experimental companion paper. In this paper, we study theoretical aspects of our structure. Given a finite set of points $\mathcal{P}$ in a domain $\Omega$ equipped with a Riemannian metric, we compare the discrete Riemannian Voronoi diagram of $\mathcal{P}$ to its Riemannian Voronoi diagram. Both diagrams have dual structures called the discrete Riemannian Delaunay and the Riemannian Delaunay complex. We provide conditions that guarantee that these dual structures are identical. It then follows from previous results that the discrete Riemannian Delaunay complex can be embedded in $\Omega$ under sufficient conditions, leading to an anisotropic triangulation with curved simplices. Furthermore, we show that, under similar conditions, the simplices of this triangulation can be straightened.},
  author       = {Boissonnat, Jean-Daniel and Rouxel-Labbé, Mael and Wintraecken, Mathijs},
  issn         = {1095-7111},
  journal      = {SIAM Journal on Computing},
  number       = {3},
  pages        = {1046--1097},
  publisher    = {Society for Industrial \& Applied Mathematics (SIAM)},
  title        = {{Anisotropic triangulations via discrete Riemannian Voronoi diagrams}},
  doi          = {10.1137/17m1152292},
  volume       = {48},
  year         = {2019},
}

@inproceedings{6676,
  abstract     = {It is impossible to deterministically solve wait-free consensus in an asynchronous system. The classic proof uses a valency argument, which constructs an infinite execution by repeatedly extending a finite execution. We introduce extension-based proofs, a class of impossibility proofs that are modelled as an interaction between a prover and a protocol and that include valency arguments.

Using proofs based on combinatorial topology, it has been shown that it is impossible to deterministically solve k-set agreement among n > k ≥ 2 processes in a wait-free manner. However, it was unknown whether proofs based on simpler techniques were possible. We show that this impossibility result cannot be obtained by an extension-based proof and, hence, extension-based proofs are limited in power.},
  author       = {Alistarh, Dan-Adrian and Aspnes, James and Ellen, Faith and Gelashvili, Rati and Zhu, Leqi},
  booktitle    = {Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing},
  isbn         = {9781450367059},
  location     = {Phoenix, AZ, United States},
  pages        = {986--996},
  publisher    = {ACM},
  title        = {{Why extension-based proofs fail}},
  doi          = {10.1145/3313276.3316407},
  year         = {2019},
}

@article{6680,
  abstract     = {This paper analyzes how partial selfing in a large source population influences its ability to colonize a new habitat via the introduction of a few founder individuals. Founders experience inbreeding depression due to partially recessive deleterious alleles as well as maladaptation to the new environment due to selection on a large number of additive loci. I first introduce a simplified version of the Inbreeding History Model (Kelly, 2007) in order to characterize mutation‐selection balance in a large, partially selfing source population under selection involving multiple non‐identical loci. I then use individual‐based simulations to study the eco‐evolutionary dynamics of founders establishing in the new habitat under a model of hard selection. The study explores how selfing rate shapes establishment probabilities of founders via effects on both inbreeding depression and adaptability to the new environment, and also distinguishes the effects of selfing on the initial fitness of founders from its effects on the long‐term adaptive response of the populations they found. A high rate of (but not complete) selfing is found to aid establishment over a wide range of parameters, even in the absence of mate limitation. The sensitivity of the results to assumptions about the nature of polygenic selection are discussed.},
  author       = {Sachdeva, Himani},
  issn         = {1558-5646},
  journal      = {Evolution},
  number       = {9},
  pages        = {1729--1745},
  publisher    = {Wiley},
  title        = {{Effect of partial selfing and polygenic selection on establishment in a new habitat}},
  doi          = {10.1111/evo.13812},
  volume       = {73},
  year         = {2019},
}

@article{6710,
  abstract     = {Sexual dimorphism in morphology, physiology or life history traits is common in dioecious plants at reproductive maturity, but it is typically inconspicuous or absent in juveniles. Although plants of different sexes probably begin to diverge in gene expression both before their reproduction commences and before dimorphism becomes readily apparent, to our knowledge transcriptome-wide differential gene expression has yet to be demonstrated for any angiosperm species.},
  author       = {Cossard, Guillaume and Toups, Melissa A and Pannell, John},
  issn         = {1095-8290},
  journal      = {Annals of botany},
  number       = {7},
  pages        = {1119--1131},
  publisher    = {Oxford University Press},
  title        = {{Sexual dimorphism and rapid turnover in gene expression in pre-reproductive seedlings of a dioecious herb}},
  doi          = {10.1093/aob/mcy183},
  volume       = {123},
  year         = {2019},
}

@article{6717,
  abstract     = {With the recent publication by Silpe and Bassler (2019), considering phage detection of a bacterial quorum-sensing (QS) autoinducer, we now have as many as five examples of phage-associated intercellular communication (Table 1). Each potentially involves ecological inferences by phages as to concentrations of surrounding phage-infected or uninfected bacteria. While the utility of phage detection of bacterial QS molecules may at first glance appear to be straightforward, we suggest in this commentary that the underlying ecological explanation is unlikely to be simple.},
  author       = {Igler, Claudia and Abedon, Stephen T.},
  journal      = {Frontiers in Microbiology},
  publisher    = {Frontiers},
  title        = {{Commentary: A host-produced quorum-sensing autoinducer controls a phage lysis-lysogeny decision}},
  doi          = {10.3389/fmicb.2019.01171},
  volume       = {10},
  year         = {2019},
}

@inproceedings{6725,
  abstract     = {A Valued Constraint Satisfaction Problem (VCSP) provides a common framework that can express a wide range of discrete optimization problems. A VCSP instance is given by a finite set of variables, a finite domain of labels, and an objective function to be minimized. This function is represented as a sum of terms where each term depends on a subset of the variables. To obtain different classes of optimization problems, one can restrict all terms to come from a fixed set Γ of cost functions, called a language. 
Recent breakthrough results have established a complete complexity classification of such classes with respect to language Γ: if all cost functions in Γ satisfy a certain algebraic condition then all Γ-instances can be solved in polynomial time, otherwise the problem is NP-hard. Unfortunately, testing this condition for a given language Γ is known to be NP-hard. We thus study exponential algorithms for this meta-problem. We show that the tractability condition of a finite-valued language Γ can be tested in $O(\sqrt{3}^{\,3|D|} \cdot \mathrm{poly}(\mathrm{size}(\Gamma)))$ time, where D is the domain of Γ and poly(⋅) is some fixed polynomial. We also obtain a matching lower bound under the Strong Exponential Time Hypothesis (SETH). More precisely, we prove that for any constant δ<1 there is no $O(\sqrt{3}^{\,3\delta|D|})$ algorithm, assuming that SETH holds.},
  author       = {Kolmogorov, Vladimir},
  booktitle    = {46th International Colloquium on Automata, Languages and Programming},
  isbn         = {978-3-95977-109-2},
  issn         = {1868-8969},
  location     = {Patras, Greece},
  pages        = {77:1--77:12},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Testing the complexity of a valued CSP language}},
  doi          = {10.4230/LIPICS.ICALP.2019.77},
  volume       = {132},
  year         = {2019},
}

@inbook{6726,
  abstract     = {Randomness is an essential part of any secure cryptosystem, but many constructions rely on distributions that are not uniform. This is particularly true for lattice based cryptosystems, which more often than not make use of discrete Gaussian distributions over the integers. For practical purposes it is crucial to evaluate the impact that approximation errors have on the security of a scheme to provide the best possible trade-off between security and performance. Recent years have seen surprising results allowing to use relatively low precision while maintaining high levels of security. A key insight in these results is that sampling a distribution with low relative error can provide very strong security guarantees. Since floating point numbers provide guarantees on the relative approximation error, they seem a suitable tool in this setting, but it is not obvious which sampling algorithms can actually profit from them. While previous works have shown that inversion sampling can be adapted to provide a low relative error (Pöppelmann et al., CHES 2014; Prest, ASIACRYPT 2017), other works have called into question if this is possible for other sampling techniques (Zheng et al., Eprint report 2018/309). In this work, we consider all sampling algorithms that are popular in the cryptographic setting and analyze the relationship of floating point precision and the resulting relative error. We show that all of the algorithms either natively achieve a low relative error or can be adapted to do so.},
  author       = {Walter, Michael},
  booktitle    = {Progress in Cryptology – AFRICACRYPT 2019},
  editor       = {Buchmann, J and Nitaj, A and Rachidi, T},
  isbn         = {978-3-030-23695-3},
  issn         = {1611-3349},
  location     = {Rabat, Morocco},
  pages        = {157--180},
  publisher    = {Springer Nature},
  title        = {{Sampling the integers with low relative error}},
  doi          = {10.1007/978-3-030-23696-0_9},
  volume       = {11627},
  year         = {2019},
}

@inproceedings{6747,
  abstract     = {We establish connections between the problem of learning a two-layer neural network and tensor decomposition. We consider a model with feature vectors x∈ℝd, r hidden units with weights {wi}1≤i≤r and output y∈ℝ, i.e., y=∑ri=1σ(w𝖳ix), with activation functions given by low-degree polynomials. In particular, if σ(x)=a0+a1x+a3x3, we prove that no polynomial-time learning algorithm can outperform the trivial predictor that assigns to each example the response variable 𝔼(y), when d3/2≪r≪d2. Our conclusion holds for a `natural data distribution', namely standard Gaussian feature vectors x, and output distributed according to a two-layer neural network with random isotropic weights, and under a certain complexity-theoretic assumption on tensor decomposition. Roughly speaking, we assume that no polynomial-time algorithm can substantially outperform current methods for tensor decomposition based on the sum-of-squares hierarchy. We also prove generalizations of this statement for higher degree polynomial activations, and non-random weight vectors. Remarkably, several existing algorithms for learning two-layer networks with rigorous guarantees are based on tensor decomposition. Our results support the idea that this is indeed the core computational difficulty in learning such networks, under the stated generative model for the data. As a side result, we show that under this model learning the network requires accurate learning of its weights, a property that does not hold in a more general setting. },
  author       = {Mondelli, Marco and Montanari, Andrea},
  booktitle    = {Proceedings of the 22nd International Conference on Artificial Intelligence and Statistics},
  location     = {Naha, Okinawa, Japan},
  pages        = {1051--1060},
  publisher    = {Proceedings of Machine Learning Research},
  title        = {{On the connection between learning two-layers neural networks and tensor decomposition}},
  volume       = {89},
  year         = {2019},
}

@article{6750,
  abstract     = {Polar codes have gained extensive attention during the past few years and recently they have been selected for the next generation of wireless communications standards (5G). Successive-cancellation-based (SC-based) decoders, such as SC list (SCL) and SC flip (SCF), provide a reasonable error performance for polar codes at the cost of low decoding speed. Fast SC-based decoders, such as Fast-SSC, Fast-SSCL, and Fast-SSCF, identify the special constituent codes in a polar code graph off-line, produce a list of operations, store the list in memory, and feed the list to the decoder to decode the constituent codes in order efficiently, thus increasing the decoding speed. However, the list of operations is dependent on the code rate and as the rate changes, a new list is produced, making fast SC-based decoders not rate-flexible. In this paper, we propose a completely rate-flexible fast SC-based decoder by creating the list of operations directly in hardware, with low implementation complexity. We further propose a hardware architecture implementing the proposed method and show that the area occupation of the rate-flexible fast SC-based decoder in this paper is only 38% of the total area of the memory-based base-line decoder when 5G code rates are supported. },
  author       = {Hashemi, Seyyed Ali and Condo, Carlo and Mondelli, Marco and Gross, Warren J},
  issn         = {1053-587X},
  journal      = {IEEE Transactions on Signal Processing},
  number       = {22},
  publisher    = {IEEE},
  title        = {{Rate-flexible fast polar decoders}},
  doi          = {10.1109/TSP.2019.2944738},
  volume       = {67},
  year         = {2019},
}

@article{6752,
  abstract     = {Two-player games on graphs are widely studied in formal methods, as they model the interaction between a system and its environment. The game is played by moving a token throughout a graph to produce an infinite path. There are several common modes to determine how the players move the token through the graph; e.g., in turn-based games the players alternate turns in moving the token. We study the bidding mode of moving the token, which, to the best of our knowledge, has never been studied in infinite-duration games. The following bidding rule was previously defined and called Richman bidding. Both players have separate budgets, which sum up to 1. In each turn, a bidding takes place: Both players submit bids simultaneously, where a bid is legal if it does not exceed the available budget, and the higher bidder pays his bid to the other player and moves the token. The central question studied in bidding games is a necessary and sufficient initial budget for winning the game: a threshold budget in a vertex is a value t ∈ [0, 1] such that if Player 1’s budget exceeds t, he can win the game; and if Player 2’s budget exceeds 1 − t, he can win the game. Threshold budgets were previously shown to exist in every vertex of a reachability game, which have an interesting connection with random-turn games—a sub-class of simple stochastic games in which the player who moves is chosen randomly. We show the existence of threshold budgets for a qualitative class of infinite-duration games, namely parity games, and a quantitative class, namely mean-payoff games. The key component of the proof is a quantitative solution to strongly connected mean-payoff bidding games in which we extend the connection with random-turn games to these games, and construct explicit optimal strategies for both players.},
  author       = {Avni, Guy and Henzinger, Thomas A and Chonev, Ventsislav K},
  issn         = {1557-735X},
  journal      = {Journal of the ACM},
  number       = {4},
  publisher    = {ACM},
  title        = {{Infinite-duration bidding games}},
  doi          = {10.1145/3340295},
  volume       = {66},
  year         = {2019},
}

@article{6755,
  abstract     = {Differentiated sex chromosomes are accompanied by a difference in gene dose between X/Z-specific and autosomal genes. At the transcriptomic level, these sex-linked genes can lead to expression imbalance, or gene dosage can be compensated by epigenetic mechanisms and results in expression level equalization. Schistosoma mansoni has been previously described as a ZW species (i.e., female heterogamety, in opposition to XY male heterogametic species) with a partial dosage compensation, but underlying mechanisms are still unexplored. Here, we combine transcriptomic (RNA-Seq) and epigenetic data (ChIP-Seq against H3K4me3, H3K27me3, and H4K20me1 histone marks) in free larval cercariae and intravertebrate parasitic stages. For the first time, we describe differences in dosage compensation status in ZW females, depending on the parasitic status: free cercariae display global dosage compensation, whereas intravertebrate stages show a partial dosage compensation. We also highlight regional differences of gene expression along the Z chromosome in cercariae, but not in the intravertebrate stages. Finally, we feature a consistent permissive chromatin landscape of the Z chromosome in both sexes and stages. We argue that dosage compensation in schistosomes is characterized by chromatin remodeling mechanisms in the Z-specific region.},
  author       = {Picard, Marion A L and Vicoso, Beatriz and Roquis, David and Bulla, Ingo and Augusto, Ronaldo C. and Arancibia, Nathalie and Grunau, Christoph and Boissier, Jérôme and Cosseau, Céline},
  issn         = {1759-6653},
  journal      = {Genome biology and evolution},
  number       = {7},
  pages        = {1909--1922},
  publisher    = {Oxford University Press},
  title        = {{Dosage compensation throughout the Schistosoma mansoni lifecycle: Specific chromatin landscape of the Z chromosome}},
  doi          = {10.1093/gbe/evz133},
  volume       = {11},
  year         = {2019},
}

