@inproceedings{1729,
  abstract     = {We present a computer-aided programming approach to concurrency. The approach allows programmers to program assuming a friendly, non-preemptive scheduler, and our synthesis procedure inserts synchronization to ensure that the final program works even with a preemptive scheduler. The correctness specification is implicit, inferred from the non-preemptive behavior. Let us consider sequences of calls that the program makes to an external interface. The specification requires that any such sequence produced under a preemptive scheduler should be included in the set of such sequences produced under a non-preemptive scheduler. The solution is based on a finitary abstraction, an algorithm for bounded language inclusion modulo an independence relation, and rules for inserting synchronization. We apply the approach to device-driver programming, where the driver threads call the software interface of the device and the API provided by the operating system. Our experiments demonstrate that our synthesis method is precise and efficient, and, since it does not require explicit specifications, is more practical than the conventional approach based on user-provided assertions.},
  author       = {Cerny, Pavol and Clarke, Edmund and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Samanta, Roopsha and Tarrach, Thorsten},
  booktitle    = {27th International Conference on Computer Aided Verification},
  location     = {San Francisco, CA, United States},
  pages        = {180 -- 197},
  publisher    = {Springer},
  title        = {{From non-preemptive to preemptive scheduling using synchronization synthesis}},
  doi          = {10.1007/978-3-319-21668-3_11},
  volume       = {9207},
  year         = {2015},
}

@article{1666,
  abstract     = {Evolution of gene regulation is crucial for our understanding of the phenotypic differences between species, populations and individuals. Sequence-specific binding of transcription factors to the regulatory regions on the DNA is a key regulatory mechanism that determines gene expression and hence heritable phenotypic variation. We use a biophysical model for directional selection on gene expression to estimate the rates of gain and loss of transcription factor binding sites (TFBS) in finite populations under both point and insertion/deletion mutations. Our results show that these rates are typically slow for a single TFBS in an isolated DNA region, unless the selection is extremely strong. These rates decrease drastically with increasing TFBS length or increasingly specific protein-DNA interactions, making the evolution of sites longer than ∼ 10 bp unlikely on typical eukaryotic speciation timescales. Similarly, evolution converges to the stationary distribution of binding sequences very slowly, making the equilibrium assumption questionable. The availability of longer regulatory sequences in which multiple binding sites can evolve simultaneously, the presence of “pre-sites” or partially decayed old sites in the initial sequence, and biophysical cooperativity between transcription factors, can all facilitate gain of TFBS and reconcile theoretical calculations with timescales inferred from comparative genomics.},
  author       = {Tugrul, Murat and Paixao, Tiago and Barton, Nicholas H and Tkacik, Gasper},
  journal      = {PLoS Genetics},
  number       = {11},
  publisher    = {Public Library of Science},
  title        = {{Dynamics of transcription factor binding site evolution}},
  doi          = {10.1371/journal.pgen.1005639},
  volume       = {11},
  year         = {2015},
}

@phdthesis{1401,
  abstract     = {The human ability to recognize objects in complex scenes has driven research in the computer vision field over couple of decades. This thesis focuses on the object recognition task in images. That is, given the image, we want the computer system to be able to predict the class of the object that appears in the image. A recent successful attempt to bridge semantic understanding of the image perceived by humans and by computers uses attribute-based models. Attributes are semantic properties of the objects shared across different categories, which humans and computers can decide on. To explore the attribute-based models we take a statistical machine learning approach, and address two key learning challenges in view of object recognition task: learning augmented attributes as mid-level discriminative feature representation, and learning with attributes as privileged information. Our main contributions are parametric and non-parametric models and algorithms to solve these frameworks. In the parametric approach, we explore an autoencoder model combined with the large margin nearest neighbor principle for mid-level feature learning, and linear support vector machines for learning with privileged information. In the non-parametric approach, we propose a supervised Indian Buffet Process for automatic augmentation of semantic attributes, and explore the Gaussian Processes classification framework for learning with privileged information. A thorough experimental analysis shows the effectiveness of the proposed models in both parametric and non-parametric views.},
  author       = {Sharmanska, Viktoriia},
  issn         = {2663-337X},
  pages        = {144},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Learning with attributes for object recognition: Parametric and non-parametric views}},
  doi          = {10.15479/at:ista:1401},
  year         = {2015},
}

@article{1792,
  abstract     = {Motivated by recent ideas of Harman (Unif. Distrib. Theory, 2010) we develop a new concept of variation of multivariate functions on a compact Hausdorff space with respect to a collection D of subsets. We prove a general version of the Koksma-Hlawka theorem that holds for this notion of variation and discrepancy with respect to D. As special cases, we obtain Koksma-Hlawka inequalities for classical notions, such as extreme or isotropic discrepancy. For extreme discrepancy, our result coincides with the usual Koksma-Hlawka theorem. We show that the space of functions of bounded D-variation contains important discontinuous functions and is closed under natural algebraic operations. Finally, we illustrate the results on concrete integration problems from integral geometry and stereology.},
  author       = {Pausinger, Florian and Svane, Anne},
  journal      = {Journal of Complexity},
  number       = {6},
  pages        = {773 -- 797},
  publisher    = {Academic Press},
  title        = {{A Koksma-Hlawka inequality for general discrepancy systems}},
  doi          = {10.1016/j.jco.2015.06.002},
  volume       = {31},
  year         = {2015},
}

@article{1709,
  abstract     = {The competition for resources among cells, individuals or species is a fundamental characteristic of evolution. Biological all-pay auctions have been used to model situations where multiple individuals compete for a single resource. However, in many situations multiple resources with various values exist and single reward auctions are not applicable. We generalize the model to multiple rewards and study the evolution of strategies. In biological all-pay auctions the bid of an individual corresponds to its strategy and is equivalent to its payment in the auction. The decreasingly ordered rewards are distributed according to the decreasingly ordered bids of the participating individuals. The reproductive success of an individual is proportional to its fitness given by the sum of the rewards won minus its payments. Hence, successful bidding strategies spread in the population. We find that the results for the multiple reward case are very different from the single reward case. While the mixed strategy equilibrium in the single reward case with more than two players consists of mostly low-bidding individuals, we show that the equilibrium can convert to many high-bidding individuals and a few low-bidding individuals in the multiple reward case. Some reward values lead to a specialization among the individuals where one subpopulation competes for the rewards and the other subpopulation largely avoids costly competitions. Whether the mixed strategy equilibrium is an evolutionarily stable strategy (ESS) depends on the specific values of the rewards.},
  author       = {Reiter, Johannes and Kanodia, Ayush and Gupta, Raghav and Nowak, Martin and Chatterjee, Krishnendu},
  journal      = {Proceedings of the Royal Society B: Biological Sciences},
  number       = {1812},
  publisher    = {Royal Society},
  title        = {{Biological auctions with multiple rewards}},
  doi          = {10.1098/rspb.2015.1041},
  volume       = {282},
  year         = {2015},
}

@phdthesis{1400,
  abstract     = {Cancer results from an uncontrolled growth of abnormal cells. Sequentially accumulated genetic and epigenetic alterations decrease cell death and increase cell replication. We used mathematical models to quantify the effect of driver gene mutations. The recently developed targeted therapies can lead to dramatic regressions. However, in solid cancers, clinical responses are often short-lived because resistant cancer cells evolve. We estimated that approximately 50 different mutations can confer resistance to a typical targeted therapeutic agent. We find that resistant cells are likely to be present in expanded subclones before the start of the treatment. The dominant strategy to prevent the evolution of resistance is combination therapy. Our analytical results suggest that in most patients, dual therapy, but not monotherapy, can result in long-term disease control. However, long-term control can only occur if there are no possible mutations in the genome that can cause cross-resistance to both drugs. Furthermore, we showed that simultaneous therapy with two drugs is much more likely to result in long-term disease control than sequential therapy with the same drugs. To improve our understanding of the underlying subclonal evolution we reconstruct the evolutionary history of a patient's cancer from next-generation sequencing data of spatially-distinct DNA samples. Using a quantitative measure of genetic relatedness, we found that pancreatic cancers and their metastases demonstrated a higher level of relatedness than that expected for any two cells randomly taken from a normal tissue. This minimal amount of genetic divergence among advanced lesions indicates that genetic heterogeneity, when quantitatively defined, is not a fundamental feature of the natural history of untreated pancreatic cancers. 
Our newly developed, phylogenomic tool Treeomics finds evidence for seeding patterns of metastases and can directly be used to discover rules governing the evolution of solid malignancies to transform cancer into a more predictable disease.},
  author       = {Reiter, Johannes},
  issn         = {2663-337X},
  pages        = {183},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{The subclonal evolution of cancer}},
  year         = {2015},
}

@inproceedings{1502,
  abstract     = {We extend the theory of input-output conformance with operators for merge and quotient. The former is useful when testing against multiple requirements or views. The latter can be used to generate tests for patches of an already tested system. Both operators can combine systems with different action alphabets, which is usually the case when constructing complex systems and specifications from parts, for instance different views as well as newly defined functionality of a~previous version of the system.},
  author       = {Beneš, Nikola and Daca, Przemyslaw and Henzinger, Thomas A and Kretinsky, Jan and Nickovic, Dejan},
  booktitle    = {18th International ACM SIGSOFT Symposium on Component-Based Software Engineering},
  isbn         = {978-1-4503-3471-6},
  location     = {Montreal, QC, Canada},
  pages        = {101 -- 110},
  publisher    = {ACM},
  title        = {{Complete composition operators for IOCO-testing theory}},
  doi          = {10.1145/2737166.2737175},
  year         = {2015},
}

@article{1501,
  abstract     = {We consider Markov decision processes (MDPs) which are a standard model for probabilistic systems. We focus on qualitative properties for MDPs that can express that desired behaviors of the system arise almost-surely (with probability 1) or with positive probability. We introduce a new simulation relation to capture the refinement relation of MDPs with respect to qualitative properties, and present discrete graph algorithms with quadratic complexity to compute the simulation relation. We present an automated technique for assume-guarantee style reasoning for compositional analysis of two-player games by giving a counterexample guided abstraction-refinement approach to compute our new simulation relation. We show a tight link between two-player games and MDPs, and as a consequence the results for games are lifted to MDPs with qualitative properties. We have implemented our algorithms and show that the compositional analysis leads to significant improvements. },
  author       = {Chatterjee, Krishnendu and Chmelik, Martin and Daca, Przemyslaw},
  journal      = {Formal Methods in System Design},
  number       = {2},
  pages        = {230 -- 264},
  publisher    = {Springer},
  title        = {{CEGAR for compositional analysis of qualitative properties in Markov decision processes}},
  doi          = {10.1007/s10703-015-0235-2},
  volume       = {47},
  year         = {2015},
}

@phdthesis{1399,
  abstract     = {This thesis is concerned with the computation and approximation of intrinsic volumes. Given a smooth body M and a certain digital approximation of it, we develop algorithms to approximate various intrinsic volumes of M using only measurements taken from its digital approximations. The crucial idea behind our novel algorithms is to link the recent theory of persistent homology to the theory of intrinsic volumes via the Crofton formula from integral geometry and, in particular, via Euler characteristic computations. Our main contributions are a multigrid convergent digital algorithm to compute the first intrinsic volume of a solid body in R^n as well as an appropriate integration pipeline to approximate integral-geometric integrals defined over the Grassmannian manifold.},
  author       = {Pausinger, Florian},
  issn         = {2663-337X},
  pages        = {144},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{On the approximation of intrinsic volumes}},
  year         = {2015},
}

@article{1525,
  abstract     = {Based on 16 recommendations, efforts should be made to achieve the following goal: By 2025, all scholarly publication activity in Austria should be Open Access. In other words, the final versions of all scholarly publications resulting from the support of public resources must be freely accessible on the Internet without delay (Gold Open Access). The resources required to meet this obligation shall be provided to the authors, or the cost of the publication venues shall be borne directly by the research organisations.},
  author       = {Bauer, Bruno and Blechl, Guido and Bock, Christoph and Danowski, Patrick and Ferus, Andreas and Graschopf, Anton and König, Thomas and Mayer, Katja and Reckling, Falk and Rieck, Katharina and Seitz, Peter and Stöger, Herwig and Welzig, Elvira},
  journal      = {VÖB Mitteilungen},
  number       = {3},
  pages        = {580 -- 607},
  publisher    = {Verein Österreichischer Bibliothekare},
  title        = {{Arbeitsgruppe „Nationale Strategie“ des Open Access Network Austria OANA}},
  doi          = {10.5281/zenodo.33178},
  volume       = {68},
  year         = {2015},
}

@misc{9719,
  abstract     = {Parasitism creates selection for resistance mechanisms in host populations and is hypothesized to promote increased host evolvability. However, the influence of these traits on host evolution when parasites are no longer present is unclear. We used experimental evolution and whole-genome sequencing of Escherichia coli to determine the effects of past and present exposure to parasitic viruses (phages) on the spread of mutator alleles, resistance, and bacterial competitive fitness. We found that mutator alleles spread rapidly during adaptation to any of four different phage species, and this pattern was even more pronounced with multiple phages present simultaneously. However, hypermutability did not detectably accelerate adaptation in the absence of phages and recovery of fitness costs associated with resistance. Several lineages evolved phage resistance through elevated mucoidy, and during subsequent evolution in phage-free conditions they rapidly reverted to nonmucoid, phage-susceptible phenotypes. Genome sequencing revealed that this phenotypic reversion was achieved by additional genetic changes rather than by genotypic reversion of the initial resistance mutations. Insertion sequence (IS) elements played a key role in both the acquisition of resistance and adaptation in the absence of parasites; unlike single nucleotide polymorphisms, IS insertions were not more frequent in mutator lineages. Our results provide a genetic explanation for rapid reversion of mucoidy, a phenotype observed in other bacterial species including human pathogens. Moreover, this demonstrates that the types of genetic change underlying adaptation to fitness costs, and consequently the impact of evolvability mechanisms such as increased point-mutation rates, depend critically on the mechanism of resistance.},
  author       = {Wielgoss, Sébastien and Bergmiller, Tobias and Bischofberger, Anna M. and Hall, Alex R.},
  publisher    = {Dryad},
  title        = {{Data from: Adaptation to parasites and costs of parasite resistance in mutator and non-mutator bacteria}},
  doi          = {10.5061/dryad.cj910},
  year         = {2015},
}

@inproceedings{1610,
  abstract     = {The edit distance between two words w1, w2 is the minimal number of word operations (letter insertions, deletions, and substitutions) necessary to transform w1 to w2. The edit distance generalizes to languages L1,L2, where the edit distance is the minimal number k such that for every word from L1 there exists a word in L2 with edit distance at most k. We study the edit distance computation problem between pushdown automata and their subclasses. The problem of computing edit distance to pushdown automata is undecidable, and in practice, the interesting question is to compute the edit distance from a pushdown automaton (the implementation, a standard model for programs with recursion) to a regular language (the specification). In this work, we present a complete picture of decidability and complexity for deciding whether, for a given threshold k, the edit distance from a pushdown automaton to a finite automaton is at most k.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A and Ibsen-Jensen, Rasmus and Otop, Jan},
  booktitle    = {42nd International Colloquium on Automata, Languages, and Programming},
  isbn         = {978-3-662-47665-9},
  location     = {Kyoto, Japan},
  number       = {Part II},
  pages        = {121 -- 133},
  publisher    = {Springer Nature},
  title        = {{Edit distance for pushdown automata}},
  doi          = {10.1007/978-3-662-47666-6_10},
  volume       = {9135},
  year         = {2015},
}

@misc{5438,
  abstract     = {The edit distance between two words w1, w2 is the minimal number of word operations (letter insertions, deletions, and substitutions) necessary to transform w1 to w2. The edit distance generalizes to languages L1, L2, where the edit distance is the minimal number k such that for every word from L1 there exists a word in L2 with edit distance at most k. We study the edit distance computation problem between pushdown automata and their subclasses.
The problem of computing edit distance to a pushdown automaton is undecidable, and in practice, the interesting question is to compute the edit distance from a pushdown automaton (the implementation, a standard model for programs with recursion) to a regular language (the specification). In this work, we present a complete picture of decidability and complexity for deciding whether, for a given threshold k, the edit distance from a pushdown automaton to a finite automaton is at most k. },
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A and Ibsen-Jensen, Rasmus and Otop, Jan},
  issn         = {2664-1690},
  pages        = {15},
  publisher    = {IST Austria},
  title        = {{Edit distance for pushdown automata}},
  doi          = {10.15479/AT:IST-2015-334-v1-1},
  year         = {2015},
}

@article{1619,
  abstract     = {The emergence of drug resistant pathogens is a serious public health problem. It is a long-standing goal to predict rates of resistance evolution and design optimal treatment strategies accordingly. To this end, it is crucial to reveal the underlying causes of drug-specific differences in the evolutionary dynamics leading to resistance. However, it remains largely unknown why the rates of resistance evolution via spontaneous mutations and the diversity of mutational paths vary substantially between drugs. Here we comprehensively quantify the distribution of fitness effects (DFE) of mutations, a key determinant of evolutionary dynamics, in the presence of eight antibiotics representing the main modes of action. Using precise high-throughput fitness measurements for genome-wide Escherichia coli gene deletion strains, we find that the width of the DFE varies dramatically between antibiotics and, contrary to conventional wisdom, for some drugs the DFE width is lower than in the absence of stress. We show that this previously underappreciated divergence in DFE width among antibiotics is largely caused by their distinct drug-specific dose-response characteristics. Unlike the DFE, the magnitude of the changes in tolerated drug concentration resulting from genome-wide mutations is similar for most drugs but exceptionally small for the antibiotic nitrofurantoin, i.e., mutations generally have considerably smaller resistance effects for nitrofurantoin than for other drugs. A population genetics model predicts that resistance evolution for drugs with this property is severely limited and confined to reproducible mutational paths. We tested this prediction in laboratory evolution experiments using the “morbidostat”, a device for evolving bacteria in well-controlled drug environments. 
Nitrofurantoin resistance indeed evolved extremely slowly via reproducible mutations—an almost paradoxical behavior since this drug causes DNA damage and increases the mutation rate. Overall, we identified novel quantitative characteristics of the evolutionary landscape that provide the conceptual foundation for predicting the dynamics of drug resistance evolution.},
  author       = {Chevereau, Guillaume and Dravecka, Marta and Batur, Tugce and Guvenek, Aysegul and Ayhan, Dilay and Toprak, Erdal and Bollenbach, Mark Tobias},
  journal      = {PLoS Biology},
  number       = {11},
  publisher    = {Public Library of Science},
  title        = {{Quantifying the determinants of evolutionary dynamics leading to drug resistance}},
  doi          = {10.1371/journal.pbio.1002299},
  volume       = {13},
  year         = {2015},
}

@article{11582,
  abstract     = {We have observed a sample of typical z ∼ 1 star-forming galaxies, selected from the HiZELS survey, with the new K-band Multi-Object Spectrograph (KMOS) near-infrared, multi-integral field unit instrument on the Very Large Telescope (VLT), in order to obtain their dynamics and metallicity gradients. The majority of our galaxies have a metallicity gradient consistent with being flat or negative (i.e. higher metallicity cores than outskirts). Intriguingly, we find a trend between metallicity gradient and specific star formation rate (sSFR), such that galaxies with a high sSFR tend to have relatively metal poor centres, a result which is strengthened when combined with data sets from the literature. This result appears to explain the discrepancies reported between different high-redshift studies and varying claims for evolution. From a galaxy evolution perspective, the trend we see would mean that a galaxy's sSFR is governed by the amount of metal-poor gas that can be funnelled into its core, triggered either by merging or through efficient accretion. In fact, merging may play a significant role as it is the starburst galaxies at all epochs, which have the more positive metallicity gradients. Our results may help to explain the origin of the fundamental metallicity relation, in which galaxies at a fixed mass are observed to have lower metallicities at higher star formation rates, especially if the metallicity is measured in an aperture encompassing only the central regions of the galaxy. Finally, we note that this study demonstrates the power of KMOS as an efficient instrument for large-scale resolved galaxy surveys.},
  author       = {Stott, John P. and Sobral, David and Swinbank, A. M. and Smail, Ian and Bower, Richard and Best, Philip N. and Sharples, Ray M. and Geach, James E. and Matthee, Jorryt J},
  issn         = {1365-2966},
  journal      = {Monthly Notices of the Royal Astronomical Society},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: abundances, galaxies: evolution, galaxies: kinematics and dynamics},
  number       = {3},
  pages        = {2695--2704},
  publisher    = {Oxford University Press},
  title        = {{A relationship between specific star formation rate and metallicity gradient within z ∼ 1 galaxies from KMOS-HiZELS}},
  doi          = {10.1093/mnras/stu1343},
  volume       = {443},
  year         = {2014},
}

@article{11583,
  abstract     = {Candidate galaxies at redshifts of z ∼ 10 are now being found in extremely deep surveys, probing very small areas. As a consequence, candidates are very faint, making spectroscopic confirmation practically impossible. In order to overcome such limitations, we have undertaken the CF-HiZELS survey, which is a large-area, medium-depth near-infrared narrow-band survey targeted at z = 8.8 Lyman α (Lyα) emitters (LAEs) and covering 10 deg2 in part of the SSA22 field with the Canada–France–Hawaii Telescope (CFHT). We surveyed a comoving volume of 4.7 × 106 Mpc3 to a Lyα luminosity limit of 6.3 × 1043 erg s−1. We look for Lyα candidates by applying the following criteria: (i) clear emission-line source, (ii) no optical detections (ugriz from CFHTLS), (iii) no visible detection in the optical stack (ugriz > 27), (iv) visually checked reliable NBJ and J detections and (v) J − K ≤ 0. We compute photometric redshifts and remove a significant amount of dusty lower redshift line-emitters at z ∼ 1.4 or 2.2. A total of 13 Lyα candidates were found, of which two are marked as strong candidates, but the majority have very weak constraints on their spectral energy distributions. Using follow-up observations with SINFONI/VLT, we are able to exclude the most robust candidates as LAEs. We put a strong constraint on the Lyα luminosity function at z ∼ 9 and make realistic predictions for ongoing and future surveys. Our results show that surveys for the highest redshift LAEs are susceptible of multiple contaminations and that spectroscopic follow-up is absolutely necessary.},
  author       = {Matthee, Jorryt J and Sobral, David and Swinbank, A. M. and Smail, Ian and Best, P. N. and Kim, Jae-Woo and Franx, Marijn and Milvang-Jensen, Bo and Fynbo, Johan},
  issn         = {1365-2966},
  journal      = {Monthly Notices of the Royal Astronomical Society},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: high-redshift, cosmology: observations, dark ages, reionization, first stars},
  number       = {3},
  pages        = {2375--2387},
  publisher    = {Oxford University Press},
  title        = {{A 10 deg2 Lyman α survey at z=8.8 with spectroscopic follow-up: Strong constraints on the luminosity function and implications for other surveys}},
  doi          = {10.1093/mnras/stu392},
  volume       = {440},
  year         = {2014},
}

@article{11750,
  abstract     = {We report on the magnetic properties of a hot-pressed FeSb2 sample. We find a significant increase in the magnetic susceptibility in our sample when compared with the values previously reported for the polycrystalline sample. The pronounced Curie tail at low temperature corresponds to 0.2% of Fe2+ impurities per mole. In the intrinsic conductivity region, the susceptibility due to free carriers shows thermally activated behavior and is consistent with the data reported for single crystal FeSb2. Based on our data and analysis, while the enhanced magnetic susceptibility in our sample comes mainly from a small amount of unreacted Fe, the contribution from the enhanced carrier density due to lattice and strain defects arising from the ball milling process is also significant. Existence of an unreacted Fe phase is evidenced by small coercivity values of ~100 observed at 50 and 300 K.},
  author       = {Pokharel, Mani and Zhao, Huaizhou and Modic, Kimberly A and Ren, Zhifeng and Opeil, Cyril},
  issn         = {1941-0069},
  journal      = {IEEE Transactions on Magnetics},
  number       = {5},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Magnetic properties of hot-pressed FeSb2}},
  doi          = {10.1109/TMAG.2013.2292607},
  volume       = {50},
  year         = {2014},
}

@inproceedings{11789,
  abstract     = {We study a weighted online bipartite matching problem: G(V 1, V 2, E) is a weighted bipartite graph where V 1 is known beforehand and the vertices of V 2 arrive online. The goal is to match vertices of V 2 as they arrive to vertices in V 1, so as to maximize the sum of weights of edges in the matching. If assignments to V 1 cannot be changed, no bounded competitive ratio is achievable. We study the weighted online matching problem with free disposal, where vertices in V 1 can be assigned multiple times, but only get credit for the maximum weight edge assigned to them over the course of the algorithm. For this problem, the greedy algorithm is 0.5-competitive and determining whether a better competitive ratio is achievable is a well known open problem.

We identify an interesting special case where the edge weights are decomposable as the product of two factors, one corresponding to each end point of the edge. This is analogous to the well studied related machines model in the scheduling literature, although the objective functions are different. For this case of decomposable edge weights, we design a 0.5664 competitive randomized algorithm in complete bipartite graphs. We show that such instances with decomposable weights are non-trivial by establishing upper bounds of 0.618 for deterministic and 0.8 for randomized algorithms.

A tight competitive ratio of 1 − 1/e ≈ 0.632 was known previously for both the 0-1 case as well as the case where edge weights depend on the offline vertices only, but for these cases, reassignments cannot change the quality of the solution. Beating 0.5 for weighted matching where reassignments are necessary has been a significant challenge. We thus give the first online algorithm with competitive ratio strictly better than 0.5 for a non-trivial case of weighted matching with free disposal.},
  author       = {Charikar, Moses and Henzinger, Monika H and Nguyễn, Huy L.},
  booktitle    = {22nd Annual European Symposium on Algorithms},
  isbn         = {978-366244776-5},
  issn         = {0302-9743},
  location     = {Wroclaw, Poland},
  pages        = {260 -- 271},
  publisher    = {Springer Nature},
  title        = {{Online bipartite matching with decomposable weights}},
  doi          = {10.1007/978-3-662-44777-2_22},
  volume       = {8737},
  year         = {2014},
}

@inproceedings{11790,
  abstract     = {Assume a seller wants to sell a digital product in a social network where a buyer’s valuation of the item has positive network externalities from her neighbors that already have the item. The goal of the seller is to maximize his revenue. Previous work on this problem [7] studies the case where clients are offered the item in sequence and have to pay personalized prices. This is highly infeasible in large scale networks such as the Facebook graph: (1) Offering items to the clients one after the other consumes a large amount of time, and (2) price-discrimination of clients could appear unfair to them and result in negative client reaction or could conflict with legal requirements.

We study a setting dealing with these issues. Specifically, the item is offered in parallel to multiple clients at the same time and at the same price. This is called a round. We show that with O(logn) rounds, where n is the number of clients, a constant factor of the revenue with price discrimination can be achieved and that this is not possible with o(logn) rounds. Moreover we show that it is APX-hard to maximize the revenue and we give constant factor approximation algorithms for various further settings of limited price discrimination.},
  author       = {Cigler, Luděk and Dvořák, Wolfgang and Henzinger, Monika H and Starnberger, Martin},
  booktitle    = {10th International Conference of Web and Internet Economics},
  issn         = {0302-9743},
  location     = {Beijing, China},
  pages        = {44 -- 57},
  publisher    = {Springer Nature},
  title        = {{Limiting price discrimination when selling products with positive network externalities}},
  doi          = {10.1007/978-3-319-13129-0_4},
  volume       = {8877},
  year         = {2014},
}

@article{118,
  abstract     = {While the penetration of objects into granular media is well-studied, there is little understanding of how objects settle in gravities, geff, different from that of Earth - a scenario potentially relevant to the geomorphology of planets and asteroids and also to their exploration using man-made devices. By conducting experiments in an accelerating frame, we explore geff ranging from 0.4 g to 1.2 g. Surprisingly, we find that the rest depth is independent of geff and also that the time required for the object to come to rest scales like geff-1/2. With discrete element modeling simulations, we reproduce the experimental results and extend the range of geff to objects as small as asteroids and as large as Jupiter. Our results shed light on the initial stage of sedimentation into dry granular media across a range of celestial bodies and also have implications for the design of man-made, extraterrestrial vehicles and structures. Key Points The settling depth in granular media is independent of gravity The settling time scales like g-1/2 Layering driven by granular sedimentation should be similar.},
  author       = {Altshuler, Ernesto and Torres, H and González-Pita, A and Sánchez-Colina, G and Pérez Penichet, Carlos and Waitukaitis, Scott R and Hidalgo, Raúl},
  journal      = {Geophysical Research Letters},
  number       = {9},
  pages        = {3032 -- 3037},
  publisher    = {Wiley-Blackwell},
  title        = {{Settling into dry granular media in different gravities}},
  doi          = {10.1002/2014GL059229},
  volume       = {41},
  year         = {2014},
}

