@article{12582,
  abstract     = {Supraglacial debris covers 7% of mountain glacier area globally and generally reduces glacier surface melt. Enhanced energy absorption at ice cliffs and supraglacial ponds scattered across the debris surface leads these features to contribute disproportionately to glacier-wide ablation. However, the degree to which cliffs and ponds actually increase melt rates remains unclear, as these features have only been studied in a detailed manner for selected locations, almost exclusively in High Mountain Asia. In this study we model the surface energy balance for debris-covered ice, ice cliffs, and supraglacial ponds with a set of automatic weather station records representing the global prevalence of debris-covered glacier ice. We generate 5000 random sets of values for physical parameters using probability distributions derived from literature, which we use to investigate relative melt rates and to isolate the melt responses of debris, cliffs and ponds to the site-specific meteorological forcing. Modelled sub-debris melt rates are primarily controlled by debris thickness and thermal conductivity. At a reference thickness of 0.1 m, sub-debris melt rates vary considerably, differing by up to a factor of four between sites, mainly attributable to air temperature differences. We find that melt rates for ice cliffs are consistently 2–3× the melt rate for clean glacier ice, but this melt enhancement decays with increasing clean ice melt rates. Energy absorption at supraglacial ponds is dominated by latent heat exchange and is therefore highly sensitive to wind speed and relative humidity, but is generally less than for clean ice. Our results provide reference melt enhancement factors for melt modelling of debris-covered glacier sites, globally, while highlighting the need for direct measurement of debris-covered glacier surface characteristics, physical parameters, and local meteorological conditions at a variety of sites around the world.},
  author       = {Miles, E S and Steiner, J F and Buri, P and Immerzeel, W W and Pellicciotti, Francesca},
  issn         = {1748-9326},
  journal      = {Environmental Research Letters},
  keywords     = {Public Health, Environmental and Occupational Health, General Environmental Science, Renewable Energy, Sustainability and the Environment},
  number       = {6},
  publisher    = {IOP Publishing},
  title        = {{Controls on the relative melt rates of debris-covered glacier surfaces}},
  doi          = {10.1088/1748-9326/ac6966},
  volume       = {17},
  year         = {2022},
}

@inproceedings{12664,
  abstract     = {Randomized smoothing is currently considered the state-of-the-art method to obtain certifiably robust classifiers. Despite its remarkable performance, the method is associated with various serious problems such as “certified accuracy waterfalls”, certification vs. accuracy trade-off, or even fairness issues. Input-dependent smoothing approaches have been proposed with intention of overcoming these flaws. However, we demonstrate that these methods lack formal guarantees and so the resulting certificates are not justified. We show that in general, the input-dependent smoothing suffers from the curse of dimensionality, forcing the variance function to have low semi-elasticity. On the other hand, we provide a theoretical and practical framework that enables the usage of input-dependent smoothing even in the presence of the curse of dimensionality, under strict restrictions. We present one concrete design of the smoothing variance function and test it on CIFAR10 and MNIST. Our design mitigates some of the problems of classical smoothing and is formally underlined, yet further improvement of the design is still necessary.},
  author       = {Súkeník, Peter and Kuvshinov, Aleksei and Günnemann, Stephan},
  booktitle    = {Proceedings of the 39th International Conference on Machine Learning},
  location     = {Baltimore, MD, United States},
  pages        = {20697--20743},
  publisher    = {ML Research Press},
  title        = {{Intriguing properties of input-dependent randomized smoothing}},
  volume       = {162},
  year         = {2022},
}

@article{12670,
  abstract     = {DNA methylation plays essential homeostatic functions in eukaryotic genomes. In animals, DNA methylation is also developmentally regulated and, in turn, regulates development. In the past two decades, huge research effort has endorsed the understanding that DNA methylation plays a similar role in plant development, especially during sexual reproduction. The power of whole-genome sequencing and cell isolation techniques, as well as bioinformatics tools, have enabled recent studies to reveal dynamic changes in DNA methylation during germline development. Furthermore, the combination of these technological advances with genetics, developmental biology and cell biology tools has revealed functional methylation reprogramming events that control gene and transposon activities in flowering plant germlines. In this review, we discuss the major advances in our knowledge of DNA methylation dynamics during male and female germline development in flowering plants.},
  author       = {He, Shengbo and Feng, Xiaoqi},
  issn         = {1744-7909},
  journal      = {Journal of Integrative Plant Biology},
  keywords     = {Plant Science, General Biochemistry, Genetics and Molecular Biology, Biochemistry},
  number       = {12},
  pages        = {2240--2251},
  publisher    = {Wiley},
  title        = {{DNA methylation dynamics during germline development}},
  doi          = {10.1111/jipb.13422},
  volume       = {64},
  year         = {2022},
}

@article{12671,
  abstract     = {Sperm chromatin is typically transformed by protamines into a compact and transcriptionally inactive state1,2. Sperm cells of flowering plants lack protamines, yet they have small, transcriptionally active nuclei with chromatin condensed through an unknown mechanism3,4. Here we show that a histone variant, H2B.8, mediates sperm chromatin and nuclear condensation in Arabidopsis thaliana. Loss of H2B.8 causes enlarged sperm nuclei with dispersed chromatin, whereas ectopic expression in somatic cells produces smaller nuclei with aggregated chromatin. This result demonstrates that H2B.8 is sufficient for chromatin condensation. H2B.8 aggregates transcriptionally inactive AT-rich chromatin into phase-separated condensates, which facilitates nuclear compaction without reducing transcription. Reciprocal crosses show that mutation of h2b.8 reduces male transmission, which suggests that H2B.8-mediated sperm compaction is important for fertility. Altogether, our results reveal a new mechanism of nuclear compaction through global aggregation of unexpressed chromatin. We propose that H2B.8 is an evolutionary innovation of flowering plants that achieves nuclear condensation compatible with active transcription.},
  author       = {Buttress, Toby and He, Shengbo and Wang, Liang and Zhou, Shaoli and Saalbach, Gerhard and Vickers, Martin and Li, Guohong and Li, Pilong and Feng, Xiaoqi},
  issn         = {1476-4687},
  journal      = {Nature},
  number       = {7936},
  pages        = {614--622},
  publisher    = {Springer Nature},
  title        = {{Histone H2B.8 compacts flowering plant sperm through chromatin phase separation}},
  doi          = {10.1038/s41586-022-05386-6},
  volume       = {611},
  year         = {2022},
}

@unpublished{12677,
  abstract     = {In modern sample-driven Prophet Inequality, an adversary chooses a sequence of n items with values v1,v2,…,vn to be presented to a decision maker (DM). The process follows in two phases. In the first phase (sampling phase), some items, possibly selected at random, are revealed to the DM, but she can never accept them. In the second phase, the DM is presented with the other items in a random order and online fashion. For each item, she must make an irrevocable decision to either accept the item and stop the process or reject the item forever and proceed to the next item. The goal of the DM is to maximize the expected value as compared to a Prophet (or offline algorithm) that has access to all information. In this setting, the sampling phase has no cost and is not part of the optimization process. However, in many scenarios, the samples are obtained as part of the decision-making process.
We model this aspect as a two-phase Prophet Inequality where an adversary chooses a sequence of 2n items with values v1,v2,…,v2n and the items are randomly ordered. Finally, there are two phases of the Prophet Inequality problem with the first n-items and the rest of the items, respectively. We show that some basic algorithms achieve a ratio of at most 0.450. We present an algorithm that achieves a ratio of at least 0.495. Finally, we show that for every algorithm the ratio it can achieve is at most 0.502. Hence our algorithm is near-optimal.},
  author       = {Chatterjee, Krishnendu and Mohammadi, Mona and Saona Urmeneta, Raimundo J},
  note         = {arXiv preprint},
  eprint       = {2209.14368},
  eprinttype   = {arXiv},
  title        = {{Repeated prophet inequality with near-optimal bounds}},
  doi          = {10.48550/ARXIV.2209.14368},
  year         = {2022},
}

@article{12684,
  abstract     = {Given a place  ω  of a global function field  K  over a finite field, with associated affine function ring  Rω  and completion  Kω , the aim of this paper is to give an effective joint equidistribution result for renormalized primitive lattice points  (a,b)∈Rω2  in the plane  Kω2 , and for renormalized solutions to the gcd equation  ax+by=1 . The main tools are techniques of Gorodnik and Nevo for counting lattice points in well-rounded families of subsets. This gives a sharper analog in positive characteristic of a result of Nevo and the first author for the equidistribution of the primitive lattice points in  Z2 .},
  author       = {Horesh, Tal and Paulin, Frédéric},
  issn         = {2118-8572},
  journal      = {Journal de Théorie des Nombres de Bordeaux},
  number       = {3},
  pages        = {679--703},
  publisher    = {Université de Bordeaux},
  title        = {{Effective equidistribution of lattice points in positive characteristic}},
  doi          = {10.5802/JTNB.1222},
  volume       = {34},
  year         = {2022},
}

@unpublished{12750,
  abstract     = {Quantum kinetically constrained models have recently attracted significant attention due to their anomalous dynamics and thermalization. In this work, we introduce a hitherto unexplored family of kinetically constrained models featuring a conserved particle number and strong inversion-symmetry breaking due to facilitated hopping. We demonstrate that these models provide a generic example of so-called quantum Hilbert space fragmentation, that is manifested in disconnected sectors in the Hilbert space that are not apparent in the computational basis. Quantum Hilbert space fragmentation leads to an exponential in system size number of eigenstates with exactly zero entanglement entropy across several bipartite cuts. These eigenstates can be probed dynamically using quenches from simple initial product states. In addition, we study the particle spreading under unitary dynamics launched from the domain wall state, and find faster than diffusive dynamics at high particle densities, that crosses over into logarithmically slow relaxation at smaller densities. Using a classically simulable cellular automaton, we reproduce the logarithmic dynamics observed in the quantum case. Our work suggests that particle conserving constrained models with inversion symmetry breaking realize so far unexplored universality classes of dynamics and invite their further theoretical and experimental studies.},
  author       = {Brighi, Pietro and Ljubotina, Marko and Serbyn, Maksym},
  note         = {arXiv preprint},
  eprint       = {2210.15607},
  eprinttype   = {arXiv},
  title        = {{Hilbert space fragmentation and slow dynamics in particle-conserving quantum East models}},
  doi          = {10.48550/arXiv.2210.15607},
  year         = {2022},
}

@inproceedings{12775,
  abstract     = {We consider the problem of approximating the reachability probabilities in Markov decision processes (MDP) with uncountable (continuous) state and action spaces. While there are algorithms that, for special classes of such MDP, provide a sequence of approximations converging to the true value in the limit, our aim is to obtain an algorithm with guarantees on the precision of the approximation.
As this problem is undecidable in general, assumptions on the MDP are necessary. Our main contribution is to identify sufficient assumptions that are as weak as possible, thus approaching the "boundary" of which systems can be correctly and reliably analyzed. To this end, we also argue why each of our assumptions is necessary for algorithms based on processing finitely many observations.
We present two solution variants. The first one provides converging lower bounds under weaker assumptions than typical ones from previous works concerned with guarantees. The second one then utilizes stronger assumptions to additionally provide converging upper bounds. Altogether, we obtain an anytime algorithm, i.e. yielding a sequence of approximants with known and iteratively improving precision, converging to the true value in the limit. Besides, due to the generality of our assumptions, our algorithms are very general templates, readily allowing for various heuristics from literature in contrast to, e.g., a specific discretization algorithm. Our theoretical contribution thus paves the way for future practical improvements without sacrificing correctness guarantees.},
  author       = {Grover, Kush and Kretinsky, Jan and Meggendorfer, Tobias and Weininger, Maximilian},
  booktitle    = {33rd International Conference on Concurrency Theory},
  issn         = {1868-8969},
  location     = {Warsaw, Poland},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Anytime guarantees for reachability in uncountable Markov decision processes}},
  doi          = {10.4230/LIPIcs.CONCUR.2022.11},
  volume       = {243},
  year         = {2022},
}

@article{12776,
  abstract     = {An improved asymptotic formula is established for the number of rational points of bounded height on the split smooth del Pezzo surface of degree 5. The proof uses the five conic bundle structures on the surface.},
  author       = {Browning, Timothy D},
  issn         = {1076-9803},
  journal      = {New York Journal of Mathematics},
  pages        = {1193--1229},
  publisher    = {State University of New York},
  title        = {{Revisiting the Manin–Peyre conjecture for the split del Pezzo surface of degree 5}},
  volume       = {28},
  year         = {2022},
}

@inproceedings{12780,
  abstract     = {The ability to scale out training workloads has been one of the key performance enablers of deep learning. The main scaling approach is data-parallel GPU-based training, which has been boosted by hardware and software support for highly efficient point-to-point communication, and in particular via hardware bandwidth over-provisioning. Overprovisioning comes at a cost: there is an order of magnitude price difference between "cloud-grade" servers with such support, relative to their popular "consumer-grade" counterparts, although single server-grade and consumer-grade GPUs can have similar computational envelopes.

In this paper, we show that the costly hardware overprovisioning approach can be supplanted via algorithmic and system design, and propose a framework called CGX, which provides efficient software support for compressed communication in ML applications, for both multi-GPU single-node training, as well as larger-scale multi-node training. CGX is based on two technical advances: At the system level, it relies on a re-developed communication stack for ML frameworks, which provides flexible, highly-efficient support for compressed communication. At the application level, it provides seamless, parameter-free integration with popular frameworks, so that end-users do not have to modify training recipes, nor significant training code. This is complemented by a layer-wise adaptive compression technique which dynamically balances compression gains with accuracy preservation. CGX integrates with popular ML frameworks, providing up to 3X speedups for multi-GPU nodes based on commodity hardware, and order-of-magnitude improvements in the multi-node setting, with negligible impact on accuracy.},
  author       = {Markov, Ilia and Ramezanikebrya, Hamidreza and Alistarh, Dan-Adrian},
  booktitle    = {Proceedings of the 23rd ACM/IFIP International Middleware Conference},
  isbn         = {9781450393409},
  location     = {Quebec, QC, Canada},
  pages        = {241--254},
  publisher    = {Association for Computing Machinery},
  title        = {{CGX: Adaptive system support for communication-efficient deep learning}},
  doi          = {10.1145/3528535.3565248},
  year         = {2022},
}

@article{12793,
  abstract     = {Let F be a global function field with constant field Fq. Let G be a reductive group over Fq. We establish a variant of Arthur's truncated kernel for G and for its Lie algebra which generalizes Arthur's original construction. We establish a coarse geometric expansion for our variant truncation.
As applications, we consider some existence and uniqueness problems of some cuspidal automorphic representations for the function field of the projective line P1Fq with two points of ramification.},
  author       = {Yu, Hongjie},
  issn         = {1945-5844},
  journal      = {Pacific Journal of Mathematics},
  keywords     = {Arthur–Selberg trace formula, cuspidal automorphic representations, global function fields},
  number       = {1},
  pages        = {193--237},
  publisher    = {Mathematical Sciences Publishers},
  title        = {{A coarse geometric expansion of a variant of Arthur's truncated traces and some applications}},
  doi          = {10.2140/pjm.2022.321.193},
  volume       = {321},
  year         = {2022},
}

@unpublished{12860,
  abstract     = {Memorization of the relation between entities in a dataset can lead to privacy issues when using a trained model for question answering. We introduce Relational Memorization (RM) to understand, quantify and control this phenomenon. While bounding general memorization can have detrimental effects on the performance of a trained model, bounding RM does not prevent effective learning. The difference is most pronounced when the data distribution is long-tailed, with many queries having only few training examples: Impeding general memorization prevents effective learning, while impeding only relational memorization still allows learning general properties of the underlying concepts. We formalize the notion of Relational Privacy (RP) and, inspired by Differential Privacy (DP), we provide a possible definition of Differential Relational Privacy (DrP). These notions can be used to describe and compute bounds on the amount of RM in a trained model. We illustrate Relational Privacy concepts in experiments with large-scale models for Question Answering.},
  author       = {Bombari, Simone and Achille, Alessandro and Wang, Zijian and Wang, Yu-Xiang and Xie, Yusheng and Singh, Kunwar Yashraj and Appalaraju, Srikar and Mahadevan, Vijay and Soatto, Stefano},
  note         = {arXiv preprint},
  eprint       = {2203.16701},
  eprinttype   = {arXiv},
  title        = {{Towards differential relational privacy and its use in question answering}},
  doi          = {10.48550/arXiv.2203.16701},
  year         = {2022},
}

@inproceedings{12894,
  author       = {Schlögl, Alois and Hornoiu, Andrei and Elefante, Stefano and Stadlbauer, Stephan},
  booktitle    = {ASHPC22 - Austrian-Slovenian HPC Meeting 2022},
  isbn         = {978-3-200-08499-5},
  location     = {Grundlsee, Austria},
  pages        = {7},
  publisher    = {EuroCC Austria c/o Universität Wien},
  title        = {{Where is the sweet spot? A procurement story of general purpose compute nodes}},
  doi          = {10.25365/phaidra.337},
  year         = {2022},
}

@article{12923,
  abstract     = {Photoredox-mediated Ni-catalyzed cross-couplings are powerful transformations to form carbon–heteroatom bonds and are generally photocatalyzed by noble metal complexes. Low-cost and easy-to-prepare carbon dots (CDs) are attractive quasi-homogeneous photocatalyst alternatives, but their applicability is limited by their short photoluminescence (PL) lifetimes. By tuning the surface and PL properties of CDs, we designed colloidal CD nano-photocatalysts for a broad range of Ni-mediated cross-couplings between aryl halides and nucleophiles. In particular, a CD decorated with amino groups permitted coupling to a wide range of aryl halides and thiols under mild, base-free conditions. Mechanistic studies suggested dynamic quenching of the CD excited state by the Ni co-catalyst and identified that pyridinium iodide (pyHI), a previously used additive in metallaphotocatalyzed cross-couplings, can also act as a photocatalyst in such transformations.},
  author       = {Zhao, Zhouxiang and Pieber, Bartholomäus and Delbianco, Martina},
  issn         = {2155-5435},
  journal      = {ACS Catalysis},
  keywords     = {Catalysis, General Chemistry},
  number       = {22},
  pages        = {13831--13837},
  publisher    = {American Chemical Society},
  title        = {{Modulating the surface and photophysical properties of carbon dots to access colloidal photocatalysts for cross-couplings}},
  doi          = {10.1021/acscatal.2c04025},
  volume       = {12},
  year         = {2022},
}

@article{12924,
  abstract     = {We demonstrate that several visible-light-mediated carbon−heteroatom cross-coupling reactions can be carried out using a photoactive NiII precatalyst that forms in situ from a nickel salt and a bipyridine ligand decorated with two carbazole groups (Ni(Czbpy)Cl2). The activation of this precatalyst towards cross-coupling reactions follows a hitherto undisclosed mechanism that is different from previously reported light-responsive nickel complexes that undergo metal-to-ligand charge transfer. Theoretical and spectroscopic investigations revealed that irradiation of Ni(Czbpy)Cl2 with visible light causes an initial intraligand charge transfer event that triggers productive catalysis. Ligand polymerization affords a porous, recyclable organic polymer for heterogeneous nickel catalysis of cross-coupling reactions. The heterogeneous catalyst shows stable performance in a packed-bed flow reactor during a week of continuous operation.},
  author       = {Cavedon, Cristian and Gisbertz, Sebastian and Reischauer, Susanne and Vogl, Sarah and Sperlich, Eric and Burke, John H. and Wallick, Rachel F. and Schrottke, Stefanie and Hsu, Wei-Hsin and Anghileri, Lucia and Pfeifer, Yannik and Richter, Noah and Teutloff, Christian and Müller-Werkmeister, Henrike and Cambié, Dario and Seeberger, Peter H. and Vura-Weis, Josh and van der Veen, Renske M. and Thomas, Arne and Pieber, Bartholomäus},
  issn         = {1521-3773},
  journal      = {Angewandte Chemie International Edition},
  keywords     = {General Chemistry, Catalysis},
  number       = {46},
  publisher    = {Wiley},
  title        = {{Intraligand charge transfer enables visible-light-mediated Nickel-catalyzed cross-coupling reactions}},
  doi          = {10.1002/anie.202211433},
  volume       = {61},
  year         = {2022},
}

@article{12938,
  abstract     = {In this work, a feed-forward artificial neural network (FF-ANN) design capable of locating eigensolutions to Schrödinger's equation via self-supervised learning is outlined. Based on the input potential determining the nature of the quantum problem, the presented FF-ANN strategy identifies valid solutions solely by minimizing Schrödinger's equation encoded in a suitably designed global loss function. In addition to benchmark calculations of prototype systems with known analytical solutions, the outlined methodology was also applied to experimentally accessible quantum systems, such as the vibrational states of molecular hydrogen H2 and its isotopologues HD and D2 as well as the torsional tunnel splitting in the phenol molecule. It is shown that in conjunction with the use of SIREN activation functions a high accuracy in the energy eigenvalues and wavefunctions is achieved without the requirement to adjust the implementation to the vastly different range of input potentials, thereby even considering problems under periodic boundary conditions.},
  author       = {Gamper, Jakob and Kluibenschedl, Florian and Weiss, Alexander K. H. and Hofer, Thomas S.},
  issn         = {1463-9076},
  journal      = {Physical Chemistry Chemical Physics},
  keywords     = {Physical and Theoretical Chemistry, General Physics and Astronomy},
  number       = {41},
  pages        = {25191--25202},
  publisher    = {Royal Society of Chemistry},
  title        = {{From vibrational spectroscopy and quantum tunnelling to periodic band structures – a self-supervised, all-purpose neural network approach to general quantum problems}},
  doi          = {10.1039/d2cp03921d},
  volume       = {24},
  year         = {2022},
}

@misc{13064,
  abstract     = {Genetically informed, deep-phenotyped biobanks are an important research resource and it is imperative that the most powerful, versatile, and efficient analysis approaches are used. Here, we apply our recently developed Bayesian grouped mixture of regressions model (GMRM) in the UK and Estonian Biobanks and obtain the highest genomic prediction accuracy reported to date across 21 heritable traits. When compared to other approaches, GMRM accuracy was greater than annotation prediction models run in the LDAK or LDPred-funct software by 15% (SE 7%) and 14% (SE 2%), respectively, and was 18% (SE 3%) greater than a baseline BayesR model without single-nucleotide polymorphism (SNP) markers grouped into minor allele frequency–linkage disequilibrium (MAF-LD) annotation categories. For height, the prediction accuracy R2 was 47% in a UK Biobank holdout sample, which was 76% of the estimated h2SNP. We then extend our GMRM prediction model to provide mixed-linear model association (MLMA) SNP marker estimates for genome-wide association (GWAS) discovery, which increased the independent loci detected to 16,162 in unrelated UK Biobank individuals, compared to 10,550 from BoltLMM and 10,095 from Regenie, a 62 and 65% increase, respectively. The average χ2 value of the leading markers increased by 15.24 (SE 0.41) for every 1% increase in prediction accuracy gained over a baseline BayesR model across the traits. Thus, we show that modeling genetic associations accounting for MAF and LD differences among SNP markers, and incorporating prior knowledge of genomic function, is important for both genomic prediction and discovery in large-scale individual-level studies.},
  author       = {Orliac, Etienne and Trejo Banos, Daniel and Ojavee, Sven and Läll, Kristi and Mägi, Reedik and Visscher, Peter and Robinson, Matthew Richard},
  publisher    = {Dryad},
  title        = {{Improving genome-wide association discovery and genomic prediction accuracy in biobank data}},
  doi          = {10.5061/DRYAD.GTHT76HMZ},
  year         = {2022},
}

@misc{13066,
  abstract     = {Chromosomal inversions have been shown to play a major role in local adaptation by suppressing recombination between alternative arrangements and maintaining beneficial allele combinations. However, so far, their importance relative to the remaining genome remains largely unknown. Understanding the genetic architecture of adaptation requires better estimates of how loci of different effect sizes contribute to phenotypic variation. Here, we used three Swedish islands where the marine snail Littorina saxatilis has repeatedly evolved into two distinct ecotypes along a habitat transition. We estimated the contribution of inversion polymorphisms to phenotypic divergence while controlling for polygenic effects in the remaining genome using a quantitative genetics framework. We confirmed the importance of inversions but showed that contributions of loci outside inversions are of similar magnitude, with variable proportions dependent on the trait and the population. Some inversions showed consistent effects across all sites, whereas others exhibited site-specific effects, indicating that the genomic basis for replicated phenotypic divergence is only partly shared. The contributions of sexual dimorphism as well as environmental factors to phenotypic variation were significant but minor compared to inversions and polygenic background. Overall, this integrated approach provides insight into the multiple mechanisms contributing to parallel phenotypic divergence.},
  author       = {Koch, Eva and Ravinet, Mark and Westram, Anja M and Johannesson, Kerstin and Butlin, Roger},
  publisher    = {Dryad},
  title        = {{Data from: Genetic architecture of repeated phenotypic divergence in Littorina saxatilis ecotype evolution}},
  doi          = {10.5061/DRYAD.M905QFV4B},
  year         = {2022},
}

@misc{13076,
  abstract     = {The source code for replicating experiments presented in the paper.

The implementation of the designed priority schedulers can be found in Galois-2.2.1/include/Galois/WorkList/:
StealingMultiQueue.h is the StealingMultiQueue.
MQOptimized/ contains MQ Optimized variants.

We provide images that contain all the dependencies and datasets. Images can be pulled from npostnikova/mq-based-schedulers repository, or downloaded from Zenodo. See readme for more detail.},
  author       = {Postnikova, Anastasiia and Koval, Nikita and Nadiradze, Giorgi and Alistarh, Dan-Adrian},
  publisher    = {Zenodo},
  title        = {{Multi-queues can be state-of-the-art priority schedulers}},
  doi          = {10.5281/ZENODO.5733408},
  year         = {2022},
}

@inproceedings{13239,
  abstract     = {Brains are thought to engage in predictive learning - learning to predict upcoming stimuli - to construct an internal model of their environment. This is especially notable for spatial navigation, as first described by Tolman’s latent learning tasks. However, predictive learning has also been observed in sensory cortex, in settings unrelated to spatial navigation. Apart from normative frameworks such as active inference or efficient coding, what could be the utility of learning to predict the patterns of occurrence of correlated stimuli? Here we show that prediction, and thereby the construction of an internal model of sequential stimuli, can bootstrap the learning process of a working memory task in a recurrent neural network. We implemented predictive learning alongside working memory match-tasks, and networks emerged to solve the prediction task first by encoding information across time to predict upcoming stimuli, and then eavesdropped on this solution to solve the matching task. Eavesdropping was most beneficial when neural resources were limited. Hence, predictive learning acts as a general neural mechanism to learn to store sensory information that can later be essential for working memory tasks.},
  author       = {Van Der Plas, Thijs L. and Vogels, Tim P and Manohar, Sanjay G.},
  booktitle    = {Proceedings of Machine Learning Research},
  issn         = {2640-3498},
  pages        = {518--531},
  publisher    = {ML Research Press},
  title        = {{Predictive learning enables neural networks to learn complex working memory tasks}},
  volume       = {199},
  year         = {2022},
}

