@article{20157,
  abstract     = {The focus of much of contemporary research ethics is on compliance with established protocols. However, large data-driven neuroscience research raises new ethical concerns that have no agreed-upon solution. Here we reflect on these challenges and propose better integration of public and patient involvement in this evolving landscape.},
  author       = {Stahl, Bernd and Ogoh, George and Schumann, Gunter and Walter, Henrik and Young, Allan H. and Desrivières, Sylvane and Clinton, Nicholas and Thompson, Paul and Schwalber, Ameli and Liu, Jingyu and Calhoun, Vince and Chang, Xiao and Xia, Yunman and Gong, Yanting and Jia, Tianye and Renner, Paul and Hese, Sören and Giner, Arantxa and Sanchez, Mavi and Alvarez, Elena and Spanlang, Bernhard and Pearmund, Charlie and Athanasiadis, Anastasios Polykarpos and Otten, Lisa and Pitel, Séverine and Petkoski, Spase and Jirsa, Viktor and Schmitt, Karen and Wilbertz, Johannes and Patraskaki, Myrto and Sommer, Peter and Heilmann-Heimbach, Stefanie and Mathey, Carina M. and Miller, Abigail and Claus, Isabelle and Nöthen, Markus M. and Hoffmann, Per and Forstner, Andreas J. and Pastor, Alvaro and Gallego, Jaime and Orosa, Francisco Eiroa and Viapiana, Guillem Feixas and Slater, Mel and Marr, Lena and Novarino, Gaia and Marquand, Andre and Böttger, Sarah Jane and Tschorn, Mira and Rapp, Michael and Ask, Helga and Kjelkenes, Rikka and Fernandez, Sara and Van Der Meer, Dennis and Westlye, Lars T. and Andreassen, Ole A. and Aden, Rieke and Seefried, Beke and Siehl, Sebastian and Nees, Frauke and Neidhart, Maja and Stringaris, Argyris and Schwarz, Emanuel and Holz, Nathalie and Tost, Heike and Meyer-Lindenberg, Andreas and Christmann, Nina and Jansone, Karina and Banaschewski, Tobias and Banks, Jamie and Schepanski, Kerstin and Schütz, Tatjana and Taron, Ulrike Helene and Eils, Roland and Roy, Jean Charles and Lett, Tristram A. and Kebir, Hedi and Polemiti, Elli and Hitchen, Esther and Jentsch, Marcel and Serin, Emin and Bernas, Antoine and Vaidya, Nilakshi and Twardziok, Sven and Ralser, Markus and Heinz, Andreas},
  issn         = {2731-6076},
  journal      = {Nature Mental Health},
  number       = {10},
  publisher    = {Springer Nature},
  title        = {{Rethinking ethics in interdisciplinary and big data-driven neuroscience projects}},
  doi          = {10.1038/s44220-024-00320-3},
  volume       = {2},
  year         = {2024},
}

@inproceedings{18755,
  abstract     = {A universal thresholdizer (UT), constructed from a threshold fully homomorphic encryption by Boneh et al., Crypto 2018, is a general framework for universally thresholdizing many cryptographic schemes. However, their framework is insufficient to construct strongly secure threshold schemes, such as threshold signatures and threshold public-key encryption, etc.

In this paper, we strengthen the security definition for a universal thresholdizer and propose a scheme which satisfies our stronger security notion. Our UT scheme is an improvement of Boneh et al.’s construction at the level of threshold fully homomorphic encryption using a key homomorphic pseudorandom function. We apply our strongly secure UT scheme to construct strongly secure threshold signatures and threshold public-key encryption.},
  author       = {Ebrahimi, Ehsan and Yadav, Anshu},
  booktitle    = {30th International Conference on the Theory and Application of Cryptology and Information Security},
  isbn         = {9789819608904},
  issn         = {1611-3349},
  location     = {Kolkata, India},
  pages        = {207--239},
  publisher    = {Springer Nature},
  series       = {Lecture Notes in Computer Science},
  title        = {{Strongly secure universal thresholdizer}},
  doi          = {10.1007/978-981-96-0891-1_7},
  volume       = {15486},
  year         = {2024},
}

@inproceedings{18756,
  abstract     = {The evasive LWE assumption, proposed by Wee [Eurocrypt’22 Wee] for constructing a lattice-based optimal broadcast encryption, has shown to be a powerful assumption, adopted by subsequent works to construct advanced primitives ranging from ABE variants to obfuscation for null circuits. However, a closer look reveals significant differences among the precise assumption statements involved in different works, leading to the fundamental question of how these assumptions compare to each other. In this work, we initiate a more systematic study on evasive LWE assumptions:
(i) Based on the standard LWE assumption, we construct simple counterexamples against three private-coin evasive LWE variants, used in [Crypto’22 Tsabary, Asiacrypt’22 VWW, Crypto’23 ARYY] respectively, showing that these assumptions are unlikely to hold.

(ii) Based on existing evasive LWE variants and our counterexamples, we propose and define three classes of plausible evasive LWE assumptions, suitably capturing all existing variants for which we are not aware of non-obfuscation-based counterexamples.

(iii) We show that under our assumption formulations, the security proofs of [Asiacrypt’22 VWW] and [Crypto’23 ARYY] can be recovered, and we reason why the security proof of [Crypto’22 Tsabary] is also plausibly repairable using an appropriate evasive LWE assumption.},
  author       = {Brzuska, Chris and Ünal, Akin and Woo, Ivy K. Y.},
  booktitle    = {30th International Conference on the Theory and Application of Cryptology and Information Security},
  isbn         = {9789819608935},
  issn         = {1611-3349},
  location     = {Kolkata, India},
  pages        = {418--449},
  publisher    = {Springer Nature},
  series       = {Lecture Notes in Computer Science},
  title        = {{Evasive LWE assumptions: Definitions, classes, and counterexamples}},
  doi          = {10.1007/978-981-96-0894-2_14},
  volume       = {15487},
  year         = {2024},
}

@article{18757,
  abstract     = {Segmentation is a critical data processing step in many applications of cryo-electron tomography. Downstream analyses, such as subtomogram averaging, are often based on segmentation results, and are thus critically dependent on the availability of open-source software for accurate as well as high-throughput tomogram segmentation. There is a need for more user-friendly, flexible, and comprehensive segmentation software that offers an insightful overview of all steps involved in preparing automated segmentations. Here, we present Ais: a dedicated tomogram segmentation package that is geared towards both high performance and accessibility, available on GitHub. In this report, we demonstrate two common processing steps that can be greatly accelerated with Ais: particle picking for subtomogram averaging, and generating many-feature segmentations of cellular architecture based on in situ tomography data. Featuring comprehensive annotation, segmentation, and rendering functionality, as well as an open repository for trained models at aiscryoet.org, we hope that Ais will help accelerate research and dissemination of data involving cryoET.},
  author       = {Last, Mart G.F. and Abendstein, Leoni and Voortman, Lenard M. and Sharp, Thomas H.},
  issn         = {2050-084X},
  journal      = {eLife},
  publisher    = {eLife Sciences Publications},
  title        = {{Streamlining segmentation of cryo-electron tomography datasets with Ais}},
  doi          = {10.7554/eLife.98552},
  volume       = {13},
  year         = {2024},
}

@inproceedings{18758,
  abstract     = {MaxCut is a classical NP-complete problem and a crucial building block in many combinatorial algorithms. The famous Edwards-Erdős bound states that any connected graph on n vertices with m edges contains a cut of size at least m/2+(n-1)/4. Crowston, Jones and Mnich [Algorithmica, 2015] showed that the MaxCut problem on simple connected graphs admits an FPT algorithm, where the parameter k is the difference between the desired cut size c and the lower bound given by the Edwards-Erdős bound. This was later improved by Etscheid and Mnich [Algorithmica, 2017] to run in parameterized linear time, i.e., f(k)⋅ O(m). We improve upon this result in two ways: Firstly, we extend the algorithm to work also for multigraphs (alternatively, graphs with positive integer weights). Secondly, we change the parameter; instead of the difference to the Edwards-Erdős bound, we use the difference to the Poljak-Turzík bound. The Poljak-Turzík bound states that any weighted graph G has a cut of size at least (w(G))/2+(w_MSF(G))/4, where w(G) denotes the total weight of G, and w_MSF(G) denotes the weight of its minimum spanning forest. In connected simple graphs the two bounds are equivalent, but for multigraphs the Poljak-Turzík bound can be larger and thus yield a smaller parameter k. Our algorithm also runs in parameterized linear time, i.e., f(k)⋅ O(m+n).},
  author       = {Lill, Jonas and Petrova, Kalina H. and Weber, Simon},
  booktitle    = {19th International Symposium on Parameterized and Exact Computation},
  isbn         = {9783959773539},
  issn         = {1868-8969},
  location     = {Egham, United Kingdom},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  series       = {Leibniz International Proceedings in Informatics (LIPIcs)},
  title        = {{Linear-time MaxCut in multigraphs parameterized above the Poljak-Turzík bound}},
  doi          = {10.4230/LIPIcs.IPEC.2024.2},
  volume       = {321},
  year         = {2024},
}

@article{18760,
  abstract     = {With the remarkable sensitivity and resolution of JWST in the infrared, measuring rest-optical kinematics of galaxies at z > 5 has become possible for the first time. This study pilots a new method for measuring galaxy dynamics for highly multiplexed, unbiased samples by combining FRESCO NIRCam grism spectroscopy and JADES medium-band imaging. Here we present one of the first JWST kinematic measurements for a galaxy at z > 5. We find a significant velocity gradient, which, if interpreted as rotation, yields Vrot = 305 ± 70 km s−1, and we hence refer to this galaxy as Twister-z5. With a rest-frame optical effective radius of re = 2.25 kpc, the high rotation velocity in this galaxy is not due to a compact size, as may be expected in the early Universe, but rather to a high total mass, (math formula). This is a factor of roughly 10× higher than the stellar mass within re. We also observe that the radial Hα equivalent width profile and the specific star formation rate map from resolved stellar population modeling are centrally depressed by a factor of ∼1.5 from the center to re. Combined with the morphology of the line-emitting gas in comparison to the continuum, this centrally suppressed star formation is consistent with a star-forming disk surrounding a bulge growing inside out. While large, rapidly rotating disks are common to z ∼ 2, the existence of one after only 1 Gyr of cosmic time, shown for the first time in ionized gas, adds to the growing evidence that some galaxies matured earlier than expected in the history of the Universe.},
  author       = {Nelson, Erica and Brammer, Gabriel and Giménez-Arteaga, Clara and Oesch, Pascal A. and Naidu, Rohan P. and Übler, Hannah and Matharu, Jasleen and Shapley, Alice E. and Whitaker, Katherine E. and Wisnioski, Emily and Förster Schreiber, Natascha M. and Smit, Renske and Van Dokkum, Pieter and Chisholm, John and Endsley, Ryan and Hartley, Abigail I. and Gibson, Justus and Giovinazzo, Emma and Illingworth, Garth and Labbe, Ivo and Maseda, Michael V. and Matthee, Jorryt J and Covelo Paz, Alba and Price, Sedona H. and Reddy, Naveen A. and Shivaei, Irene and Weibel, Andrea and Wuyts, Stijn and Xiao, Mengyuan and Alberts, Stacey and Baker, William M. and Bunker, Andrew J. and Cameron, Alex J. and Charlot, Stephane and Eisenstein, Daniel J. and De Graaff, Anna and Ji, Zhiyuan and Johnson, Benjamin D. and Jones, Gareth C. and Maiolino, Roberto and Robertson, Brant and Sandles, Lester and Suess, Katherine A. and Tacchella, Sandro and Williams, Christina C. and Witstok, Joris},
  issn         = {2041-8213},
  journal      = {Astrophysical Journal Letters},
  number       = {2},
  publisher    = {IOP Publishing},
  title        = {{Ionized gas kinematics with FRESCO: An extended, massive, rapidly rotating galaxy at z = 5.4}},
  doi          = {10.3847/2041-8213/ad7b17},
  volume       = {976},
  year         = {2024},
}

@article{18761,
  abstract     = {Termites, together with cockroaches, belong to the Blattodea. They possess an XX/XY sex determination system which has evolved from an XX/X0 system present in other Blattodean species, such as cockroaches and wood roaches. Little is currently known about the sex chromosomes of termites, their gene content, or their evolution. We here investigate the X chromosome of multiple termite species and compare them with the X chromosome of cockroaches using genomic and transcriptomic data. We find that the X chromosome of the termite Macrotermes natalensis is large and differentiated showing hall marks of sex chromosome evolution such as dosage compensation, while this does not seem to be the case in the other two termite species investigated here where sex chromosomes may be evolutionary younger. Furthermore, the X chromosome in M. natalensis is different from the X chromosome found in the cockroach Blattella germanica indicating that sex chromosome turn-over events may have happened during termite evolution.},
  author       = {Fraser, Roxanne and Moraa, Ruth and Djolai, Annika and Meisenheimer, Nils and Laube, Sophie and Vicoso, Beatriz and Huylmans, Ann K.},
  issn         = {1759-6653},
  journal      = {Genome Biology and Evolution},
  number       = {12},
  publisher    = {Oxford University Press},
  title        = {{Evidence for a novel X chromosome in termites}},
  doi          = {10.1093/gbe/evae265},
  volume       = {16},
  year         = {2024},
}

@article{18762,
  abstract     = {Consider the random variable $\mathrm{Tr}( f_1(W)A_1\dots f_k(W)A_k)$ where $W$ is an $N\times N$ Hermitian Wigner matrix, $k\in\mathbb{N}$, and choose (possibly $N$-dependent) regular functions $f_1,\dots, f_k$ as well as bounded deterministic matrices $A_1,\dots,A_k$. We give a functional central limit theorem showing that the fluctuations around the expectation are Gaussian. Moreover, we determine the limiting covariance structure and give explicit error bounds in terms of the scaling of $f_1,\dots,f_k$ and the number of traceless matrices among $A_1,\dots,A_k$, thus extending the results of [Cipolloni, Erdős, Schröder 2023] to products of arbitrary length $k\geq2$. As an application, we consider the fluctuation of $\mathrm{Tr}(\mathrm{e}^{\mathrm{i} tW}A_1\mathrm{e}^{-\mathrm{i} tW}A_2)$ around its thermal value $\mathrm{Tr}(A_1)\mathrm{Tr}(A_2)$ when $t$ is large and give an explicit formula for the variance.},
  author       = {Reker, Jana},
  issn         = {1083-6489},
  journal      = {Electronic Journal of Probability},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{Multi-point functional central limit theorem for Wigner matrices}},
  doi          = {10.1214/24-EJP1247},
  volume       = {29},
  year         = {2024},
}

@article{18779,
  abstract     = {Unsupervised segmentation in biological and non-biological images is only partially resolved. Segmentation either requires arbitrary thresholds or large teaching datasets. Here, we propose a spatial autocorrelation method based on Local Moran’s I coefficient to differentiate signal, background, and noise in any type of image. The method, originally described for geoinformatics, does not require a predefined intensity threshold or teaching algorithm for image segmentation and allows quantitative comparison of samples obtained in different conditions. It utilizes relative intensity as well as spatial information of neighboring elements to select spatially contiguous groups of pixels. We demonstrate that Moran’s method outperforms threshold-based method in both artificially generated as well as in natural images especially when background noise is substantial. This superior performance can be attributed to the exclusion of false positive pixels resulting from isolated, high intensity pixels in high noise conditions. To test the method’s power in real situation, we used high power confocal images of the somatosensory thalamus immunostained for Kv4.2 and Kv4.3 (A-type) voltage-gated potassium channels in mice. Moran’s method identified high-intensity Kv4.2 and Kv4.3 ion channel clusters in the thalamic neuropil. Spatial distribution of these clusters displayed strong correlation with large sensory axon terminals of subcortical origin. The unique association of the special presynaptic terminals and a postsynaptic voltage-gated ion channel cluster was confirmed with electron microscopy. These data demonstrate that Moran’s method is a rapid, simple image segmentation method optimal for variable and high noise conditions.},
  author       = {Dávid, Csaba and Giber, Kristóf and Szigeti, Margit Katalin and Köllő, Mihály and Nusser, Zoltan and Acsady, Laszlo},
  issn         = {2050-084X},
  journal      = {eLife},
  publisher    = {eLife Sciences Publications},
  title        = {{A novel image segmentation method based on spatial autocorrelation identifies A-type potassium channel clusters in the thalamus}},
  doi          = {10.7554/eLife.89361},
  volume       = {12},
  year         = {2024},
}

@inproceedings{18847,
  abstract     = {Machine Learning and AI have the potential to transform data-driven
scientific discovery, enabling accurate predictions for several scientific
phenomena. As many scientific questions are inherently causal, this paper looks
at the causal inference task of treatment effect estimation, where the outcome
of interest is recorded in high-dimensional observations in a Randomized
Controlled Trial (RCT). Despite being the simplest possible causal setting and
a perfect fit for deep learning, we theoretically find that many common choices
in the literature may lead to biased estimates. To test the practical impact of
these considerations, we recorded ISTAnt, the first real-world benchmark for
causal inference downstream tasks on high-dimensional observations as an RCT
studying how garden ants (Lasius neglectus) respond to microparticles applied
onto their colony members by hygienic grooming. Comparing 6 480 models
fine-tuned from state-of-the-art visual backbones, we find that the sampling
and modeling choices significantly affect the accuracy of the causal estimate,
and that classification accuracy is not a proxy thereof. We further validated
the analysis, repeating it on a synthetically generated visual data set
controlling the causal model. Our results suggest that future benchmarks should
carefully consider real downstream scientific questions, especially causal
ones. Further, we highlight guidelines for representation learning methods to
help answer causal questions in the sciences.},
  author       = {Cadei, Riccardo and Lindorfer, Lukas and Cremer, Sylvia and Schmid, Cordelia and Locatello, Francesco},
  booktitle    = {ICML 2024 Workshop AI4Science},
  publisher    = {Curran Associates},
  title        = {{Smoke and mirrors in causal downstream tasks}},
  volume       = {38},
  year         = {2024},
}

@article{18856,
  abstract     = {This research is aimed to solve the tweet/user geolocation prediction task and provide a flexible methodology for the geo-tagging of textual big data. The suggested approach implements neural networks for natural language processing (NLP) to estimate the location as coordinate pairs (longitude, latitude) and two-dimensional Gaussian Mixture Models (GMMs). The scope of proposed models has been finetuned on a Twitter dataset using pretrained Bidirectional Encoder Representations from Transformers (BERT) as base models. Performance metrics show a median error of fewer than 30 km on a worldwide-level, and fewer than 15 km on the US-level datasets for the models trained and evaluated on text features of tweets' content and metadata context. Our source code and data are available at https://github.com/K4TEL/geo-twitter.git.},
  author       = {Lutsai, Kateryna and Lampert, Christoph},
  issn         = {1948-660X},
  journal      = {Journal of Spatial Information Science},
  number       = {29},
  pages        = {69--99},
  publisher    = {University of Maine},
  title        = {{Predicting the geolocation of tweets using transformer models on customized data}},
  doi          = {10.5311/JOSIS.2024.29.295},
  year         = {2024},
}

@inproceedings{18875,
  abstract     = {Current state-of-the-art methods for differentially private model training are based on matrix factorization techniques. However, these methods suffer from high computational overhead because they require numerically solving a demanding optimization problem to determine an approximately optimal factorization prior to the actual model training. In this work, we present a new matrix factorization approach, BSR, which overcomes this computational bottleneck. By exploiting properties of the standard matrix square root, BSR allows to efficiently handle also large-scale problems. For the key scenario of stochastic gradient descent with momentum and weight decay, we even derive analytical expressions for BSR that render the computational overhead negligible. We prove bounds on the approximation quality that hold both in the centralized and in the federated learning setting. Our numerical experiments demonstrate that models trained using BSR perform on par with the best existing methods, while completely avoiding their computational overhead.},
  author       = {Kalinin, Nikita and Lampert, Christoph},
  booktitle    = {38th Annual Conference on Neural Information Processing Systems},
  issn         = {1049-5258},
  location     = {Vancouver, Canada},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Banded square root matrix factorization for differentially private model training}},
  volume       = {37},
  year         = {2024},
}

@inproceedings{18890,
  abstract     = {Deep Neural Collapse (DNC) refers to the surprisingly rigid structure of the data representations in the final layers of Deep Neural Networks (DNNs). Though the phenomenon has been measured in a variety of settings, its emergence is typically explained via data-agnostic approaches, such as the unconstrained features model. In this work, we introduce a data-dependent setting where DNC forms due to feature learning through the average gradient outer product (AGOP). The AGOP is defined with respect to a learned predictor and is equal to the uncentered covariance matrix of its input-output gradients averaged over the training dataset. The Deep Recursive Feature Machine (Deep RFM) is a method that constructs a neural network by iteratively mapping the data with the AGOP and applying an untrained random feature map. We demonstrate empirically that DNC occurs in Deep RFM across standard settings as a consequence of the projection with the AGOP matrix computed at each layer. Further, we theoretically explain DNC in Deep RFM in an asymptotic setting and as a result of kernel learning. We then provide evidence that this mechanism holds for neural networks more generally. In particular, we show that the right singular vectors and values of the weights can be responsible for the majority of within-class variability collapse for DNNs trained in the feature learning regime. As observed in recent work, this singular structure is highly correlated with that of the AGOP.},
  author       = {Beaglehole, Daniel and Súkeník, Peter and Mondelli, Marco and Belkin, Mikhail},
  booktitle    = {38th Annual Conference on Neural Information Processing Systems},
  issn         = {1049-5258},
  location     = {Vancouver, Canada},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Average gradient outer product as a mechanism for deep neural collapse}},
  volume       = {37},
  year         = {2024},
}

@inproceedings{18891,
  abstract     = {Deep neural networks (DNNs) exhibit a surprising structure in their final layer
known as neural collapse (NC), and a growing body of works has currently investigated the propagation of neural collapse to earlier layers of DNNs – a phenomenon
called deep neural collapse (DNC). However, existing theoretical results are restricted to special cases: linear models, only two layers or binary classification.
In contrast, we focus on non-linear models of arbitrary depth in multi-class classification and reveal a surprising qualitative shift. As soon as we go beyond two
layers or two classes, DNC stops being optimal for the deep unconstrained features
model (DUFM) – the standard theoretical framework for the analysis of collapse.
The main culprit is a low-rank bias of multi-layer regularization schemes: this bias
leads to optimal solutions of even lower rank than the neural collapse. We support
our theoretical findings with experiments on both DUFM and real data, which show
the emergence of the low-rank structure in the solution found by gradient descent.},
  author       = {Súkeník, Peter and Lampert, Christoph and Mondelli, Marco},
  booktitle    = {38th Annual Conference on Neural Information Processing Systems},
  issn         = {1049-5258},
  location     = {Vancouver, Canada},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Neural collapse versus low-rank bias: Is deep neural collapse really optimal?}},
  volume       = {37},
  year         = {2024},
}

@misc{18895,
  abstract     = {ISTAnt is a new ecological dataset for social immunity and represents the first real-world benchmark for causal inference downstream tasks on high-dimensional observations. It analyzes grooming behavior in the ant Lasius neglectus in groups of three worker ants. The workers for the experiment were obtained from their laboratory stock colony, which had been collected from the field in 2022 in the Botanical Garden Jena, Germany. Ant collection and all experimental work were performed in compliance with international, national and institutional regulations and ethical guidelines. For the experiment, the body surface of one of the three ants was treated with a suspension of either of two microparticle types (diameter ~5 µm) to induce grooming by the two nestmates, which were individually color-coded by application of a dot of blue or orange paint, respectively. The three ants were housed in small plastic containers (diameter 28mm, height 30mm) with moistened, plastered ground and the interior walls covered with PTFE (polytetrafluoroethane) to hamper climbing by the ants. Filming occurred in a temperature- and humidity-controlled room at 23°C within a custom-made filming box with controlled lighting and ventilation conditions. We set up nine ant groups at a time (always containing both treatments) and placed them randomly on positions 1-9 marked on the floor in a 3x3 grid, about 3mm from each other. The experiment was performed on two consecutive days. Videos were acquired using a USB camera (FLIR blackfly S BFS-U3-120S4C, Teledyne FLIR) with a high-performance lens (HP Series 25mm Focal Length, Edmund optics 86-572) in OBS studio 29.0.0 at a framerate of 30 FPS and a resolution of 2500x2500 pixels. 
From each original video (105x105 mm), we generated nine individual videos .mkv (each ~32x32 mm, 770x770 pixels) by determining exact coordinates per container from one frame in GIMP 2.10.36 and cropping of the videos with FFmpeg 6.1.1. Annotation was performed over two consecutive days by three observers who had not been involved in the experimental setup or recording and were unaware of the treatment assignments to ensure bias-free behavioral annotation. They annotated the behavior of the ants during video observations, using custom-made software that saves the start and end frames of behaviors marked in a .csv file (see 'annotations' folder). In one of the videos, one of the nestmates' legs got inadvertently stuck to its body surface during the color-coding, interfering with its behavior, so the video was discarded. This left 44 videos from 5 independent setups (n=24 of treatment 1 and n=20 of treatment 2) of 10 minutes each for a total of 792 000 annotated frames (see 'video' folder). For each video, we provide the following information: the number of the set to which it belongs (1-5); the number of the position within the set reflecting the position of the ant group under the camera (1-9), for which we also provide ‘coordinates’ in the 3x3 grid (taking values -1/0/1 for both X and Y axis); treatment (1 or 2); the hour of the day when the recording was started (in 24h CEST); experimental day (A or B); the top left coordinate of the cropping square from the original video (CropX/CropY); the person annotating the video (given as A, B, C); the date of annotation (1: first day, 2: second day) and in which order the videos were annotated by each person, both reflecting a possible training effect of the person (see 'experiments_settings.csv' file).},
  author       = {Cadei, Riccardo and Locatello, Francesco and Cremer, Sylvia M and Lindorfer, Lukas and Schmid, Cordelia},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{ISTAnt}},
  doi          = {10.6084/M9.FIGSHARE.26484934.V2},
  year         = {2024},
}

@article{18897,
  abstract     = {Score-based generative models (SGMs) are powerful tools to sample from complex data distributions. Their underlying idea is to (i) run a forward process for time T1 by adding noise to the data, (ii) estimate its score function, and (iii) use such estimate to run a reverse process. As the reverse process is initialized with the stationary distribution of the forward one, the existing analysis paradigm requires T1→∞. This is however problematic: from a theoretical viewpoint, for a given precision of the score approximation, the convergence guarantee fails as T1 diverges; from a practical viewpoint, a large T1 increases computational costs and leads to error propagation. This paper addresses the issue by considering a version of the popular predictor-corrector scheme: after running the forward process, we first estimate the final distribution via an inexact Langevin dynamics and then revert the process. Our key technical contribution is to provide convergence guarantees which require to run the forward process only for a fixed finite time T1. Our bounds exhibit a mild logarithmic dependence on the input dimension and the subgaussian norm of the target distribution, have minimal assumptions on the data, and require only to control the L2 loss on the score approximation, which is the quantity minimized in practice.},
  author       = {Pedrotti, Francesco and Maas, Jan and Mondelli, Marco},
  issn         = {2835-8856},
  journal      = {Transactions on Machine Learning Research},
  title        = {{Improved convergence of score-based diffusion models via prediction-correction}},
  year         = {2024},
}

@book{18899,
  abstract     = {The flourishing theory of classical optimal transport concerns mass transportation at minimal cost. This book introduces the reader to optimal transport on quantum structures, i.e., optimal transportation between quantum states and related non-commutative concepts of mass transportation. It contains lecture notes on

classical optimal transport and Wasserstein gradient flows
dynamics and quantum optimal transport
quantum couplings and many-body problems
quantum channels and qubits

These notes are based on lectures given by the authors at the "Optimal Transport on Quantum Structures" School held at the Erdös Center in Budapest in the fall of 2022. The lecture notes are complemented by two survey chapters presenting the state of the art in different research areas of non-commutative optimal transport.},
  editor       = {Maas, Jan and Rademacher, Simone Anna Elvira and Titkos, Tamás and Virosztek, Daniel},
  isbn         = {9783031504655},
  issn         = {2947-9460},
  publisher    = {Springer Nature},
  title        = {{Optimal Transport on Quantum Structures}},
  doi          = {10.1007/978-3-031-50466-2},
  volume       = {29},
  year         = {2024},
}

@article{18900,
  abstract     = {We prove that certain closable derivations on the GNS Hilbert space associated with a non-tracial weight on a von Neumann algebra give rise to GNS-symmetric semigroups of contractive completely positive maps on the von Neumann algebra.},
  author       = {Wirth, Melchior},
  issn         = {1687-0247},
  journal      = {International Mathematics Research Notices},
  number       = {14},
  pages        = {10597--10614},
  publisher    = {Oxford University Press},
  title        = {{Modular completely Dirichlet forms as squares of derivations}},
  doi          = {10.1093/imrn/rnae092},
  volume       = {2024},
  year         = {2024},
}

@article{18904,
  abstract     = {The Planetary Transits and Oscillations of stars mission (PLATO) will allow us to measure surface rotation and monitor photometric activity of tens of thousands of main sequence solar-type and subgiant stars. This paper is the first of a series dedicated to the preparation of the analysis of stellar surface rotation and photospheric activity with the near-future PLATO data. We describe in this work the strategy that will be implemented in the PLATO pipeline to measure stellar surface rotation, photometric activity, and long-term modulations. The algorithms are applied on both noise-free and noisy simulations of solar-type stars, which include activity cycles, latitudinal differential rotation, and spot evolution. PLATO simulated systematics are included in the noisy light curves. We show that surface rotation periods can be recovered with confidence for most of the stars with only six months of observations and that the recovery rate of the analysis significantly improves as additional observations are collected. This means that the first PLATO data release will already provide a substantial set of measurements for this quantity, with a significant refinement on their quality as the instrument obtains longer light curves. Measuring the Schwabe-like magnetic activity cycle during the mission will require that the same field be observed over a significant timescale (more than four years). Nevertheless, PLATO will provide a vast and robust sample of solar-type stars with constraints on the activity-cycle length. Such a sample is lacking from previous missions dedicated to space photometry.},
  author       = {Breton, S. N. and Lanza, A. F. and Messina, S. and Pagano, I. and Bugnet, Lisa Annabelle and Corsaro, E. and García, R. A. and Mathur, S. and Santos, A. R. G. and Aigrain, S. and Amard, L. and Brun, A. S. and Degott, L. and Noraz, Q. and Palakkatharappil, D. B. and Panetier, E. and Strugarek, A. and Belkacem, K. and Goupil, M.-J. and Ouazzani, R. M. and Philidet, J. and Renié, C. and Roth, O.},
  issn         = {1432-0746},
  journal      = {Astronomy and Astrophysics},
  publisher    = {EDP Sciences},
  title        = {{Measuring stellar surface rotation and activity with the PLATO mission. I. Strategy and application to simulated light curves}},
  doi          = {10.1051/0004-6361/202449893},
  volume       = {689},
  year         = {2024},
}

@inproceedings{18906,
  abstract     = {Expander decompositions of graphs have significantly advanced the understanding of many classical graph problems and led to numerous fundamental theoretical results. However, their adoption in practice has been hindered due to their inherent intricacies and large hidden factors in their asymptotic running times. Here, we introduce the first practically efficient algorithm for computing expander decompositions and their hierarchies and demonstrate its effectiveness and utility by incorporating it as the core component in a novel solver for the normalized cut graph clustering objective.
Our extensive experiments on a variety of large graphs show that our expander-based algorithm outperforms state-of-the-art solvers for normalized cut with respect to solution quality by a large margin on a variety of graph classes such as citation, e-mail, and social networks or web graphs while remaining competitive in running time.},
  author       = {Hanauer, Kathrin and Henzinger, Monika H. and Münk, Robin and Räcke, Harald and Vötsch, Maximilian},
  booktitle    = {Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining},
  isbn         = {9798400704901},
  location     = {Barcelona, Spain},
  pages        = {1016--1027},
  publisher    = {ACM},
  title        = {{Expander hierarchies for normalized cuts on graphs}},
  doi          = {10.1145/3637528.3671978},
  year         = {2024},
}

