[{"date_created":"2023-08-09T13:08:04Z","article_number":"abq2811","intvolume":"         8","quality_controlled":"1","language":[{"iso":"eng"}],"publication_identifier":{"eissn":["2375-2548"]},"date_updated":"2023-08-22T07:24:01Z","external_id":{"pmid":["35857523"],"arxiv":["2206.04099"]},"publication":"Science Advances","extern":"1","main_file_link":[{"url":"https://doi.org/10.1126/sciadv.abq2811","open_access":"1"}],"citation":{"chicago":"Svoboda, Vít, Niraghatam Bhargava Ram, Denitsa Rangelova Baykusheva, Daniel Zindel, Max D. J. Waters, Benjamin Spenger, Manuel Ochsner, Holger Herburger, Jürgen Stohner, and Hans Jakob Wörner. “Femtosecond Photoelectron Circular Dichroism of Chemical Reactions.” <i>Science Advances</i>. American Association for the Advancement of Science, 2022. <a href=\"https://doi.org/10.1126/sciadv.abq2811\">https://doi.org/10.1126/sciadv.abq2811</a>.","ama":"Svoboda V, Ram NB, Baykusheva DR, et al. Femtosecond photoelectron circular dichroism of chemical reactions. <i>Science Advances</i>. 2022;8(28). doi:<a href=\"https://doi.org/10.1126/sciadv.abq2811\">10.1126/sciadv.abq2811</a>","ista":"Svoboda V, Ram NB, Baykusheva DR, Zindel D, Waters MDJ, Spenger B, Ochsner M, Herburger H, Stohner J, Wörner HJ. 2022. Femtosecond photoelectron circular dichroism of chemical reactions. Science Advances. 8(28), abq2811.","mla":"Svoboda, Vít, et al. “Femtosecond Photoelectron Circular Dichroism of Chemical Reactions.” <i>Science Advances</i>, vol. 8, no. 28, abq2811, American Association for the Advancement of Science, 2022, doi:<a href=\"https://doi.org/10.1126/sciadv.abq2811\">10.1126/sciadv.abq2811</a>.","apa":"Svoboda, V., Ram, N. B., Baykusheva, D. R., Zindel, D., Waters, M. D. J., Spenger, B., … Wörner, H. J. (2022). Femtosecond photoelectron circular dichroism of chemical reactions. <i>Science Advances</i>. American Association for the Advancement of Science. 
<a href=\"https://doi.org/10.1126/sciadv.abq2811\">https://doi.org/10.1126/sciadv.abq2811</a>","ieee":"V. Svoboda <i>et al.</i>, “Femtosecond photoelectron circular dichroism of chemical reactions,” <i>Science Advances</i>, vol. 8, no. 28. American Association for the Advancement of Science, 2022.","short":"V. Svoboda, N.B. Ram, D.R. Baykusheva, D. Zindel, M.D.J. Waters, B. Spenger, M. Ochsner, H. Herburger, J. Stohner, H.J. Wörner, Science Advances 8 (2022)."},"volume":8,"publisher":"American Association for the Advancement of Science","day":"15","doi":"10.1126/sciadv.abq2811","type":"journal_article","article_type":"original","publication_status":"published","author":[{"last_name":"Svoboda","first_name":"Vít","full_name":"Svoboda, Vít"},{"first_name":"Niraghatam Bhargava","full_name":"Ram, Niraghatam Bhargava","last_name":"Ram"},{"full_name":"Baykusheva, Denitsa Rangelova","id":"71b4d059-2a03-11ee-914d-dfa3beed6530","first_name":"Denitsa Rangelova","last_name":"Baykusheva"},{"first_name":"Daniel","full_name":"Zindel, Daniel","last_name":"Zindel"},{"last_name":"Waters","full_name":"Waters, Max D. J.","first_name":"Max D. J."},{"last_name":"Spenger","first_name":"Benjamin","full_name":"Spenger, Benjamin"},{"first_name":"Manuel","full_name":"Ochsner, Manuel","last_name":"Ochsner"},{"last_name":"Herburger","first_name":"Holger","full_name":"Herburger, Holger"},{"last_name":"Stohner","full_name":"Stohner, Jürgen","first_name":"Jürgen"},{"last_name":"Wörner","full_name":"Wörner, Hans Jakob","first_name":"Hans Jakob"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"abstract":[{"lang":"eng","text":"Understanding the chirality of molecular reaction pathways is essential for a broad range of fundamental and applied sciences. However, the current ability to probe chirality on the time scale of primary processes underlying chemical reactions remains very limited. 
Here, we demonstrate time-resolved photoelectron circular dichroism (TRPECD) with ultrashort circularly polarized vacuum-ultraviolet (VUV) pulses from a tabletop source. We demonstrate the capabilities of VUV-TRPECD by resolving the chirality changes in time during the photodissociation of atomic iodine from two chiral molecules. We identify several general key features of TRPECD, which include the ability to probe dynamical chirality along the complete photochemical reaction path, the sensitivity to the local chirality of the evolving scattering potential, and the influence of electron scattering off dissociating photofragments. Our results are interpreted by comparison with high-level ab-initio calculations of transient PECDs from molecular photoionization calculations. Our experimental and theoretical techniques define a general approach to femtochirality."}],"date_published":"2022-07-15T00:00:00Z","scopus_import":"1","status":"public","arxiv":1,"article_processing_charge":"No","pmid":1,"month":"07","year":"2022","issue":"28","_id":"13992","oa_version":"Published Version","title":"Femtosecond photoelectron circular dichroism of chemical reactions","keyword":["Multidisciplinary"]},{"date_created":"2023-08-09T13:08:15Z","intvolume":"        76","language":[{"iso":"eng"}],"quality_controlled":"1","publication_identifier":{"issn":["0009-4293"],"eissn":["2673-2424"]},"date_updated":"2023-08-22T07:26:39Z","publication":"Chimia","extern":"1","main_file_link":[{"url":"https://doi.org/10.2533/chimia.2022.520","open_access":"1"}],"citation":{"ista":"Gong X, Jordan I, Huppert M, Heck S, Baykusheva DR, Jelovina D, Schild A, Wörner HJ. 2022. Attosecond photoionization dynamics: from molecules over clusters to the liquid phase. Chimia. 76(6), 520–528.","apa":"Gong, X., Jordan, I., Huppert, M., Heck, S., Baykusheva, D. R., Jelovina, D., … Wörner, H. J. (2022). Attosecond photoionization dynamics: from molecules over clusters to the liquid phase. <i>Chimia</i>. 
Swiss Chemical Society. <a href=\"https://doi.org/10.2533/chimia.2022.520\">https://doi.org/10.2533/chimia.2022.520</a>","mla":"Gong, Xiaochun, et al. “Attosecond Photoionization Dynamics: From Molecules over Clusters to the Liquid Phase.” <i>Chimia</i>, vol. 76, no. 6, Swiss Chemical Society, 2022, pp. 520–28, doi:<a href=\"https://doi.org/10.2533/chimia.2022.520\">10.2533/chimia.2022.520</a>.","ieee":"X. Gong <i>et al.</i>, “Attosecond photoionization dynamics: from molecules over clusters to the liquid phase,” <i>Chimia</i>, vol. 76, no. 6. Swiss Chemical Society, pp. 520–528, 2022.","short":"X. Gong, I. Jordan, M. Huppert, S. Heck, D.R. Baykusheva, D. Jelovina, A. Schild, H.J. Wörner, Chimia 76 (2022) 520–528.","chicago":"Gong, Xiaochun, Inga Jordan, Martin Huppert, Saijoscha Heck, Denitsa Rangelova Baykusheva, Denis Jelovina, Axel Schild, and Hans Jakob Wörner. “Attosecond Photoionization Dynamics: From Molecules over Clusters to the Liquid Phase.” <i>Chimia</i>. Swiss Chemical Society, 2022. <a href=\"https://doi.org/10.2533/chimia.2022.520\">https://doi.org/10.2533/chimia.2022.520</a>.","ama":"Gong X, Jordan I, Huppert M, et al. Attosecond photoionization dynamics: from molecules over clusters to the liquid phase. <i>Chimia</i>. 2022;76(6):520-528. 
doi:<a href=\"https://doi.org/10.2533/chimia.2022.520\">10.2533/chimia.2022.520</a>"},"volume":76,"page":"520-528","publisher":"Swiss Chemical Society","doi":"10.2533/chimia.2022.520","day":"29","type":"journal_article","publication_status":"published","article_type":"original","author":[{"first_name":"Xiaochun","full_name":"Gong, Xiaochun","last_name":"Gong"},{"last_name":"Jordan","full_name":"Jordan, Inga","first_name":"Inga"},{"last_name":"Huppert","full_name":"Huppert, Martin","first_name":"Martin"},{"first_name":"Saijoscha","full_name":"Heck, Saijoscha","last_name":"Heck"},{"full_name":"Baykusheva, Denitsa Rangelova","id":"71b4d059-2a03-11ee-914d-dfa3beed6530","first_name":"Denitsa Rangelova","last_name":"Baykusheva"},{"full_name":"Jelovina, Denis","first_name":"Denis","last_name":"Jelovina"},{"full_name":"Schild, Axel","first_name":"Axel","last_name":"Schild"},{"last_name":"Wörner","full_name":"Wörner, Hans Jakob","first_name":"Hans Jakob"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"abstract":[{"text":"Photoionization is a process taking place on attosecond time scales. How its properties evolve from isolated particles to the condensed phase is an open question of both fundamental and practical relevance. Here, we review recent work that has advanced the study of photoionization dynamics from atoms to molecules, clusters and the liquid phase. The first measurements of molecular photoionization delays have revealed the attosecond dynamics of electron emission from a molecular shape resonance and their sensitivity to the molecular potential. Using electron-ion coincidence spectroscopy these measurements have been extended from isolated molecules to clusters. A continuous increase of the delays with the water-cluster size has been observed up to a size of 4-5 molecules, followed by a saturation towards larger clusters. Comparison with calculations has revealed a correlation of the time delay with the spatial extension of the created electron hole. 
Using cylindrical liquid-microjet techniques, these measurements have also been extended to liquid water, revealing a delay relative to isolated water molecules that was very similar to the largest water clusters studied. Detailed modeling based on Monte-Carlo simulations confirmed that these delays are dominated by the contributions of the first two solvation shells, which agrees with the results of the cluster measurements. These combined results open the perspective of experimentally characterizing the delocalization of electronic wave functions in complex systems and studying their evolution on attosecond time scales.","lang":"eng"}],"date_published":"2022-06-29T00:00:00Z","scopus_import":"1","status":"public","article_processing_charge":"No","month":"06","year":"2022","issue":"6","_id":"13993","oa_version":"Published Version","title":"Attosecond photoionization dynamics: from molecules over clusters to the liquid phase","keyword":["General Medicine","General Chemistry"]},{"arxiv":1,"abstract":[{"lang":"eng","text":"Ultrafast lasers are an increasingly important tool to control and stabilize emergent phases in quantum materials. Among a variety of possible excitation protocols, a particularly intriguing route is the direct light engineering of microscopic electronic parameters, such as the electron hopping and the local Coulomb repulsion (Hubbard \r\nU). In this work, we use time-resolved x-ray absorption spectroscopy to demonstrate the light-induced renormalization of the Hubbard U in a cuprate superconductor, La1.905Ba0.095CuO4. We show that intense femtosecond laser pulses induce a substantial redshift of the upper Hubbard band while leaving the Zhang-Rice singlet energy unaffected. By comparing the experimental data to time-dependent spectra of single- and three-band Hubbard models, we assign this effect to an approximately 140-meV reduction of the on-site Coulomb repulsion on the copper sites. 
Our demonstration of a dynamical Hubbard U renormalization in a copper oxide paves the way to a novel strategy for the manipulation of superconductivity and magnetism as well as to the realization of other long-range-ordered phases in light-driven quantum materials."}],"scopus_import":"1","date_published":"2022-01-20T00:00:00Z","status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"article_type":"original","publication_status":"published","author":[{"last_name":"Baykusheva","first_name":"Denitsa Rangelova","id":"71b4d059-2a03-11ee-914d-dfa3beed6530","full_name":"Baykusheva, Denitsa Rangelova"},{"last_name":"Jang","full_name":"Jang, Hoyoung","first_name":"Hoyoung"},{"first_name":"Ali A.","full_name":"Husain, Ali A.","last_name":"Husain"},{"last_name":"Lee","first_name":"Sangjun","full_name":"Lee, Sangjun"},{"last_name":"TenHuisen","full_name":"TenHuisen, Sophia F. R.","first_name":"Sophia F. R."},{"full_name":"Zhou, Preston","first_name":"Preston","last_name":"Zhou"},{"first_name":"Sunwook","full_name":"Park, Sunwook","last_name":"Park"},{"first_name":"Hoon","full_name":"Kim, Hoon","last_name":"Kim"},{"last_name":"Kim","first_name":"Jin-Kwang","full_name":"Kim, Jin-Kwang"},{"last_name":"Kim","full_name":"Kim, Hyeong-Do","first_name":"Hyeong-Do"},{"first_name":"Minseok","full_name":"Kim, Minseok","last_name":"Kim"},{"last_name":"Park","full_name":"Park, Sang-Youn","first_name":"Sang-Youn"},{"full_name":"Abbamonte, Peter","first_name":"Peter","last_name":"Abbamonte"},{"full_name":"Kim, B. J.","first_name":"B. J.","last_name":"Kim"},{"full_name":"Gu, G. D.","first_name":"G. 
D.","last_name":"Gu"},{"full_name":"Wang, Yao","first_name":"Yao","last_name":"Wang"},{"full_name":"Mitrano, Matteo","first_name":"Matteo","last_name":"Mitrano"}],"keyword":["General Physics and Astronomy"],"title":"Ultrafast renormalization of the on-site Coulomb repulsion in a cuprate superconductor","issue":"1","_id":"13994","year":"2022","oa_version":"Published Version","month":"01","article_processing_charge":"No","date_updated":"2024-10-14T12:23:26Z","external_id":{"arxiv":["2109.13229"]},"quality_controlled":"1","language":[{"iso":"eng"}],"intvolume":"        12","publication_identifier":{"eissn":["2160-3308"]},"date_created":"2023-08-09T13:08:26Z","article_number":"011013","day":"20","doi":"10.1103/physrevx.12.011013","publisher":"American Physical Society","type":"journal_article","citation":{"ieee":"D. R. Baykusheva <i>et al.</i>, “Ultrafast renormalization of the on-site Coulomb repulsion in a cuprate superconductor,” <i>Physical Review X</i>, vol. 12, no. 1. American Physical Society, 2022.","apa":"Baykusheva, D. R., Jang, H., Husain, A. A., Lee, S., TenHuisen, S. F. R., Zhou, P., … Mitrano, M. (2022). Ultrafast renormalization of the on-site Coulomb repulsion in a cuprate superconductor. <i>Physical Review X</i>. American Physical Society. <a href=\"https://doi.org/10.1103/physrevx.12.011013\">https://doi.org/10.1103/physrevx.12.011013</a>","mla":"Baykusheva, Denitsa Rangelova, et al. “Ultrafast Renormalization of the On-Site Coulomb Repulsion in a Cuprate Superconductor.” <i>Physical Review X</i>, vol. 12, no. 1, 011013, American Physical Society, 2022, doi:<a href=\"https://doi.org/10.1103/physrevx.12.011013\">10.1103/physrevx.12.011013</a>.","ista":"Baykusheva DR, Jang H, Husain AA, Lee S, TenHuisen SFR, Zhou P, Park S, Kim H, Kim J-K, Kim H-D, Kim M, Park S-Y, Abbamonte P, Kim BJ, Gu GD, Wang Y, Mitrano M. 2022. Ultrafast renormalization of the on-site Coulomb repulsion in a cuprate superconductor. Physical Review X. 12(1), 011013.","short":"D.R. 
Baykusheva, H. Jang, A.A. Husain, S. Lee, S.F.R. TenHuisen, P. Zhou, S. Park, H. Kim, J.-K. Kim, H.-D. Kim, M. Kim, S.-Y. Park, P. Abbamonte, B.J. Kim, G.D. Gu, Y. Wang, M. Mitrano, Physical Review X 12 (2022).","chicago":"Baykusheva, Denitsa Rangelova, Hoyoung Jang, Ali A. Husain, Sangjun Lee, Sophia F. R. TenHuisen, Preston Zhou, Sunwook Park, et al. “Ultrafast Renormalization of the On-Site Coulomb Repulsion in a Cuprate Superconductor.” <i>Physical Review X</i>. American Physical Society, 2022. <a href=\"https://doi.org/10.1103/physrevx.12.011013\">https://doi.org/10.1103/physrevx.12.011013</a>.","ama":"Baykusheva DR, Jang H, Husain AA, et al. Ultrafast renormalization of the on-site Coulomb repulsion in a cuprate superconductor. <i>Physical Review X</i>. 2022;12(1). doi:<a href=\"https://doi.org/10.1103/physrevx.12.011013\">10.1103/physrevx.12.011013</a>"},"volume":12,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.1103/PhysRevX.12.011013"}],"extern":"1","publication":"Physical Review X"},{"month":"04","article_processing_charge":"No","alternative_title":["PMLR"],"title":" Faster one-sample stochastic conditional gradient method for composite convex minimization","year":"2022","_id":"14093","oa_version":"Preprint","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"publication_status":"published","author":[{"last_name":"Dresdner","first_name":"Gideon","full_name":"Dresdner, Gideon"},{"first_name":"Maria-Luiza","full_name":"Vladarean, Maria-Luiza","last_name":"Vladarean"},{"full_name":"Rätsch, Gunnar","first_name":"Gunnar","last_name":"Rätsch"},{"first_name":"Francesco","full_name":"Locatello, Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","last_name":"Locatello","orcid":"0000-0002-4850-0683"},{"full_name":"Cevher, Volkan","first_name":"Volkan","last_name":"Cevher"},{"first_name":"Alp","full_name":"Yurtsever, Alp","last_name":"Yurtsever"}],"conference":{"start_date":"2022-03-28","name":"AISTATS: Conference on Artificial Intelligence 
and Statistics","end_date":"2022-03-30","location":"Virtual"},"arxiv":1,"date_published":"2022-04-01T00:00:00Z","abstract":[{"text":" We propose a stochastic conditional gradient method (CGM) for minimizing convex finite-sum objectives formed as a sum of smooth and non-smooth terms. Existing CGM variants for this template either suffer from slow convergence rates, or require carefully increasing the batch size over the course of the algorithm’s execution, which leads to computing full gradients. In contrast, the proposed method, equipped with a stochastic average gradient (SAG) estimator, requires only one sample per iteration. Nevertheless, it guarantees fast convergence rates on par with more sophisticated variance reduction techniques. In applications we put special emphasis on problems with a large number of separable constraints. Such problems are prevalent among semidefinite programming (SDP) formulations arising in machine learning and theoretical computer science. We provide numerical experiments on matrix completion, unsupervised clustering, and sparsest-cut SDPs. ","lang":"eng"}],"scopus_import":"1","status":"public","extern":"1","main_file_link":[{"url":"https://arxiv.org/abs/2202.13212","open_access":"1"}],"publication":"Proceedings of the 25th International Conference on Artificial Intelligence and Statistics","page":"8439-8457","day":"01","publisher":"ML Research Press","type":"conference","citation":{"short":"G. Dresdner, M.-L. Vladarean, G. Rätsch, F. Locatello, V. Cevher, A. Yurtsever, in:, Proceedings of the 25th International Conference on Artificial Intelligence and Statistics, ML Research Press, 2022, pp. 8439–8457.","ieee":"G. Dresdner, M.-L. Vladarean, G. Rätsch, F. Locatello, V. Cevher, and A. Yurtsever, “ Faster one-sample stochastic conditional gradient method for composite convex minimization,” in <i>Proceedings of the 25th International Conference on Artificial Intelligence and Statistics</i>, Virtual, 2022, vol. 151, pp. 
8439–8457.","mla":"Dresdner, Gideon, et al. “ Faster One-Sample Stochastic Conditional Gradient Method for Composite Convex Minimization.” <i>Proceedings of the 25th International Conference on Artificial Intelligence and Statistics</i>, vol. 151, ML Research Press, 2022, pp. 8439–57.","apa":"Dresdner, G., Vladarean, M.-L., Rätsch, G., Locatello, F., Cevher, V., &#38; Yurtsever, A. (2022).  Faster one-sample stochastic conditional gradient method for composite convex minimization. In <i>Proceedings of the 25th International Conference on Artificial Intelligence and Statistics</i> (Vol. 151, pp. 8439–8457). Virtual: ML Research Press.","ista":"Dresdner G, Vladarean M-L, Rätsch G, Locatello F, Cevher V, Yurtsever A. 2022.  Faster one-sample stochastic conditional gradient method for composite convex minimization. Proceedings of the 25th International Conference on Artificial Intelligence and Statistics. AISTATS: Conference on Artificial Intelligence and Statistics, PMLR, vol. 151, 8439–8457.","ama":"Dresdner G, Vladarean M-L, Rätsch G, Locatello F, Cevher V, Yurtsever A.  Faster one-sample stochastic conditional gradient method for composite convex minimization. In: <i>Proceedings of the 25th International Conference on Artificial Intelligence and Statistics</i>. Vol 151. ML Research Press; 2022:8439-8457.","chicago":"Dresdner, Gideon, Maria-Luiza Vladarean, Gunnar Rätsch, Francesco Locatello, Volkan Cevher, and Alp Yurtsever. “ Faster One-Sample Stochastic Conditional Gradient Method for Composite Convex Minimization.” In <i>Proceedings of the 25th International Conference on Artificial Intelligence and Statistics</i>, 151:8439–57. 
ML Research Press, 2022."},"volume":151,"date_created":"2023-08-21T09:27:43Z","department":[{"_id":"FrLo"}],"date_updated":"2023-09-06T10:28:17Z","external_id":{"arxiv":["2202.13212"]},"intvolume":"       151","language":[{"iso":"eng"}],"quality_controlled":"1","publication_identifier":{"issn":["2640-3498"]}},{"status":"public","scopus_import":"1","date_published":"2022-12-01T00:00:00Z","abstract":[{"lang":"eng","text":"Magnetic fields can drastically change predictions of evolutionary models of massive stars via mass-loss quenching, magnetic braking, and efficient angular momentum transport, which we aim to quantify in this work. We use the MESA software instrument to compute an extensive main-sequence grid of stellar structure and evolution models, as well as isochrones, accounting for the effects attributed to a surface fossil magnetic field. The grid is densely populated in initial mass (3–60 M⊙), surface equatorial magnetic field strength (0–50 kG), and metallicity (representative of the Solar neighbourhood and the Magellanic Clouds). We use two magnetic braking and two chemical mixing schemes and compare the model predictions for slowly rotating, nitrogen-enriched (‘Group 2’) stars with observations in the Large Magellanic Cloud. We quantify a range of initial field strengths that allow for producing Group 2 stars and find that typical values (up to a few kG) lead to solutions. Between the subgrids, we find notable departures in surface abundances and evolutionary paths. In our magnetic models, chemical mixing is always less efficient compared to non-magnetic models due to the rapid spin-down. We identify that quasi-chemically homogeneous main sequence evolution by efficient mixing could be prevented by fossil magnetic fields. 
We recommend comparing this grid of evolutionary models with spectropolarimetric and spectroscopic observations with the goals of (i) revisiting the derived stellar parameters of known magnetic stars, and (ii) observationally constraining the uncertain magnetic braking and chemical mixing schemes."}],"arxiv":1,"author":[{"full_name":"Keszthelyi, Z.","first_name":"Z.","last_name":"Keszthelyi"},{"last_name":"Koter","first_name":"A. de","full_name":"Koter, A. de"},{"id":"d0648d0c-0f64-11ee-a2e0-dd0faa2e4f7d","full_name":"Götberg, Ylva Louise Linsdotter","first_name":"Ylva Louise Linsdotter","last_name":"Götberg","orcid":"0000-0002-6960-6911"},{"first_name":"G.","full_name":"Meynet, G.","last_name":"Meynet"},{"full_name":"Brands, S. A.","first_name":"S. A.","last_name":"Brands"},{"last_name":"Petit","first_name":"V.","full_name":"Petit, V."},{"last_name":"Carrington","full_name":"Carrington, M.","first_name":"M."},{"first_name":"A. David-Uraz","full_name":"A. David-Uraz, A. David-Uraz","last_name":"A. David-Uraz"},{"first_name":"S. T.","full_name":"Geen, S. T.","last_name":"Geen"},{"last_name":"Georgy","full_name":"Georgy, C.","first_name":"C."},{"last_name":"Hirschi","first_name":"R.","full_name":"Hirschi, R."},{"last_name":"Puls","first_name":"J.","full_name":"Puls, J."},{"full_name":"Ramalatswa, K. J.","first_name":"K. J.","last_name":"Ramalatswa"},{"last_name":"Shultz","first_name":"M. E.","full_name":"Shultz, M. E."},{"first_name":"A. ud-Doula","full_name":"A. ud-Doula, A. ud-Doula","last_name":"A. ud-Doula"}],"article_type":"original","publication_status":"published","oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Published Version","year":"2022","_id":"14098","issue":"2","title":"The effects of surface fossil magnetic fields on massive star evolution: IV. 
Grids of models at solar, LMC, and SMC metallicities","article_processing_charge":"No","month":"12","publication_identifier":{"eissn":["1365-2966"],"issn":["0035-8711"]},"intvolume":"       517","quality_controlled":"1","language":[{"iso":"eng"}],"external_id":{"arxiv":["2209.06350"]},"date_updated":"2023-08-22T13:18:34Z","date_created":"2023-08-21T10:11:21Z","volume":517,"citation":{"short":"Z. Keszthelyi, A. de Koter, Y.L.L. Götberg, G. Meynet, S.A. Brands, V. Petit, M. Carrington, A.D.-U. A. David-Uraz, S.T. Geen, C. Georgy, R. Hirschi, J. Puls, K.J. Ramalatswa, M.E. Shultz, A. ud-Doula A. ud-Doula, Monthly Notices of the Royal Astronomical Society 517 (2022) 2028–2055.","ieee":"Z. Keszthelyi <i>et al.</i>, “The effects of surface fossil magnetic fields on massive star evolution: IV. Grids of models at solar, LMC, and SMC metallicities,” <i>Monthly Notices of the Royal Astronomical Society</i>, vol. 517, no. 2. Oxford Academic, pp. 2028–2055, 2022.","ista":"Keszthelyi Z, Koter A de, Götberg YLL, Meynet G, Brands SA, Petit V, Carrington M, A. David-Uraz AD-U, Geen ST, Georgy C, Hirschi R, Puls J, Ramalatswa KJ, Shultz ME, A. ud-Doula A ud-Doula. 2022. The effects of surface fossil magnetic fields on massive star evolution: IV. Grids of models at solar, LMC, and SMC metallicities. Monthly Notices of the Royal Astronomical Society. 517(2), 2028–2055.","apa":"Keszthelyi, Z., Koter, A. de, Götberg, Y. L. L., Meynet, G., Brands, S. A., Petit, V., … A. ud-Doula, A. ud-Doula. (2022). The effects of surface fossil magnetic fields on massive star evolution: IV. Grids of models at solar, LMC, and SMC metallicities. <i>Monthly Notices of the Royal Astronomical Society</i>. Oxford Academic. <a href=\"https://doi.org/10.1093/mnras/stac2598\">https://doi.org/10.1093/mnras/stac2598</a>","mla":"Keszthelyi, Z., et al. “The Effects of Surface Fossil Magnetic Fields on Massive Star Evolution: IV. 
Grids of Models at Solar, LMC, and SMC Metallicities.” <i>Monthly Notices of the Royal Astronomical Society</i>, vol. 517, no. 2, Oxford Academic, 2022, pp. 2028–55, doi:<a href=\"https://doi.org/10.1093/mnras/stac2598\">10.1093/mnras/stac2598</a>.","ama":"Keszthelyi Z, Koter A de, Götberg YLL, et al. The effects of surface fossil magnetic fields on massive star evolution: IV. Grids of models at solar, LMC, and SMC metallicities. <i>Monthly Notices of the Royal Astronomical Society</i>. 2022;517(2):2028-2055. doi:<a href=\"https://doi.org/10.1093/mnras/stac2598\">10.1093/mnras/stac2598</a>","chicago":"Keszthelyi, Z., A. de Koter, Ylva Louise Linsdotter Götberg, G. Meynet, S. A. Brands, V. Petit, M. Carrington, et al. “The Effects of Surface Fossil Magnetic Fields on Massive Star Evolution: IV. Grids of Models at Solar, LMC, and SMC Metallicities.” <i>Monthly Notices of the Royal Astronomical Society</i>. Oxford Academic, 2022. <a href=\"https://doi.org/10.1093/mnras/stac2598\">https://doi.org/10.1093/mnras/stac2598</a>."},"type":"journal_article","page":"2028-2055","doi":"10.1093/mnras/stac2598","publisher":"Oxford Academic","day":"01","publication":"Monthly Notices of the Royal Astronomical Society","extern":"1","main_file_link":[{"open_access":"1","url":"https://doi.org/10.1093/mnras/stac2598"}]},{"status":"public","date_published":"2022-11-14T00:00:00Z","language":[{"iso":"eng"}],"abstract":[{"text":"Magnetism can greatly impact the evolution of stars. In some stars with OBA spectral types there is direct evidence via the Zeeman effect for stable, large-scale magnetospheres, which lead to the spin-down of the stellar surface and reduced mass loss. So far, a comprehensive grid of stellar structure and evolution models accounting for these effects was lacking. For this reason, we computed and studied models with two magnetic braking and two chemical mixing schemes in three metallicity environments with the MESA software instrument. 
We find notable differences between the subgrids, which affects the model predictions and thus the detailed characterisation of stars. We are able to quantify the impact of magnetic fields in terms of preventing quasi-chemically homogeneous evolution and producing slowly-rotating, nitrogen-enriched (\"Group 2\") stars. Our model grid is fully open access and open source.","lang":"eng"}],"external_id":{"arxiv":["2211.07060"]},"arxiv":1,"date_updated":"2023-08-22T13:20:15Z","article_number":"2211.07060","author":[{"last_name":"Keszthelyi","full_name":"Keszthelyi, Z.","first_name":"Z."},{"last_name":"Koter","full_name":"Koter, A. de","first_name":"A. de"},{"orcid":"0000-0002-6960-6911","last_name":"Götberg","first_name":"Ylva Louise Linsdotter","id":"d0648d0c-0f64-11ee-a2e0-dd0faa2e4f7d","full_name":"Götberg, Ylva Louise Linsdotter"},{"first_name":"G.","full_name":"Meynet, G.","last_name":"Meynet"},{"full_name":"Brands, S. A.","first_name":"S. A.","last_name":"Brands"},{"first_name":"V.","full_name":"Petit, V.","last_name":"Petit"},{"first_name":"M.","full_name":"Carrington, M.","last_name":"Carrington"},{"last_name":"A. David-Uraz","full_name":"A. David-Uraz, A. David-Uraz","first_name":"A. David-Uraz"},{"full_name":"Geen, S. T.","first_name":"S. T.","last_name":"Geen"},{"first_name":"C.","full_name":"Georgy, C.","last_name":"Georgy"},{"first_name":"R.","full_name":"Hirschi, R.","last_name":"Hirschi"},{"last_name":"Puls","full_name":"Puls, J.","first_name":"J."},{"full_name":"Ramalatswa, K. J.","first_name":"K. J.","last_name":"Ramalatswa"},{"first_name":"M. E.","full_name":"Shultz, M. E.","last_name":"Shultz"},{"full_name":"A. ud-Doula, A. ud-Doula","first_name":"A. ud-Doula","last_name":"A. ud-Doula"}],"date_created":"2023-08-21T10:11:37Z","publication_status":"submitted","oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Submitted Version","year":"2022","citation":{"apa":"Keszthelyi, Z., Koter, A. de, Götberg, Y. L. L., Meynet, G., Brands, S. 
A., Petit, V., … A. ud-Doula, A. ud-Doula. (n.d.). Spin-down and reduced mass loss in early-type stars with large-scale magnetic fields. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2211.07060\">https://doi.org/10.48550/arXiv.2211.07060</a>","mla":"Keszthelyi, Z., et al. “Spin-down and Reduced Mass Loss in Early-Type Stars with Large-Scale Magnetic Fields.” <i>ArXiv</i>, 2211.07060, doi:<a href=\"https://doi.org/10.48550/arXiv.2211.07060\">10.48550/arXiv.2211.07060</a>.","ista":"Keszthelyi Z, Koter A de, Götberg YLL, Meynet G, Brands SA, Petit V, Carrington M, A. David-Uraz AD-U, Geen ST, Georgy C, Hirschi R, Puls J, Ramalatswa KJ, Shultz ME, A. ud-Doula A ud-Doula. Spin-down and reduced mass loss in early-type stars with large-scale magnetic fields. arXiv, 2211.07060.","ieee":"Z. Keszthelyi <i>et al.</i>, “Spin-down and reduced mass loss in early-type stars with large-scale magnetic fields,” <i>arXiv</i>. .","short":"Z. Keszthelyi, A. de Koter, Y.L.L. Götberg, G. Meynet, S.A. Brands, V. Petit, M. Carrington, A.D.-U. A. David-Uraz, S.T. Geen, C. Georgy, R. Hirschi, J. Puls, K.J. Ramalatswa, M.E. Shultz, A. ud-Doula A. ud-Doula, ArXiv (n.d.).","chicago":"Keszthelyi, Z., A. de Koter, Ylva Louise Linsdotter Götberg, G. Meynet, S. A. Brands, V. Petit, M. Carrington, et al. “Spin-down and Reduced Mass Loss in Early-Type Stars with Large-Scale Magnetic Fields.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2211.07060\">https://doi.org/10.48550/arXiv.2211.07060</a>.","ama":"Keszthelyi Z, Koter A de, Götberg YLL, et al. Spin-down and reduced mass loss in early-type stars with large-scale magnetic fields. <i>arXiv</i>. 
doi:<a href=\"https://doi.org/10.48550/arXiv.2211.07060\">10.48550/arXiv.2211.07060</a>"},"_id":"14099","title":"Spin-down and reduced mass loss in early-type stars with large-scale magnetic fields","type":"preprint","day":"14","doi":"10.48550/arXiv.2211.07060","article_processing_charge":"No","publication":"arXiv","month":"11","extern":"1","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2211.07060","open_access":"1"}]},{"_id":"14106","year":"2022","oa_version":"Preprint","title":"Are two heads the same as one? Identifying disparate treatment in fair neural networks","alternative_title":["Advances in Neural Information Processing Systems"],"article_processing_charge":"No","month":"12","date_published":"2022-12-15T00:00:00Z","abstract":[{"lang":"eng","text":"We show that deep networks trained to satisfy demographic parity often do so\r\nthrough a form of race or gender awareness, and that the more we force a network\r\nto be fair, the more accurately we can recover race or gender from the internal state\r\nof the network. Based on this observation, we investigate an alternative fairness\r\napproach: we add a second classification head to the network to explicitly predict\r\nthe protected attribute (such as race or gender) alongside the original task. After\r\ntraining the two-headed network, we enforce demographic parity by merging the\r\ntwo heads, creating a network with the same architecture as the original network.\r\nWe establish a close relationship between existing approaches and our approach\r\nby showing (1) that the decisions of a fair classifier are well-approximated by our\r\napproach, and (2) that an unfair and optimally accurate classifier can be recovered\r\nfrom a fair classifier and our second head predicting the protected attribute. 
We use\r\nour explicit formulation to argue that the existing fairness approaches, just as ours,\r\ndemonstrate disparate treatment and that they are likely to be unlawful in a wide\r\nrange of scenarios under US law."}],"scopus_import":"1","status":"public","arxiv":1,"conference":{"location":"New Orleans, LA, United States","name":"NeurIPS: Neural Information Processing Systems","end_date":"2022-12-09","start_date":"2022-11-28"},"publication_status":"published","author":[{"last_name":"Lohaus","first_name":"Michael","full_name":"Lohaus, Michael"},{"full_name":"Kleindessner, Matthäus","first_name":"Matthäus","last_name":"Kleindessner"},{"full_name":"Kenthapadi, Krishnaram","first_name":"Krishnaram","last_name":"Kenthapadi"},{"first_name":"Francesco","full_name":"Locatello, Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","last_name":"Locatello","orcid":"0000-0002-4850-0683"},{"first_name":"Chris","full_name":"Russell, Chris","last_name":"Russell"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"citation":{"ama":"Lohaus M, Kleindessner M, Kenthapadi K, Locatello F, Russell C. Are two heads the same as one? Identifying disparate treatment in fair neural networks. In: <i>36th Conference on Neural Information Processing Systems</i>. Vol 35. Neural Information Processing Systems Foundation; 2022:16548-16562.","chicago":"Lohaus, Michael, Matthäus Kleindessner, Krishnaram Kenthapadi, Francesco Locatello, and Chris Russell. “Are Two Heads the Same as One? Identifying Disparate Treatment in Fair Neural Networks.” In <i>36th Conference on Neural Information Processing Systems</i>, 35:16548–62. Neural Information Processing Systems Foundation, 2022.","short":"M. Lohaus, M. Kleindessner, K. Kenthapadi, F. Locatello, C. Russell, in:, 36th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2022, pp. 16548–16562.","apa":"Lohaus, M., Kleindessner, M., Kenthapadi, K., Locatello, F., &#38; Russell, C. (2022). 
Are two heads the same as one? Identifying disparate treatment in fair neural networks. In <i>36th Conference on Neural Information Processing Systems</i> (Vol. 35, pp. 16548–16562). New Orleans, LA, United States: Neural Information Processing Systems Foundation.","mla":"Lohaus, Michael, et al. “Are Two Heads the Same as One? Identifying Disparate Treatment in Fair Neural Networks.” <i>36th Conference on Neural Information Processing Systems</i>, vol. 35, Neural Information Processing Systems Foundation, 2022, pp. 16548–62.","ista":"Lohaus M, Kleindessner M, Kenthapadi K, Locatello F, Russell C. 2022. Are two heads the same as one? Identifying disparate treatment in fair neural networks. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, Advances in Neural Information Processing Systems, vol. 35, 16548–16562.","ieee":"M. Lohaus, M. Kleindessner, K. Kenthapadi, F. Locatello, and C. Russell, “Are two heads the same as one? Identifying disparate treatment in fair neural networks,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States, 2022, vol. 35, pp. 16548–16562."},"volume":35,"day":"15","publisher":"Neural Information Processing Systems Foundation","page":"16548-16562","type":"conference","publication":"36th Conference on Neural Information Processing Systems","main_file_link":[{"url":"https://arxiv.org/abs/2204.04440","open_access":"1"}],"extern":"1","quality_controlled":"1","language":[{"iso":"eng"}],"intvolume":"        35","publication_identifier":{"isbn":["9781713871088"]},"date_updated":"2024-10-14T12:27:01Z","external_id":{"arxiv":["2204.04440"]},"date_created":"2023-08-21T12:12:42Z","department":[{"_id":"FrLo"}]},{"status":"public","abstract":[{"text":"Amodal perception requires inferring the full shape of an object that is partially occluded. 
This task is particularly challenging on two levels: (1) it requires more information than what is contained in the instant retina or imaging sensor, (2) it is difficult to obtain enough well-annotated amodal labels for supervision. To this end, this paper develops a new framework of\r\nSelf-supervised amodal Video object segmentation (SaVos). Our method efficiently leverages the visual information of video temporal sequences to infer the amodal mask of objects. The key intuition is that the occluded part of an object can be explained away if that part is visible in other frames, possibly deformed as long as the deformation can be reasonably learned.\r\nAccordingly, we derive a novel self-supervised learning paradigm that efficiently utilizes the visible object parts as the supervision to guide the training on videos. In addition to learning type prior to complete masks for known types, SaVos also learns the spatiotemporal prior, which is also useful for the amodal task and could generalize to unseen types. The proposed\r\nframework achieves the state-of-the-art performance on the synthetic amodal segmentation benchmark FISHBOWL and the real world benchmark KINS-Video-Car. 
Further, it lends itself well to being transferred to novel distributions using test-time adaptation, outperforming existing models even after the transfer to a new distribution.","lang":"eng"}],"language":[{"iso":"eng"}],"date_published":"2022-10-23T00:00:00Z","arxiv":1,"external_id":{"arxiv":["2210.12733"]},"conference":{"start_date":"2022-11-28","name":"NeurIPS: Neural Information Processing Systems","end_date":"2022-12-01","location":"New Orleans, LA, United States"},"date_updated":"2023-09-11T09:34:17Z","department":[{"_id":"FrLo"}],"author":[{"last_name":"Yao","first_name":"Jian","full_name":"Yao, Jian"},{"last_name":"Hong","first_name":"Yuxin","full_name":"Hong, Yuxin"},{"first_name":"Chiyu","full_name":"Wang, Chiyu","last_name":"Wang"},{"full_name":"Xiao, Tianjun","first_name":"Tianjun","last_name":"Xiao"},{"first_name":"Tong","full_name":"He, Tong","last_name":"He"},{"orcid":"0000-0002-4850-0683","last_name":"Locatello","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco","first_name":"Francesco"},{"last_name":"Wipf","full_name":"Wipf, David","first_name":"David"},{"last_name":"Fu","full_name":"Fu, Yanwei","first_name":"Yanwei"},{"last_name":"Zhang","full_name":"Zhang, Zheng","first_name":"Zheng"}],"publication_status":"published","date_created":"2023-08-21T12:13:25Z","oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","citation":{"mla":"Yao, Jian, et al. “Self-Supervised Amodal Video Object Segmentation.” <i>36th Conference on Neural Information Processing Systems</i>, 2022, doi:<a href=\"https://doi.org/10.48550/arXiv.2210.12733\">10.48550/arXiv.2210.12733</a>.","ista":"Yao J, Hong Y, Wang C, Xiao T, He T, Locatello F, Wipf D, Fu Y, Zhang Z. 2022. Self-supervised amodal video object segmentation. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems.","apa":"Yao, J., Hong, Y., Wang, C., Xiao, T., He, T., Locatello, F., … Zhang, Z. (2022). 
Self-supervised amodal video object segmentation. In <i>36th Conference on Neural Information Processing Systems</i>. New Orleans, LA, United States. <a href=\"https://doi.org/10.48550/arXiv.2210.12733\">https://doi.org/10.48550/arXiv.2210.12733</a>","ieee":"J. Yao <i>et al.</i>, “Self-supervised amodal video object segmentation,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States, 2022.","short":"J. Yao, Y. Hong, C. Wang, T. Xiao, T. He, F. Locatello, D. Wipf, Y. Fu, Z. Zhang, in:, 36th Conference on Neural Information Processing Systems, 2022.","chicago":"Yao, Jian, Yuxin Hong, Chiyu Wang, Tianjun Xiao, Tong He, Francesco Locatello, David Wipf, Yanwei Fu, and Zheng Zhang. “Self-Supervised Amodal Video Object Segmentation.” In <i>36th Conference on Neural Information Processing Systems</i>, 2022. <a href=\"https://doi.org/10.48550/arXiv.2210.12733\">https://doi.org/10.48550/arXiv.2210.12733</a>.","ama":"Yao J, Hong Y, Wang C, et al. Self-supervised amodal video object segmentation. In: <i>36th Conference on Neural Information Processing Systems</i>. ; 2022. doi:<a href=\"https://doi.org/10.48550/arXiv.2210.12733\">10.48550/arXiv.2210.12733</a>"},"_id":"14107","year":"2022","type":"conference","title":"Self-supervised amodal video object segmentation","day":"23","doi":"10.48550/arXiv.2210.12733","article_processing_charge":"No","publication":"36th Conference on Neural Information Processing Systems","month":"10","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2210.12733"}],"extern":"1"},{"citation":{"chicago":"Zietlow, Dominik, Michael Lohaus, Guha Balakrishnan, Matthaus Kleindessner, Francesco Locatello, Bernhard Scholkopf, and Chris Russell. “Leveling down in Computer Vision: Pareto Inefficiencies in Fair Deep Classifiers.” In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 10400–411. Institute of Electrical and Electronics Engineers, 2022. 
<a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">https://doi.org/10.1109/cvpr52688.2022.01016</a>.","ama":"Zietlow D, Lohaus M, Balakrishnan G, et al. Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. In: <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. Institute of Electrical and Electronics Engineers; 2022:10400-10411. doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">10.1109/cvpr52688.2022.01016</a>","apa":"Zietlow, D., Lohaus, M., Balakrishnan, G., Kleindessner, M., Locatello, F., Scholkopf, B., &#38; Russell, C. (2022). Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 10400–10411). New Orleans, LA, United States: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">https://doi.org/10.1109/cvpr52688.2022.01016</a>","ista":"Zietlow D, Lohaus M, Balakrishnan G, Kleindessner M, Locatello F, Scholkopf B, Russell C. 2022. Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Conference on Computer Vision and Pattern Recognition, 10400–10411.","mla":"Zietlow, Dominik, et al. “Leveling down in Computer Vision: Pareto Inefficiencies in Fair Deep Classifiers.” <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Institute of Electrical and Electronics Engineers, 2022, pp. 10400–11, doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">10.1109/cvpr52688.2022.01016</a>.","ieee":"D. Zietlow <i>et al.</i>, “Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers,” in <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, New Orleans, LA, United States, 2022, pp. 10400–10411.","short":"D. Zietlow, M. Lohaus, G. Balakrishnan, M. Kleindessner, F. 
Locatello, B. Scholkopf, C. Russell, in:, 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Institute of Electrical and Electronics Engineers, 2022, pp. 10400–10411."},"type":"conference","day":"01","publisher":"Institute of Electrical and Electronics Engineers","doi":"10.1109/cvpr52688.2022.01016","page":"10400-10411","publication":"2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition","main_file_link":[{"url":"https://arxiv.org/abs/2203.04913","open_access":"1"}],"extern":"1","publication_identifier":{"isbn":["9781665469470"],"issn":["1063-6919"],"eissn":["2575-7075"]},"quality_controlled":"1","language":[{"iso":"eng"}],"external_id":{"arxiv":["2203.04913"]},"date_updated":"2023-09-11T09:19:14Z","department":[{"_id":"FrLo"}],"date_created":"2023-08-21T12:18:00Z","oa_version":"Preprint","_id":"14114","year":"2022","title":"Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers","article_processing_charge":"No","month":"07","status":"public","date_published":"2022-07-01T00:00:00Z","abstract":[{"lang":"eng","text":"Algorithmic fairness is frequently motivated in terms of a trade-off in which overall performance is decreased so as to improve performance on disadvantaged groups where the algorithm would otherwise be less accurate. Contrary to this, we find that applying existing fairness approaches to computer vision improve fairness by degrading the performance of classifiers across all groups (with increased degradation on the best performing groups). Extending the bias-variance decomposition for classification to fairness, we theoretically explain why the majority of fairness methods designed for low capacity models should not be used in settings involving high-capacity models, a scenario common to computer vision. 
We corroborate this analysis with extensive experimental support that shows that many of the fairness heuristics used in computer vision also degrade performance on the most disadvantaged groups. Building on these insights, we propose an adaptive augmentation strategy that, uniquely, of all methods tested, improves performance for the disadvantaged groups."}],"scopus_import":"1","arxiv":1,"conference":{"start_date":"2022-06-18","end_date":"2022-06-24","name":"CVPR: Conference on Computer Vision and Pattern Recognition","location":"New Orleans, LA, United States"},"author":[{"first_name":"Dominik","full_name":"Zietlow, Dominik","last_name":"Zietlow"},{"last_name":"Lohaus","full_name":"Lohaus, Michael","first_name":"Michael"},{"first_name":"Guha","full_name":"Balakrishnan, Guha","last_name":"Balakrishnan"},{"first_name":"Matthaus","full_name":"Kleindessner, Matthaus","last_name":"Kleindessner"},{"first_name":"Francesco","full_name":"Locatello, Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","last_name":"Locatello"},{"last_name":"Scholkopf","first_name":"Bernhard","full_name":"Scholkopf, Bernhard"},{"first_name":"Chris","full_name":"Russell, Chris","last_name":"Russell"}],"publication_status":"published","oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87"},{"year":"2022","_id":"14168","citation":{"short":"N. Rahaman, M. Weiss, F. Locatello, C. Pal, Y. Bengio, B. Schölkopf, L.E. Li, N. Ballas, in:, 36th Conference on Neural Information Processing Systems, 2022.","ieee":"N. Rahaman <i>et al.</i>, “Neural attentive circuits,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, United States, 2022, vol. 35.","ista":"Rahaman N, Weiss M, Locatello F, Pal C, Bengio Y, Schölkopf B, Li LE, Ballas N. 2022. Neural attentive circuits. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems,  Advances in Neural Information Processing Systems, vol. 
35.","mla":"Rahaman, Nasim, et al. “Neural Attentive Circuits.” <i>36th Conference on Neural Information Processing Systems</i>, vol. 35, 2022.","apa":"Rahaman, N., Weiss, M., Locatello, F., Pal, C., Bengio, Y., Schölkopf, B., … Ballas, N. (2022). Neural attentive circuits. In <i>36th Conference on Neural Information Processing Systems</i> (Vol. 35). New Orleans, United States.","ama":"Rahaman N, Weiss M, Locatello F, et al. Neural attentive circuits. In: <i>36th Conference on Neural Information Processing Systems</i>. Vol 35. ; 2022.","chicago":"Rahaman, Nasim, Martin Weiss, Francesco Locatello, Chris Pal, Yoshua Bengio, Bernhard Schölkopf, Li Erran Li, and Nicolas Ballas. “Neural Attentive Circuits.” In <i>36th Conference on Neural Information Processing Systems</i>, Vol. 35, 2022."},"volume":35,"oa_version":"Preprint","day":"14","title":"Neural attentive circuits","type":"conference","publication":"36th Conference on Neural Information Processing Systems","alternative_title":[" Advances in Neural Information Processing Systems"],"article_processing_charge":"No","extern":"1","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2210.08031"}],"month":"10","intvolume":"        35","abstract":[{"text":"Recent work has seen the development of general purpose neural architectures\r\nthat can be trained to perform tasks across diverse data modalities. General\r\npurpose models typically make few assumptions about the underlying\r\ndata-structure and are known to perform well in the large-data regime. At the\r\nsame time, there has been growing interest in modular neural architectures that\r\nrepresent the data using sparsely interacting modules. These models can be more\r\nrobust out-of-distribution, computationally efficient, and capable of\r\nsample-efficient adaptation to new data. 
However, they tend to make\r\ndomain-specific assumptions about the data, and present challenges in how\r\nmodule behavior (i.e., parameterization) and connectivity (i.e., their layout)\r\ncan be jointly learned. In this work, we introduce a general purpose, yet\r\nmodular neural architecture called Neural Attentive Circuits (NACs) that\r\njointly learns the parameterization and a sparse connectivity of neural modules\r\nwithout using domain knowledge. NACs are best understood as the combination of\r\ntwo systems that are jointly trained end-to-end: one that determines the module\r\nconfiguration and the other that executes it on an input. We demonstrate\r\nqualitatively that NACs learn diverse and meaningful module configurations on\r\nthe NLVR2 dataset without additional supervision. Quantitatively, we show that\r\nby incorporating modularity in this way, NACs improve upon a strong non-modular\r\nbaseline in terms of low-shot adaptation on CIFAR and CUBs dataset by about\r\n10%, and OOD robustness on Tiny ImageNet-R by about 2.5%. Further, we find that\r\nNACs can achieve an 8x speedup at inference time while losing less than 3%\r\nperformance. 
Finally, we find NACs to yield competitive results on diverse data\r\nmodalities spanning point-cloud classification, symbolic processing and\r\ntext-classification from ASCII bytes, thereby confirming its general purpose\r\nnature.","lang":"eng"}],"language":[{"iso":"eng"}],"date_published":"2022-10-14T00:00:00Z","status":"public","date_updated":"2023-09-11T09:29:09Z","external_id":{"arxiv":["2210.08031"]},"conference":{"start_date":"2022-11-29","name":"NeurIPS: Neural Information Processing Systems","end_date":"2022-12-01","location":"New Orleans, United States"},"arxiv":1,"date_created":"2023-08-22T13:57:27Z","publication_status":"published","author":[{"last_name":"Rahaman","first_name":"Nasim","full_name":"Rahaman, Nasim"},{"first_name":"Martin","full_name":"Weiss, Martin","last_name":"Weiss"},{"orcid":"0000-0002-4850-0683","last_name":"Locatello","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco","first_name":"Francesco"},{"last_name":"Pal","full_name":"Pal, Chris","first_name":"Chris"},{"last_name":"Bengio","first_name":"Yoshua","full_name":"Bengio, Yoshua"},{"full_name":"Schölkopf, Bernhard","first_name":"Bernhard","last_name":"Schölkopf"},{"last_name":"Li","first_name":"Li Erran","full_name":"Li, Li Erran"},{"full_name":"Ballas, Nicolas","first_name":"Nicolas","last_name":"Ballas"}],"department":[{"_id":"FrLo"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1},{"conference":{"location":"Baltimore, MD, United States","name":"International Conference on Machine Learning","end_date":"2022-07-23","start_date":"2022-07-17"},"arxiv":1,"status":"public","abstract":[{"lang":"eng","text":"The idea behind object-centric representation learning is that natural scenes can better be modeled as compositions of objects and their relations as opposed to distributed representations. 
This inductive bias can be injected into neural networks to potentially improve systematic generalization and performance of downstream tasks in scenes with multiple objects. In this paper, we train state-of-the-art unsupervised models on five common multi-object datasets and evaluate segmentation metrics and downstream object property prediction. In addition, we study generalization and robustness by investigating the settings where either a single object is out of distribution -- e.g., having an unseen color, texture, or shape -- or global properties of the scene are altered -- e.g., by occlusions, cropping, or increasing the number of objects. From our experimental study, we find object-centric representations to be useful for\r\ndownstream tasks and generally robust to most distribution shifts affecting objects. However, when the distribution shift affects the input in a less structured manner, robustness in terms of segmentation and downstream task performance may vary significantly across models and distribution shifts. 
"}],"date_published":"2022-07-22T00:00:00Z","oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"first_name":"Andrea","full_name":"Dittadi, Andrea","last_name":"Dittadi"},{"last_name":"Papa","full_name":"Papa, Samuele","first_name":"Samuele"},{"last_name":"Vita","full_name":"Vita, Michele De","first_name":"Michele De"},{"last_name":"Schölkopf","full_name":"Schölkopf, Bernhard","first_name":"Bernhard"},{"last_name":"Winther","first_name":"Ole","full_name":"Winther, Ole"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco","first_name":"Francesco","orcid":"0000-0002-4850-0683","last_name":"Locatello"}],"publication_status":"submitted","title":"Generalization and robustness implications in object-centric learning","oa_version":"Preprint","year":"2022","_id":"14170","month":"07","article_processing_charge":"No","alternative_title":["PMLR"],"external_id":{"arxiv":["2107.00637"]},"date_updated":"2023-09-11T10:08:14Z","intvolume":"      2022","quality_controlled":"1","language":[{"iso":"eng"}],"department":[{"_id":"FrLo"}],"date_created":"2023-08-22T13:59:55Z","type":"conference","page":"5221-5285","publisher":"ML Research Press","day":"22","volume":2022,"citation":{"short":"A. Dittadi, S. Papa, M.D. Vita, B. Schölkopf, O. Winther, F. Locatello, in:, Proceedings of the 39th International Conference on Machine Learning, ML Research Press, n.d., pp. 5221–5285.","mla":"Dittadi, Andrea, et al. “Generalization and Robustness Implications in Object-Centric Learning.” <i>Proceedings of the 39th International Conference on Machine Learning</i>, vol. 2022, ML Research Press, pp. 5221–85.","apa":"Dittadi, A., Papa, S., Vita, M. D., Schölkopf, B., Winther, O., &#38; Locatello, F. (n.d.). Generalization and robustness implications in object-centric learning. In <i>Proceedings of the 39th International Conference on Machine Learning</i> (Vol. 2022, pp. 5221–5285). 
Baltimore, MD, United States: ML Research Press.","ista":"Dittadi A, Papa S, Vita MD, Schölkopf B, Winther O, Locatello F. Generalization and robustness implications in object-centric learning. Proceedings of the 39th International Conference on Machine Learning. International Conference on Machine Learning, PMLR, vol. 2022, 5221–5285.","ieee":"A. Dittadi, S. Papa, M. D. Vita, B. Schölkopf, O. Winther, and F. Locatello, “Generalization and robustness implications in object-centric learning,” in <i>Proceedings of the 39th International Conference on Machine Learning</i>, Baltimore, MD, United States, vol. 2022, pp. 5221–5285.","ama":"Dittadi A, Papa S, Vita MD, Schölkopf B, Winther O, Locatello F. Generalization and robustness implications in object-centric learning. In: <i>Proceedings of the 39th International Conference on Machine Learning</i>. Vol 2022. ML Research Press; :5221-5285.","chicago":"Dittadi, Andrea, Samuele Papa, Michele De Vita, Bernhard Schölkopf, Ole Winther, and Francesco Locatello. “Generalization and Robustness Implications in Object-Centric Learning.” In <i>Proceedings of the 39th International Conference on Machine Learning</i>, 2022:5221–85. ML Research Press, n.d."},"extern":"1","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2107.00637"}],"publication":"Proceedings of the 39th International Conference on Machine Learning"},{"abstract":[{"lang":"eng","text":"This paper demonstrates how to recover causal graphs from the score of the\r\ndata distribution in non-linear additive (Gaussian) noise models. Using score\r\nmatching algorithms as a building block, we show how to design a new generation\r\nof scalable causal discovery methods. To showcase our approach, we also propose\r\na new efficient method for approximating the score's Jacobian, enabling to\r\nrecover the causal graph. 
Empirically, we find that the new algorithm, called\r\nSCORE, is competitive with state-of-the-art causal discovery methods while\r\nbeing significantly faster."}],"date_published":"2022-07-22T00:00:00Z","status":"public","conference":{"location":"Baltimore, MD, United States","start_date":"2022-07-17","end_date":"2022-07-23","name":"International Conference on Machine Learning"},"arxiv":1,"publication_status":"published","author":[{"last_name":"Rolland","full_name":"Rolland, Paul","first_name":"Paul"},{"last_name":"Cevher","full_name":"Cevher, Volkan","first_name":"Volkan"},{"full_name":"Kleindessner, Matthäus","first_name":"Matthäus","last_name":"Kleindessner"},{"first_name":"Chris","full_name":"Russel, Chris","last_name":"Russel"},{"last_name":"Schölkopf","full_name":"Schölkopf, Bernhard","first_name":"Bernhard"},{"first_name":"Dominik","full_name":"Janzing, Dominik","last_name":"Janzing"},{"first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco","orcid":"0000-0002-4850-0683","last_name":"Locatello"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"year":"2022","_id":"14171","oa_version":"Preprint","title":"Score matching enables causal discovery of nonlinear additive noise  models","alternative_title":["PMLR"],"article_processing_charge":"No","month":"07","intvolume":"       162","language":[{"iso":"eng"}],"quality_controlled":"1","date_updated":"2023-09-11T10:14:20Z","external_id":{"arxiv":["2203.04413"]},"date_created":"2023-08-22T14:00:18Z","department":[{"_id":"FrLo"}],"citation":{"ieee":"P. Rolland <i>et al.</i>, “Score matching enables causal discovery of nonlinear additive noise  models,” in <i>Proceedings of the 39th International Conference on Machine Learning</i>, Baltimore, MD, United States, 2022, vol. 162, pp. 18741–18753.","ista":"Rolland P, Cevher V, Kleindessner M, Russel C, Schölkopf B, Janzing D, Locatello F. 2022. 
Score matching enables causal discovery of nonlinear additive noise  models. Proceedings of the 39th International Conference on Machine Learning. International Conference on Machine Learning, PMLR, vol. 162, 18741–18753.","mla":"Rolland, Paul, et al. “Score Matching Enables Causal Discovery of Nonlinear Additive Noise  Models.” <i>Proceedings of the 39th International Conference on Machine Learning</i>, vol. 162, ML Research Press, 2022, pp. 18741–53.","apa":"Rolland, P., Cevher, V., Kleindessner, M., Russel, C., Schölkopf, B., Janzing, D., &#38; Locatello, F. (2022). Score matching enables causal discovery of nonlinear additive noise  models. In <i>Proceedings of the 39th International Conference on Machine Learning</i> (Vol. 162, pp. 18741–18753). Baltimore, MD, United States: ML Research Press.","short":"P. Rolland, V. Cevher, M. Kleindessner, C. Russel, B. Schölkopf, D. Janzing, F. Locatello, in:, Proceedings of the 39th International Conference on Machine Learning, ML Research Press, 2022, pp. 18741–18753.","chicago":"Rolland, Paul, Volkan Cevher, Matthäus Kleindessner, Chris Russel, Bernhard Schölkopf, Dominik Janzing, and Francesco Locatello. “Score Matching Enables Causal Discovery of Nonlinear Additive Noise  Models.” In <i>Proceedings of the 39th International Conference on Machine Learning</i>, 162:18741–53. ML Research Press, 2022.","ama":"Rolland P, Cevher V, Kleindessner M, et al. Score matching enables causal discovery of nonlinear additive noise  models. In: <i>Proceedings of the 39th International Conference on Machine Learning</i>. Vol 162. 
ML Research Press; 2022:18741-18753."},"volume":162,"page":"18741-18753","day":"22","publisher":"ML Research Press","type":"conference","publication":"Proceedings of the 39th International Conference on Machine Learning","extern":"1","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2203.04413"}]},{"author":[{"full_name":"Schott, Lukas","first_name":"Lukas","last_name":"Schott"},{"last_name":"Kügelgen","full_name":"Kügelgen, Julius von","first_name":"Julius von"},{"last_name":"Träuble","full_name":"Träuble, Frederik","first_name":"Frederik"},{"last_name":"Gehler","first_name":"Peter","full_name":"Gehler, Peter"},{"full_name":"Russell, Chris","first_name":"Chris","last_name":"Russell"},{"first_name":"Matthias","full_name":"Bethge, Matthias","last_name":"Bethge"},{"full_name":"Schölkopf, Bernhard","first_name":"Bernhard","last_name":"Schölkopf"},{"first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683"},{"last_name":"Brendel","full_name":"Brendel, Wieland","first_name":"Wieland"}],"department":[{"_id":"FrLo"}],"publication_status":"published","date_created":"2023-08-22T14:00:50Z","oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","date_published":"2022-04-25T00:00:00Z","abstract":[{"text":"An important component for generalization in machine learning is to uncover underlying latent factors of variation as well as the mechanism through which each factor acts in the world. In this paper, we test whether 17 unsupervised, weakly supervised, and fully supervised representation learning approaches correctly infer the generative factors of variation in simple datasets (dSprites, Shapes3D, MPI3D) from controlled environments, and on our contributed CelebGlow dataset. 
In contrast to prior robustness work that introduces novel factors of variation during test time, such as blur or other (un)structured noise, we here recompose, interpolate, or extrapolate only existing factors of variation from the training data set (e.g., small and medium-sized objects during training and large objects during testing). Models\r\nthat learn the correct mechanism should be able to generalize to this benchmark. In total, we train and test 2000+ models and observe that all of them struggle to learn the underlying mechanism regardless of supervision signal and architectural bias. Moreover, the generalization capabilities of all tested models drop significantly as we move from artificial datasets towards\r\nmore realistic real-world datasets. Despite their inability to identify the correct mechanism, the models are quite modular as their ability to infer other in-distribution factors remains fairly stable, providing only a single factor is out-of-distribution. These results point to an important yet understudied problem of learning mechanistic models of observations that can facilitate\r\ngeneralization.","lang":"eng"}],"language":[{"iso":"eng"}],"quality_controlled":"1","external_id":{"arxiv":["2107.08221"]},"conference":{"location":"Virtual","end_date":"2022-04-29","name":"ICLR: International Conference on Learning Representations","start_date":"2022-04-25"},"arxiv":1,"date_updated":"2023-09-11T09:40:52Z","article_processing_charge":"No","publication":"10th International Conference on Learning Representations","month":"04","extern":"1","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2107.08221"}],"oa_version":"Preprint","year":"2022","citation":{"chicago":"Schott, Lukas, Julius von Kügelgen, Frederik Träuble, Peter Gehler, Chris Russell, Matthias Bethge, Bernhard Schölkopf, Francesco Locatello, and Wieland Brendel. 
“Visual Representation Learning Does Not Generalize Strongly within the  Same Domain.” In <i>10th International Conference on Learning Representations</i>, 2022.","ama":"Schott L, Kügelgen J von, Träuble F, et al. Visual representation learning does not generalize strongly within the  same domain. In: <i>10th International Conference on Learning Representations</i>. ; 2022.","ieee":"L. Schott <i>et al.</i>, “Visual representation learning does not generalize strongly within the  same domain,” in <i>10th International Conference on Learning Representations</i>, Virtual, 2022.","apa":"Schott, L., Kügelgen, J. von, Träuble, F., Gehler, P., Russell, C., Bethge, M., … Brendel, W. (2022). Visual representation learning does not generalize strongly within the  same domain. In <i>10th International Conference on Learning Representations</i>. Virtual.","mla":"Schott, Lukas, et al. “Visual Representation Learning Does Not Generalize Strongly within the  Same Domain.” <i>10th International Conference on Learning Representations</i>, 2022.","ista":"Schott L, Kügelgen J von, Träuble F, Gehler P, Russell C, Bethge M, Schölkopf B, Locatello F, Brendel W. 2022. Visual representation learning does not generalize strongly within the  same domain. 10th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","short":"L. Schott, J. von Kügelgen, F. Träuble, P. Gehler, C. Russell, M. Bethge, B. Schölkopf, F. Locatello, W. Brendel, in:, 10th International Conference on Learning Representations, 2022."},"_id":"14172","title":"Visual representation learning does not generalize strongly within the  same domain","type":"conference","day":"25"},{"citation":{"chicago":"Wenzel, Florian, Andrea Dittadi, Peter Vincent Gehler, Carl-Johann Simon-Gabriel Carl-Johann Simon-Gabriel, Max Horn, Dominik Zietlow, David Kernert, et al. 
“Assaying Out-of-Distribution Generalization in Transfer Learning.” In <i>36th Conference on Neural Information Processing Systems</i>, 35:7181–98. Neural Information Processing Systems Foundation, 2022.","ama":"Wenzel F, Dittadi A, Gehler PV, et al. Assaying out-of-distribution generalization in transfer learning. In: <i>36th Conference on Neural Information Processing Systems</i>. Vol 35. Neural Information Processing Systems Foundation; 2022:7181-7198.","ieee":"F. Wenzel <i>et al.</i>, “Assaying out-of-distribution generalization in transfer learning,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States, 2022, vol. 35, pp. 7181–7198.","ista":"Wenzel F, Dittadi A, Gehler PV, Carl-Johann Simon-Gabriel C-JS-G, Horn M, Zietlow D, Kernert D, Russell C, Brox T, Schiele B, Schölkopf B, Locatello F. 2022. Assaying out-of-distribution generalization in transfer learning. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, Advances in Neural Information Processing Systems, vol. 35, 7181–7198.","apa":"Wenzel, F., Dittadi, A., Gehler, P. V., Carl-Johann Simon-Gabriel, C.-J. S.-G., Horn, M., Zietlow, D., … Locatello, F. (2022). Assaying out-of-distribution generalization in transfer learning. In <i>36th Conference on Neural Information Processing Systems</i> (Vol. 35, pp. 7181–7198). New Orleans, LA, United States: Neural Information Processing Systems Foundation.","mla":"Wenzel, Florian, et al. “Assaying Out-of-Distribution Generalization in Transfer Learning.” <i>36th Conference on Neural Information Processing Systems</i>, vol. 35, Neural Information Processing Systems Foundation, 2022, pp. 7181–98.","short":"F. Wenzel, A. Dittadi, P.V. Gehler, C.-J.S.-G. Carl-Johann Simon-Gabriel, M. Horn, D. Zietlow, D. Kernert, C. Russell, T. Brox, B. Schiele, B. Schölkopf, F. 
Locatello, in:, 36th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2022, pp. 7181–7198."},"volume":35,"publisher":"Neural Information Processing Systems Foundation","day":"15","page":"7181-7198","type":"conference","publication":"36th Conference on Neural Information Processing Systems","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2207.09239"}],"extern":"1","language":[{"iso":"eng"}],"quality_controlled":"1","intvolume":"        35","publication_identifier":{"isbn":["9781713871088"]},"date_updated":"2023-09-06T10:34:43Z","external_id":{"arxiv":["2207.09239"]},"date_created":"2023-08-22T14:01:13Z","department":[{"_id":"FrLo"}],"_id":"14173","year":"2022","oa_version":"Preprint","title":"Assaying out-of-distribution generalization in transfer learning","alternative_title":["Advances in Neural Information Processing Systems"],"article_processing_charge":"No","month":"12","scopus_import":"1","abstract":[{"text":"Since out-of-distribution generalization is a generally ill-posed problem, various proxy targets (e.g., calibration, adversarial robustness, algorithmic corruptions, invariance across shifts) were studied across different research programs resulting in different recommendations. While sharing the same aspirational goal, these approaches have never been tested under the same\r\nexperimental conditions on real data. In this paper, we take a unified view of previous work, highlighting message discrepancies that we address empirically, and providing recommendations on how to measure the robustness of a model and how to improve it. To this end, we collect 172 publicly available dataset pairs for training and out-of-distribution evaluation of accuracy, calibration error, adversarial attacks, environment invariance, and synthetic corruptions. We fine-tune over 31k networks, from nine different architectures in the many- and\r\nfew-shot setting. 
Our findings confirm that in- and out-of-distribution accuracies tend to increase jointly, but show that their relation is largely dataset-dependent, and in general more nuanced and more complex than posited by previous, smaller scale studies.","lang":"eng"}],"date_published":"2022-12-15T00:00:00Z","status":"public","arxiv":1,"conference":{"location":"New Orleans, LA, United States","start_date":"2022-11-28","name":"NeurIPS: Neural Information Processing Systems","end_date":"2022-12-09"},"publication_status":"published","author":[{"full_name":"Wenzel, Florian","first_name":"Florian","last_name":"Wenzel"},{"first_name":"Andrea","full_name":"Dittadi, Andrea","last_name":"Dittadi"},{"last_name":"Gehler","first_name":"Peter Vincent","full_name":"Gehler, Peter Vincent"},{"last_name":"Carl-Johann Simon-Gabriel","full_name":"Carl-Johann Simon-Gabriel, Carl-Johann Simon-Gabriel","first_name":"Carl-Johann Simon-Gabriel"},{"first_name":"Max","full_name":"Horn, Max","last_name":"Horn"},{"last_name":"Zietlow","full_name":"Zietlow, Dominik","first_name":"Dominik"},{"last_name":"Kernert","first_name":"David","full_name":"Kernert, David"},{"full_name":"Russell, Chris","first_name":"Chris","last_name":"Russell"},{"last_name":"Brox","full_name":"Brox, Thomas","first_name":"Thomas"},{"full_name":"Schiele, Bernt","first_name":"Bernt","last_name":"Schiele"},{"full_name":"Schölkopf, Bernhard","first_name":"Bernhard","last_name":"Schölkopf"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco","first_name":"Francesco","orcid":"0000-0002-4850-0683","last_name":"Locatello"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1},{"day":"25","type":"conference","title":"The role of pretrained representations for the OOD generalization of  reinforcement learning agents","citation":{"chicago":"Dittadi, Andrea, Frederik Träuble, Manuel Wüthrich, Felix Widmaier, Peter Gehler, Ole Winther, Francesco Locatello, Olivier Bachem, Bernhard Schölkopf, and Stefan Bauer. 
“The Role of Pretrained Representations for the OOD Generalization of  Reinforcement Learning Agents.” In <i>10th International Conference on Learning Representations</i>, 2022.","ama":"Dittadi A, Träuble F, Wüthrich M, et al. The role of pretrained representations for the OOD generalization of  reinforcement learning agents. In: <i>10th International Conference on Learning Representations</i>. ; 2022.","ieee":"A. Dittadi <i>et al.</i>, “The role of pretrained representations for the OOD generalization of  reinforcement learning agents,” in <i>10th International Conference on Learning Representations</i>, Virtual, 2022.","ista":"Dittadi A, Träuble F, Wüthrich M, Widmaier F, Gehler P, Winther O, Locatello F, Bachem O, Schölkopf B, Bauer S. 2022. The role of pretrained representations for the OOD generalization of  reinforcement learning agents. 10th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","apa":"Dittadi, A., Träuble, F., Wüthrich, M., Widmaier, F., Gehler, P., Winther, O., … Bauer, S. (2022). The role of pretrained representations for the OOD generalization of  reinforcement learning agents. In <i>10th International Conference on Learning Representations</i>. Virtual.","mla":"Dittadi, Andrea, et al. “The Role of Pretrained Representations for the OOD Generalization of  Reinforcement Learning Agents.” <i>10th International Conference on Learning Representations</i>, 2022.","short":"A. Dittadi, F. Träuble, M. Wüthrich, F. Widmaier, P. Gehler, O. Winther, F. Locatello, O. Bachem, B. Schölkopf, S. 
Bauer, in:, 10th International Conference on Learning Representations, 2022."},"_id":"14174","year":"2022","oa_version":"Preprint","main_file_link":[{"open_access":"1","url":" https://doi.org/10.48550/arXiv.2107.05686"}],"extern":"1","month":"04","publication":"10th International Conference on Learning Representations","article_processing_charge":"No","date_updated":"2023-09-11T09:48:36Z","arxiv":1,"external_id":{"arxiv":["2107.05686"]},"conference":{"location":"Virtual","start_date":"2022-04-25","name":"ICLR: International Conference on Learning Representations","end_date":"2022-04-29"},"date_published":"2022-04-25T00:00:00Z","quality_controlled":"1","abstract":[{"text":"Building sample-efficient agents that generalize out-of-distribution (OOD) in real-world settings remains a fundamental unsolved problem on the path towards achieving higher-level cognition. One particularly promising approach is to begin with low-dimensional, pretrained representations of our world, which should facilitate efficient downstream learning and generalization. By training 240 representations and over 10,000 reinforcement learning (RL) policies on a simulated robotic setup, we evaluate to what extent different properties of\r\npretrained VAE-based representations affect the OOD generalization of downstream agents. We observe that many agents are surprisingly robust to realistic distribution shifts, including the challenging sim-to-real case. In addition, we find that the generalization performance of a simple downstream proxy task reliably predicts the generalization performance of our RL agents\r\nunder a wide range of OOD settings. 
Such proxy tasks can thus be used to select pretrained representations that will lead to agents that generalize.","lang":"eng"}],"language":[{"iso":"eng"}],"status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"publication_status":"published","date_created":"2023-08-22T14:02:13Z","department":[{"_id":"FrLo"}],"author":[{"last_name":"Dittadi","first_name":"Andrea","full_name":"Dittadi, Andrea"},{"last_name":"Träuble","first_name":"Frederik","full_name":"Träuble, Frederik"},{"full_name":"Wüthrich, Manuel","first_name":"Manuel","last_name":"Wüthrich"},{"last_name":"Widmaier","full_name":"Widmaier, Felix","first_name":"Felix"},{"last_name":"Gehler","first_name":"Peter","full_name":"Gehler, Peter"},{"last_name":"Winther","first_name":"Ole","full_name":"Winther, Ole"},{"orcid":"0000-0002-4850-0683","last_name":"Locatello","first_name":"Francesco","full_name":"Locatello, Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4"},{"last_name":"Bachem","full_name":"Bachem, Olivier","first_name":"Olivier"},{"full_name":"Schölkopf, Bernhard","first_name":"Bernhard","last_name":"Schölkopf"},{"first_name":"Stefan","full_name":"Bauer, Stefan","last_name":"Bauer"}]},{"date_updated":"2023-09-11T09:52:20Z","conference":{"location":"Virtual","name":"ICLR: International Conference on Learning Representations","end_date":"2022-04-29","start_date":"2022-04-25"},"external_id":{"arxiv":["2110.05304"]},"arxiv":1,"language":[{"iso":"eng"}],"abstract":[{"text":"Predicting the future trajectory of a moving agent can be easy when the past trajectory continues smoothly but is challenging when complex interactions with other agents are involved. Recent deep learning approaches for trajectory prediction show promising performance and partially attribute this to successful reasoning about agent-agent interactions. However, it remains unclear which features such black-box models actually learn to use for making predictions. 
This paper proposes a procedure that quantifies the contributions\r\nof different cues to model performance based on a variant of Shapley values. Applying this procedure to state-of-the-art trajectory prediction methods on standard benchmark datasets shows that they are, in fact, unable to reason about interactions. Instead, the past trajectory of the target is the only feature used for predicting its future. For a task with richer social\r\ninteraction patterns, on the other hand, the tested models do pick up such interactions to a certain extent, as quantified by our feature attribution method. We discuss the limits of the proposed method and its links to causality.","lang":"eng"}],"date_published":"2022-04-25T00:00:00Z","quality_controlled":"1","status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"publication_status":"published","date_created":"2023-08-22T14:02:34Z","author":[{"first_name":"Osama","full_name":"Makansi, Osama","last_name":"Makansi"},{"last_name":"Kügelgen","first_name":"Julius von","full_name":"Kügelgen, Julius von"},{"first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683"},{"last_name":"Gehler","first_name":"Peter","full_name":"Gehler, Peter"},{"last_name":"Janzing","first_name":"Dominik","full_name":"Janzing, Dominik"},{"full_name":"Brox, Thomas","first_name":"Thomas","last_name":"Brox"},{"last_name":"Schölkopf","full_name":"Schölkopf, Bernhard","first_name":"Bernhard"}],"department":[{"_id":"FrLo"}],"day":"25","title":"You mostly walk alone: Analyzing feature attribution in trajectory prediction","type":"conference","year":"2022","citation":{"short":"O. Makansi, J. von Kügelgen, F. Locatello, P. Gehler, D. Janzing, T. Brox, B. Schölkopf, in:, 10th International Conference on Learning Representations, 2022.","mla":"Makansi, Osama, et al. 
“You Mostly Walk Alone: Analyzing Feature Attribution in Trajectory Prediction.” <i>10th International Conference on Learning Representations</i>, 2022.","ista":"Makansi O, Kügelgen J von, Locatello F, Gehler P, Janzing D, Brox T, Schölkopf B. 2022. You mostly walk alone: Analyzing feature attribution in trajectory prediction. 10th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","apa":"Makansi, O., Kügelgen, J. von, Locatello, F., Gehler, P., Janzing, D., Brox, T., &#38; Schölkopf, B. (2022). You mostly walk alone: Analyzing feature attribution in trajectory prediction. In <i>10th International Conference on Learning Representations</i>. Virtual.","ieee":"O. Makansi <i>et al.</i>, “You mostly walk alone: Analyzing feature attribution in trajectory prediction,” in <i>10th International Conference on Learning Representations</i>, Virtual, 2022.","ama":"Makansi O, Kügelgen J von, Locatello F, et al. You mostly walk alone: Analyzing feature attribution in trajectory prediction. In: <i>10th International Conference on Learning Representations</i>. ; 2022.","chicago":"Makansi, Osama, Julius von Kügelgen, Francesco Locatello, Peter Gehler, Dominik Janzing, Thomas Brox, and Bernhard Schölkopf. 
“You Mostly Walk Alone: Analyzing Feature Attribution in Trajectory Prediction.” In <i>10th International Conference on Learning Representations</i>, 2022."},"_id":"14175","oa_version":"Preprint","extern":"1","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2110.05304","open_access":"1"}],"month":"04","publication":"10th International Conference on Learning Representations","article_processing_charge":"No"},{"month":"11","extern":"1","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2211.02348","open_access":"1"}],"article_processing_charge":"No","publication":"36th Conference on Neural Information Processing Systems","title":"A general purpose neural architecture for geospatial systems","type":"conference","day":"04","oa_version":"Preprint","year":"2022","_id":"14215","citation":{"chicago":"Rahaman, Nasim, Martin Weiss, Frederik Träuble, Francesco Locatello, Alexandre Lacoste, Yoshua Bengio, Chris Pal, Li Erran Li, and Bernhard Schölkopf. “A General Purpose Neural Architecture for Geospatial Systems.” In <i>36th Conference on Neural Information Processing Systems</i>, n.d.","ama":"Rahaman N, Weiss M, Träuble F, et al. A general purpose neural architecture for geospatial systems. In: <i>36th Conference on Neural Information Processing Systems</i>.","ista":"Rahaman N, Weiss M, Träuble F, Locatello F, Lacoste A, Bengio Y, Pal C, Li LE, Schölkopf B. A general purpose neural architecture for geospatial systems. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems.","mla":"Rahaman, Nasim, et al. “A General Purpose Neural Architecture for Geospatial Systems.” <i>36th Conference on Neural Information Processing Systems</i>.","apa":"Rahaman, N., Weiss, M., Träuble, F., Locatello, F., Lacoste, A., Bengio, Y., … Schölkopf, B. (n.d.). A general purpose neural architecture for geospatial systems. In <i>36th Conference on Neural Information Processing Systems</i>. New Orleans, LA, United States.","ieee":"N. 
Rahaman <i>et al.</i>, “A general purpose neural architecture for geospatial systems,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States.","short":"N. Rahaman, M. Weiss, F. Träuble, F. Locatello, A. Lacoste, Y. Bengio, C. Pal, L.E. Li, B. Schölkopf, in:, 36th Conference on Neural Information Processing Systems, n.d."},"oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"full_name":"Rahaman, Nasim","first_name":"Nasim","last_name":"Rahaman"},{"last_name":"Weiss","full_name":"Weiss, Martin","first_name":"Martin"},{"first_name":"Frederik","full_name":"Träuble, Frederik","last_name":"Träuble"},{"orcid":"0000-0002-4850-0683","last_name":"Locatello","full_name":"Locatello, Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco"},{"first_name":"Alexandre","full_name":"Lacoste, Alexandre","last_name":"Lacoste"},{"last_name":"Bengio","full_name":"Bengio, Yoshua","first_name":"Yoshua"},{"full_name":"Pal, Chris","first_name":"Chris","last_name":"Pal"},{"last_name":"Li","full_name":"Li, Li Erran","first_name":"Li Erran"},{"first_name":"Bernhard","full_name":"Schölkopf, Bernhard","last_name":"Schölkopf"}],"department":[{"_id":"FrLo"}],"date_created":"2023-08-22T14:21:47Z","publication_status":"submitted","conference":{"location":"New Orleans, LA, United States","name":"NeurIPS: Neural Information Processing Systems","end_date":"2022-12-09","start_date":"2022-11-28"},"external_id":{"arxiv":["2211.02348"]},"arxiv":1,"date_updated":"2023-09-13T09:35:59Z","status":"public","date_published":"2022-11-04T00:00:00Z","language":[{"iso":"eng"}],"abstract":[{"text":"Geospatial Information Systems are used by researchers and Humanitarian Assistance and Disaster Response (HADR) practitioners to support a wide variety of important applications. 
However, collaboration between these actors is difficult due to the heterogeneous nature of geospatial data modalities (e.g., multi-spectral images of various resolutions, timeseries, weather data) and diversity of tasks (e.g., regression of human activity indicators or detecting forest fires). In this work, we present a roadmap towards the construction of a general-purpose neural architecture (GPNA) with a geospatial inductive bias, pre-trained on large amounts of unlabelled earth observation data in a self-supervised manner. We envision how such a model may facilitate cooperation between members of the community. We show preliminary results on the first step of the roadmap, where we instantiate an architecture that can process a wide variety of geospatial data modalities and demonstrate that it can achieve competitive performance with domain-specific architectures on tasks relating to the U.N.'s Sustainable Development Goals.","lang":"eng"}],"quality_controlled":"1"},{"status":"public","abstract":[{"lang":"eng","text":"Although reinforcement learning has seen remarkable progress over the last years, solving robust dexterous object-manipulation tasks in multi-object settings remains a challenge. In this paper, we focus on models that can learn manipulation tasks in fixed multi-object settings and extrapolate this skill zero-shot without any drop in performance when the number of objects changes. We consider the generic task of bringing a specific cube out of a set to a goal position. We find that previous approaches, which primarily leverage attention and graph neural network-based architectures, do not generalize their skills when the number of input objects changes while scaling as K2. We propose an alternative plug-and-play module based on relational inductive biases to overcome these limitations. 
Besides exceeding performances in their training environment, we show that our approach, which scales linearly in K, allows agents to extrapolate and generalize zero-shot to any new object number."}],"language":[{"iso":"eng"}],"date_published":"2022-01-31T00:00:00Z","external_id":{"arxiv":["2201.13388"]},"arxiv":1,"date_updated":"2024-10-14T12:27:39Z","author":[{"full_name":"Mambelli, Davide","first_name":"Davide","last_name":"Mambelli"},{"first_name":"Frederik","full_name":"Träuble, Frederik","last_name":"Träuble"},{"last_name":"Bauer","full_name":"Bauer, Stefan","first_name":"Stefan"},{"full_name":"Schölkopf, Bernhard","first_name":"Bernhard","last_name":"Schölkopf"},{"full_name":"Locatello, Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683"}],"article_number":"2201.13388","department":[{"_id":"FrLo"}],"date_created":"2023-08-22T14:23:16Z","publication_status":"submitted","oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","year":"2022","_id":"14220","citation":{"chicago":"Mambelli, Davide, Frederik Träuble, Stefan Bauer, Bernhard Schölkopf, and Francesco Locatello. “Compositional Multi-Object Reinforcement Learning with Linear Relation Networks.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2201.13388\">https://doi.org/10.48550/arXiv.2201.13388</a>.","ama":"Mambelli D, Träuble F, Bauer S, Schölkopf B, Locatello F. Compositional multi-object reinforcement learning with linear relation networks. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2201.13388\">10.48550/arXiv.2201.13388</a>","ieee":"D. Mambelli, F. Träuble, S. Bauer, B. Schölkopf, and F. Locatello, “Compositional multi-object reinforcement learning with linear relation networks,” <i>arXiv</i>. .","ista":"Mambelli D, Träuble F, Bauer S, Schölkopf B, Locatello F. Compositional multi-object reinforcement learning with linear relation networks. 
arXiv, 2201.13388.","apa":"Mambelli, D., Träuble, F., Bauer, S., Schölkopf, B., &#38; Locatello, F. (n.d.). Compositional multi-object reinforcement learning with linear relation networks. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2201.13388\">https://doi.org/10.48550/arXiv.2201.13388</a>","mla":"Mambelli, Davide, et al. “Compositional Multi-Object Reinforcement Learning with Linear Relation Networks.” <i>ArXiv</i>, 2201.13388, doi:<a href=\"https://doi.org/10.48550/arXiv.2201.13388\">10.48550/arXiv.2201.13388</a>.","short":"D. Mambelli, F. Träuble, S. Bauer, B. Schölkopf, F. Locatello, ArXiv (n.d.)."},"title":"Compositional multi-object reinforcement learning with linear relation networks","type":"preprint","day":"31","doi":"10.48550/arXiv.2201.13388","article_processing_charge":"No","publication":"arXiv","month":"01","extern":"1","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2201.13388","open_access":"1"}]},{"day":"17","doi":"10.48550/arXiv.2211.09606","title":"Incremental approximate maximum flow in m1/2+o(1) update time","type":"preprint","year":"2022","citation":{"ama":"Goranci G, Henzinger M. Incremental approximate maximum flow in m1/2+o(1) update time. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2211.09606\">10.48550/arXiv.2211.09606</a>","chicago":"Goranci, Gramoz, and Monika Henzinger. “Incremental Approximate Maximum Flow in M1/2+o(1) Update Time.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2211.09606\">https://doi.org/10.48550/arXiv.2211.09606</a>.","short":"G. Goranci, M. Henzinger, ArXiv (n.d.).","ieee":"G. Goranci and M. Henzinger, “Incremental approximate maximum flow in m1/2+o(1) update time,” <i>arXiv</i>. .","apa":"Goranci, G., &#38; Henzinger, M. (n.d.). Incremental approximate maximum flow in m1/2+o(1) update time. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2211.09606\">https://doi.org/10.48550/arXiv.2211.09606</a>","mla":"Goranci, Gramoz, and Monika Henzinger. 
“Incremental Approximate Maximum Flow in M1/2+o(1) Update Time.” <i>ArXiv</i>, 2211.09606, doi:<a href=\"https://doi.org/10.48550/arXiv.2211.09606\">10.48550/arXiv.2211.09606</a>.","ista":"Goranci G, Henzinger M. Incremental approximate maximum flow in m1/2+o(1) update time. arXiv, 2211.09606."},"_id":"14236","oa_version":"Preprint","extern":"1","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2211.09606"}],"month":"11","publication":"arXiv","article_processing_charge":"No","date_updated":"2024-11-06T12:01:45Z","external_id":{"arxiv":["2211.09606"]},"arxiv":1,"date_published":"2022-11-17T00:00:00Z","abstract":[{"lang":"eng","text":"We show an $(1+\\epsilon)$-approximation algorithm for maintaining maximum $s$-$t$ flow under $m$ edge insertions in $m^{1/2+o(1)} \\epsilon^{-1/2}$ amortized update time for directed, unweighted graphs. This constitutes the first sublinear dynamic maximum flow algorithm in general sparse graphs with arbitrarily good approximation guarantee."}],"language":[{"iso":"eng"}],"status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"date_created":"2023-08-25T15:04:29Z","publication_status":"submitted","article_number":"2211.09606","author":[{"first_name":"Gramoz","full_name":"Goranci, Gramoz","last_name":"Goranci"},{"first_name":"Monika H","id":"540c9bbd-f2de-11ec-812d-d04a5be85630","full_name":"Henzinger, Monika H","last_name":"Henzinger","orcid":"0000-0002-5008-6530"}]},{"month":"02","article_processing_charge":"No","title":"Affine dimers from characteristic polygons","keyword":["dimer model","hyperplane arrangement","torus","lattice polygon"],"oa_version":"Published Version","year":"2022","_id":"14248","oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"last_name":"Holmes","id":"3a443b4c-080d-11ed-979a-feb062bdcee0","full_name":"Holmes, 
Daniel","first_name":"Daniel"}],"article_type":"original","publication_status":"published","arxiv":1,"status":"public","date_published":"2022-02-13T00:00:00Z","abstract":[{"text":"Recent work by Forsgård indicates that not every convex lattice polygon arises as the characteristic polygon of an affine dimer or, equivalently, an admissible oriented line arrangement on the torus in general position. We begin the classication of convex lattice polygons arising as characteristic polygons of affine dimers. We present several general constructions of new affine dimers from old, and an algorithm for finding affine dimers with prescribed polygon.\r\n\r\nWith these tools we prove that all lattice triangles, generalised parallelograms, and polygons of genus at most two admit an affine dimer.","lang":"eng"}],"extern":"1","main_file_link":[{"url":"https://journals.calstate.edu/pump/article/view/2711","open_access":"1"}],"publication":"PUMP Journal of Undergraduate Research","type":"journal_article","page":"24-51","publisher":"California State University","day":"13","volume":5,"citation":{"ama":"Holmes D. Affine dimers from characteristic polygons. <i>PUMP Journal of Undergraduate Research</i>. 2022;5:24-51.","chicago":"Holmes, Daniel. “Affine Dimers from Characteristic Polygons.” <i>PUMP Journal of Undergraduate Research</i>. California State University, 2022.","short":"D. Holmes, PUMP Journal of Undergraduate Research 5 (2022) 24–51.","ieee":"D. Holmes, “Affine dimers from characteristic polygons,” <i>PUMP Journal of Undergraduate Research</i>, vol. 5. California State University, pp. 24–51, 2022.","ista":"Holmes D. 2022. Affine dimers from characteristic polygons. PUMP Journal of Undergraduate Research. 5, 24–51.","mla":"Holmes, Daniel. “Affine Dimers from Characteristic Polygons.” <i>PUMP Journal of Undergraduate Research</i>, vol. 5, California State University, 2022, pp. 24–51.","apa":"Holmes, D. (2022). Affine dimers from characteristic polygons. 
<i>PUMP Journal of Undergraduate Research</i>. California State University."},"date_created":"2023-08-29T13:08:09Z","external_id":{"arxiv":["2110.01703"]},"corr_author":"1","date_updated":"2024-10-09T21:06:47Z","publication_identifier":{"issn":["2576-3725"]},"intvolume":"         5","language":[{"iso":"eng"}],"quality_controlled":"1"}]
