[{"publication_identifier":{"eissn":["2640-3498"]},"month":"07","external_id":{"arxiv":["2212.13468"]},"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2212.13468","open_access":"1"}],"oa":1,"project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"quality_controlled":"1","conference":{"name":"ICML: International Conference on Machine Learning","end_date":"2023-07-29","start_date":"2023-07-23","location":"Honolulu, Hawaii, HI, United States"},"language":[{"iso":"eng"}],"acknowledgement":"Aleksandr Shevchenko, Kevin Kögler and Marco Mondelli are supported by the 2019 Lopez-Loreta Prize. Hamed Hassani acknowledges the support by the NSF CIF award (1910056) and the NSF Institute for CORE Emerging Methods in Data Science (EnCORE).","year":"2023","department":[{"_id":"MaMo"},{"_id":"DaAl"}],"publisher":"ML Research Press","publication_status":"published","author":[{"first_name":"Aleksandr","last_name":"Shevchenko","id":"F2B06EC2-C99E-11E9-89F0-752EE6697425","full_name":"Shevchenko, Aleksandr"},{"id":"94ec913c-dc85-11ea-9058-e5051ab2428b","first_name":"Kevin","last_name":"Kögler","full_name":"Kögler, Kevin"},{"full_name":"Hassani, Hamed","last_name":"Hassani","first_name":"Hamed"},{"full_name":"Mondelli, Marco","last_name":"Mondelli","first_name":"Marco","orcid":"0000-0002-3242-7020","id":"27EB676C-8706-11E9-9510-7717E6697425"}],"volume":202,"date_updated":"2023-10-31T08:52:28Z","date_created":"2023-10-29T23:01:17Z","scopus_import":"1","article_processing_charge":"No","day":"30","citation":{"ama":"Shevchenko A, Kögler K, Hassani H, Mondelli M. Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. In: Proceedings of the 40th International Conference on Machine Learning. Vol 202. ML Research Press; 2023:31151-31209.","ista":"Shevchenko A, Kögler K, Hassani H, Mondelli M. 2023. Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 31151–31209.","ieee":"A. Shevchenko, K. Kögler, H. Hassani, and M. Mondelli, “Fundamental limits of two-layer autoencoders, and achieving them with gradient methods,” in Proceedings of the 40th International Conference on Machine Learning, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 31151–31209.","apa":"Shevchenko, A., Kögler, K., Hassani, H., & Mondelli, M. (2023). Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. In Proceedings of the 40th International Conference on Machine Learning (Vol. 202, pp. 31151–31209). Honolulu, Hawaii, HI, United States: ML Research Press.","mla":"Shevchenko, Aleksandr, et al. “Fundamental Limits of Two-Layer Autoencoders, and Achieving Them with Gradient Methods.” Proceedings of the 40th International Conference on Machine Learning, vol. 202, ML Research Press, 2023, pp. 31151–209.","short":"A. Shevchenko, K. Kögler, H. Hassani, M. Mondelli, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 31151–31209.","chicago":"Shevchenko, Aleksandr, Kevin Kögler, Hamed Hassani, and Marco Mondelli. “Fundamental Limits of Two-Layer Autoencoders, and Achieving Them with Gradient Methods.” In Proceedings of the 40th International Conference on Machine Learning, 202:31151–209. ML Research Press, 2023."},"publication":"Proceedings of the 40th International Conference on Machine Learning","page":"31151-31209","date_published":"2023-07-30T00:00:00Z","type":"conference","alternative_title":["PMLR"],"abstract":[{"lang":"eng","text":"Autoencoders are a popular model in many branches of machine learning and lossy data compression. However, their fundamental limits, the performance of gradient methods and the features learnt during optimization remain poorly understood, even in the two-layer setting. In fact, earlier work has considered either linear autoencoders or specific training regimes (leading to vanishing or diverging compression rates). Our paper addresses this gap by focusing on non-linear two-layer autoencoders trained in the challenging proportional regime in which the input dimension scales linearly with the size of the representation. Our results characterize the minimizers of the population risk, and show that such minimizers are achieved by gradient methods; their structure is also unveiled, thus leading to a concise description of the features obtained via training. For the special case of a sign activation function, our analysis establishes the fundamental limits for the lossy compression of Gaussian sources via (shallow) autoencoders. Finally, while the results are proved for Gaussian data, numerical simulations on standard datasets display the universality of the theoretical predictions."}],"_id":"14459","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","intvolume":" 202","status":"public","title":"Fundamental limits of two-layer autoencoders, and achieving them with gradient methods","oa_version":"Preprint"},{"abstract":[{"lang":"eng","text":"We consider the problem of signal estimation in generalized linear models defined via rotationally invariant design matrices. Since these matrices can have an arbitrary spectral distribution, this model is well suited for capturing complex correlation structures which often arise in applications. We propose a novel family of approximate message passing (AMP) algorithms for signal estimation, and rigorously characterize their performance in the high-dimensional limit via a state evolution recursion. Our rotationally invariant AMP has complexity of the same order as the existing AMP derived under the restrictive assumption of a Gaussian design; our algorithm also recovers this existing AMP as a special case. Numerical results showcase a performance close to Vector AMP (which is conjectured to be Bayes-optimal in some settings), but obtained with a much lower complexity, as the proposed algorithm does not require a computationally expensive singular value decomposition."}],"type":"conference","oa_version":"Published Version","file":[{"file_id":"12547","relation":"main_file","success":1,"checksum":"67436eb0a660789514cdf9db79e84683","date_created":"2023-02-13T10:53:11Z","date_updated":"2023-02-13T10:53:11Z","access_level":"open_access","file_name":"2022_PMLR_Venkataramanan.pdf","creator":"dernst","file_size":2341343,"content_type":"application/pdf"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"12540","intvolume":" 162","status":"public","title":"Estimation in rotationally invariant generalized linear models via approximate message passing","ddc":["000"],"has_accepted_license":"1","article_processing_charge":"No","date_published":"2022-01-01T00:00:00Z","citation":{"short":"R. Venkataramanan, K. Kögler, M. Mondelli, in:, Proceedings of the 39th International Conference on Machine Learning, ML Research Press, 2022.","mla":"Venkataramanan, Ramji, et al. “Estimation in Rotationally Invariant Generalized Linear Models via Approximate Message Passing.” Proceedings of the 39th International Conference on Machine Learning, vol. 162, 22, ML Research Press, 2022.","chicago":"Venkataramanan, Ramji, Kevin Kögler, and Marco Mondelli. “Estimation in Rotationally Invariant Generalized Linear Models via Approximate Message Passing.” In Proceedings of the 39th International Conference on Machine Learning, Vol. 162. ML Research Press, 2022.","ama":"Venkataramanan R, Kögler K, Mondelli M. Estimation in rotationally invariant generalized linear models via approximate message passing. In: Proceedings of the 39th International Conference on Machine Learning. Vol 162. ML Research Press; 2022.","ieee":"R. Venkataramanan, K. Kögler, and M. Mondelli, “Estimation in rotationally invariant generalized linear models via approximate message passing,” in Proceedings of the 39th International Conference on Machine Learning, Baltimore, MD, United States, 2022, vol. 162.","apa":"Venkataramanan, R., Kögler, K., & Mondelli, M. (2022). Estimation in rotationally invariant generalized linear models via approximate message passing. In Proceedings of the 39th International Conference on Machine Learning (Vol. 162). Baltimore, MD, United States: ML Research Press.","ista":"Venkataramanan R, Kögler K, Mondelli M. 2022. Estimation in rotationally invariant generalized linear models via approximate message passing. Proceedings of the 39th International Conference on Machine Learning. ICML: International Conference on Machine Learning vol. 162, 22."},"publication":"Proceedings of the 39th International Conference on Machine Learning","file_date_updated":"2023-02-13T10:53:11Z","article_number":"22","author":[{"last_name":"Venkataramanan","first_name":"Ramji","full_name":"Venkataramanan, Ramji"},{"id":"94ec913c-dc85-11ea-9058-e5051ab2428b","last_name":"Kögler","first_name":"Kevin","full_name":"Kögler, Kevin"},{"last_name":"Mondelli","first_name":"Marco","orcid":"0000-0002-3242-7020","id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco"}],"volume":162,"date_created":"2023-02-10T13:49:04Z","date_updated":"2023-02-13T10:54:58Z","acknowledgement":"The authors would like to thank the anonymous reviewers for their helpful comments. KK and MM were partially supported by the 2019 Lopez-Loreta Prize.","year":"2022","department":[{"_id":"MaMo"}],"publisher":"ML Research Press","publication_status":"published","conference":{"name":"ICML: International Conference on Machine Learning","end_date":"2022-07-23","start_date":"2022-07-17","location":"Baltimore, MD, United States"},"language":[{"iso":"eng"}],"oa":1,"project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"quality_controlled":"1"}]