[{"quality_controlled":"1","type":"conference","external_id":{"arxiv":["2510.15846"]},"_id":"21474","abstract":[{"text":"Rendering novel, relit views of a human head, given a monocular portrait image as input, is an inherently underconstrained problem. The traditional graphics solution is to explicitly decompose the input image into geometry, material and lighting via differentiable rendering; but this is constrained by the multiple assumptions and approximations of the underlying models and parameterizations of these scene components. We propose 3DPR, an image-based relighting model that leverages generative priors learnt from multi-view One-Light-at-A-Time (OLAT) images captured in a light stage. We introduce a new diverse and large-scale multi-view 4K OLAT dataset of 139 subjects to learn a high-quality prior over the distribution of high-frequency face reflectance. We leverage the latent space of a pre-trained generative head model that provides a rich prior over face geometry learnt from in-the-wild image datasets. The input portrait is first embedded in the latent manifold of such a model through an encoder-based inversion process. Then a novel triplane-based reflectance network trained on our lightstage data is used to synthesize high-fidelity OLAT images to enable image-based relighting. Our reflectance network operates in the latent space of the generative head model, crucially enabling a relatively small number of lightstage images to train the reflectance model. Combining the generated OLATs according to a given HDRI environment maps yields physically accurate environmental relighting results. Through quantitative and qualitative evaluations, we demonstrate that 3DPR outperforms previous methods, particularly in preserving identity and in capturing lighting effects such as specularities, self-shadows, and subsurface scattering.","lang":"eng"}],"language":[{"iso":"eng"}],"day":"14","citation":{"apa":"Rao, P., Meka, A., Zhou, X., Fox, G., Mallikarjun, B. 
R., Zhan, F., … Theobalt, C. (2025). 3DPR: Single image 3D portrait relighting with generative priors. In <i>Proceedings SIGGRAPH Asia 2025 Conference Papers 2025</i>. Hong Kong, Hong Kong: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3757377.3763962\">https://doi.org/10.1145/3757377.3763962</a>","mla":"Rao, Pramod, et al. “3DPR: Single Image 3D Portrait Relighting with Generative Priors.” <i>Proceedings SIGGRAPH Asia 2025 Conference Papers 2025</i>, 108, Association for Computing Machinery, 2025, doi:<a href=\"https://doi.org/10.1145/3757377.3763962\">10.1145/3757377.3763962</a>.","short":"P. Rao, A. Meka, X. Zhou, G. Fox, B.R. Mallikarjun, F. Zhan, T. Weyrich, B. Bickel, H. Pfister, W. Matusik, T. Beeler, M. Elgharib, M. Habermann, C. Theobalt, in:, Proceedings SIGGRAPH Asia 2025 Conference Papers 2025, Association for Computing Machinery, 2025.","ieee":"P. Rao <i>et al.</i>, “3DPR: Single image 3D portrait relighting with generative priors,” in <i>Proceedings SIGGRAPH Asia 2025 Conference Papers 2025</i>, Hong Kong, Hong Kong, 2025.","chicago":"Rao, Pramod, Abhimitra Meka, Xilong Zhou, Gereon Fox, B. R. Mallikarjun, Fangneng Zhan, Tim Weyrich, et al. “3DPR: Single Image 3D Portrait Relighting with Generative Priors.” In <i>Proceedings SIGGRAPH Asia 2025 Conference Papers 2025</i>. Association for Computing Machinery, 2025. <a href=\"https://doi.org/10.1145/3757377.3763962\">https://doi.org/10.1145/3757377.3763962</a>.","ama":"Rao P, Meka A, Zhou X, et al. 3DPR: Single image 3D portrait relighting with generative priors. In: <i>Proceedings SIGGRAPH Asia 2025 Conference Papers 2025</i>. Association for Computing Machinery; 2025. doi:<a href=\"https://doi.org/10.1145/3757377.3763962\">10.1145/3757377.3763962</a>","ista":"Rao P, Meka A, Zhou X, Fox G, Mallikarjun BR, Zhan F, Weyrich T, Bickel B, Pfister H, Matusik W, Beeler T, Elgharib M, Habermann M, Theobalt C. 2025. 3DPR: Single image 3D portrait relighting with generative priors. 
Proceedings SIGGRAPH Asia 2025 Conference Papers 2025. SA: SIGGRAPH Asia, 108."},"has_accepted_license":"1","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ddc":["000"],"year":"2025","article_number":"108","OA_place":"publisher","date_published":"2025-12-14T00:00:00Z","file_date_updated":"2026-03-23T14:41:07Z","publication":"Proceedings SIGGRAPH Asia 2025 Conference Papers 2025","date_updated":"2026-03-23T14:45:58Z","status":"public","scopus_import":"1","conference":{"end_date":"2025-12-18","location":"Hong Kong, Hong Kong","name":"SA: SIGGRAPH Asia","start_date":"2025-12-15"},"publication_status":"published","file":[{"file_name":"2025_SiggraphAsia_Rao.pdf","checksum":"a3dc426cdf7bbd84a192e5140bb3bb49","relation":"main_file","file_id":"21479","date_updated":"2026-03-23T14:41:07Z","creator":"dernst","access_level":"open_access","file_size":57903731,"date_created":"2026-03-23T14:41:07Z","success":1,"content_type":"application/pdf"}],"publication_identifier":{"isbn":["9798400721373"]},"arxiv":1,"OA_type":"gold","oa_version":"Published Version","title":"3DPR: Single image 3D portrait relighting with generative priors","date_created":"2026-03-22T23:04:35Z","oa":1,"doi":"10.1145/3757377.3763962","acknowledgement":"This work was supported by the ERC Consolidator Grant 4DReply (770784) and Saarbrücken Research Center for Visual Comput- ing, Interaction, and AI. We thank Oleksandr Sotnychenko for helping us with setting up data capture. Finally, we thank Shrisha Bharadwaj for discussions, proofreading and innumerable support.","month":"12","department":[{"_id":"BeBi"}],"author":[{"last_name":"Rao","full_name":"Rao, Pramod","first_name":"Pramod"},{"first_name":"Abhimitra","last_name":"Meka","full_name":"Meka, Abhimitra"},{"last_name":"Zhou","full_name":"Zhou, Xilong","first_name":"Xilong"},{"first_name":"Gereon","last_name":"Fox","full_name":"Fox, Gereon"},{"full_name":"Mallikarjun, B. R.","last_name":"Mallikarjun","first_name":"B. 
R."},{"last_name":"Zhan","full_name":"Zhan, Fangneng","first_name":"Fangneng"},{"last_name":"Weyrich","full_name":"Weyrich, Tim","first_name":"Tim"},{"last_name":"Bickel","full_name":"Bickel, Bernd","orcid":"0000-0001-6511-9385","id":"49876194-F248-11E8-B48F-1D18A9856A87","first_name":"Bernd"},{"full_name":"Pfister, Hanspeter","last_name":"Pfister","first_name":"Hanspeter"},{"first_name":"Wojciech","last_name":"Matusik","full_name":"Matusik, Wojciech"},{"first_name":"Thabo","full_name":"Beeler, Thabo","last_name":"Beeler"},{"first_name":"Mohamed","full_name":"Elgharib, Mohamed","last_name":"Elgharib"},{"full_name":"Habermann, Marc","last_name":"Habermann","first_name":"Marc"},{"last_name":"Theobalt","full_name":"Theobalt, Christian","first_name":"Christian"}],"publisher":"Association for Computing Machinery","article_processing_charge":"No","tmp":{"image":"/images/cc_by_nc.png","short":"CC BY-NC (4.0)","name":"Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)","legal_code_url":"https://creativecommons.org/licenses/by-nc/4.0/legalcode"}},{"citation":{"mla":"Bhargava, Manas, et al. “Mesh Simplification for Unfolding.” <i>Computer Graphics Forum</i>, vol. 44, no. 1, e15269, Wiley, 2025, doi:<a href=\"https://doi.org/10.1111/cgf.15269\">10.1111/cgf.15269</a>.","short":"M. Bhargava, C. Schreck, M. Freire, P.A. Hugron, S. Lefebvre, S. Sellán, B. Bickel, Computer Graphics Forum 44 (2025).","ieee":"M. Bhargava <i>et al.</i>, “Mesh simplification for unfolding,” <i>Computer Graphics Forum</i>, vol. 44, no. 1. Wiley, 2025.","apa":"Bhargava, M., Schreck, C., Freire, M., Hugron, P. A., Lefebvre, S., Sellán, S., &#38; Bickel, B. (2025). Mesh simplification for unfolding. <i>Computer Graphics Forum</i>. Wiley. <a href=\"https://doi.org/10.1111/cgf.15269\">https://doi.org/10.1111/cgf.15269</a>","ama":"Bhargava M, Schreck C, Freire M, et al. Mesh simplification for unfolding. <i>Computer Graphics Forum</i>. 2025;44(1). 
doi:<a href=\"https://doi.org/10.1111/cgf.15269\">10.1111/cgf.15269</a>","ista":"Bhargava M, Schreck C, Freire M, Hugron PA, Lefebvre S, Sellán S, Bickel B. 2025. Mesh simplification for unfolding. Computer Graphics Forum. 44(1), e15269.","chicago":"Bhargava, Manas, Camille Schreck, M. Freire, P. A. Hugron, S. Lefebvre, S. Sellán, and Bernd Bickel. “Mesh Simplification for Unfolding.” <i>Computer Graphics Forum</i>. Wiley, 2025. <a href=\"https://doi.org/10.1111/cgf.15269\">https://doi.org/10.1111/cgf.15269</a>."},"day":"01","has_accepted_license":"1","quality_controlled":"1","type":"journal_article","external_id":{"isi":["001357046100001"],"arxiv":["2408.06944"]},"_id":"18565","language":[{"iso":"eng"}],"abstract":[{"text":"We present a computational approach for unfolding 3D shapes isometrically into the plane as a single patch without overlapping triangles. This is a hard, sometimes impossible, problem, which existing methods are forced to soften by allowing for map distortions or multiple patches. Instead, we propose a geometric relaxation of the problem: We modify the input shape until it admits an overlap‐free unfolding. We achieve this by locally displacing vertices and collapsing edges, guided by the unfolding process. 
We validate our algorithm quantitatively and qualitatively on a large dataset of complex shapes and show its proficiency by fabricating real shapes from paper.","lang":"eng"}],"OA_place":"publisher","date_published":"2025-02-01T00:00:00Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ddc":["006"],"year":"2025","intvolume":"        44","article_number":"e15269","isi":1,"date_updated":"2026-04-07T11:50:09Z","publication":"Computer Graphics Forum","file_date_updated":"2025-04-16T09:06:45Z","scopus_import":"1","issue":"1","status":"public","file":[{"relation":"main_file","date_updated":"2024-11-19T09:23:20Z","file_id":"18567","creator":"mbhargav","file_name":"Mesh_Simplification_For_Unfolding_cgf_submission_supplemental_video.mp4","checksum":"34acdd9bfbe43f00eb6c7656afef3ac6","file_size":36999751,"date_created":"2024-11-19T09:23:20Z","content_type":"video/mp4","success":1,"access_level":"open_access"},{"file_id":"19576","creator":"dernst","relation":"main_file","date_updated":"2025-04-16T09:06:45Z","file_name":"2025_CompGraphicsForum_Bhargava.pdf","checksum":"efb06b01bae37f470954601bc004374d","file_size":5188265,"date_created":"2025-04-16T09:06:45Z","content_type":"application/pdf","success":1,"access_level":"open_access"}],"publication_status":"published","OA_type":"hybrid","oa_version":"Published Version","date_created":"2024-11-19T09:14:32Z","title":"Mesh simplification for unfolding","keyword":["fabrication","single patch unfolding","mesh simplification"],"publication_identifier":{"eissn":["1467-8659"],"issn":["0167-7055"]},"arxiv":1,"department":[{"_id":"GradSch"},{"_id":"BeBi"}],"month":"02","article_type":"original","volume":44,"author":[{"id":"FF8FA64C-AA6A-11E9-99AD-50D4E5697425","first_name":"Manas","orcid":"0009-0007-6138-6890","last_name":"Bhargava","full_name":"Bhargava, Manas"},{"full_name":"Schreck, Camille","last_name":"Schreck","id":"2B14B676-F248-11E8-B48F-1D18A9856A87","first_name":"Camille"},{"first_name":"M.","full_name":"Freire, 
M.","last_name":"Freire"},{"first_name":"P. A.","last_name":"Hugron","full_name":"Hugron, P. A."},{"first_name":"S.","last_name":"Lefebvre","full_name":"Lefebvre, S."},{"first_name":"S.","full_name":"Sellán, S.","last_name":"Sellán"},{"full_name":"Bickel, Bernd","last_name":"Bickel","orcid":"0000-0001-6511-9385","id":"49876194-F248-11E8-B48F-1D18A9856A87","first_name":"Bernd"}],"doi":"10.1111/cgf.15269","oa":1,"acknowledgement":"Researchers from INRIA received support from the DORNELL Inria Challenge. Silvia Sellán acknowledges support from NSERC Vanier Doctoral Scholarship and an MIT SoE Postdoctoral Fellowship for Engineering Excellence.","related_material":{"record":[{"relation":"dissertation_contains","id":"20276","status":"public"}]},"publisher":"Wiley","article_processing_charge":"Yes (via OA deal)","tmp":{"image":"/images/cc_by_nc.png","short":"CC BY-NC (4.0)","name":"Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)","legal_code_url":"https://creativecommons.org/licenses/by-nc/4.0/legalcode"},"corr_author":"1"},{"OA_place":"repository","date_published":"2025-08-31T00:00:00Z","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2508.05410","open_access":"1"}],"user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","year":"2025","citation":{"apa":"Bhargava, M., Hiraki, T., Strugaru, I.-M., Zhang, Y., Piovarci, M., Daraio, C., … Bickel, B. (n.d.). Computational design and fabrication of modular robots with untethered control. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2508.05410\">https://doi.org/10.48550/arXiv.2508.05410</a>","mla":"Bhargava, Manas, et al. “Computational Design and Fabrication of Modular Robots with Untethered Control.” <i>ArXiv</i>, doi:<a href=\"https://doi.org/10.48550/arXiv.2508.05410\">10.48550/arXiv.2508.05410</a>.","ieee":"M. Bhargava <i>et al.</i>, “Computational design and fabrication of modular robots with untethered control,” <i>arXiv</i>. .","short":"M. Bhargava, T. Hiraki, I.-M. Strugaru, Y. 
Zhang, M. Piovarci, C. Daraio, D. Iwai, B. Bickel, ArXiv (n.d.).","chicago":"Bhargava, Manas, Takefumi Hiraki, Irina-Malina Strugaru, Yuhan Zhang, Michael Piovarci, Chiara Daraio, Daisuke Iwai, and Bernd Bickel. “Computational Design and Fabrication of Modular Robots with Untethered Control.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2508.05410\">https://doi.org/10.48550/arXiv.2508.05410</a>.","ama":"Bhargava M, Hiraki T, Strugaru I-M, et al. Computational design and fabrication of modular robots with untethered control. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2508.05410\">10.48550/arXiv.2508.05410</a>","ista":"Bhargava M, Hiraki T, Strugaru I-M, Zhang Y, Piovarci M, Daraio C, Iwai D, Bickel B. Computational design and fabrication of modular robots with untethered control. arXiv, <a href=\"https://doi.org/10.48550/arXiv.2508.05410\">10.48550/arXiv.2508.05410</a>."},"day":"31","external_id":{"arxiv":["2508.05410"]},"abstract":[{"lang":"eng","text":"Natural organisms utilize distributed actuation through their musculoskeletal\r\nsystems to adapt their gait for traversing diverse terrains or to morph their\r\nbodies for varied tasks. A longstanding challenge in robotics is to emulate\r\nthis capability of natural organisms, which has motivated the development of\r\nnumerous soft robotic systems. However, such systems are generally optimized\r\nfor a single functionality, lack the ability to change form or function on\r\ndemand, or remain tethered to bulky control systems. To address these\r\nlimitations, we present a framework for designing and controlling robots that\r\nutilize distributed actuation. 
We propose a novel building block that\r\nintegrates 3D-printed bones with liquid crystal elastomer (LCE) muscles as\r\nlightweight actuators, enabling the modular assembly of musculoskeletal robots.\r\nWe developed LCE rods that contract in response to infrared radiation, thereby\r\nproviding localized, untethered control over the distributed skeletal network\r\nand producing global deformations of the robot. To fully capitalize on the\r\nextensive design space, we introduce two computational tools: one for\r\noptimizing the robot's skeletal graph to achieve multiple target deformations,\r\nand another for co-optimizing skeletal designs and control gaits to realize\r\ndesired locomotion. We validate our framework by constructing several robots\r\nthat demonstrate complex shape morphing, diverse control schemes, and\r\nenvironmental adaptability. Our system integrates advances in modular material\r\nbuilding, untethered and distributed control, and computational design to\r\nintroduce a new generation of robots that brings us closer to the capabilities\r\nof living organisms."}],"_id":"20286","language":[{"iso":"eng"}],"type":"preprint","ec_funded":1,"status":"public","publication":"arXiv","date_updated":"2026-04-07T11:50:09Z","date_created":"2025-09-04T09:14:11Z","title":"Computational design and fabrication of modular robots with untethered control","oa_version":"Preprint","arxiv":1,"publication_status":"draft","related_material":{"record":[{"relation":"dissertation_contains","status":"public","id":"20276"}]},"article_processing_charge":"No","corr_author":"1","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"author":[{"full_name":"Bhargava, Manas","last_name":"Bhargava","orcid":"0009-0007-6138-6890","first_name":"Manas","id":"FF8FA64C-AA6A-11E9-99AD-50D4E5697425"},{"full_name":"Hiraki, 
Takefumi","last_name":"Hiraki","first_name":"Takefumi"},{"last_name":"Strugaru","full_name":"Strugaru, Irina-Malina","first_name":"Irina-Malina","id":"2afc607f-f128-11eb-9611-8f2a0dfcf074"},{"full_name":"Zhang, Yuhan","last_name":"Zhang","first_name":"Yuhan"},{"full_name":"Piovarci, Michael","last_name":"Piovarci","orcid":"0000-0002-5062-4474","first_name":"Michael","id":"62E473F4-5C99-11EA-A40E-AF823DDC885E"},{"first_name":"Chiara","full_name":"Daraio, Chiara","last_name":"Daraio"},{"full_name":"Iwai, Daisuke","last_name":"Iwai","first_name":"Daisuke"},{"id":"49876194-F248-11E8-B48F-1D18A9856A87","first_name":"Bernd","orcid":"0000-0001-6511-9385","full_name":"Bickel, Bernd","last_name":"Bickel"}],"month":"08","department":[{"_id":"BeBi"}],"project":[{"name":"MATERIALIZABLE: Intelligent fabrication-oriented Computational Design and Modeling","grant_number":"715767","call_identifier":"H2020","_id":"24F9549A-B435-11E9-9278-68D0E5697425"}],"doi":"10.48550/arXiv.2508.05410","oa":1,"acknowledgement":"The authors express gratitude to Magali Lorion for assisting in the initial fabrication of LCEs,\r\nPengbin Tang for providing the code for simulating discrete elastic rods, the Imaging and\r\nOptics Facility at ISTA for assisting with the spectrometry measurements, and the MIBA\r\nmachine shop at ISTA for their support in manufacturing various devices.\r\nFunding: This project was supported by the European Research Council (ERC) under\r\nthe European Union’s Horizon 2020 research and innovation program (Grant Agreement No.\r\n715767 -– MATERIALIZABLE)."},{"month":"04","department":[{"_id":"BeBi"}],"article_type":"original","volume":132,"author":[{"first_name":"Pramod","full_name":"Rao, Pramod","last_name":"Rao"},{"first_name":"B. R.","last_name":"Mallikarjun","full_name":"Mallikarjun, B. 
R."},{"first_name":"Gereon","last_name":"Fox","full_name":"Fox, Gereon"},{"full_name":"Weyrich, Tim","last_name":"Weyrich","first_name":"Tim"},{"last_name":"Bickel","full_name":"Bickel, Bernd","first_name":"Bernd","id":"49876194-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-6511-9385"},{"last_name":"Pfister","full_name":"Pfister, Hanspeter","first_name":"Hanspeter"},{"full_name":"Matusik, Wojciech","last_name":"Matusik","first_name":"Wojciech"},{"full_name":"Zhan, Fangneng","last_name":"Zhan","first_name":"Fangneng"},{"first_name":"Ayush","full_name":"Tewari, Ayush","last_name":"Tewari"},{"last_name":"Theobalt","full_name":"Theobalt, Christian","first_name":"Christian"},{"first_name":"Mohamed","last_name":"Elgharib","full_name":"Elgharib, Mohamed"}],"oa":1,"doi":"10.1007/s11263-023-01899-3","acknowledgement":"Open Access funding enabled and organized by Projekt DEAL.","pmid":1,"publisher":"Springer Nature","article_processing_charge":"Yes (via OA deal)","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"file":[{"date_updated":"2024-07-22T11:07:14Z","file_id":"17304","relation":"main_file","creator":"dernst","file_name":"2024_IJCV_Rao.pdf","checksum":"5eef1d920f6fe700d7856098000d05f1","date_created":"2024-07-22T11:07:14Z","file_size":9942520,"content_type":"application/pdf","success":1,"access_level":"open_access"}],"publication_status":"published","oa_version":"Published Version","title":"A deeper analysis of volumetric relightiable faces","date_created":"2023-11-05T23:00:54Z","publication_identifier":{"issn":["0920-5691"],"eissn":["1573-1405"]},"isi":1,"date_updated":"2025-08-05T13:28:58Z","publication":"International Journal of Computer Vision","file_date_updated":"2024-07-22T11:07:14Z","scopus_import":"1","status":"public","page":"1148-1166","day":"01","citation":{"apa":"Rao, P., Mallikarjun, B. 
R., Fox, G., Weyrich, T., Bickel, B., Pfister, H., … Elgharib, M. (2024). A deeper analysis of volumetric relightiable faces. <i>International Journal of Computer Vision</i>. Springer Nature. <a href=\"https://doi.org/10.1007/s11263-023-01899-3\">https://doi.org/10.1007/s11263-023-01899-3</a>","mla":"Rao, Pramod, et al. “A Deeper Analysis of Volumetric Relightiable Faces.” <i>International Journal of Computer Vision</i>, vol. 132, Springer Nature, 2024, pp. 1148–66, doi:<a href=\"https://doi.org/10.1007/s11263-023-01899-3\">10.1007/s11263-023-01899-3</a>.","ieee":"P. Rao <i>et al.</i>, “A deeper analysis of volumetric relightiable faces,” <i>International Journal of Computer Vision</i>, vol. 132. Springer Nature, pp. 1148–1166, 2024.","short":"P. Rao, B.R. Mallikarjun, G. Fox, T. Weyrich, B. Bickel, H. Pfister, W. Matusik, F. Zhan, A. Tewari, C. Theobalt, M. Elgharib, International Journal of Computer Vision 132 (2024) 1148–1166.","chicago":"Rao, Pramod, B. R. Mallikarjun, Gereon Fox, Tim Weyrich, Bernd Bickel, Hanspeter Pfister, Wojciech Matusik, et al. “A Deeper Analysis of Volumetric Relightiable Faces.” <i>International Journal of Computer Vision</i>. Springer Nature, 2024. <a href=\"https://doi.org/10.1007/s11263-023-01899-3\">https://doi.org/10.1007/s11263-023-01899-3</a>.","ama":"Rao P, Mallikarjun BR, Fox G, et al. A deeper analysis of volumetric relightiable faces. <i>International Journal of Computer Vision</i>. 2024;132:1148-1166. doi:<a href=\"https://doi.org/10.1007/s11263-023-01899-3\">10.1007/s11263-023-01899-3</a>","ista":"Rao P, Mallikarjun BR, Fox G, Weyrich T, Bickel B, Pfister H, Matusik W, Zhan F, Tewari A, Theobalt C, Elgharib M. 2024. A deeper analysis of volumetric relightiable faces. International Journal of Computer Vision. 
132, 1148–1166."},"has_accepted_license":"1","quality_controlled":"1","type":"journal_article","external_id":{"pmid":["38549787"],"isi":["001091935600002"]},"_id":"14488","abstract":[{"lang":"eng","text":"Portrait viewpoint and illumination editing is an important problem with several applications in VR/AR, movies, and photography. Comprehensive knowledge of geometry and illumination is critical for obtaining photorealistic results. Current methods are unable to explicitly model in 3D while handling both viewpoint and illumination editing from a single image. In this paper, we propose VoRF, a novel approach that can take even a single portrait image as input and relight human heads under novel illuminations that can be viewed from arbitrary viewpoints. VoRF represents a human head as a continuous volumetric field and learns a prior model of human heads using a coordinate-based MLP with individual latent spaces for identity and illumination. The prior model is learned in an auto-decoder manner over a diverse class of head shapes and appearances, allowing VoRF to generalize to novel test identities from a single input image. Additionally, VoRF has a reflectance MLP that uses the intermediate features of the prior model for rendering One-Light-at-A-Time (OLAT) images under novel views. We synthesize novel illuminations by combining these OLAT images with target environment maps. Qualitative and quantitative evaluations demonstrate the effectiveness of VoRF for relighting and novel view synthesis, even when applied to unseen subjects under uncontrolled illumination. This work is an extension of Rao et al. (VoRF: Volumetric Relightable Faces 2022). 
We provide extensive evaluation and ablative studies of our model and also provide an application, where any face can be relighted using textual input."}],"language":[{"iso":"eng"}],"date_published":"2024-04-01T00:00:00Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ddc":["000"],"year":"2024","intvolume":"       132"},{"quality_controlled":"1","type":"conference","external_id":{"isi":["001282218200059"]},"_id":"18912","abstract":[{"lang":"eng","text":"This paper presents a computational method for automatically creating fabricable 3D wire sculptures from various input modalities, including 3D models, images, and even text. There are several challenges to wire art creation. For example, artists must express the desired visual as a sparse wire representation. It is also difficult to manually bend wires in the air without guidance to fabricate the designed 3D curves. Our workflow solves these challenges by using two core techniques. First, we present an algorithm that automatically generates a fabricable 3D curve representation of the target based on a loss function that measures the semantic distance between the rendered curve and the target. The loss function can be defined using different pre-trained vision-language neural networks to generate wire art from different input types. The loss function is then optimized using differentiable rendering specifically targeting 3D parametric curves. Our method can incorporate various fabrication constraints on the wire as additional regularization terms in the optimization process. Second, we present an algorithm to generate a 3D printable jig structure that can be used to fabricate the generated wire path. The major challenge in the jig generation stems from the design of an intersection-free surface mesh for 3D printing, which we address with our inflation algorithm. 
The experimental results indicate that our method can handle a wider range of input types and can produce physically fabricable wire shapes compared to previous wire generation methods. Various wire arts have been fabricated using our 3D-printed jig to demonstrate its effectiveness in 3D wire bending."}],"language":[{"iso":"eng"}],"citation":{"ieee":"K. Tojo, A. Shamir, B. Bickel, and N. Umetani, “Fabricable 3D wire art,” in <i>SIGGRAPH ’24: ACM SIGGRAPH 2024 Conference Papers</i>, Denver, CO, United States, 2024.","short":"K. Tojo, A. Shamir, B. Bickel, N. Umetani, in:, SIGGRAPH ’24: ACM SIGGRAPH 2024 Conference Papers, ACM, 2024.","mla":"Tojo, Kenji, et al. “Fabricable 3D Wire Art.” <i>SIGGRAPH ’24: ACM SIGGRAPH 2024 Conference Papers</i>, 134, ACM, 2024, doi:<a href=\"https://doi.org/10.1145/3641519.3657453\">10.1145/3641519.3657453</a>.","apa":"Tojo, K., Shamir, A., Bickel, B., &#38; Umetani, N. (2024). Fabricable 3D wire art. In <i>SIGGRAPH ’24: ACM SIGGRAPH 2024 Conference Papers</i>. Denver, CO, United States: ACM. <a href=\"https://doi.org/10.1145/3641519.3657453\">https://doi.org/10.1145/3641519.3657453</a>","ista":"Tojo K, Shamir A, Bickel B, Umetani N. 2024. Fabricable 3D wire art. SIGGRAPH ’24: ACM SIGGRAPH 2024 Conference Papers. SIGGRAPH: Computer Graphics and Interactive Techniques Conference, 134.","ama":"Tojo K, Shamir A, Bickel B, Umetani N. Fabricable 3D wire art. In: <i>SIGGRAPH ’24: ACM SIGGRAPH 2024 Conference Papers</i>. ACM; 2024. doi:<a href=\"https://doi.org/10.1145/3641519.3657453\">10.1145/3641519.3657453</a>","chicago":"Tojo, Kenji, Ariel Shamir, Bernd Bickel, and Nobuyuki Umetani. “Fabricable 3D Wire Art.” In <i>SIGGRAPH ’24: ACM SIGGRAPH 2024 Conference Papers</i>. ACM, 2024. 
<a href=\"https://doi.org/10.1145/3641519.3657453\">https://doi.org/10.1145/3641519.3657453</a>."},"day":"01","user_id":"317138e5-6ab7-11ef-aa6d-ffef3953e345","year":"2024","article_number":"134","date_published":"2024-07-01T00:00:00Z","isi":1,"publication":"SIGGRAPH '24: ACM SIGGRAPH 2024 Conference Papers","date_updated":"2025-09-09T12:06:57Z","status":"public","scopus_import":"1","conference":{"name":"SIGGRAPH: Computer Graphics and Interactive Techniques Conference","start_date":"2024-07-28","end_date":"2024-08-01","location":"Denver, CO, United States"},"publication_status":"published","publication_identifier":{"isbn":["9798400705250"]},"OA_type":"closed access","oa_version":"None","title":"Fabricable 3D wire art","date_created":"2025-01-27T13:47:35Z","doi":"10.1145/3641519.3657453","acknowledgement":"The authors thank the anonymous reviewers for their valuable comments and suggestions for improving the paper. This work was supported by JSPS KAKENHI Grant Numbers JP21K11910, 23KJ0699 and JST AdCORP, Grant Number JPMJKB2302, Japan. This work was partially supported by Israel Science Foundation Grant number 1390/19 and Joint NSFC-ISF Research Grant no. 3077/23. 
We thank Riku Toyota for his useful advice on wire selection and Takeo Igarashi for his assistance in arranging the collaboration of the authors.","month":"07","department":[{"_id":"BeBi"}],"author":[{"first_name":"Kenji","last_name":"Tojo","full_name":"Tojo, Kenji"},{"first_name":"Ariel","last_name":"Shamir","full_name":"Shamir, Ariel"},{"full_name":"Bickel, Bernd","last_name":"Bickel","orcid":"0000-0001-6511-9385","id":"49876194-F248-11E8-B48F-1D18A9856A87","first_name":"Bernd"},{"full_name":"Umetani, Nobuyuki","last_name":"Umetani","first_name":"Nobuyuki"}],"publisher":"ACM","article_processing_charge":"No","corr_author":"1"},{"external_id":{"arxiv":["2407.10487"],"isi":["001282218200076"]},"_id":"17374","abstract":[{"lang":"eng","text":"Achieving photorealistic 3D view synthesis and relighting of human portraits is pivotal for advancing AR/VR applications. Existing methodologies in portrait relighting demonstrate substantial limitations in terms of generalization and 3D consistency, coupled with inaccuracies in physically realistic lighting and identity preservation. Furthermore, personalization from a single view is difficult to achieve and often requires multiview images during the testing phase or involves slow optimization processes. This paper introduces Lite2Relight , a novel technique that can predict 3D consistent head poses of portraits while performing physically plausible light editing at interactive speed. Our method uniquely extends the generative capabilities and efficient volumetric representation of EG3D, leveraging a lightstage dataset to implicitly disentangle face reflectance and perform relighting under target HDRI environment maps. By utilizing a pre-trained geometry-aware encoder and a feature alignment module, we map input images into a relightable 3D space, enhancing them with a strong face geometry and reflectance prior. 
Through extensive quantitative and qualitative evaluations, we show that our method outperforms the state-of-the-art methods in terms of efficacy, photorealism, and practical application. This includes producing 3D-consistent results of the full head, including hair, eyes, and expressions. Lite2Relight paves the way for large-scale adoption of photorealistic portrait editing in various domains, offering a robust, interactive solution to a previously constrained problem."}],"language":[{"iso":"eng"}],"quality_controlled":"1","type":"conference","citation":{"short":"P. Rao, G. Fox, A. Meka, B.R. Mallikarjun, F. Zhan, T. Weyrich, B. Bickel, H. Pfister, W. Matusik, M. Elgharib, C. Theobalt, in:, Proceedings - SIGGRAPH 2024 Conference Papers, Association for Computing Machinery, 2024.","ieee":"P. Rao <i>et al.</i>, “Lite2Relight: 3D-aware single image portrait relighting,” in <i>Proceedings - SIGGRAPH 2024 Conference Papers</i>, Denver, CO, United States, 2024.","mla":"Rao, Pramod, et al. “Lite2Relight: 3D-Aware Single Image Portrait Relighting.” <i>Proceedings - SIGGRAPH 2024 Conference Papers</i>, 41, Association for Computing Machinery, 2024, doi:<a href=\"https://doi.org/10.1145/3641519.3657470\">10.1145/3641519.3657470</a>.","apa":"Rao, P., Fox, G., Meka, A., Mallikarjun, B. R., Zhan, F., Weyrich, T., … Theobalt, C. (2024). Lite2Relight: 3D-aware single image portrait relighting. In <i>Proceedings - SIGGRAPH 2024 Conference Papers</i>. Denver, CO, United States: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3641519.3657470\">https://doi.org/10.1145/3641519.3657470</a>","ista":"Rao P, Fox G, Meka A, Mallikarjun BR, Zhan F, Weyrich T, Bickel B, Pfister H, Matusik W, Elgharib M, Theobalt C. 2024. Lite2Relight: 3D-aware single image portrait relighting. Proceedings - SIGGRAPH 2024 Conference Papers. SIGGRAPH: Computer Graphics and Interactive Techniques Conference, 41.","ama":"Rao P, Fox G, Meka A, et al. 
Lite2Relight: 3D-aware single image portrait relighting. In: <i>Proceedings - SIGGRAPH 2024 Conference Papers</i>. Association for Computing Machinery; 2024. doi:<a href=\"https://doi.org/10.1145/3641519.3657470\">10.1145/3641519.3657470</a>","chicago":"Rao, Pramod, Gereon Fox, Abhimitra Meka, B. R. Mallikarjun, Fangneng Zhan, Tim Weyrich, Bernd Bickel, et al. “Lite2Relight: 3D-Aware Single Image Portrait Relighting.” In <i>Proceedings - SIGGRAPH 2024 Conference Papers</i>. Association for Computing Machinery, 2024. <a href=\"https://doi.org/10.1145/3641519.3657470\">https://doi.org/10.1145/3641519.3657470</a>."},"day":"13","has_accepted_license":"1","article_number":"41","ddc":["000"],"user_id":"317138e5-6ab7-11ef-aa6d-ffef3953e345","year":"2024","date_published":"2024-07-13T00:00:00Z","file_date_updated":"2024-08-05T08:25:18Z","date_updated":"2025-09-08T08:48:22Z","publication":"Proceedings - SIGGRAPH 2024 Conference Papers","isi":1,"status":"public","scopus_import":"1","conference":{"location":"Denver, CO, United States","end_date":"2024-08-01","start_date":"2024-07-27","name":"SIGGRAPH: Computer Graphics and Interactive Techniques Conference"},"publication_status":"published","file":[{"checksum":"4650f6d1419e675929133e46a91ca177","file_name":"2024_SIGGRAPH_Rao.pdf","date_updated":"2024-08-05T08:25:18Z","relation":"main_file","creator":"dernst","file_id":"17387","access_level":"open_access","content_type":"application/pdf","success":1,"date_created":"2024-08-05T08:25:18Z","file_size":59683257}],"arxiv":1,"publication_identifier":{"isbn":["9798400705250"]},"title":"Lite2Relight: 3D-aware single image portrait relighting","date_created":"2024-08-04T22:01:21Z","oa_version":"Published Version","doi":"10.1145/3641519.3657470","oa":1,"acknowledgement":"This work was supported by the ERC Consolidator Grant 4DReply (770784). 
We extend our gratitude to Shrisha Bharadwaj for providing feedback and constant support.","author":[{"last_name":"Rao","full_name":"Rao, Pramod","first_name":"Pramod"},{"first_name":"Gereon","full_name":"Fox, Gereon","last_name":"Fox"},{"first_name":"Abhimitra","full_name":"Meka, Abhimitra","last_name":"Meka"},{"full_name":"Mallikarjun, B. R.","last_name":"Mallikarjun","first_name":"B. R."},{"last_name":"Zhan","full_name":"Zhan, Fangneng","first_name":"Fangneng"},{"first_name":"Tim","last_name":"Weyrich","full_name":"Weyrich, Tim"},{"full_name":"Bickel, Bernd","last_name":"Bickel","first_name":"Bernd","id":"49876194-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-6511-9385"},{"full_name":"Pfister, Hanspeter","last_name":"Pfister","first_name":"Hanspeter"},{"first_name":"Wojciech","full_name":"Matusik, Wojciech","last_name":"Matusik"},{"first_name":"Mohamed","last_name":"Elgharib","full_name":"Elgharib, Mohamed"},{"first_name":"Christian","last_name":"Theobalt","full_name":"Theobalt, Christian"}],"month":"07","department":[{"_id":"BeBi"}],"article_processing_charge":"Yes (in subscription journal)","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"publisher":"Association for Computing Machinery"},{"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2305.05944","open_access":"1"}],"date_published":"2023-07-23T00:00:00Z","year":"2023","user_id":"317138e5-6ab7-11ef-aa6d-ffef3953e345","article_number":"20","day":"23","citation":{"short":"K. Tojo, A. Shamir, B. Bickel, N. Umetani, in:, SIGGRAPH 2023 Conference Proceedings, Association for Computing Machinery, 2023.","ieee":"K. Tojo, A. Shamir, B. Bickel, and N. Umetani, “Stealth shaper: Reflectivity optimization as surface stylization,” in <i>SIGGRAPH 2023 Conference Proceedings</i>, Los Angeles, CA, United States, 2023.","mla":"Tojo, Kenji, et al. 
“Stealth Shaper: Reflectivity Optimization as Surface Stylization.” <i>SIGGRAPH 2023 Conference Proceedings</i>, 20, Association for Computing Machinery, 2023, doi:<a href=\"https://doi.org/10.1145/3588432.3591542\">10.1145/3588432.3591542</a>.","apa":"Tojo, K., Shamir, A., Bickel, B., &#38; Umetani, N. (2023). Stealth shaper: Reflectivity optimization as surface stylization. In <i>SIGGRAPH 2023 Conference Proceedings</i>. Los Angeles, CA, United States: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3588432.3591542\">https://doi.org/10.1145/3588432.3591542</a>","ista":"Tojo K, Shamir A, Bickel B, Umetani N. 2023. Stealth shaper: Reflectivity optimization as surface stylization. SIGGRAPH 2023 Conference Proceedings. SIGGRAPH: Computer Graphics and Interactive Techniques Conference, 20.","ama":"Tojo K, Shamir A, Bickel B, Umetani N. Stealth shaper: Reflectivity optimization as surface stylization. In: <i>SIGGRAPH 2023 Conference Proceedings</i>. Association for Computing Machinery; 2023. doi:<a href=\"https://doi.org/10.1145/3588432.3591542\">10.1145/3588432.3591542</a>","chicago":"Tojo, Kenji, Ariel Shamir, Bernd Bickel, and Nobuyuki Umetani. “Stealth Shaper: Reflectivity Optimization as Surface Stylization.” In <i>SIGGRAPH 2023 Conference Proceedings</i>. Association for Computing Machinery, 2023. <a href=\"https://doi.org/10.1145/3588432.3591542\">https://doi.org/10.1145/3588432.3591542</a>."},"type":"conference","quality_controlled":"1","language":[{"iso":"eng"}],"_id":"14241","abstract":[{"text":"We present a technique to optimize the reflectivity of a surface while preserving its overall shape. The naïve optimization of the mesh vertices using the gradients of reflectivity simulations results in undesirable distortion. 
In contrast, our robust formulation optimizes the surface normal as an independent variable that bridges the reflectivity term with differential rendering, and the regularization term with as-rigid-as-possible elastic energy. We further adaptively subdivide the input mesh to improve the convergence. Consequently, our method can minimize the retroreflectivity of a wide range of input shapes, resulting in sharply creased shapes ubiquitous among stealth aircraft and Sci-Fi vehicles. Furthermore, by changing the reward for the direction of the outgoing light directions, our method can be applied to other reflectivity design tasks, such as the optimization of architectural walls to concentrate light in a specific region. We have tested the proposed method using light-transport simulations and real-world 3D-printed objects.","lang":"eng"}],"external_id":{"arxiv":["2305.05944"],"isi":["001117690500020"]},"scopus_import":"1","status":"public","isi":1,"publication":"SIGGRAPH 2023 Conference Proceedings","date_updated":"2025-09-09T12:49:15Z","oa_version":"Preprint","title":"Stealth shaper: Reflectivity optimization as surface stylization","date_created":"2023-08-27T22:01:17Z","arxiv":1,"publication_identifier":{"isbn":["9798400701597"]},"publication_status":"published","conference":{"location":"Los Angeles, CA, United States","end_date":"2023-08-10","start_date":"2023-08-06","name":"SIGGRAPH: Computer Graphics and Interactive Techniques Conference"},"publisher":"Association for Computing Machinery","corr_author":"1","article_processing_charge":"No","department":[{"_id":"BeBi"}],"month":"07","author":[{"first_name":"Kenji","full_name":"Tojo, Kenji","last_name":"Tojo"},{"last_name":"Shamir","full_name":"Shamir, Ariel","first_name":"Ariel"},{"orcid":"0000-0001-6511-9385","first_name":"Bernd","id":"49876194-F248-11E8-B48F-1D18A9856A87","full_name":"Bickel, Bernd","last_name":"Bickel"},{"first_name":"Nobuyuki","last_name":"Umetani","full_name":"Umetani, 
Nobuyuki"}],"acknowledgement":"The authors would like to thank Yuki Koyama and Takeo Igarashi for early discussions, and Yuta Yaguchi for support in 3D printing. This research is partially supported by the Israel Science Foundation grant number 1390/19.\r\n","oa":1,"doi":"10.1145/3588432.3591542"},{"file_date_updated":"2023-12-04T08:04:14Z","isi":1,"date_updated":"2025-09-09T13:33:58Z","publication":"ACM Transactions on Graphics","status":"public","scopus_import":"1","issue":"5","type":"journal_article","quality_controlled":"1","language":[{"iso":"eng"}],"_id":"14628","abstract":[{"text":"We introduce a compact, intuitive procedural graph representation for cellular metamaterials, which are small-scale, tileable structures that can be architected to exhibit many useful material properties. Because the structures’ “architectures” vary widely—with elements such as beams, thin shells, and solid bulks—it is difficult to explore them using existing representations. Generic approaches like voxel grids are versatile, but it is cumbersome to represent and edit individual structures; architecture-specific approaches address these issues, but are incompatible with one another. By contrast, our procedural graph succinctly represents the construction process for any structure using a simple skeleton annotated with spatially varying thickness. To express the highly constrained triply periodic minimal surfaces (TPMS) in this manner, we present the first fully automated version of the conjugate surface construction method, which allows novices to create complex TPMS from intuitive input. We demonstrate our representation’s expressiveness, accuracy, and compactness by constructing a wide range of established structures and hundreds of novel structures with diverse architectures and material properties. 
We also conduct a user study to verify our representation’s ease-of-use and ability to expand engineers’ capacity for exploration.","lang":"eng"}],"external_id":{"isi":["001086833300007"]},"has_accepted_license":"1","citation":{"apa":"Makatura, L., Wang, B., Chen, Y.-L., Deng, B., Wojtan, C., Bickel, B., &#38; Matusik, W. (2023). Procedural metamaterials: A unified procedural graph for metamaterial design. <i>ACM Transactions on Graphics</i>. Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3605389\">https://doi.org/10.1145/3605389</a>","ieee":"L. Makatura <i>et al.</i>, “Procedural metamaterials: A unified procedural graph for metamaterial design,” <i>ACM Transactions on Graphics</i>, vol. 42, no. 5. Association for Computing Machinery, 2023.","short":"L. Makatura, B. Wang, Y.-L. Chen, B. Deng, C. Wojtan, B. Bickel, W. Matusik, ACM Transactions on Graphics 42 (2023).","mla":"Makatura, Liane, et al. “Procedural Metamaterials: A Unified Procedural Graph for Metamaterial Design.” <i>ACM Transactions on Graphics</i>, vol. 42, no. 5, 168, Association for Computing Machinery, 2023, doi:<a href=\"https://doi.org/10.1145/3605389\">10.1145/3605389</a>.","chicago":"Makatura, Liane, Bohan Wang, Yi-Lu Chen, Bolei Deng, Chris Wojtan, Bernd Bickel, and Wojciech Matusik. “Procedural Metamaterials: A Unified Procedural Graph for Metamaterial Design.” <i>ACM Transactions on Graphics</i>. Association for Computing Machinery, 2023. <a href=\"https://doi.org/10.1145/3605389\">https://doi.org/10.1145/3605389</a>.","ista":"Makatura L, Wang B, Chen Y-L, Deng B, Wojtan C, Bickel B, Matusik W. 2023. Procedural metamaterials: A unified procedural graph for metamaterial design. ACM Transactions on Graphics. 42(5), 168.","ama":"Makatura L, Wang B, Chen Y-L, et al. Procedural metamaterials: A unified procedural graph for metamaterial design. <i>ACM Transactions on Graphics</i>. 2023;42(5). 
doi:<a href=\"https://doi.org/10.1145/3605389\">10.1145/3605389</a>"},"day":"01","year":"2023","user_id":"317138e5-6ab7-11ef-aa6d-ffef3953e345","ddc":["531","006"],"article_number":"168","intvolume":"        42","date_published":"2023-10-01T00:00:00Z","acknowledgement":"The authors thank Mina Konaković Luković and Michael Foshey for their early contributions to this project, David Palmer and Paul Zhang for their insightful discussions about minimal surfaces and the CSCM, Julian Panetta for providing the Elastic Textures code, and Hannes Hergeth for his feedback and support. We also thank our user study participants and anonymous reviewers.\r\nThis material is based upon work supported by the National Science Foundation\r\n(NSF) Graduate Research Fellowship under Grant No. 2141064; the MIT Morningside\r\nAcademy for Design Fellowship; the Defense Advanced Research Projects Agency\r\n(DARPA) Grant No. FA8750-20-C-0075; the ERC Consolidator Grant No. 101045083,\r\n“CoDiNA: Computational Discovery of Numerical Algorithms for Animation and Simulation of Natural Phenomena”; and the NewSat project, which is co-funded by the Operational Program for Competitiveness and Internationalisation (COMPETE2020), Portugal 2020, the European Regional Development Fund (ERDF), and the Portuguese Foundation for Science and Technology (FTC) under the MIT Portugal program.","oa":1,"doi":"10.1145/3605389","article_type":"original","project":[{"name":"Computational Discovery of Numerical Algorithms for Animation and Simulation of Natural Phenomena","_id":"34bc2376-11ca-11ed-8bc3-9a3b3961a088","grant_number":"101045083"}],"month":"10","department":[{"_id":"GradSch"},{"_id":"ChWo"},{"_id":"BeBi"}],"volume":42,"author":[{"first_name":"Liane","full_name":"Makatura, Liane","last_name":"Makatura"},{"last_name":"Wang","full_name":"Wang, Bohan","first_name":"Bohan"},{"first_name":"Yi-Lu","id":"0b467602-dbcd-11ea-9d1d-ed480aa46b70","full_name":"Chen, 
Yi-Lu","last_name":"Chen"},{"first_name":"Bolei","full_name":"Deng, Bolei","last_name":"Deng"},{"full_name":"Wojtan, Christopher J","last_name":"Wojtan","id":"3C61F1D2-F248-11E8-B48F-1D18A9856A87","first_name":"Christopher J","orcid":"0000-0001-6646-5546"},{"orcid":"0000-0001-6511-9385","first_name":"Bernd","id":"49876194-F248-11E8-B48F-1D18A9856A87","last_name":"Bickel","full_name":"Bickel, Bernd"},{"full_name":"Matusik, Wojciech","last_name":"Matusik","first_name":"Wojciech"}],"publisher":"Association for Computing Machinery","article_processing_charge":"Yes (in subscription journal)","publication_status":"published","file":[{"creator":"yichen","file_id":"14630","date_updated":"2023-11-29T15:16:01Z","relation":"main_file","checksum":"0192f597d7a2ceaf89baddfd6190d4c8","file_name":"tog-22-0089-File004.zip","success":1,"content_type":"application/zip","date_created":"2023-11-29T15:16:01Z","file_size":95467870,"access_level":"open_access"},{"checksum":"7fb024963be81933494f38de191e4710","file_name":"tog-22-0089-File005.zip","date_updated":"2023-11-29T15:16:01Z","relation":"main_file","creator":"yichen","file_id":"14631","access_level":"open_access","content_type":"application/zip","success":1,"file_size":103731880,"date_created":"2023-11-29T15:16:01Z"},{"access_level":"open_access","success":1,"content_type":"application/pdf","file_size":57067476,"date_created":"2023-12-04T08:04:14Z","checksum":"b7d6829ce396e21cac9fae0ec7130a6b","file_name":"2023_ACMToG_Makatura.pdf","relation":"main_file","creator":"dernst","file_id":"14638","date_updated":"2023-12-04T08:04:14Z"}],"publication_identifier":{"eissn":["1557-7368"],"issn":["0730-0301"]},"keyword":["Computer Graphics and Computer-Aided Design"],"oa_version":"Published Version","title":"Procedural metamaterials: A unified procedural graph for metamaterial design","date_created":"2023-11-29T15:02:03Z"},{"file_date_updated":"2023-05-16T08:28:37Z","date_updated":"2025-04-14T07:28:57Z","publication":"Computer Graphics 
Forum","isi":1,"page":"397-409","status":"public","issue":"2","ec_funded":1,"scopus_import":"1","language":[{"iso":"eng"}],"_id":"12972","abstract":[{"text":"Embroidery is a long-standing and high-quality approach to making logos and images on textiles. Nowadays, it can also be performed via automated machines that weave threads with high spatial accuracy. A characteristic feature of the appearance of the threads is a high degree of anisotropy. The anisotropic behavior is caused by depositing thin but long strings of thread. As a result, the stitched patterns convey both color and direction. Artists leverage this anisotropic behavior to enhance pure color images with textures, illusions of motion, or depth cues. However, designing colorful embroidery patterns with prescribed directionality is a challenging task, one usually requiring an expert designer. In this work, we propose an interactive algorithm that generates machine-fabricable embroidery patterns from multi-chromatic images equipped with user-specified directionality fields.We cast the problem of finding a stitching pattern into vector theory. To find a suitable stitching pattern, we extract sources and sinks from the divergence field of the vector field extracted from the input and use them to trace streamlines. We further optimize the streamlines to guarantee a smooth and connected stitching pattern. The generated patterns approximate the color distribution constrained by the directionality field. To allow for further artistic control, the trade-off between color match and directionality match can be interactively explored via an intuitive slider. We showcase our approach by fabricating several embroidery paths.","lang":"eng"}],"external_id":{"isi":["001000062600033"]},"type":"journal_article","quality_controlled":"1","has_accepted_license":"1","day":"08","citation":{"chicago":"Liu, Zhenyuan, Michael Piovarci, Christian Hafner, Raphael Charrondiere, and Bernd Bickel. 
“Directionality-Aware Design of Embroidery Patterns.” <i>Computer Graphics Forum</i>. Wiley, 2023. <a href=\"https://doi.org/10.1111/cgf.14770\">https://doi.org/10.1111/cgf.14770</a>.","ama":"Liu Z, Piovarci M, Hafner C, Charrondiere R, Bickel B. Directionality-aware design of embroidery patterns. <i>Computer Graphics Forum</i>. 2023;42(2):397-409. doi:<a href=\"https://doi.org/10.1111/cgf.14770\">10.1111/cgf.14770</a>","ista":"Liu Z, Piovarci M, Hafner C, Charrondiere R, Bickel B. 2023. Directionality-aware design of embroidery patterns. Computer Graphics Forum. 42(2), 397–409.","apa":"Liu, Z., Piovarci, M., Hafner, C., Charrondiere, R., &#38; Bickel, B. (2023). Directionality-aware design of embroidery patterns. <i>Computer Graphics Forum</i>. Saarbrucken, Germany: Wiley. <a href=\"https://doi.org/10.1111/cgf.14770\">https://doi.org/10.1111/cgf.14770</a>","mla":"Liu, Zhenyuan, et al. “Directionality-Aware Design of Embroidery Patterns.” <i>Computer Graphics Forum</i>, vol. 42, no. 2, Wiley, 2023, pp. 397–409, doi:<a href=\"https://doi.org/10.1111/cgf.14770\">10.1111/cgf.14770</a>.","ieee":"Z. Liu, M. Piovarci, C. Hafner, R. Charrondiere, and B. Bickel, “Directionality-aware design of embroidery patterns,” <i>Computer Graphics Forum</i>, vol. 42, no. 2. Wiley, pp. 397–409, 2023.","short":"Z. Liu, M. Piovarci, C. Hafner, R. Charrondiere, B. Bickel, Computer Graphics Forum 42 (2023) 397–409."},"intvolume":"        42","year":"2023","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","ddc":["004"],"date_published":"2023-05-08T00:00:00Z","acknowledgement":"This work was supported by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation program (grant agreement No 715767 – MATERIALIZABLE), and FWF Lise Meitner (Grant M 3319). 
We thank the anonymous reviewers for their insightful feedback; Solal Pirelli, Shardul Chiplunkar, and Paola Mejia for proofreading; everyone in the visual computing group at ISTA for inspiring lunch and coffee breaks; Thibault Tricard for help producing the results of Phasor Noise.","doi":"10.1111/cgf.14770","oa":1,"author":[{"full_name":"Liu, Zhenyuan","last_name":"Liu","orcid":"0000-0001-9200-5690","first_name":"Zhenyuan","id":"70f0d7cf-ae65-11ec-a14f-89dfc5505b19"},{"full_name":"Piovarci, Michael","last_name":"Piovarci","id":"62E473F4-5C99-11EA-A40E-AF823DDC885E","first_name":"Michael","orcid":"0000-0002-5062-4474"},{"full_name":"Hafner, Christian","last_name":"Hafner","first_name":"Christian","id":"400429CC-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Charrondiere, Raphael","last_name":"Charrondiere","first_name":"Raphael","id":"a3a24133-2cc7-11ec-be88-8ddaf6f464b1"},{"orcid":"0000-0001-6511-9385","id":"49876194-F248-11E8-B48F-1D18A9856A87","first_name":"Bernd","full_name":"Bickel, Bernd","last_name":"Bickel"}],"volume":42,"project":[{"grant_number":"M03319","_id":"eb901961-77a9-11ec-83b8-f5c883a62027","name":"Perception-Aware Appearance Fabrication"},{"name":"MATERIALIZABLE: Intelligent fabrication-oriented Computational Design and Modeling","call_identifier":"H2020","_id":"24F9549A-B435-11E9-9278-68D0E5697425","grant_number":"715767"}],"article_type":"original","month":"05","department":[{"_id":"BeBi"}],"corr_author":"1","tmp":{"image":"/images/cc_by_nc_nd.png","short":"CC BY-NC-ND (4.0)","name":"Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)","legal_code_url":"https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode"},"article_processing_charge":"No","publisher":"Wiley","publication_status":"published","conference":{"name":"EG: Eurographics","start_date":"2023-05-08","end_date":"2023-05-12","location":"Saarbrucken, 
Germany"},"file":[{"access_level":"open_access","file_size":24003702,"date_created":"2023-05-16T08:28:37Z","success":1,"content_type":"application/pdf","file_name":"Zhenyuan2023.pdf","checksum":"4c188c2be4745467a8790bbf5d6491aa","file_id":"12974","date_updated":"2023-05-16T08:28:37Z","creator":"mpiovarc","relation":"main_file"}],"publication_identifier":{"issn":["1467-8659"]},"keyword":["embroidery","design","directionality","density","image"],"date_created":"2023-05-16T08:47:25Z","title":"Directionality-aware design of embroidery patterns","oa_version":"Published Version"},{"scopus_import":"1","status":"public","isi":1,"publication":"SIGGRAPH ’23 Conference Proceedings","date_updated":"2025-04-15T07:43:53Z","file_date_updated":"2024-01-29T10:14:10Z","date_published":"2023-07-23T00:00:00Z","year":"2023","user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","ddc":["004"],"article_number":"21","has_accepted_license":"1","day":"23","citation":{"chicago":"Condor, Jorge, Michael Piovarci, Bernd Bickel, and Piotr Didyk. “Gloss-Aware Color Correction for 3D Printing.” In <i>SIGGRAPH ’23 Conference Proceedings</i>. Association for Computing Machinery, 2023. <a href=\"https://doi.org/10.1145/3588432.3591546\">https://doi.org/10.1145/3588432.3591546</a>.","ista":"Condor J, Piovarci M, Bickel B, Didyk P. 2023. Gloss-aware color correction for 3D printing. SIGGRAPH ’23 Conference Proceedings. SIGGRAPH: Computer Graphics and Interactive Techniques Conference, 21.","ama":"Condor J, Piovarci M, Bickel B, Didyk P. Gloss-aware color correction for 3D printing. In: <i>SIGGRAPH ’23 Conference Proceedings</i>. Association for Computing Machinery; 2023. doi:<a href=\"https://doi.org/10.1145/3588432.3591546\">10.1145/3588432.3591546</a>","apa":"Condor, J., Piovarci, M., Bickel, B., &#38; Didyk, P. (2023). Gloss-aware color correction for 3D printing. In <i>SIGGRAPH ’23 Conference Proceedings</i>. Los Angeles, CA, United States: Association for Computing Machinery. 
<a href=\"https://doi.org/10.1145/3588432.3591546\">https://doi.org/10.1145/3588432.3591546</a>","ieee":"J. Condor, M. Piovarci, B. Bickel, and P. Didyk, “Gloss-aware color correction for 3D printing,” in <i>SIGGRAPH ’23 Conference Proceedings</i>, Los Angeles, CA, United States, 2023.","short":"J. Condor, M. Piovarci, B. Bickel, P. Didyk, in:, SIGGRAPH ’23 Conference Proceedings, Association for Computing Machinery, 2023.","mla":"Condor, Jorge, et al. “Gloss-Aware Color Correction for 3D Printing.” <i>SIGGRAPH ’23 Conference Proceedings</i>, 21, Association for Computing Machinery, 2023, doi:<a href=\"https://doi.org/10.1145/3588432.3591546\">10.1145/3588432.3591546</a>."},"type":"conference","quality_controlled":"1","language":[{"iso":"eng"}],"_id":"12979","abstract":[{"lang":"eng","text":"Color and gloss are fundamental aspects of surface appearance. State-of-the-art fabrication techniques can manipulate both properties of the printed 3D objects. However, in the context of appearance reproduction, perceptual aspects of color and gloss are usually handled separately, even though previous perceptual studies suggest their interaction. Our work is motivated by previous studies demonstrating a perceived color shift due to a change in the object's gloss, i.e., two samples with the same color but different surface gloss appear as they have different colors. In this paper, we conduct new experiments which support this observation and provide insights into the magnitude and direction of the perceived color change. We use the observations as guidance to design a new method that estimates and corrects the color shift enabling the fabrication of objects with the same perceived color but different surface gloss. We formulate the problem as an optimization procedure solved using differentiable rendering. We evaluate the effectiveness of our method in perceptual experiments with 3D objects fabricated using a multi-material 3D printer and demonstrate potential applications. 
"}],"external_id":{"isi":["001117690500021"]},"publisher":"Association for Computing Machinery","corr_author":"1","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"article_processing_charge":"Yes (via OA deal)","project":[{"name":"Perception-Aware Appearance Fabrication","_id":"eb901961-77a9-11ec-83b8-f5c883a62027","grant_number":"M03319"}],"month":"07","department":[{"_id":"BeBi"}],"author":[{"full_name":"Condor, Jorge","last_name":"Condor","first_name":"Jorge"},{"full_name":"Piovarci, Michael","last_name":"Piovarci","orcid":"0000-0002-5062-4474","id":"62E473F4-5C99-11EA-A40E-AF823DDC885E","first_name":"Michael"},{"orcid":"0000-0001-6511-9385","id":"49876194-F248-11E8-B48F-1D18A9856A87","first_name":"Bernd","full_name":"Bickel, Bernd","last_name":"Bickel"},{"full_name":"Didyk, Piotr","last_name":"Didyk","first_name":"Piotr"}],"acknowledgement":"We thank Matthew S Zurawski for the 3D model of the car speed shape. 
This research has been supported by the Swiss National Science Foundation (SNSF, Grant 200502) and the FWF Lise Meitner (Grant M 3319).","doi":"10.1145/3588432.3591546","oa":1,"oa_version":"Published Version","title":"Gloss-aware color correction for 3D printing","date_created":"2023-05-16T09:34:13Z","publication_identifier":{"isbn":["9798400701597"]},"keyword":["color","gloss","perception","color compensation","color management"],"file":[{"content_type":"application/pdf","success":1,"file_size":42323971,"date_created":"2023-05-16T09:32:50Z","access_level":"open_access","file_id":"12983","relation":"main_file","date_updated":"2023-05-16T09:32:50Z","creator":"mpiovarc","checksum":"84a437739af5d46507928939b20c0c28","file_name":"Condor2023_supplemental.pdf"},{"access_level":"open_access","file_size":26079404,"date_created":"2024-01-29T10:14:10Z","success":1,"content_type":"application/pdf","file_name":"2023_Siggraph_Condor.pdf","checksum":"0f5c8b242e8e7c153c04888c4d0c6f37","creator":"dernst","relation":"main_file","date_updated":"2024-01-29T10:14:10Z","file_id":"14893"}],"conference":{"name":"SIGGRAPH: Computer Graphics and Interactive Techniques Conference","start_date":"2023-08-06","end_date":"2023-08-10","location":"Los Angeles, CA, United 
States"},"publication_status":"published"},{"file":[{"date_created":"2023-05-16T09:38:25Z","file_size":30817343,"content_type":"application/pdf","success":1,"access_level":"open_access","creator":"mpiovarc","date_updated":"2023-05-16T09:38:25Z","file_id":"12985","relation":"main_file","file_name":"Piovarci2023.pdf","checksum":"5f0a6867689e025a661bd0b4fd90b821"},{"access_level":"open_access","success":1,"content_type":"application/pdf","file_size":30281676,"date_created":"2024-04-16T05:52:18Z","checksum":"6dd371de5b517e5f184f9c2cbea4b8b3","file_name":"2023_ACM_Piovarci.pdf","creator":"dernst","file_id":"15324","date_updated":"2024-04-16T05:52:18Z","relation":"main_file"}],"publication_status":"published","conference":{"name":"SIGGRAPH: Computer Graphics and Interactive Techniques Conference","start_date":"2023-08-06","end_date":"2023-08-10","location":"Los Angeles, CA, United States"},"oa_version":"Published Version","title":"Skin-Screen: A computational fabrication framework for color tattoos","date_created":"2023-05-16T09:39:14Z","keyword":["appearance","modeling","reproduction","tattoo","skin color","gamut mapping","ink-optimization","prosthetic"],"publication_identifier":{"eissn":["1557-7368"],"issn":["0730-0301"]},"department":[{"_id":"BeBi"}],"month":"07","article_type":"original","project":[{"_id":"eb901961-77a9-11ec-83b8-f5c883a62027","grant_number":"M03319","name":"Perception-Aware Appearance Fabrication"}],"author":[{"last_name":"Piovarci","full_name":"Piovarci, Michael","orcid":"0000-0002-5062-4474","id":"62E473F4-5C99-11EA-A40E-AF823DDC885E","first_name":"Michael"},{"first_name":"Alexandre","full_name":"Chapiro, Alexandre","last_name":"Chapiro"},{"full_name":"Bickel, Bernd","last_name":"Bickel","orcid":"0000-0001-6511-9385","id":"49876194-F248-11E8-B48F-1D18A9856A87","first_name":"Bernd"}],"volume":42,"doi":"10.1145/3592432","oa":1,"acknowledgement":"We thank Todor Asenov and the Miba Machine Shop for their help in assembling the tattoo machine and 
manufacturing the substrates. We thank Geysler Rodrigues for the insightful discussions on tattooing practices from a professional artist's perspective. We thank Maria Fernanda Portugal for sharing a doctor's perspective on medical applications of tattoos. This work is graciously supported by the FWF Lise Meitner (Grant M 3319).","publisher":"Association for Computing Machinery","article_processing_charge":"Yes (via OA deal)","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"corr_author":"1","day":"26","citation":{"ieee":"M. Piovarci, A. Chapiro, and B. Bickel, “Skin-Screen: A computational fabrication framework for color tattoos,” <i>ACM Transactions on Graphics</i>, vol. 42, no. 4. Association for Computing Machinery, 2023.","short":"M. Piovarci, A. Chapiro, B. Bickel, ACM Transactions on Graphics 42 (2023).","mla":"Piovarci, Michael, et al. “Skin-Screen: A Computational Fabrication Framework for Color Tattoos.” <i>ACM Transactions on Graphics</i>, vol. 42, no. 4, 67, Association for Computing Machinery, 2023, doi:<a href=\"https://doi.org/10.1145/3592432\">10.1145/3592432</a>.","apa":"Piovarci, M., Chapiro, A., &#38; Bickel, B. (2023). Skin-Screen: A computational fabrication framework for color tattoos. <i>ACM Transactions on Graphics</i>. Los Angeles, CA, United States: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3592432\">https://doi.org/10.1145/3592432</a>","ista":"Piovarci M, Chapiro A, Bickel B. 2023. Skin-Screen: A computational fabrication framework for color tattoos. ACM Transactions on Graphics. 42(4), 67.","ama":"Piovarci M, Chapiro A, Bickel B. Skin-Screen: A computational fabrication framework for color tattoos. <i>ACM Transactions on Graphics</i>. 2023;42(4). 
doi:<a href=\"https://doi.org/10.1145/3592432\">10.1145/3592432</a>","chicago":"Piovarci, Michael, Alexandre Chapiro, and Bernd Bickel. “Skin-Screen: A Computational Fabrication Framework for Color Tattoos.” <i>ACM Transactions on Graphics</i>. Association for Computing Machinery, 2023. <a href=\"https://doi.org/10.1145/3592432\">https://doi.org/10.1145/3592432</a>."},"has_accepted_license":"1","quality_controlled":"1","type":"journal_article","external_id":{"isi":["001044671300033"]},"_id":"12984","abstract":[{"lang":"eng","text":"Tattoos are a highly popular medium, with both artistic and medical applications. Although the mechanical process of tattoo application has evolved historically, the results are reliant on the artisanal skill of the artist. This can be especially challenging for some skin tones, or in cases where artists lack experience. We provide the first systematic overview of tattooing as a computational fabrication technique. We built an automated tattooing rig and a recipe for the creation of silicone sheets mimicking realistic skin tones, which allowed us to create an accurate model predicting tattoo appearance. 
This enables several exciting applications including tattoo previewing, color retargeting, novel ink spectra optimization, color-accurate prosthetics, and more."}],"language":[{"iso":"eng"}],"date_published":"2023-07-26T00:00:00Z","user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","ddc":["004"],"year":"2023","intvolume":"        42","article_number":"67","isi":1,"acknowledged_ssus":[{"_id":"M-Shop"}],"publication":"ACM Transactions on Graphics","date_updated":"2025-04-15T07:43:53Z","file_date_updated":"2024-04-16T05:52:18Z","scopus_import":"1","issue":"4","status":"public"},{"publication_status":"published","conference":{"name":"SIGGRAPH: Computer Graphics and Interactive Techniques Conference","start_date":"2023-08-06","end_date":"2023-08-10","location":"Los Angeles, CA, United States"},"file":[{"access_level":"open_access","content_type":"application/pdf","success":1,"date_created":"2023-06-19T11:02:23Z","file_size":78940724,"checksum":"a0b0ba3b36f43a94388e8824613d812a","file_name":"2023_ACMToG_Freire.pdf","creator":"dernst","file_id":"13156","relation":"main_file","date_updated":"2023-06-19T11:02:23Z"},{"access_level":"open_access","content_type":"application/pdf","success":1,"date_created":"2023-06-20T12:20:51Z","file_size":34345905,"checksum":"b9206bbb67af82df49b7e7cdbde3410c","file_name":"2023_ACMToG_SuppMaterial_Freire.pdf","date_updated":"2023-06-20T12:20:51Z","file_id":"13157","creator":"dernst","relation":"main_file"}],"publication_identifier":{"issn":["0730-0301"],"eissn":["1557-7368"]},"keyword":["PCB design and layout","Mesh geometry models"],"title":"PCBend: Light up your 3D shapes with foldable circuit boards","date_created":"2023-05-22T08:37:04Z","oa_version":"Submitted Version","acknowledgement":"We thank the reviewers for the valuable feedback. We also thank the Miba Machine Shop at ISTA, PCBWay, and PragoBoard for helping us with fabrication and assembly. 
This project was supported by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation program (Grant Agreement No. 715767 – MATERIALIZABLE).","oa":1,"doi":"10.1145/3592411","author":[{"full_name":"Freire, Marco","last_name":"Freire","first_name":"Marco"},{"orcid":"0009-0007-6138-6890","first_name":"Manas","id":"FF8FA64C-AA6A-11E9-99AD-50D4E5697425","full_name":"Bhargava, Manas","last_name":"Bhargava"},{"id":"2B14B676-F248-11E8-B48F-1D18A9856A87","first_name":"Camille","last_name":"Schreck","full_name":"Schreck, Camille"},{"full_name":"Hugron, Pierre-Alexandre","last_name":"Hugron","first_name":"Pierre-Alexandre"},{"first_name":"Bernd","id":"49876194-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-6511-9385","last_name":"Bickel","full_name":"Bickel, Bernd"},{"first_name":"Sylvain","full_name":"Lefebvre, Sylvain","last_name":"Lefebvre"}],"volume":42,"article_type":"original","project":[{"call_identifier":"H2020","_id":"24F9549A-B435-11E9-9278-68D0E5697425","grant_number":"715767","name":"MATERIALIZABLE: Intelligent fabrication-oriented Computational Design and Modeling"}],"department":[{"_id":"GradSch"},{"_id":"BeBi"}],"month":"07","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"corr_author":"1","article_processing_charge":"No","publisher":"Association for Computing Machinery","related_material":{"record":[{"id":"20276","status":"public","relation":"dissertation_contains"}]},"abstract":[{"lang":"eng","text":"We propose a computational design approach for covering a surface with individually addressable RGB LEDs, effectively forming a low-resolution surface screen. To achieve a low-cost and scalable approach, we propose creating designs from flat PCB panels bent in-place along the surface of a 3D printed core. 
Working with standard rigid PCBs enables the use of\r\nestablished PCB manufacturing services, allowing the fabrication of designs with several hundred LEDs. \r\nOur approach optimizes the PCB geometry for folding, and then jointly optimizes the LED packing, circuit and routing, solving a challenging layout problem under strict manufacturing requirements. Unlike paper, PCBs cannot bend beyond a certain point without breaking. Therefore, we introduce parametric cut patterns acting as hinges, designed to allow bending while remaining compact. To tackle the joint optimization of placement, circuit and routing, we propose a specialized algorithm that splits the global problem into one sub-problem per triangle, which is then individually solved.\r\nOur technique generates PCB blueprints in a completely automated way. After being fabricated by a PCB manufacturing service, the boards are bent and glued by the user onto the 3D printed support. We demonstrate our technique on a range of physical models and virtual examples, creating intricate surface light patterns from hundreds of LEDs."}],"_id":"13049","language":[{"iso":"eng"}],"external_id":{"isi":["001044671300108"]},"type":"journal_article","quality_controlled":"1","has_accepted_license":"1","citation":{"chicago":"Freire, Marco, Manas Bhargava, Camille Schreck, Pierre-Alexandre Hugron, Bernd Bickel, and Sylvain Lefebvre. “PCBend: Light up Your 3D Shapes with Foldable Circuit Boards.” <i>Transactions on Graphics</i>. Association for Computing Machinery, 2023. <a href=\"https://doi.org/10.1145/3592411\">https://doi.org/10.1145/3592411</a>.","ista":"Freire M, Bhargava M, Schreck C, Hugron P-A, Bickel B, Lefebvre S. 2023. PCBend: Light up your 3D shapes with foldable circuit boards. Transactions on Graphics. 42(4), 142.","ama":"Freire M, Bhargava M, Schreck C, Hugron P-A, Bickel B, Lefebvre S. PCBend: Light up your 3D shapes with foldable circuit boards. <i>Transactions on Graphics</i>. 2023;42(4). 
doi:<a href=\"https://doi.org/10.1145/3592411\">10.1145/3592411</a>","apa":"Freire, M., Bhargava, M., Schreck, C., Hugron, P.-A., Bickel, B., &#38; Lefebvre, S. (2023). PCBend: Light up your 3D shapes with foldable circuit boards. <i>Transactions on Graphics</i>. Los Angeles, CA, United States: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3592411\">https://doi.org/10.1145/3592411</a>","ieee":"M. Freire, M. Bhargava, C. Schreck, P.-A. Hugron, B. Bickel, and S. Lefebvre, “PCBend: Light up your 3D shapes with foldable circuit boards,” <i>Transactions on Graphics</i>, vol. 42, no. 4. Association for Computing Machinery, 2023.","short":"M. Freire, M. Bhargava, C. Schreck, P.-A. Hugron, B. Bickel, S. Lefebvre, Transactions on Graphics 42 (2023).","mla":"Freire, Marco, et al. “PCBend: Light up Your 3D Shapes with Foldable Circuit Boards.” <i>Transactions on Graphics</i>, vol. 42, no. 4, 142, Association for Computing Machinery, 2023, doi:<a href=\"https://doi.org/10.1145/3592411\">10.1145/3592411</a>."},"day":"26","article_number":"142","intvolume":"        42","year":"2023","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","ddc":["006"],"date_published":"2023-07-26T00:00:00Z","file_date_updated":"2023-06-20T12:20:51Z","date_updated":"2026-04-07T11:50:09Z","publication":"Transactions on Graphics","acknowledged_ssus":[{"_id":"M-Shop"}],"isi":1,"status":"public","issue":"4","ec_funded":1,"scopus_import":"1"},{"doi":"10.1038/s41592-023-01936-6","oa":1,"acknowledgement":"We thank J. Vorlaufer, N. Agudelo and A. Wartak for microscope maintenance and troubleshooting, C. Kreuzinger and A. Freeman for technical assistance, M. Šuplata for hardware control support and M. Cunha dos Santos for initial exploration of software. We\r\nthank P. Henderson for advice on deep-learning training and M. Sixt, S. Boyd and T. Weiss for discussions and critical reading of the manuscript. L. Lavis (Janelia Research Campus) generously provided the JF585-HaloTag ligand. 
We acknowledge expert support by IST\r\nAustria’s scientific computing, imaging and optics, preclinical, library and laboratory support facilities and by the Miba machine shop. We gratefully acknowledge funding by the following sources: Austrian Science Fund (F.W.F.) grant no. I3600-B27 (J.G.D.), grant no. DK W1232\r\n(J.G.D. and J.M.M.) and grant no. Z 312-B27, Wittgenstein award (P.J.); the Gesellschaft für Forschungsförderung NÖ grant no. LSC18-022 (J.G.D.); an ISTA Interdisciplinary project grant (J.G.D. and B.B.); the European Union’s Horizon 2020 research and innovation programme,\r\nMarie Skłodowska-Curie grant 665385 (J.M.M. and J.L.); the European Union’s Horizon 2020 research and innovation programme, European Research Council grant no. 715767, MATERIALIZABLE (B.B.); grant no. 715508, REVERSEAUTISM (G.N.); grant no. 695568, SYNNOVATE (S.G.N.G.); and grant no. 692692, GIANTSYN (P.J.); the Simons\r\nFoundation Autism Research Initiative grant no. 529085 (S.G.N.G.); the Wellcome Trust Technology Development grant no. 202932 (S.G.N.G.); the Marie Skłodowska-Curie Actions Individual Fellowship no. 101026635 under the EU Horizon 2020 program (J.F.W.);\r\nthe Human Frontier Science Program postdoctoral fellowship LT000557/2018 (W.J.); and the National Science Foundation grant no. IIS-1835231 (H.P.) 
and NCS-FO-2124179 (H.P.).","department":[{"_id":"PeJo"},{"_id":"GaNo"},{"_id":"BeBi"},{"_id":"JoDa"},{"_id":"Bio"}],"month":"08","project":[{"name":"Optical control of synaptic function via adhesion molecules","grant_number":"I03600","call_identifier":"FWF","_id":"265CB4D0-B435-11E9-9278-68D0E5697425"},{"grant_number":"W1232","_id":"2548AE96-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","name":"Molecular Drug Targets"},{"grant_number":"Z00312","_id":"25C5A090-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","name":"Synaptic communication in neuronal microcircuits"},{"grant_number":"LS18-022","_id":"23889792-32DE-11EA-91FC-C7463DDC885E","name":"High content imaging to decode human immune cell interactions in health and allergic disease"},{"_id":"2564DBCA-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"665385","name":"International IST Doctoral Program"},{"name":"MATERIALIZABLE: Intelligent fabrication-oriented Computational Design and Modeling","grant_number":"715767","call_identifier":"H2020","_id":"24F9549A-B435-11E9-9278-68D0E5697425"},{"_id":"25444568-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"715508","name":"Probing the Reversibility of Autism Spectrum Disorders by Employing in vivo and in vitro Models"},{"name":"Biophysics and circuit function of a giant cortical glutamatergic synapse","_id":"25B7EB9E-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"692692"},{"grant_number":"101026635","call_identifier":"H2020","_id":"fc2be41b-9c52-11eb-aca3-faa90aa144e9","name":"Synaptic computations of the hippocampal CA3 circuitry"},{"_id":"2668BFA0-B435-11E9-9278-68D0E5697425","grant_number":"LT00057","name":"High-speed 3D-nanoscopy to study the role of adhesion during 3D cell migration"}],"article_type":"original","volume":20,"author":[{"full_name":"Velicky, 
Philipp","last_name":"Velicky","first_name":"Philipp","id":"39BDC62C-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-2340-7431"},{"last_name":"Miguel Villalba","full_name":"Miguel Villalba, Eder","id":"3FB91342-F248-11E8-B48F-1D18A9856A87","first_name":"Eder","orcid":"0000-0001-5665-0430"},{"first_name":"Julia M","id":"443DB6DE-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3862-1235","full_name":"Michalska, Julia M","last_name":"Michalska"},{"full_name":"Lyudchik, Julia","last_name":"Lyudchik","first_name":"Julia","id":"46E28B80-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Donglai","last_name":"Wei","full_name":"Wei, Donglai"},{"first_name":"Zudi","full_name":"Lin, Zudi","last_name":"Lin"},{"first_name":"Jake","id":"63836096-4690-11EA-BD4E-32803DDC885E","orcid":"0000-0002-8698-3823","last_name":"Watson","full_name":"Watson, Jake"},{"first_name":"Jakob","full_name":"Troidl, Jakob","last_name":"Troidl"},{"full_name":"Beyer, Johanna","last_name":"Beyer","first_name":"Johanna"},{"first_name":"Yoav","id":"43DF3136-F248-11E8-B48F-1D18A9856A87","full_name":"Ben Simon, Yoav","last_name":"Ben Simon"},{"full_name":"Sommer, Christoph M","last_name":"Sommer","first_name":"Christoph M","id":"4DF26D8C-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-1216-9105"},{"full_name":"Jahr, Wiebke","last_name":"Jahr","id":"425C1CE8-F248-11E8-B48F-1D18A9856A87","first_name":"Wiebke","orcid":"0000-0003-0201-2315"},{"id":"9ac8f577-2357-11eb-997a-e566c5550886","first_name":"Alban","last_name":"Cenameri","full_name":"Cenameri, Alban"},{"first_name":"Johannes","last_name":"Broichhagen","full_name":"Broichhagen, Johannes"},{"first_name":"Seth G.N.","last_name":"Grant","full_name":"Grant, Seth G.N."},{"full_name":"Jonas, Peter M","last_name":"Jonas","orcid":"0000-0001-5001-4804","first_name":"Peter M","id":"353C1B58-F248-11E8-B48F-1D18A9856A87"},{"orcid":"0000-0002-7673-7178","first_name":"Gaia","id":"3E57A680-F248-11E8-B48F-1D18A9856A87","last_name":"Novarino","full_name":"Novarino, 
Gaia"},{"first_name":"Hanspeter","last_name":"Pfister","full_name":"Pfister, Hanspeter"},{"full_name":"Bickel, Bernd","last_name":"Bickel","orcid":"0000-0001-6511-9385","id":"49876194-F248-11E8-B48F-1D18A9856A87","first_name":"Bernd"},{"first_name":"Johann G","id":"42EFD3B6-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8559-3973","last_name":"Danzl","full_name":"Danzl, Johann G"}],"publisher":"Springer Nature","article_processing_charge":"Yes (in subscription journal)","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"corr_author":"1","related_material":{"link":[{"relation":"software","url":"https://github.com/danzllab/LIONESS"}],"record":[{"relation":"research_data","id":"12817","status":"public"},{"relation":"shorter_version","status":"public","id":"14770"},{"id":"11943","status":"public","relation":"earlier_version"},{"relation":"dissertation_contains","status":"public","id":"18674"}]},"pmid":1,"publication_status":"published","file":[{"success":1,"content_type":"application/pdf","date_created":"2025-02-26T08:01:57Z","file_size":14103039,"access_level":"open_access","date_updated":"2025-02-26T08:01:57Z","creator":"dernst","relation":"main_file","file_id":"19088","checksum":"a68e845780a82ea36d0d4d3212a87c10","file_name":"2023_NatureMethods_Velicky.pdf"}],"publication_identifier":{"issn":["1548-7091"],"eissn":["1548-7105"]},"OA_type":"hybrid","oa_version":"Published Version","title":"Dense 4D nanoscale reconstruction of living brain tissue","date_created":"2023-07-23T22:01:13Z","file_date_updated":"2025-02-26T08:01:57Z","isi":1,"acknowledged_ssus":[{"_id":"ScienComp"},{"_id":"Bio"},{"_id":"PreCl"},{"_id":"E-Lib"},{"_id":"LifeSc"},{"_id":"M-Shop"}],"date_updated":"2026-04-07T12:58:30Z","publication":"Nature 
Methods","page":"1256-1265","status":"public","scopus_import":"1","ec_funded":1,"quality_controlled":"1","type":"journal_article","external_id":{"isi":["001025621500001"],"pmid":["37429995"]},"language":[{"iso":"eng"}],"_id":"13267","abstract":[{"lang":"eng","text":"Three-dimensional (3D) reconstruction of living brain tissue down to an individual synapse level would create opportunities for decoding the dynamics and structure–function relationships of the brain’s complex and dense information processing network; however, this has been hindered by insufficient 3D resolution, inadequate signal-to-noise ratio and prohibitive light burden in optical imaging, whereas electron microscopy is inherently static. Here we solved these challenges by developing an integrated optical/machine-learning technology, LIONESS (live information-optimized nanoscopy enabling saturated segmentation). This leverages optical modifications to stimulated emission depletion microscopy in comprehensively, extracellularly labeled tissue and previous information on sample structure via machine learning to simultaneously achieve isotropic super-resolution, high signal-to-noise ratio and compatibility with living tissue. This allows dense deep-learning-based instance segmentation and 3D reconstruction at a synapse level, incorporating molecular, activity and morphodynamic information. LIONESS opens up avenues for studying the dynamic functional (nano-)architecture of living brain tissue."}],"citation":{"ista":"Velicky P, Miguel Villalba E, Michalska JM, Lyudchik J, Wei D, Lin Z, Watson J, Troidl J, Beyer J, Ben Simon Y, Sommer CM, Jahr W, Cenameri A, Broichhagen J, Grant SGN, Jonas PM, Novarino G, Pfister H, Bickel B, Danzl JG. 2023. Dense 4D nanoscale reconstruction of living brain tissue. Nature Methods. 20, 1256–1265.","ama":"Velicky P, Miguel Villalba E, Michalska JM, et al. Dense 4D nanoscale reconstruction of living brain tissue. <i>Nature Methods</i>. 2023;20:1256-1265. 
doi:<a href=\"https://doi.org/10.1038/s41592-023-01936-6\">10.1038/s41592-023-01936-6</a>","chicago":"Velicky, Philipp, Eder Miguel Villalba, Julia M Michalska, Julia Lyudchik, Donglai Wei, Zudi Lin, Jake Watson, et al. “Dense 4D Nanoscale Reconstruction of Living Brain Tissue.” <i>Nature Methods</i>. Springer Nature, 2023. <a href=\"https://doi.org/10.1038/s41592-023-01936-6\">https://doi.org/10.1038/s41592-023-01936-6</a>.","short":"P. Velicky, E. Miguel Villalba, J.M. Michalska, J. Lyudchik, D. Wei, Z. Lin, J. Watson, J. Troidl, J. Beyer, Y. Ben Simon, C.M. Sommer, W. Jahr, A. Cenameri, J. Broichhagen, S.G.N. Grant, P.M. Jonas, G. Novarino, H. Pfister, B. Bickel, J.G. Danzl, Nature Methods 20 (2023) 1256–1265.","ieee":"P. Velicky <i>et al.</i>, “Dense 4D nanoscale reconstruction of living brain tissue,” <i>Nature Methods</i>, vol. 20. Springer Nature, pp. 1256–1265, 2023.","mla":"Velicky, Philipp, et al. “Dense 4D Nanoscale Reconstruction of Living Brain Tissue.” <i>Nature Methods</i>, vol. 20, Springer Nature, 2023, pp. 1256–65, doi:<a href=\"https://doi.org/10.1038/s41592-023-01936-6\">10.1038/s41592-023-01936-6</a>.","apa":"Velicky, P., Miguel Villalba, E., Michalska, J. M., Lyudchik, J., Wei, D., Lin, Z., … Danzl, J. G. (2023). Dense 4D nanoscale reconstruction of living brain tissue. <i>Nature Methods</i>. Springer Nature. 
<a href=\"https://doi.org/10.1038/s41592-023-01936-6\">https://doi.org/10.1038/s41592-023-01936-6</a>"},"day":"01","has_accepted_license":"1","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ddc":["570"],"year":"2023","intvolume":"        20","OA_place":"publisher","date_published":"2023-08-01T00:00:00Z"},{"publication_identifier":{"issn":["0730-0301"],"eissn":["1557-7368"]},"keyword":["Computer Graphics","Computational Design","Computational Geometry","Shape Modeling"],"oa_version":"Submitted Version","title":"The design space of Kirchhoff rods","date_created":"2023-07-04T07:41:30Z","publication_status":"published","file":[{"checksum":"4954c1cfa487725bc156dcfec872478a","file_name":"kirchhoff-rods.pdf","file_id":"13194","relation":"main_file","date_updated":"2023-07-04T08:11:28Z","creator":"chafner","access_level":"open_access","content_type":"application/pdf","success":1,"date_created":"2023-07-04T08:11:28Z","file_size":19635168},{"access_level":"open_access","content_type":"application/pdf","date_created":"2023-07-04T07:46:28Z","file_size":420909,"title":"Supplemental Material with Proofs","checksum":"79c9975fbc82ff71f1767331d2204cca","file_name":"supp-main.pdf","creator":"chafner","file_id":"13190","date_updated":"2023-07-04T07:46:28Z","relation":"supplementary_material"},{"content_type":"application/pdf","date_created":"2023-07-04T07:46:30Z","file_size":430086,"title":"Cheat Sheet for Notation","access_level":"open_access","creator":"chafner","file_id":"13191","date_updated":"2023-07-04T07:46:30Z","relation":"supplementary_material","checksum":"4ab647e4f03c711e1e6a5fc1eb8684db","file_name":"supp-cheat.pdf"},{"access_level":"open_access","file_size":268088064,"date_created":"2023-07-04T07:46:39Z","title":"Supplemental 
Video","content_type":"video/mp4","file_name":"kirchhoff-video-final.mp4","checksum":"c0fd9a57d012046de90c185ffa904b76","date_updated":"2023-07-04T07:46:39Z","file_id":"13192","relation":"supplementary_material","creator":"chafner"},{"checksum":"71b00712b489ada2cd9815910ee180a9","file_name":"matlab-submission.zip","creator":"chafner","file_id":"13193","date_updated":"2023-07-04T07:47:10Z","relation":"supplementary_material","access_level":"open_access","content_type":"application/x-zip-compressed","date_created":"2023-07-04T07:47:10Z","file_size":25790,"title":"Matlab Source Code with Example"}],"publisher":"Association for Computing Machinery","corr_author":"1","article_processing_charge":"No","related_material":{"record":[{"status":"public","id":"12897","relation":"part_of_dissertation"}]},"acknowledgement":"We thank the anonymous reviewers for their generous feedback, and Julian Fischer for his help in proving Proposition 1. This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No. 
715767).","doi":"10.1145/3606033","oa":1,"article_type":"original","project":[{"grant_number":"715767","call_identifier":"H2020","_id":"24F9549A-B435-11E9-9278-68D0E5697425","name":"MATERIALIZABLE: Intelligent fabrication-oriented Computational Design and Modeling"}],"month":"09","department":[{"_id":"BeBi"}],"author":[{"full_name":"Hafner, Christian","last_name":"Hafner","id":"400429CC-F248-11E8-B48F-1D18A9856A87","first_name":"Christian"},{"orcid":"0000-0001-6511-9385","first_name":"Bernd","id":"49876194-F248-11E8-B48F-1D18A9856A87","full_name":"Bickel, Bernd","last_name":"Bickel"}],"volume":42,"year":"2023","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ddc":["516"],"article_number":"171","intvolume":"        42","date_published":"2023-09-20T00:00:00Z","type":"journal_article","quality_controlled":"1","language":[{"iso":"eng"}],"_id":"13188","abstract":[{"lang":"eng","text":"The Kirchhoff rod model describes the bending and twisting of slender elastic rods in three dimensions, and has been widely studied to enable the prediction of how a rod will deform, given its geometry and boundary conditions. In this work, we study a number of inverse problems with the goal of computing the geometry of a straight rod that will automatically deform to match a curved target shape after attaching its endpoints to a support structure. Our solution lets us finely control the static equilibrium state of a rod by varying the cross-sectional profiles along its length.\r\nWe also show that the set of physically realizable equilibrium states admits a concise geometric description in terms of linear line complexes, which leads to very efficient computational design algorithms. Implemented in an interactive software tool, they allow us to convert three-dimensional hand-drawn spline curves to elastic rods, and give feedback about the feasibility and practicality of a design in real time. 
We demonstrate the efficacy of our method by designing and manufacturing several physical prototypes with applications to interior design and soft robotics."}],"external_id":{"isi":["001086833300010"]},"has_accepted_license":"1","day":"20","citation":{"ista":"Hafner C, Bickel B. 2023. The design space of Kirchhoff rods. ACM Transactions on Graphics. 42(5), 171.","ama":"Hafner C, Bickel B. The design space of Kirchhoff rods. <i>ACM Transactions on Graphics</i>. 2023;42(5). doi:<a href=\"https://doi.org/10.1145/3606033\">10.1145/3606033</a>","chicago":"Hafner, Christian, and Bernd Bickel. “The Design Space of Kirchhoff Rods.” <i>ACM Transactions on Graphics</i>. Association for Computing Machinery, 2023. <a href=\"https://doi.org/10.1145/3606033\">https://doi.org/10.1145/3606033</a>.","short":"C. Hafner, B. Bickel, ACM Transactions on Graphics 42 (2023).","ieee":"C. Hafner and B. Bickel, “The design space of Kirchhoff rods,” <i>ACM Transactions on Graphics</i>, vol. 42, no. 5. Association for Computing Machinery, 2023.","mla":"Hafner, Christian, and Bernd Bickel. “The Design Space of Kirchhoff Rods.” <i>ACM Transactions on Graphics</i>, vol. 42, no. 5, 171, Association for Computing Machinery, 2023, doi:<a href=\"https://doi.org/10.1145/3606033\">10.1145/3606033</a>.","apa":"Hafner, C., &#38; Bickel, B. (2023). The design space of Kirchhoff rods. <i>ACM Transactions on Graphics</i>. Association for Computing Machinery. 
<a href=\"https://doi.org/10.1145/3606033\">https://doi.org/10.1145/3606033</a>"},"status":"public","scopus_import":"1","issue":"5","ec_funded":1,"file_date_updated":"2023-07-04T08:11:28Z","acknowledged_ssus":[{"_id":"M-Shop"}],"isi":1,"date_updated":"2026-04-27T22:30:03Z","publication":"ACM Transactions on Graphics"},{"year":"2022","ddc":["000"],"user_id":"317138e5-6ab7-11ef-aa6d-ffef3953e345","article_number":"112","intvolume":"        41","date_published":"2022-06-01T00:00:00Z","OA_place":"publisher","type":"journal_article","quality_controlled":"1","_id":"11442","abstract":[{"text":"Enabling additive manufacturing to employ a wide range of novel, functional materials can be a major boost to this technology. However, making such materials printable requires painstaking trial-and-error by an expert operator,\r\nas they typically tend to exhibit peculiar rheological or hysteresis properties. Even in the case of successfully finding the process parameters, there is no guarantee of print-to-print consistency due to material differences between batches. These challenges make closed-loop feedback an attractive option where the process parameters are adjusted on-the-fly. There are several challenges for designing an efficient controller: the deposition parameters are complex and highly coupled, artifacts occur after long time horizons, simulating the deposition is computationally costly, and learning on hardware is intractable. In this work, we demonstrate the feasibility of learning a closed-loop control policy for additive manufacturing using reinforcement learning. We show that approximate, but efficient, numerical simulation is\r\nsufficient as long as it allows learning the behavioral patterns of deposition that translate to real-world experiences. In combination with reinforcement learning, our model can be used to discover control policies that outperform\r\nbaseline controllers. Furthermore, the recovered policies have a minimal sim-to-real gap. 
We showcase this by applying our control policy in-vivo on a single-layer, direct ink writing printer. ","lang":"eng"}],"language":[{"iso":"eng"}],"external_id":{"arxiv":["2201.11819"],"isi":["000830989200091"]},"has_accepted_license":"1","citation":{"short":"M. Piovarci, M. Foshey, J. Xu, T. Erps, V. Babaei, P. Didyk, S. Rusinkiewicz, W. Matusik, B. Bickel, ACM Transactions on Graphics 41 (2022).","ieee":"M. Piovarci <i>et al.</i>, “Closed-loop control of direct ink writing via reinforcement learning,” <i>ACM Transactions on Graphics</i>, vol. 41, no. 4. Association for Computing Machinery, 2022.","mla":"Piovarci, Michael, et al. “Closed-Loop Control of Direct Ink Writing via Reinforcement Learning.” <i>ACM Transactions on Graphics</i>, vol. 41, no. 4, 112, Association for Computing Machinery, 2022, doi:<a href=\"https://doi.org/10.1145/3528223.3530144\">10.1145/3528223.3530144</a>.","apa":"Piovarci, M., Foshey, M., Xu, J., Erps, T., Babaei, V., Didyk, P., … Bickel, B. (2022). Closed-loop control of direct ink writing via reinforcement learning. <i>ACM Transactions on Graphics</i>. Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3528223.3530144\">https://doi.org/10.1145/3528223.3530144</a>","ista":"Piovarci M, Foshey M, Xu J, Erps T, Babaei V, Didyk P, Rusinkiewicz S, Matusik W, Bickel B. 2022. Closed-loop control of direct ink writing via reinforcement learning. ACM Transactions on Graphics. 41(4), 112.","ama":"Piovarci M, Foshey M, Xu J, et al. Closed-loop control of direct ink writing via reinforcement learning. <i>ACM Transactions on Graphics</i>. 2022;41(4). doi:<a href=\"https://doi.org/10.1145/3528223.3530144\">10.1145/3528223.3530144</a>","chicago":"Piovarci, Michael, Michael Foshey, Jie Xu, Timothy Erps, Vahid Babaei, Piotr Didyk, Szymon Rusinkiewicz, Wojciech Matusik, and Bernd Bickel. “Closed-Loop Control of Direct Ink Writing via Reinforcement Learning.” <i>ACM Transactions on Graphics</i>. 
Association for Computing Machinery, 2022. <a href=\"https://doi.org/10.1145/3528223.3530144\">https://doi.org/10.1145/3528223.3530144</a>."},"day":"01","status":"public","PlanS_conform":"1","scopus_import":"1","issue":"4","ec_funded":1,"file_date_updated":"2022-06-28T08:32:58Z","isi":1,"publication":"ACM Transactions on Graphics","date_updated":"2025-09-10T09:36:45Z","publication_identifier":{"eissn":["1557-7368"],"issn":["0730-0301"]},"arxiv":1,"oa_version":"Submitted Version","OA_type":"hybrid","title":"Closed-loop control of direct ink writing via reinforcement learning","date_created":"2022-06-10T06:41:47Z","publication_status":"published","file":[{"success":1,"content_type":"application/pdf","date_created":"2022-06-28T08:32:58Z","file_size":33994829,"access_level":"open_access","relation":"main_file","creator":"dernst","date_updated":"2022-06-28T08:32:58Z","file_id":"11467","checksum":"27f6fe41c6ff84d50445cc9b0176d45b","file_name":"2022_ACM_acceptedversion_Piovarci.pdf"}],"publisher":"Association for Computing Machinery","corr_author":"1","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"article_processing_charge":"No","related_material":{"link":[{"relation":"press_release","description":"News on ISTA website","url":"https://ista.ac.at/en/news/machine-learning-3d-printing-fluids/"}]},"acknowledgement":"This work is graciously supported by the following grant agencies: FWF Lise Meitner (Grant M 3319), SNSF (Grant 200502), ERC Starting Grant (MATERIALIZABLE-715767), NSF (Grant IIS-181507).\r\n","oa":1,"doi":"10.1145/3528223.3530144","project":[{"_id":"eb901961-77a9-11ec-83b8-f5c883a62027","grant_number":"M03319","name":"Perception-Aware Appearance Fabrication"},{"_id":"24F9549A-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"715767","name":"MATERIALIZABLE: Intelligent 
fabrication-oriented Computational Design and Modeling"}],"article_type":"original","month":"06","department":[{"_id":"BeBi"}],"author":[{"full_name":"Piovarci, Michael","last_name":"Piovarci","id":"62E473F4-5C99-11EA-A40E-AF823DDC885E","first_name":"Michael","orcid":"0000-0002-5062-4474"},{"last_name":"Foshey","full_name":"Foshey, Michael","first_name":"Michael"},{"first_name":"Jie","last_name":"Xu","full_name":"Xu, Jie"},{"first_name":"Timothy","last_name":"Erps","full_name":"Erps, Timothy"},{"last_name":"Babaei","full_name":"Babaei, Vahid","first_name":"Vahid"},{"first_name":"Piotr","full_name":"Didyk, Piotr","last_name":"Didyk"},{"first_name":"Szymon","last_name":"Rusinkiewicz","full_name":"Rusinkiewicz, Szymon"},{"first_name":"Wojciech","full_name":"Matusik, Wojciech","last_name":"Matusik"},{"orcid":"0000-0001-6511-9385","first_name":"Bernd","id":"49876194-F248-11E8-B48F-1D18A9856A87","full_name":"Bickel, Bernd","last_name":"Bickel"}],"volume":41},{"oa_version":"Submitted Version","title":"Computational design of high-level interlocking puzzles","date_created":"2022-08-07T22:01:57Z","publication_identifier":{"eissn":["1557-7368"],"issn":["0730-0301"]},"file":[{"content_type":"application/pdf","success":1,"file_size":16896871,"date_created":"2022-08-28T07:56:19Z","access_level":"open_access","creator":"bbickel","file_id":"11992","date_updated":"2022-08-28T07:56:19Z","relation":"main_file","checksum":"0b51651be45b1b33f2072bd5d2686c69","file_name":"Chen-2022-High-LevelPuzzle_authorVersion.pdf"}],"publication_status":"published","related_material":{"link":[{"url":"https://ista.ac.at/en/news/unlocking-interlocking-riddles/","description":"News on ISTA website","relation":"press_release"}]},"publisher":"Association for Computing Machinery","article_processing_charge":"No","article_type":"original","project":[{"grant_number":"715767","_id":"24F9549A-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","name":"MATERIALIZABLE: Intelligent fabrication-oriented 
Computational Design and Modeling"}],"month":"07","department":[{"_id":"BeBi"}],"author":[{"first_name":"Rulin","last_name":"Chen","full_name":"Chen, Rulin"},{"first_name":"Ziqi","full_name":"Wang, Ziqi","last_name":"Wang"},{"full_name":"Song, Peng","last_name":"Song","first_name":"Peng"},{"last_name":"Bickel","full_name":"Bickel, Bernd","orcid":"0000-0001-6511-9385","id":"49876194-F248-11E8-B48F-1D18A9856A87","first_name":"Bernd"}],"volume":41,"acknowledgement":"We thank the reviewers for the valuable comments, David Gontier for sharing the source code of the baseline design approach, Christian Hafner for proofreading the paper, Keenan Crane for the 3D model of Cow, and Thingiverse for the 3D models of Moai and Owl. This work was supported by the SUTD Start-up Research Grant (Number: SRG ISTD 2019 148), the Swiss National Science Foundation (NCCR Digital Fabrication Agreement #51NF40-141853), and\r\nthe European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (Grant Agreement No 715767 – MATERIALIZABLE).","doi":"10.1145/3528223.3530071","oa":1,"date_published":"2022-07-22T00:00:00Z","year":"2022","ddc":["000"],"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","article_number":"150","intvolume":"        41","has_accepted_license":"1","citation":{"ama":"Chen R, Wang Z, Song P, Bickel B. Computational design of high-level interlocking puzzles. <i>ACM Transactions on Graphics</i>. 2022;41(4). doi:<a href=\"https://doi.org/10.1145/3528223.3530071\">10.1145/3528223.3530071</a>","ista":"Chen R, Wang Z, Song P, Bickel B. 2022. Computational design of high-level interlocking puzzles. ACM Transactions on Graphics. 41(4), 150.","chicago":"Chen, Rulin, Ziqi Wang, Peng Song, and Bernd Bickel. “Computational Design of High-Level Interlocking Puzzles.” <i>ACM Transactions on Graphics</i>. Association for Computing Machinery, 2022. 
<a href=\"https://doi.org/10.1145/3528223.3530071\">https://doi.org/10.1145/3528223.3530071</a>.","mla":"Chen, Rulin, et al. “Computational Design of High-Level Interlocking Puzzles.” <i>ACM Transactions on Graphics</i>, vol. 41, no. 4, 150, Association for Computing Machinery, 2022, doi:<a href=\"https://doi.org/10.1145/3528223.3530071\">10.1145/3528223.3530071</a>.","short":"R. Chen, Z. Wang, P. Song, B. Bickel, ACM Transactions on Graphics 41 (2022).","ieee":"R. Chen, Z. Wang, P. Song, and B. Bickel, “Computational design of high-level interlocking puzzles,” <i>ACM Transactions on Graphics</i>, vol. 41, no. 4. Association for Computing Machinery, 2022.","apa":"Chen, R., Wang, Z., Song, P., &#38; Bickel, B. (2022). Computational design of high-level interlocking puzzles. <i>ACM Transactions on Graphics</i>. Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3528223.3530071\">https://doi.org/10.1145/3528223.3530071</a>"},"day":"22","type":"journal_article","quality_controlled":"1","_id":"11735","language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"Interlocking puzzles are intriguing geometric games where the puzzle pieces are held together based on their geometric arrangement, preventing the puzzle from falling apart. High-level-of-difficulty, or simply high-level, interlocking puzzles are a subclass of interlocking puzzles that require multiple moves to take out the first subassembly from the puzzle. Solving a high-level interlocking puzzle is a challenging task since one has to explore many different configurations of the puzzle pieces until reaching a configuration where the first subassembly can be taken out. Designing a high-level interlocking puzzle with a user-specified level of difficulty is even harder since the puzzle pieces have to be interlocking in all the configurations before the first subassembly is taken out.\r\n\r\nIn this paper, we present a computational approach to design high-level interlocking puzzles. 
The core idea is to represent all possible configurations of an interlocking puzzle as well as transitions among these configurations using a rooted, undirected graph called a disassembly graph and leverage this graph to find a disassembly plan that requires a minimal number of moves to take out the first subassembly from the puzzle. At the design stage, our algorithm iteratively constructs the geometry of each puzzle piece to expand the disassembly graph incrementally, aiming to achieve a user-specified level of difficulty. We show that our approach allows efficient generation of high-level interlocking puzzles of various shape complexities, including new solutions not attainable by state-of-the-art approaches."}],"external_id":{"isi":["000830989200018"]},"scopus_import":"1","issue":"4","ec_funded":1,"status":"public","isi":1,"date_updated":"2025-04-14T07:28:57Z","publication":"ACM Transactions on Graphics","file_date_updated":"2022-08-28T07:56:19Z"},{"publisher":"Wiley","article_processing_charge":"No","corr_author":"1","oa":1,"doi":"10.1111/cgf.14581","month":"09","department":[{"_id":"BeBi"}],"article_type":"original","volume":41,"author":[{"last_name":"Alderighi","full_name":"Alderighi, Thomas","first_name":"Thomas"},{"full_name":"Malomo, Luigi","last_name":"Malomo","first_name":"Luigi"},{"last_name":"Auzinger","full_name":"Auzinger, Thomas","orcid":"0000-0002-1546-3265","id":"4718F954-F248-11E8-B48F-1D18A9856A87","first_name":"Thomas"},{"first_name":"Bernd","id":"49876194-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-6511-9385","full_name":"Bickel, Bernd","last_name":"Bickel"},{"last_name":"Cignoni","full_name":"Cignoni, Paulo","first_name":"Paulo"},{"full_name":"Pietroni, Nico","last_name":"Pietroni","first_name":"Nico"}],"keyword":["Computer Graphics and Computer-Aided Design"],"publication_identifier":{"issn":["0167-7055"],"eissn":["1467-8659"]},"oa_version":"Submitted Version","title":"State of the art in computational mould 
design","date_created":"2022-08-28T18:17:01Z","publication_status":"published","file":[{"file_size":32480850,"date_created":"2022-08-28T18:18:08Z","title":"pre-peer reviewed version","content_type":"application/pdf","description":"This is the pre-peer reviewed version of the following article: Alderighi, T., Malomo, L., Auzinger, T., Bickel, B., Cignoni, P. and Pietroni, N. (2022), State of the Art in Computational Mould Design. Computer Graphics Forum, which has been published in final form at https://doi.org/10.1111/cgf.14581. This article may be used for non-commercial purposes in accordance with Wiley Terms and Conditions for Use of Self-Archived Versions.","access_level":"open_access","relation":"main_file","file_id":"11994","date_updated":"2022-08-28T18:18:08Z","creator":"bbickel","file_name":"star_molding_preprint.pdf","checksum":"c40cc8ceb7b7f0512172b883d712198e"}],"page":"435-452","status":"public","scopus_import":"1","issue":"6","file_date_updated":"2022-08-28T18:18:08Z","isi":1,"publication":"Computer Graphics Forum","date_updated":"2024-10-09T21:03:21Z","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","ddc":["000"],"year":"2022","intvolume":"        41","date_published":"2022-09-01T00:00:00Z","quality_controlled":"1","type":"journal_article","external_id":{"isi":["000842638900001"]},"_id":"11993","language":[{"iso":"eng"}],"abstract":[{"text":"Moulding refers to a set of manufacturing techniques in which a mould, usually a cavity or a solid frame, is used to shape a liquid or pliable material into an object of the desired shape. The popularity of moulding comes from its effectiveness, scalability and versatility in terms of employed materials. Its relevance as a fabrication process is demonstrated by the extensive literature covering different aspects related to mould design, from material flow simulation to the automation of mould geometry design. 
In this state-of-the-art report, we provide an extensive review of the automatic methods for the design of moulds, focusing on contributions from a geometric perspective. We classify existing mould design methods based on their computational approach and the nature of their target moulding process. We summarize the relationships between computational approaches and moulding techniques, highlighting their strengths and limitations. Finally, we discuss potential future research directions.","lang":"eng"}],"citation":{"mla":"Alderighi, Thomas, et al. “State of the Art in Computational Mould Design.” <i>Computer Graphics Forum</i>, vol. 41, no. 6, Wiley, 2022, pp. 435–52, doi:<a href=\"https://doi.org/10.1111/cgf.14581\">10.1111/cgf.14581</a>.","short":"T. Alderighi, L. Malomo, T. Auzinger, B. Bickel, P. Cignoni, N. Pietroni, Computer Graphics Forum 41 (2022) 435–452.","ieee":"T. Alderighi, L. Malomo, T. Auzinger, B. Bickel, P. Cignoni, and N. Pietroni, “State of the art in computational mould design,” <i>Computer Graphics Forum</i>, vol. 41, no. 6. Wiley, pp. 435–452, 2022.","apa":"Alderighi, T., Malomo, L., Auzinger, T., Bickel, B., Cignoni, P., &#38; Pietroni, N. (2022). State of the art in computational mould design. <i>Computer Graphics Forum</i>. Wiley. <a href=\"https://doi.org/10.1111/cgf.14581\">https://doi.org/10.1111/cgf.14581</a>","ama":"Alderighi T, Malomo L, Auzinger T, Bickel B, Cignoni P, Pietroni N. State of the art in computational mould design. <i>Computer Graphics Forum</i>. 2022;41(6):435-452. doi:<a href=\"https://doi.org/10.1111/cgf.14581\">10.1111/cgf.14581</a>","ista":"Alderighi T, Malomo L, Auzinger T, Bickel B, Cignoni P, Pietroni N. 2022. State of the art in computational mould design. Computer Graphics Forum. 41(6), 435–452.","chicago":"Alderighi, Thomas, Luigi Malomo, Thomas Auzinger, Bernd Bickel, Paulo Cignoni, and Nico Pietroni. “State of the Art in Computational Mould Design.” <i>Computer Graphics Forum</i>. Wiley, 2022. 
<a href=\"https://doi.org/10.1111/cgf.14581\">https://doi.org/10.1111/cgf.14581</a>."},"day":"01","has_accepted_license":"1"},{"title":"VoRF: Volumetric Relightable Faces","date_created":"2023-01-30T10:47:06Z","oa_version":"Published Version","publication_status":"published","conference":{"location":"London, United Kingdom","end_date":"2022-11-24","start_date":"2022-11-21","name":"BMVC: British Machine Vision Conference"},"file":[{"access_level":"open_access","file_size":5202710,"title":"VoRF: Volumetric Relightable Faces","date_created":"2023-01-30T10:48:18Z","content_type":"application/pdf","file_name":"vorf_main.pdf","checksum":"b60b70bb48700aee709c85a69231821d","creator":"bbickel","relation":"main_file","date_updated":"2023-01-30T10:48:18Z","file_id":"12453"},{"file_name":"vorf_supp.pdf","checksum":"ce5f4ce66eaaa1590ee5df989fca6f61","relation":"supplementary_material","date_updated":"2023-01-30T10:48:29Z","file_id":"12454","creator":"bbickel","access_level":"open_access","date_created":"2023-01-30T10:48:29Z","title":"VoRF: Volumetric Relightable Faces – SUPPLEMENTAL MATERIAL –","file_size":37953188,"content_type":"application/pdf"},{"date_created":"2023-01-30T10:48:37Z","file_size":57855492,"content_type":"video/mp4","access_level":"open_access","creator":"bbickel","relation":"supplementary_material","date_updated":"2023-01-30T10:48:37Z","file_id":"12455","file_name":"video.mp4","checksum":"08aecca434b08fee75ee1efe87943718"}],"article_processing_charge":"No","publisher":"British Machine Vision Association and Society for Pattern Recognition","oa":1,"acknowledgement":"This work was supported by the ERC Consolidator Grant 4DReply (770784).","author":[{"first_name":"Pramod","full_name":"Rao, Pramod","last_name":"Rao"},{"first_name":"Mallikarjun","full_name":"B R, Mallikarjun","last_name":"B R"},{"first_name":"Gereon","last_name":"Fox","full_name":"Fox, Gereon"},{"first_name":"Tim","full_name":"Weyrich, Tim","last_name":"Weyrich"},{"full_name":"Bickel, 
Bernd","last_name":"Bickel","orcid":"0000-0001-6511-9385","id":"49876194-F248-11E8-B48F-1D18A9856A87","first_name":"Bernd"},{"first_name":"Hans-Peter","full_name":"Seidel, Hans-Peter","last_name":"Seidel"},{"full_name":"Pfister, Hanspeter","last_name":"Pfister","first_name":"Hanspeter"},{"last_name":"Matusik","full_name":"Matusik, Wojciech","first_name":"Wojciech"},{"full_name":"Tewari, Ayush","last_name":"Tewari","first_name":"Ayush"},{"first_name":"Christian","last_name":"Theobalt","full_name":"Theobalt, Christian"},{"first_name":"Mohamed","last_name":"Elgharib","full_name":"Elgharib, Mohamed"}],"month":"12","department":[{"_id":"BeBi"}],"article_number":"708","ddc":["000"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2022","date_published":"2022-12-01T00:00:00Z","main_file_link":[{"open_access":"1","url":"https://bmvc2022.mpi-inf.mpg.de/708/"}],"language":[{"iso":"eng"}],"_id":"12452","abstract":[{"text":"Portrait viewpoint and illumination editing is an important problem with several applications in VR/AR, movies, and photography. Comprehensive knowledge of geometry and illumination is critical for obtaining photorealistic results. Current methods are unable to explicitly model in 3D while handing both viewpoint and illumination editing from a single image. In this paper, we propose VoRF, a novel approach that can take even a single portrait image as input and relight human heads under novel illuminations that can be viewed from arbitrary viewpoints. VoRF represents a human head as a continuous volumetric field and learns a prior model of human heads using a coordinate-based MLP with separate latent spaces for identity and illumination. The prior model is learnt in an auto-decoder manner over a diverse class of head shapes and appearances, allowing VoRF to generalize to novel test identities from a single input image. 
Additionally, VoRF has a reflectance MLP that uses the intermediate features of the prior model for rendering One-Light-at-A-Time (OLAT) images under novel views. We synthesize novel illuminations by combining these OLAT images with target environment maps. Qualitative and quantitative evaluations demonstrate the effectiveness of VoRF for relighting and novel view synthesis even when applied to unseen subjects under uncontrolled illuminations.","lang":"eng"}],"quality_controlled":"1","type":"conference","citation":{"short":"P. Rao, M. B R, G. Fox, T. Weyrich, B. Bickel, H.-P. Seidel, H. Pfister, W. Matusik, A. Tewari, C. Theobalt, M. Elgharib, in:, 33rd British Machine Vision Conference, British Machine Vision Association and Society for Pattern Recognition, 2022.","ieee":"P. Rao <i>et al.</i>, “VoRF: Volumetric Relightable Faces,” in <i>33rd British Machine Vision Conference</i>, London, United Kingdom, 2022.","mla":"Rao, Pramod, et al. “VoRF: Volumetric Relightable Faces.” <i>33rd British Machine Vision Conference</i>, 708, British Machine Vision Association and Society for Pattern Recognition, 2022.","apa":"Rao, P., B R, M., Fox, G., Weyrich, T., Bickel, B., Seidel, H.-P., … Elgharib, M. (2022). VoRF: Volumetric Relightable Faces. In <i>33rd British Machine Vision Conference</i>. London, United Kingdom: British Machine Vision Association and Society for Pattern Recognition.","ista":"Rao P, B R M, Fox G, Weyrich T, Bickel B, Seidel H-P, Pfister H, Matusik W, Tewari A, Theobalt C, Elgharib M. 2022. VoRF: Volumetric Relightable Faces. 33rd British Machine Vision Conference. BMVC: British Machine Vision Conference, 708.","ama":"Rao P, B R M, Fox G, et al. VoRF: Volumetric Relightable Faces. In: <i>33rd British Machine Vision Conference</i>. British Machine Vision Association and Society for Pattern Recognition; 2022.","chicago":"Rao, Pramod, Mallikarjun B R, Gereon Fox, Tim Weyrich, Bernd Bickel, Hans-Peter Seidel, Hanspeter Pfister, et al. 
“VoRF: Volumetric Relightable Faces.” In <i>33rd British Machine Vision Conference</i>. British Machine Vision Association and Society for Pattern Recognition, 2022."},"day":"01","has_accepted_license":"1","status":"public","scopus_import":"1","file_date_updated":"2023-01-30T10:48:37Z","publication":"33rd British Machine Vision Conference","date_updated":"2023-10-31T08:40:55Z"},{"publication_identifier":{"issn":["0167-7055"],"eissn":["1467-8659"]},"date_created":"2022-03-27T17:34:17Z","title":"Worst-case rigidity analysis and optimization for assemblies with mechanical joints","oa_version":"Submitted Version","publication_status":"published","file":[{"file_size":19601689,"date_created":"2022-03-27T17:34:11Z","content_type":"application/pdf","access_level":"open_access","relation":"main_file","file_id":"10923","date_updated":"2022-03-27T17:34:11Z","creator":"bbickel","file_name":"paper.pdf","checksum":"b62188b07f5c000f1638c782ec92da41"}],"article_processing_charge":"No","publisher":"Wiley","oa":1,"doi":"10.1111/cgf.14490","acknowledgement":"This work was supported by the Research Grants Council of the Hong Kong Special Administrative Region, China [Project No.: CUHK 14201921] and the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 715767 – MATERIALIZABLE). 
We thank the anonymous reviewers for their insightful feedback; Christian Hafner for proofreading and discussions; Ziqi Wang,\r\nHaisen Zhao, and Martin Hafskjold Thoresen for the helpful discussions; and the Miba Machine Shop at IST Austria for 3D printing the BUNNY and BOOMERANG models.","author":[{"full_name":"Liu, Zhenyuan","last_name":"Liu","orcid":"0000-0001-9200-5690","id":"70f0d7cf-ae65-11ec-a14f-89dfc5505b19","first_name":"Zhenyuan"},{"first_name":"Jingyu","full_name":"Hu, Jingyu","last_name":"Hu"},{"full_name":"Xu, Hao","last_name":"Xu","first_name":"Hao"},{"full_name":"Song, Peng","last_name":"Song","first_name":"Peng"},{"full_name":"Zhang, Ran","last_name":"Zhang","first_name":"Ran"},{"last_name":"Bickel","full_name":"Bickel, Bernd","orcid":"0000-0001-6511-9385","first_name":"Bernd","id":"49876194-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Fu, Chi-Wing","last_name":"Fu","first_name":"Chi-Wing"}],"volume":41,"department":[{"_id":"BeBi"}],"month":"05","project":[{"grant_number":"715767","_id":"24F9549A-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","name":"MATERIALIZABLE: Intelligent fabrication-oriented Computational Design and Modeling"}],"article_type":"original","intvolume":"        41","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","ddc":["000"],"year":"2022","date_published":"2022-05-01T00:00:00Z","external_id":{"isi":["000802723900039"]},"language":[{"iso":"eng"}],"_id":"10922","abstract":[{"text":"We study structural rigidity for assemblies with mechanical joints. Existing methods identify whether an assembly is structurally rigid by assuming parts are perfectly rigid. Yet, an assembly identified as rigid may not be that “rigid” in practice, and existing methods cannot quantify how rigid an assembly is. We address this limitation by developing a new measure, worst-case rigidity, to quantify the rigidity of an assembly as the largest possible deformation that the assembly undergoes for arbitrary external loads of fixed magnitude. 
Computing worst-case rigidity is non-trivial due to non-rigid parts and different joint types. We thus formulate a new computational approach by encoding parts and their connections into a stiffness matrix, in which parts are modeled as deformable objects and joints as soft constraints. Based on this, we formulate worst-case rigidity analysis as an optimization that seeks the worst-case deformation of an assembly for arbitrary external loads, and solve the optimization problem via an eigenanalysis. Furthermore, we present methods to optimize the geometry and topology of various assemblies to enhance their rigidity, as guided by our rigidity measure. In the end, we validate our method on a variety of assembly structures with physical experiments and demonstrate its effectiveness by designing and fabricating several structurally rigid assemblies.","lang":"eng"}],"quality_controlled":"1","type":"journal_article","day":"01","citation":{"apa":"Liu, Z., Hu, J., Xu, H., Song, P., Zhang, R., Bickel, B., &#38; Fu, C.-W. (2022). Worst-case rigidity analysis and optimization for assemblies with mechanical joints. <i>Computer Graphics Forum</i>. Wiley. <a href=\"https://doi.org/10.1111/cgf.14490\">https://doi.org/10.1111/cgf.14490</a>","ieee":"Z. Liu <i>et al.</i>, “Worst-case rigidity analysis and optimization for assemblies with mechanical joints,” <i>Computer Graphics Forum</i>, vol. 41, no. 2. Wiley, pp. 507–519, 2022.","short":"Z. Liu, J. Hu, H. Xu, P. Song, R. Zhang, B. Bickel, C.-W. Fu, Computer Graphics Forum 41 (2022) 507–519.","mla":"Liu, Zhenyuan, et al. “Worst-Case Rigidity Analysis and Optimization for Assemblies with Mechanical Joints.” <i>Computer Graphics Forum</i>, vol. 41, no. 2, Wiley, 2022, pp. 507–19, doi:<a href=\"https://doi.org/10.1111/cgf.14490\">10.1111/cgf.14490</a>.","chicago":"Liu, Zhenyuan, Jingyu Hu, Hao Xu, Peng Song, Ran Zhang, Bernd Bickel, and Chi-Wing Fu. 
“Worst-Case Rigidity Analysis and Optimization for Assemblies with Mechanical Joints.” <i>Computer Graphics Forum</i>. Wiley, 2022. <a href=\"https://doi.org/10.1111/cgf.14490\">https://doi.org/10.1111/cgf.14490</a>.","ista":"Liu Z, Hu J, Xu H, Song P, Zhang R, Bickel B, Fu C-W. 2022. Worst-case rigidity analysis and optimization for assemblies with mechanical joints. Computer Graphics Forum. 41(2), 507–519.","ama":"Liu Z, Hu J, Xu H, et al. Worst-case rigidity analysis and optimization for assemblies with mechanical joints. <i>Computer Graphics Forum</i>. 2022;41(2):507-519. doi:<a href=\"https://doi.org/10.1111/cgf.14490\">10.1111/cgf.14490</a>"},"has_accepted_license":"1","status":"public","page":"507-519","ec_funded":1,"issue":"2","scopus_import":"1","file_date_updated":"2022-03-27T17:34:11Z","date_updated":"2025-04-14T07:28:57Z","publication":"Computer Graphics Forum","isi":1,"acknowledged_ssus":[{"_id":"M-Shop"}]},{"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2022","main_file_link":[{"open_access":"1","url":"https://doi.org/10.1101/2022.03.16.484431"}],"oa_version":"Preprint","date_created":"2022-08-23T11:07:59Z","OA_place":"repository","title":"Saturated reconstruction of living brain tissue","date_published":"2022-05-09T00:00:00Z","publication_status":"draft","type":"preprint","language":[{"iso":"eng"}],"_id":"11943","abstract":[{"lang":"eng","text":"Complex wiring between neurons underlies the information-processing network enabling all brain functions, including cognition and memory. For understanding how the network is structured, processes information, and changes over time, comprehensive visualization of the architecture of living brain tissue with its cellular and molecular components would open up major opportunities. 
However, electron microscopy (EM) provides nanometre-scale resolution required for full <jats:italic>in-silico</jats:italic> reconstruction<jats:sup>1–5</jats:sup>, yet is limited to fixed specimens and static representations. Light microscopy allows live observation, with super-resolution approaches<jats:sup>6–12</jats:sup> facilitating nanoscale visualization, but comprehensive 3D-reconstruction of living brain tissue has been hindered by tissue photo-burden, photobleaching, insufficient 3D-resolution, and inadequate signal-to-noise ratio (SNR). Here we demonstrate saturated reconstruction of living brain tissue. We developed an integrated imaging and analysis technology, adapting stimulated emission depletion (STED) microscopy<jats:sup>6,13</jats:sup> in extracellularly labelled tissue<jats:sup>14</jats:sup> for high SNR and near-isotropic resolution. Centrally, a two-stage deep-learning approach leveraged previously obtained information on sample structure to drastically reduce photo-burden and enable automated volumetric reconstruction down to single synapse level. Live reconstruction provides unbiased analysis of tissue architecture across time in relation to functional activity and targeted activation, and contextual understanding of molecular labelling. This adoptable technology will facilitate novel insights into the dynamic functional architecture of living brain tissue."}],"day":"09","citation":{"ista":"Velicky P, Miguel Villalba E, Michalska JM, Wei D, Lin Z, Watson J, Troidl J, Beyer J, Ben Simon Y, Sommer CM, Jahr W, Cenameri A, Broichhagen J, Grant SGN, Jonas PM, Novarino G, Pfister H, Bickel B, Danzl JG. Saturated reconstruction of living brain tissue. bioRxiv, <a href=\"https://doi.org/10.1101/2022.03.16.484431\">10.1101/2022.03.16.484431</a>.","ama":"Velicky P, Miguel Villalba E, Michalska JM, et al. Saturated reconstruction of living brain tissue. <i>bioRxiv</i>. 
doi:<a href=\"https://doi.org/10.1101/2022.03.16.484431\">10.1101/2022.03.16.484431</a>","chicago":"Velicky, Philipp, Eder Miguel Villalba, Julia M Michalska, Donglai Wei, Zudi Lin, Jake Watson, Jakob Troidl, et al. “Saturated Reconstruction of Living Brain Tissue.” <i>BioRxiv</i>. Cold Spring Harbor Laboratory, n.d. <a href=\"https://doi.org/10.1101/2022.03.16.484431\">https://doi.org/10.1101/2022.03.16.484431</a>.","ieee":"P. Velicky <i>et al.</i>, “Saturated reconstruction of living brain tissue,” <i>bioRxiv</i>. Cold Spring Harbor Laboratory.","short":"P. Velicky, E. Miguel Villalba, J.M. Michalska, D. Wei, Z. Lin, J. Watson, J. Troidl, J. Beyer, Y. Ben Simon, C.M. Sommer, W. Jahr, A. Cenameri, J. Broichhagen, S.G.N. Grant, P.M. Jonas, G. Novarino, H. Pfister, B. Bickel, J.G. Danzl, BioRxiv (n.d.).","mla":"Velicky, Philipp, et al. “Saturated Reconstruction of Living Brain Tissue.” <i>BioRxiv</i>, Cold Spring Harbor Laboratory, doi:<a href=\"https://doi.org/10.1101/2022.03.16.484431\">10.1101/2022.03.16.484431</a>.","apa":"Velicky, P., Miguel Villalba, E., Michalska, J. M., Wei, D., Lin, Z., Watson, J., … Danzl, J. G. (n.d.). Saturated reconstruction of living brain tissue. <i>bioRxiv</i>. Cold Spring Harbor Laboratory. 
<a href=\"https://doi.org/10.1101/2022.03.16.484431\">https://doi.org/10.1101/2022.03.16.484431</a>"},"status":"public","publisher":"Cold Spring Harbor Laboratory","article_processing_charge":"No","corr_author":"1","related_material":{"record":[{"status":"public","id":"13267","relation":"later_version"},{"relation":"dissertation_contains","id":"12470","status":"public"}]},"doi":"10.1101/2022.03.16.484431","oa":1,"department":[{"_id":"PeJo"},{"_id":"GaNo"},{"_id":"BeBi"},{"_id":"JoDa"}],"month":"05","author":[{"full_name":"Velicky, Philipp","last_name":"Velicky","orcid":"0000-0002-2340-7431","id":"39BDC62C-F248-11E8-B48F-1D18A9856A87","first_name":"Philipp"},{"full_name":"Miguel Villalba, Eder","last_name":"Miguel Villalba","id":"3FB91342-F248-11E8-B48F-1D18A9856A87","first_name":"Eder","orcid":"0000-0001-5665-0430"},{"last_name":"Michalska","full_name":"Michalska, Julia M","orcid":"0000-0003-3862-1235","id":"443DB6DE-F248-11E8-B48F-1D18A9856A87","first_name":"Julia M"},{"last_name":"Wei","full_name":"Wei, Donglai","first_name":"Donglai"},{"full_name":"Lin, Zudi","last_name":"Lin","first_name":"Zudi"},{"full_name":"Watson, Jake","last_name":"Watson","first_name":"Jake","id":"63836096-4690-11EA-BD4E-32803DDC885E","orcid":"0000-0002-8698-3823"},{"last_name":"Troidl","full_name":"Troidl, Jakob","first_name":"Jakob"},{"first_name":"Johanna","last_name":"Beyer","full_name":"Beyer, Johanna"},{"last_name":"Ben Simon","full_name":"Ben Simon, Yoav","id":"43DF3136-F248-11E8-B48F-1D18A9856A87","first_name":"Yoav"},{"last_name":"Sommer","full_name":"Sommer, Christoph M","orcid":"0000-0003-1216-9105","id":"4DF26D8C-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph M"},{"last_name":"Jahr","full_name":"Jahr, Wiebke","id":"425C1CE8-F248-11E8-B48F-1D18A9856A87","first_name":"Wiebke","orcid":"0000-0003-0201-2315"},{"id":"9ac8f577-2357-11eb-997a-e566c5550886","first_name":"Alban","last_name":"Cenameri","full_name":"Cenameri, 
Alban"},{"first_name":"Johannes","last_name":"Broichhagen","full_name":"Broichhagen, Johannes"},{"first_name":"Seth G. N.","full_name":"Grant, Seth G. N.","last_name":"Grant"},{"first_name":"Peter M","id":"353C1B58-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-5001-4804","full_name":"Jonas, Peter M","last_name":"Jonas"},{"last_name":"Novarino","full_name":"Novarino, Gaia","orcid":"0000-0002-7673-7178","id":"3E57A680-F248-11E8-B48F-1D18A9856A87","first_name":"Gaia"},{"first_name":"Hanspeter","full_name":"Pfister, Hanspeter","last_name":"Pfister"},{"orcid":"0000-0001-6511-9385","first_name":"Bernd","id":"49876194-F248-11E8-B48F-1D18A9856A87","full_name":"Bickel, Bernd","last_name":"Bickel"},{"id":"42EFD3B6-F248-11E8-B48F-1D18A9856A87","first_name":"Johann G","orcid":"0000-0001-8559-3973","last_name":"Danzl","full_name":"Danzl, Johann G"}],"publication":"bioRxiv","date_updated":"2026-04-27T22:30:32Z"}]
