@inproceedings{21474,
  abstract     = {Rendering novel, relit views of a human head, given a monocular portrait image as input, is an inherently underconstrained problem. The traditional graphics solution is to explicitly decompose the input image into geometry, material and lighting via differentiable rendering; but this is constrained by the multiple assumptions and approximations of the underlying models and parameterizations of these scene components. We propose 3DPR, an image-based relighting model that leverages generative priors learnt from multi-view One-Light-at-A-Time (OLAT) images captured in a light stage. We introduce a new diverse and large-scale multi-view 4K OLAT dataset of 139 subjects to learn a high-quality prior over the distribution of high-frequency face reflectance. We leverage the latent space of a pre-trained generative head model that provides a rich prior over face geometry learnt from in-the-wild image datasets. The input portrait is first embedded in the latent manifold of such a model through an encoder-based inversion process. Then a novel triplane-based reflectance network trained on our lightstage data is used to synthesize high-fidelity OLAT images to enable image-based relighting. Our reflectance network operates in the latent space of the generative head model, crucially enabling a relatively small number of lightstage images to train the reflectance model. Combining the generated OLATs according to a given HDRI environment map yields physically accurate environmental relighting results. Through quantitative and qualitative evaluations, we demonstrate that 3DPR outperforms previous methods, particularly in preserving identity and in capturing lighting effects such as specularities, self-shadows, and subsurface scattering.},
  author       = {Rao, Pramod and Meka, Abhimitra and Zhou, Xilong and Fox, Gereon and Mallikarjun, B. R. and Zhan, Fangneng and Weyrich, Tim and Bickel, Bernd and Pfister, Hanspeter and Matusik, Wojciech and Beeler, Thabo and Elgharib, Mohamed and Habermann, Marc and Theobalt, Christian},
  booktitle    = {SIGGRAPH Asia 2025 Conference Papers},
  isbn         = {9798400721373},
  location     = {Hong Kong, Hong Kong},
  publisher    = {Association for Computing Machinery},
  title        = {{3DPR: Single image 3D portrait relighting with generative priors}},
  doi          = {10.1145/3757377.3763962},
  year         = {2025},
}

@phdthesis{20276,
  abstract     = {Complex 3D shapes can be created by morphing flat 2D configurations. Such deformations
either preserve the intrinsic material geometry (e.g., folding paper) or modify it through
localized contraction. Once transformed, the 3D shape can be further controlled to achieve a
target functionality. A key challenge is to take the material specifications and the actuation
process as input to automatically design the target 3D shape and its functionality. This thesis
presents two novel computational pipelines for the design and control of shape-morphing
structures used to create functional prototypes.
The first pipeline borrows from the art of origami to fold paper into intricate shapes and
applies this principle to make 3D lighting displays. We introduce PCBend, a computational
design approach that covers a surface with individually addressable RGB LEDs, effectively
forming a low-resolution surface by folding rigid printed circuit boards (PCBs). We optimize
cut patterns on PCBs to act as hinges and co-design LED placement, circuit routing, and
fabrication constraints to produce PCB blueprints. The PCBs are fabricated using automated
standard manufacturing services with LEDs embedded on them. Finally, the fabricated PCBs
are cut along the contour and folded onto a 3D-printed support. The 3D lighting display is
then controlled to display complex surface light patterns.
Creating 3D shapes through folding is only possible if their planar configuration, called ``unfolding'' exists without any distortion or overlap. Existing methods often permit distortion
or require multiple patches, which are unsuitable for fabrication pipelines that rely on folding
non-stretchable materials. We reinforce such fabrication pipelines by providing a geometric
relaxation to the problem, where the input shape is modified to admit overlap-free unfolding.
The second fabrication pipeline extends shape morphing to soft robotics by emulating nature's
blueprint of distributed actuation. Inspired by vertebrates, we build musculoskeletal robots
using modular active actuators, employing Liquid Crystal Elastomers (LCEs) as shrinkable
artificial muscles integrated with 3D-printed bones. The chemical composition of LCEs is
altered to enable untethered actuation through infrared radiation, allowing active control of
individual muscles and their corresponding bones. The combined motion of individual bones
defines the robot's overall shape and functionality. Our proposed system significantly expands
both the design and control spaces of soft robots, which we harness using our computational
design tools. We build several physical robots that exhibit complex shape morphing and varied
terrain navigation, showcasing the versatility of our pipeline.
This thesis explores applications ranging from intricate light patterns displayed on 3D shapes
formed by folding rigid PCBs to untethered robots that use contractile muscles to exhibit
shape morphing and locomotion. Through these examples, the thesis highlights how computational design and distributed actuation, integrated with novel materials, can transform
passive structures into functional prototypes.},
  author       = {Bhargava, Manas},
  isbn         = {978-3-99078-065-7},
  issn         = {2663-337X},
  pages        = {96},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Design and control of deformable structures: From PCB lighting displays to elastomer robots}},
  doi          = {10.15479/AT-ISTA-20276},
  year         = {2025},
}

@article{18565,
  abstract     = {We present a computational approach for unfolding 3D shapes isometrically into the plane as a single patch without overlapping triangles. This is a hard, sometimes impossible, problem, which existing methods are forced to soften by allowing for map distortions or multiple patches. Instead, we propose a geometric relaxation of the problem: We modify the input shape until it admits an overlap-free unfolding. We achieve this by locally displacing vertices and collapsing edges, guided by the unfolding process. We validate our algorithm quantitatively and qualitatively on a large dataset of complex shapes and show its proficiency by fabricating real shapes from paper.},
  author       = {Bhargava, Manas and Schreck, Camille and Freire, Marco and Hugron, Pierre-Alexandre and Lefebvre, Sylvain and Sellán, Silvia and Bickel, Bernd},
  issn         = {1467-8659},
  journal      = {Computer Graphics Forum},
  keywords     = {fabrication, single patch unfolding, mesh simplification},
  number       = {1},
  publisher    = {Wiley},
  title        = {{Mesh simplification for unfolding}},
  doi          = {10.1111/cgf.15269},
  volume       = {44},
  year         = {2025},
}

@unpublished{20286,
  abstract     = {Natural organisms utilize distributed actuation through their musculoskeletal
systems to adapt their gait for traversing diverse terrains or to morph their
bodies for varied tasks. A longstanding challenge in robotics is to emulate
this capability of natural organisms, which has motivated the development of
numerous soft robotic systems. However, such systems are generally optimized
for a single functionality, lack the ability to change form or function on
demand, or remain tethered to bulky control systems. To address these
limitations, we present a framework for designing and controlling robots that
utilize distributed actuation. We propose a novel building block that
integrates 3D-printed bones with liquid crystal elastomer (LCE) muscles as
lightweight actuators, enabling the modular assembly of musculoskeletal robots.
We developed LCE rods that contract in response to infrared radiation, thereby
providing localized, untethered control over the distributed skeletal network
and producing global deformations of the robot. To fully capitalize on the
extensive design space, we introduce two computational tools: one for
optimizing the robot's skeletal graph to achieve multiple target deformations,
and another for co-optimizing skeletal designs and control gaits to realize
desired locomotion. We validate our framework by constructing several robots
that demonstrate complex shape morphing, diverse control schemes, and
environmental adaptability. Our system integrates advances in modular material
building, untethered and distributed control, and computational design to
introduce a new generation of robots that brings us closer to the capabilities
of living organisms.},
  author       = {Bhargava, Manas and Hiraki, Takefumi and Strugaru, Irina-Malina and Zhang, Yuhan and Piovarci, Michael and Daraio, Chiara and Iwai, Daisuke and Bickel, Bernd},
  eprint       = {2508.05410},
  eprinttype   = {arXiv},
  note         = {arXiv preprint},
  title        = {{Computational design and fabrication of modular robots with untethered control}},
  doi          = {10.48550/arXiv.2508.05410},
  year         = {2025},
}

@article{14488,
  abstract     = {Portrait viewpoint and illumination editing is an important problem with several applications in VR/AR, movies, and photography. Comprehensive knowledge of geometry and illumination is critical for obtaining photorealistic results. Current methods are unable to explicitly model in 3D while handling both viewpoint and illumination editing from a single image. In this paper, we propose VoRF, a novel approach that can take even a single portrait image as input and relight human heads under novel illuminations that can be viewed from arbitrary viewpoints. VoRF represents a human head as a continuous volumetric field and learns a prior model of human heads using a coordinate-based MLP with individual latent spaces for identity and illumination. The prior model is learned in an auto-decoder manner over a diverse class of head shapes and appearances, allowing VoRF to generalize to novel test identities from a single input image. Additionally, VoRF has a reflectance MLP that uses the intermediate features of the prior model for rendering One-Light-at-A-Time (OLAT) images under novel views. We synthesize novel illuminations by combining these OLAT images with target environment maps. Qualitative and quantitative evaluations demonstrate the effectiveness of VoRF for relighting and novel view synthesis, even when applied to unseen subjects under uncontrolled illumination. This work is an extension of Rao et al. (VoRF: Volumetric Relightable Faces 2022). We provide extensive evaluation and ablative studies of our model and also provide an application, where any face can be relighted using textual input.},
  author       = {Rao, Pramod and Mallikarjun, B. R. and Fox, Gereon and Weyrich, Tim and Bickel, Bernd and Pfister, Hanspeter and Matusik, Wojciech and Zhan, Fangneng and Tewari, Ayush and Theobalt, Christian and Elgharib, Mohamed},
  issn         = {1573-1405},
  journal      = {International Journal of Computer Vision},
  pages        = {1148--1166},
  publisher    = {Springer Nature},
  title        = {{A deeper analysis of volumetric relightable faces}},
  doi          = {10.1007/s11263-023-01899-3},
  volume       = {132},
  year         = {2024},
}

@inproceedings{18912,
  abstract     = {This paper presents a computational method for automatically creating fabricable 3D wire sculptures from various input modalities, including 3D models, images, and even text. There are several challenges to wire art creation. For example, artists must express the desired visual as a sparse wire representation. It is also difficult to manually bend wires in the air without guidance to fabricate the designed 3D curves. Our workflow solves these challenges by using two core techniques. First, we present an algorithm that automatically generates a fabricable 3D curve representation of the target based on a loss function that measures the semantic distance between the rendered curve and the target. The loss function can be defined using different pre-trained vision-language neural networks to generate wire art from different input types. The loss function is then optimized using differentiable rendering specifically targeting 3D parametric curves. Our method can incorporate various fabrication constraints on the wire as additional regularization terms in the optimization process. Second, we present an algorithm to generate a 3D printable jig structure that can be used to fabricate the generated wire path. The major challenge in the jig generation stems from the design of an intersection-free surface mesh for 3D printing, which we address with our inflation algorithm. The experimental results indicate that our method can handle a wider range of input types and can produce physically fabricable wire shapes compared to previous wire generation methods. Various wire arts have been fabricated using our 3D-printed jig to demonstrate its effectiveness in 3D wire bending.},
  author       = {Tojo, Kenji and Shamir, Ariel and Bickel, Bernd and Umetani, Nobuyuki},
  booktitle    = {SIGGRAPH '24: ACM SIGGRAPH 2024 Conference Papers},
  isbn         = {9798400705250},
  location     = {Denver, CO, United States},
  publisher    = {Association for Computing Machinery},
  title        = {{Fabricable 3D wire art}},
  doi          = {10.1145/3641519.3657453},
  year         = {2024},
}

@inproceedings{17374,
  abstract     = {Achieving photorealistic 3D view synthesis and relighting of human portraits is pivotal for advancing AR/VR applications. Existing methodologies in portrait relighting demonstrate substantial limitations in terms of generalization and 3D consistency, coupled with inaccuracies in physically realistic lighting and identity preservation. Furthermore, personalization from a single view is difficult to achieve and often requires multiview images during the testing phase or involves slow optimization processes. This paper introduces Lite2Relight, a novel technique that can predict 3D consistent head poses of portraits while performing physically plausible light editing at interactive speed. Our method uniquely extends the generative capabilities and efficient volumetric representation of EG3D, leveraging a lightstage dataset to implicitly disentangle face reflectance and perform relighting under target HDRI environment maps. By utilizing a pre-trained geometry-aware encoder and a feature alignment module, we map input images into a relightable 3D space, enhancing them with a strong face geometry and reflectance prior. Through extensive quantitative and qualitative evaluations, we show that our method outperforms the state-of-the-art methods in terms of efficacy, photorealism, and practical application. This includes producing 3D-consistent results of the full head, including hair, eyes, and expressions. Lite2Relight paves the way for large-scale adoption of photorealistic portrait editing in various domains, offering a robust, interactive solution to a previously constrained problem.},
  author       = {Rao, Pramod and Fox, Gereon and Meka, Abhimitra and Mallikarjun, B. R. and Zhan, Fangneng and Weyrich, Tim and Bickel, Bernd and Pfister, Hanspeter and Matusik, Wojciech and Elgharib, Mohamed and Theobalt, Christian},
  booktitle    = {Proceedings - SIGGRAPH 2024 Conference Papers},
  isbn         = {9798400705250},
  location     = {Denver, CO, United States},
  publisher    = {Association for Computing Machinery},
  title        = {{Lite2Relight: 3D-aware single image portrait relighting}},
  doi          = {10.1145/3641519.3657470},
  year         = {2024},
}

@inproceedings{14241,
  author       = {Tojo, Kenji and Shamir, Ariel and Bickel, Bernd and Umetani, Nobuyuki},
  title        = {{Stealth shaper: Reflectivity optimization as surface stylization}},
  booktitle    = {SIGGRAPH 2023 Conference Proceedings},
  location     = {Los Angeles, CA, United States},
  publisher    = {Association for Computing Machinery},
  year         = {2023},
  isbn         = {9798400701597},
  doi          = {10.1145/3588432.3591542},
  abstract     = {We present a technique to optimize the reflectivity of a surface while preserving its overall shape. The naïve optimization of the mesh vertices using the gradients of reflectivity simulations results in undesirable distortion. In contrast, our robust formulation optimizes the surface normal as an independent variable that bridges the reflectivity term with differential rendering, and the regularization term with as-rigid-as-possible elastic energy. We further adaptively subdivide the input mesh to improve the convergence. Consequently, our method can minimize the retroreflectivity of a wide range of input shapes, resulting in sharply creased shapes ubiquitous among stealth aircraft and Sci-Fi vehicles. Furthermore, by changing the reward for the direction of the outgoing light directions, our method can be applied to other reflectivity design tasks, such as the optimization of architectural walls to concentrate light in a specific region. We have tested the proposed method using light-transport simulations and real-world 3D-printed objects.},
}

@article{14628,
  author       = {Makatura, Liane and Wang, Bohan and Chen, Yi-Lu and Deng, Bolei and Wojtan, Christopher J and Bickel, Bernd and Matusik, Wojciech},
  title        = {{Procedural metamaterials: A unified procedural graph for metamaterial design}},
  journal      = {ACM Transactions on Graphics},
  volume       = {42},
  number       = {5},
  publisher    = {Association for Computing Machinery},
  year         = {2023},
  issn         = {1557-7368},
  keywords     = {Computer Graphics and Computer-Aided Design},
  doi          = {10.1145/3605389},
  abstract     = {We introduce a compact, intuitive procedural graph representation for cellular metamaterials, which are small-scale, tileable structures that can be architected to exhibit many useful material properties. Because the structures’ “architectures” vary widely—with elements such as beams, thin shells, and solid bulks—it is difficult to explore them using existing representations. Generic approaches like voxel grids are versatile, but it is cumbersome to represent and edit individual structures; architecture-specific approaches address these issues, but are incompatible with one another. By contrast, our procedural graph succinctly represents the construction process for any structure using a simple skeleton annotated with spatially varying thickness. To express the highly constrained triply periodic minimal surfaces (TPMS) in this manner, we present the first fully automated version of the conjugate surface construction method, which allows novices to create complex TPMS from intuitive input. We demonstrate our representation’s expressiveness, accuracy, and compactness by constructing a wide range of established structures and hundreds of novel structures with diverse architectures and material properties. We also conduct a user study to verify our representation’s ease-of-use and ability to expand engineers’ capacity for exploration.},
}

@article{12972,
  abstract     = {Embroidery is a long-standing and high-quality approach to making logos and images on textiles. Nowadays, it can also be performed via automated machines that weave threads with high spatial accuracy. A characteristic feature of the appearance of the threads is a high degree of anisotropy. The anisotropic behavior is caused by depositing thin but long strings of thread. As a result, the stitched patterns convey both color and direction. Artists leverage this anisotropic behavior to enhance pure color images with textures, illusions of motion, or depth cues. However, designing colorful embroidery patterns with prescribed directionality is a challenging task, one usually requiring an expert designer. In this work, we propose an interactive algorithm that generates machine-fabricable embroidery patterns from multi-chromatic images equipped with user-specified directionality fields. We cast the problem of finding a stitching pattern into vector theory. To find a suitable stitching pattern, we extract sources and sinks from the divergence field of the vector field extracted from the input and use them to trace streamlines. We further optimize the streamlines to guarantee a smooth and connected stitching pattern. The generated patterns approximate the color distribution constrained by the directionality field. To allow for further artistic control, the trade-off between color match and directionality match can be interactively explored via an intuitive slider. We showcase our approach by fabricating several embroidery paths.},
  author       = {Liu, Zhenyuan and Piovarci, Michael and Hafner, Christian and Charrondiere, Raphael and Bickel, Bernd},
  issn         = {1467-8659},
  journal      = {Computer Graphics Forum},
  keywords     = {embroidery, design, directionality, density, image},
  location     = {Saarbrucken, Germany},
  number       = {2},
  pages        = {397--409},
  publisher    = {Wiley},
  title        = {{Directionality-aware design of embroidery patterns}},
  doi          = {10.1111/cgf.14770},
  volume       = {42},
  year         = {2023},
}

@inproceedings{12976,
  author       = {Liao, Kang and Tricard, Thibault and Piovarci, Michael and Seidel, Hans-Peter and Babaei, Vahid},
  title        = {{Learning deposition policies for fused multi-material 3D printing}},
  booktitle    = {2023 IEEE International Conference on Robotics and Automation},
  location     = {London, United Kingdom},
  publisher    = {IEEE},
  volume       = {2023},
  pages        = {12345--12352},
  year         = {2023},
  issn         = {1050-4729},
  keywords     = {reinforcement learning, deposition, control, color, multi-filament},
  doi          = {10.1109/ICRA48891.2023.10160465},
  abstract     = {3D printing based on continuous deposition of materials, such as filament-based 3D printing, has seen widespread adoption thanks to its versatility in working with a wide range of materials. An important shortcoming of this type of technology is its limited multi-material capabilities. While there are simple hardware designs that enable multi-material printing in principle, the required software is heavily underdeveloped. A typical hardware design fuses together individual materials fed into a single chamber from multiple inlets before they are deposited. This design, however, introduces a time delay between the intended material mixture and its actual deposition. In this work, inspired by diverse path planning research in robotics, we show that this mechanical challenge can be addressed via improved printer control. We propose to formulate the search for optimal multi-material printing policies in a reinforcement
learning setup. We put forward a simple numerical deposition model that takes into account the non-linear material mixing and delayed material deposition. To validate our system we focus on color fabrication, a problem known for its strict requirements for varying material mixtures at a high spatial frequency. We demonstrate that our learned control policy outperforms state-of-the-art hand-crafted algorithms.},
}

@inproceedings{12979,
  abstract     = {Color and gloss are fundamental aspects of surface appearance. State-of-the-art fabrication techniques can manipulate both properties of the printed 3D objects. However, in the context of appearance reproduction, perceptual aspects of color and gloss are usually handled separately, even though previous perceptual studies suggest their interaction. Our work is motivated by previous studies demonstrating a perceived color shift due to a change in the object's gloss, i.e., two samples with the same color but different surface gloss appear as they have different colors. In this paper, we conduct new experiments which support this observation and provide insights into the magnitude and direction of the perceived color change. We use the observations as guidance to design a new method that estimates and corrects the color shift enabling the fabrication of objects with the same perceived color but different surface gloss. We formulate the problem as an optimization procedure solved using differentiable rendering. We evaluate the effectiveness of our method in perceptual experiments with 3D objects fabricated using a multi-material 3D printer and demonstrate potential applications.},
  author       = {Condor, Jorge and Piovarci, Michael and Bickel, Bernd and Didyk, Piotr},
  booktitle    = {SIGGRAPH 2023 Conference Proceedings},
  isbn         = {9798400701597},
  keywords     = {color, gloss, perception, color compensation, color management},
  location     = {Los Angeles, CA, United States},
  publisher    = {Association for Computing Machinery},
  title        = {{Gloss-aware color correction for 3D printing}},
  doi          = {10.1145/3588432.3591546},
  year         = {2023},
}

@article{12984,
  author       = {Piovarci, Michael and Chapiro, Alexandre and Bickel, Bernd},
  title        = {{Skin-Screen: A computational fabrication framework for color tattoos}},
  journal      = {ACM Transactions on Graphics},
  volume       = {42},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  location     = {Los Angeles, CA, United States},
  year         = {2023},
  issn         = {1557-7368},
  keywords     = {appearance, modeling, reproduction, tattoo, skin color, gamut mapping, ink-optimization, prosthetic},
  doi          = {10.1145/3592432},
  abstract     = {Tattoos are a highly popular medium, with both artistic and medical applications. Although the mechanical process of tattoo application has evolved historically, the results are reliant on the artisanal skill of the artist. This can be especially challenging for some skin tones, or in cases where artists lack experience. We provide the first systematic overview of tattooing as a computational fabrication technique. We built an automated tattooing rig and a recipe for the creation of silicone sheets mimicking realistic skin tones, which allowed us to create an accurate model predicting tattoo appearance. This enables several exciting applications including tattoo previewing, color retargeting, novel ink spectra optimization, color-accurate prosthetics, and more.},
}

@article{13265,
  author       = {Zhong, Fanchao and Xu, Yonglai and Zhao, Haisen and Lu, Lin},
  title        = {{As-Continuous-As-Possible extrusion-based fabrication of surface models}},
  journal      = {ACM Transactions on Graphics},
  volume       = {42},
  number       = {3},
  publisher    = {Association for Computing Machinery},
  year         = {2023},
  issn         = {1557-7368},
  doi          = {10.1145/3575859},
  abstract     = {In this study, we propose a computational framework for optimizing the continuity of the toolpath in fabricating surface models on an extrusion-based 3D printer. Toolpath continuity is a critical issue that influences both the quality and the efficiency of extrusion-based fabrication. Transfer moves lead to rough and bumpy surfaces, where this phenomenon worsens for materials with large viscosity, like clay. The effects of continuity on the surface models are even more severe in terms of the quality of the surface and the stability of the model. We introduce a criterion called the one–path patch (OPP) to represent a patch on the surface of the shell that can be traversed along one path by considering the constraints on fabrication. We study the properties of the OPPs and their merging operations to propose a bottom-up OPP merging procedure to decompose the given shell surface into a minimal number of OPPs, and to generate the “as-continuous-as-possible” (ACAP) toolpath. Furthermore, we augment the path planning algorithm with a curved-layer printing scheme that reduces staircase defects and improves the continuity of the toolpath by connecting multiple segments. We evaluated the ACAP algorithm on ceramic and thermoplastic materials, and the results showed that it improves the fabrication of surface models in terms of both efficiency and surface quality.},
}

@inproceedings{14798,
  abstract     = {A faithful reproduction of gloss is inherently difficult because of the limited dynamic range, peak luminance, and 3D capabilities of display devices. This work investigates how the display capabilities affect gloss appearance with respect to a real-world reference object. To this end, we employ an accurate imaging pipeline to achieve a perceptual gloss match between a virtual and real object presented side-by-side on an augmented-reality high-dynamic-range (HDR) stereoscopic display, which has not been previously attained to this extent. Based on this precise gloss reproduction, we conduct a series of gloss matching experiments to study how gloss perception degrades based on individual factors: object albedo, display luminance, dynamic range, stereopsis, and tone mapping. We support the study with a detailed analysis of individual factors, followed by an in-depth discussion on the observed perceptual effects. Our experiments demonstrate that stereoscopic presentation has a limited effect on the gloss matching task on our HDR display. However, both reduced luminance and dynamic range of the display reduce the perceived gloss. This means that the visual system cannot compensate for the changes in gloss appearance across luminance (lack of gloss constancy), and the tone mapping operator should be carefully selected when reproducing gloss on a low dynamic range (LDR) display.},
  author       = {Chen, Bin and Jindal, Akshay and Piovarci, Michael and Wang, Chao and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana and Mantiuk, Rafał K.},
  booktitle    = {Proceedings of the SIGGRAPH Asia 2023 Conference},
  isbn         = {9798400703157},
  location     = {Sydney, Australia},
  publisher    = {Association for Computing Machinery},
  title        = {{The effect of display capabilities on the gloss consistency between real and virtual objects}},
  doi          = {10.1145/3610548.3618226},
  year         = {2023},
}

@article{13049,
  abstract     = {We propose a computational design approach for covering a surface with individually addressable RGB LEDs, effectively forming a low-resolution surface screen. To achieve a low-cost and scalable approach, we propose creating designs from flat PCB panels bent in-place along the surface of a 3D printed core. Working with standard rigid PCBs enables the use of established PCB manufacturing services, allowing the fabrication of designs with several hundred LEDs. Our approach optimizes the PCB geometry for folding, and then jointly optimizes the LED packing, circuit and routing, solving a challenging layout problem under strict manufacturing requirements. Unlike paper, PCBs cannot bend beyond a certain point without breaking. Therefore, we introduce parametric cut patterns acting as hinges, designed to allow bending while remaining compact. To tackle the joint optimization of placement, circuit and routing, we propose a specialized algorithm that splits the global problem into one sub-problem per triangle, which is then individually solved. Our technique generates PCB blueprints in a completely automated way. After being fabricated by a PCB manufacturing service, the boards are bent and glued by the user onto the 3D printed support. We demonstrate our technique on a range of physical models and virtual examples, creating intricate surface light patterns from hundreds of LEDs.},
  author       = {Freire, Marco and Bhargava, Manas and Schreck, Camille and Hugron, Pierre-Alexandre and Bickel, Bernd and Lefebvre, Sylvain},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  keywords     = {PCB design and layout, Mesh geometry models},
  location     = {Los Angeles, CA, United States},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{PCBend: Light up your 3D shapes with foldable circuit boards}},
  doi          = {10.1145/3592411},
  volume       = {42},
  year         = {2023},
}

@article{13267,
  abstract     = {Three-dimensional (3D) reconstruction of living brain tissue down to an individual synapse level would create opportunities for decoding the dynamics and structure–function relationships of the brain’s complex and dense information processing network; however, this has been hindered by insufficient 3D resolution, inadequate signal-to-noise ratio and prohibitive light burden in optical imaging, whereas electron microscopy is inherently static. Here we solved these challenges by developing an integrated optical/machine-learning technology, LIONESS (live information-optimized nanoscopy enabling saturated segmentation). This leverages optical modifications to stimulated emission depletion microscopy in comprehensively, extracellularly labeled tissue and previous information on sample structure via machine learning to simultaneously achieve isotropic super-resolution, high signal-to-noise ratio and compatibility with living tissue. This allows dense deep-learning-based instance segmentation and 3D reconstruction at a synapse level, incorporating molecular, activity and morphodynamic information. LIONESS opens up avenues for studying the dynamic functional (nano-)architecture of living brain tissue.},
  author       = {Velicky, Philipp and Miguel Villalba, Eder and Michalska, Julia M. and Lyudchik, Julia and Wei, Donglai and Lin, Zudi and Watson, Jake and Troidl, Jakob and Beyer, Johanna and Ben Simon, Yoav and Sommer, Christoph M. and Jahr, Wiebke and Cenameri, Alban and Broichhagen, Johannes and Grant, Seth G. N. and Jonas, Peter M. and Novarino, Gaia and Pfister, Hanspeter and Bickel, Bernd and Danzl, Johann G.},
  issn         = {1548-7105},
  journal      = {Nature Methods},
  pages        = {1256--1265},
  publisher    = {Springer Nature},
  title        = {{Dense 4D nanoscale reconstruction of living brain tissue}},
  doi          = {10.1038/s41592-023-01936-6},
  volume       = {20},
  year         = {2023},
}

@phdthesis{12897,
  abstract     = {Inverse design problems in fabrication-aware shape optimization are typically solved on discrete representations such as polygonal meshes. This thesis argues that there are benefits to treating these problems in the same domain as human designers, namely, the parametric one. One reason is that discretizing a parametric model usually removes the capability of making further manual changes to the design, because the human intent is captured by the shape parameters. Beyond this, knowledge about a design problem can sometimes reveal a structure that is present in a smooth representation, but is fundamentally altered by discretizing. In this case, working in the parametric domain may even simplify the optimization task. We present two lines of research that explore both of these aspects of fabrication-aware shape optimization on parametric representations.

The first project studies the design of plane elastic curves and Kirchhoff rods, which are common mathematical models for describing the deformation of thin elastic rods such as beams, ribbons, cables, and hair. Our main contribution is a characterization of all curved shapes that can be attained by bending and twisting elastic rods having a stiffness that is allowed to vary across the length. Elements like these can be manufactured using digital fabrication devices such as 3d printers and digital cutters, and have applications in free-form architecture and soft robotics.

We show that the family of curved shapes that can be produced this way admits geometric description that is concise and computationally convenient. In the case of plane curves, the geometric description is intuitive enough to allow a designer to determine whether a curved shape is physically achievable by visual inspection alone. We also present shape optimization algorithms that convert a user-defined curve in the plane or in three dimensions into the geometry of an elastic rod that will naturally deform to follow this curve when its endpoints are attached to a support structure. Implemented in an interactive software design tool, the rod geometry is generated in real time as the user edits a curve and enables fast prototyping. 

The second project tackles the problem of general-purpose shape optimization on CAD models using a novel variant of the extended finite element method (XFEM). Our goal is the decoupling between the simulation mesh and the CAD model, so no geometry-dependent meshing or remeshing needs to be performed when the CAD parameters change during optimization. This is achieved by discretizing the embedding space of the CAD model, and using a new high-accuracy numerical integration method to enable XFEM on free-form elements bounded by the parametric surface patches of the model. Our simulation is differentiable from the CAD parameters to the simulation output, which enables us to use off-the-shelf gradient-based optimization procedures. The result is a method that fits seamlessly into the CAD workflow because it works on the same representation as the designer, enabling the alternation of manual editing and fabrication-aware optimization at will.},
  author       = {Hafner, Christian},
  isbn         = {978-3-99078-031-2},
  issn         = {2663-337X},
  pages        = {180},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Inverse shape design with parametric representations: Kirchhoff Rods and parametric surface models}},
  doi          = {10.15479/at:ista:12897},
  year         = {2023},
}

@article{13188,
  author       = {Hafner, Christian and Bickel, Bernd},
  title        = {{The design space of Kirchhoff rods}},
  journal      = {ACM Transactions on Graphics},
  volume       = {42},
  number       = {5},
  year         = {2023},
  publisher    = {Association for Computing Machinery},
  issn         = {1557-7368},
  doi          = {10.1145/3606033},
  keywords     = {Computer Graphics, Computational Design, Computational Geometry, Shape Modeling},
  abstract     = {The Kirchhoff rod model describes the bending and twisting of slender elastic rods in three dimensions, and has been widely studied to enable the prediction of how a rod will deform, given its geometry and boundary conditions. In this work, we study a number of inverse problems with the goal of computing the geometry of a straight rod that will automatically deform to match a curved target shape after attaching its endpoints to a support structure. Our solution lets us finely control the static equilibrium state of a rod by varying the cross-sectional profiles along its length. We also show that the set of physically realizable equilibrium states admits a concise geometric description in terms of linear line complexes, which leads to very efficient computational design algorithms. Implemented in an interactive software tool, they allow us to convert three-dimensional hand-drawn spline curves to elastic rods, and give feedback about the feasibility and practicality of a design in real time. We demonstrate the efficacy of our method by designing and manufacturing several physical prototypes with applications to interior design and soft robotics.},
}

@article{11442,
  author       = {Piovarci, Michael and Foshey, Michael and Xu, Jie and Erps, Timothy and Babaei, Vahid and Didyk, Piotr and Rusinkiewicz, Szymon and Matusik, Wojciech and Bickel, Bernd},
  title        = {{Closed-loop control of direct ink writing via reinforcement learning}},
  journal      = {ACM Transactions on Graphics},
  volume       = {41},
  number       = {4},
  year         = {2022},
  publisher    = {Association for Computing Machinery},
  issn         = {1557-7368},
  doi          = {10.1145/3528223.3530144},
  abstract     = {Enabling additive manufacturing to employ a wide range of novel, functional materials can be a major boost to this technology. However, making such materials printable requires painstaking trial-and-error by an expert operator, as they typically tend to exhibit peculiar rheological or hysteresis properties. Even in the case of successfully finding the process parameters, there is no guarantee of print-to-print consistency due to material differences between batches. These challenges make closed-loop feedback an attractive option where the process parameters are adjusted on-the-fly. There are several challenges for designing an efficient controller: the deposition parameters are complex and highly coupled, artifacts occur after long time horizons, simulating the deposition is computationally costly, and learning on hardware is intractable. In this work, we demonstrate the feasibility of learning a closed-loop control policy for additive manufacturing using reinforcement learning. We show that approximate, but efficient, numerical simulation is sufficient as long as it allows learning the behavioral patterns of deposition that translate to real-world experiences. In combination with reinforcement learning, our model can be used to discover control policies that outperform baseline controllers. Furthermore, the recovered policies have a minimal sim-to-real gap. We showcase this by applying our control policy in-vivo on a single-layer, direct ink writing printer.},
}

