@article{12972, abstract = {Embroidery is a long-standing and high-quality approach to making logos and images on textiles. Nowadays, it can also be performed via automated machines that weave threads with high spatial accuracy. A characteristic feature of the appearance of the threads is a high degree of anisotropy. The anisotropic behavior is caused by depositing thin but long strings of thread. As a result, the stitched patterns convey both color and direction. Artists leverage this anisotropic behavior to enhance pure color images with textures, illusions of motion, or depth cues. However, designing colorful embroidery patterns with prescribed directionality is a challenging task, one usually requiring an expert designer. In this work, we propose an interactive algorithm that generates machine-fabricable embroidery patterns from multi-chromatic images equipped with user-specified directionality fields. We cast the problem of finding a stitching pattern into vector theory. To find a suitable stitching pattern, we extract sources and sinks from the divergence field of the vector field extracted from the input and use them to trace streamlines. We further optimize the streamlines to guarantee a smooth and connected stitching pattern. The generated patterns approximate the color distribution constrained by the directionality field. To allow for further artistic control, the trade-off between color match and directionality match can be interactively explored via an intuitive slider. We showcase our approach by fabricating several embroidery paths.}, author = {Liu, Zhenyuan and Piovarci, Michael and Hafner, Christian and Charrondiere, Raphael and Bickel, Bernd}, issn = {1467-8659}, journal = {Computer Graphics Forum}, keywords = {embroidery, design, directionality, density, image}, location = {Saarbrucken, Germany}, number = {2}, pages = {397--409}, publisher = {Wiley}, title = {{Directionality-aware design of embroidery patterns}}, doi = {10.1111/cgf.14770}, volume = {42}, year = {2023}, } @inproceedings{14241, abstract = {We present a technique to optimize the reflectivity of a surface while preserving its overall shape. The naïve optimization of the mesh vertices using the gradients of reflectivity simulations results in undesirable distortion. In contrast, our robust formulation optimizes the surface normal as an independent variable that bridges the reflectivity term with differential rendering, and the regularization term with as-rigid-as-possible elastic energy. We further adaptively subdivide the input mesh to improve the convergence. Consequently, our method can minimize the retroreflectivity of a wide range of input shapes, resulting in sharply creased shapes ubiquitous among stealth aircraft and Sci-Fi vehicles. Furthermore, by changing the reward for the outgoing light directions, our method can be applied to other reflectivity design tasks, such as the optimization of architectural walls to concentrate light in a specific region.
We have tested the proposed method using light-transport simulations and real-world 3D-printed objects.}, author = {Tojo, Kenji and Shamir, Ariel and Bickel, Bernd and Umetani, Nobuyuki}, booktitle = {SIGGRAPH 2023 Conference Proceedings}, isbn = {9798400701597}, location = {Los Angeles, CA, United States}, publisher = {Association for Computing Machinery}, title = {{Stealth shaper: Reflectivity optimization as surface stylization}}, doi = {10.1145/3588432.3591542}, year = {2023}, } @article{14488, abstract = {Portrait viewpoint and illumination editing is an important problem with several applications in VR/AR, movies, and photography. Comprehensive knowledge of geometry and illumination is critical for obtaining photorealistic results. Current methods are unable to explicitly model in 3D while handling both viewpoint and illumination editing from a single image. In this paper, we propose VoRF, a novel approach that can take even a single portrait image as input and relight human heads under novel illuminations that can be viewed from arbitrary viewpoints. VoRF represents a human head as a continuous volumetric field and learns a prior model of human heads using a coordinate-based MLP with individual latent spaces for identity and illumination. The prior model is learned in an auto-decoder manner over a diverse class of head shapes and appearances, allowing VoRF to generalize to novel test identities from a single input image. Additionally, VoRF has a reflectance MLP that uses the intermediate features of the prior model for rendering One-Light-at-A-Time (OLAT) images under novel views. We synthesize novel illuminations by combining these OLAT images with target environment maps. Qualitative and quantitative evaluations demonstrate the effectiveness of VoRF for relighting and novel view synthesis, even when applied to unseen subjects under uncontrolled illumination. This work is an extension of Rao et al. (VoRF: Volumetric Relightable Faces, 2022). We provide extensive evaluation and ablative studies of our model and also provide an application, where any face can be relighted using textual input.}, author = {Rao, Pramod and Mallikarjun, B. R. and Fox, Gereon and Weyrich, Tim and Bickel, Bernd and Pfister, Hanspeter and Matusik, Wojciech and Zhan, Fangneng and Tewari, Ayush and Theobalt, Christian and Elgharib, Mohamed}, issn = {1573-1405}, journal = {International Journal of Computer Vision}, publisher = {Springer Nature}, title = {{A deeper analysis of volumetric relightable faces}}, doi = {10.1007/s11263-023-01899-3}, year = {2023}, } @article{14628, abstract = {We introduce a compact, intuitive procedural graph representation for cellular metamaterials, which are small-scale, tileable structures that can be architected to exhibit many useful material properties. Because the structures’ “architectures” vary widely—with elements such as beams, thin shells, and solid bulks—it is difficult to explore them using existing representations. Generic approaches like voxel grids are versatile, but it is cumbersome to represent and edit individual structures; architecture-specific approaches address these issues, but are incompatible with one another. By contrast, our procedural graph succinctly represents the construction process for any structure using a simple skeleton annotated with spatially varying thickness.
To express the highly constrained triply periodic minimal surfaces (TPMS) in this manner, we present the first fully automated version of the conjugate surface construction method, which allows novices to create complex TPMS from intuitive input. We demonstrate our representation’s expressiveness, accuracy, and compactness by constructing a wide range of established structures and hundreds of novel structures with diverse architectures and material properties. We also conduct a user study to verify our representation’s ease-of-use and ability to expand engineers’ capacity for exploration.}, author = {Makatura, Liane and Wang, Bohan and Chen, Yi-Lu and Deng, Bolei and Wojtan, Christopher J and Bickel, Bernd and Matusik, Wojciech}, issn = {0730-0301}, journal = {ACM Transactions on Graphics}, keywords = {Computer Graphics and Computer-Aided Design}, number = {5}, publisher = {Association for Computing Machinery}, title = {{Procedural metamaterials: A unified procedural graph for metamaterial design}}, doi = {10.1145/3605389}, volume = {42}, year = {2023}, } @inproceedings{12976, abstract = {3D printing based on continuous deposition of materials, such as filament-based 3D printing, has seen widespread adoption thanks to its versatility in working with a wide range of materials. An important shortcoming of this type of technology is its limited multi-material capabilities. While there are simple hardware designs that enable multi-material printing in principle, the required software is heavily underdeveloped. A typical hardware design fuses together individual materials fed into a single chamber from multiple inlets before they are deposited. This design, however, introduces a time delay between the intended material mixture and its actual deposition. In this work, inspired by diverse path planning research in robotics, we show that this mechanical challenge can be addressed via improved printer control. We propose to formulate the search for optimal multi-material printing policies in a reinforcement learning setup. We put forward a simple numerical deposition model that takes into account the non-linear material mixing and delayed material deposition. To validate our system, we focus on color fabrication, a problem known for its strict requirements for varying material mixtures at a high spatial frequency. We demonstrate that our learned control policy outperforms state-of-the-art hand-crafted algorithms.}, author = {Liao, Kang and Tricard, Thibault and Piovarci, Michael and Seidel, Hans-Peter and Babaei, Vahid}, booktitle = {2023 IEEE International Conference on Robotics and Automation}, issn = {1050-4729}, keywords = {reinforcement learning, deposition, control, color, multi-filament}, location = {London, United Kingdom}, pages = {12345--12352}, publisher = {IEEE}, title = {{Learning deposition policies for fused multi-material 3D printing}}, doi = {10.1109/ICRA48891.2023.10160465}, volume = {2023}, year = {2023}, } @article{13265, abstract = {In this study, we propose a computational framework for optimizing the continuity of the toolpath in fabricating surface models on an extrusion-based 3D printer. Toolpath continuity is a critical issue that influences both the quality and the efficiency of extrusion-based fabrication. Transfer moves lead to rough and bumpy surfaces, and this effect worsens for materials with high viscosity, such as clay. For surface models, the effects of toolpath continuity on surface quality and model stability are even more severe.
We introduce a criterion called the one-path patch (OPP) to represent a patch on the surface of the shell that can be traversed along one path by considering the constraints on fabrication. We study the properties of the OPPs and their merging operations to propose a bottom-up OPP merging procedure to decompose the given shell surface into a minimal number of OPPs, and to generate the “as-continuous-as-possible” (ACAP) toolpath. Furthermore, we augment the path planning algorithm with a curved-layer printing scheme that reduces staircase defects and improves the continuity of the toolpath by connecting multiple segments. We evaluated the ACAP algorithm on ceramic and thermoplastic materials, and the results showed that it improves the fabrication of surface models in terms of both efficiency and surface quality.}, author = {Zhong, Fanchao and Xu, Yonglai and Zhao, Haisen and Lu, Lin}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, number = {3}, publisher = {Association for Computing Machinery}, title = {{As-Continuous-As-Possible extrusion-based fabrication of surface models}}, doi = {10.1145/3575859}, volume = {42}, year = {2023}, } @article{13267, abstract = {Three-dimensional (3D) reconstruction of living brain tissue down to an individual synapse level would create opportunities for decoding the dynamics and structure–function relationships of the brain’s complex and dense information processing network; however, this has been hindered by insufficient 3D resolution, inadequate signal-to-noise ratio and prohibitive light burden in optical imaging, whereas electron microscopy is inherently static. Here we solved these challenges by developing an integrated optical/machine-learning technology, LIONESS (live information-optimized nanoscopy enabling saturated segmentation). This leverages optical modifications to stimulated emission depletion microscopy in comprehensively, extracellularly labeled tissue and previous information on sample structure via machine learning to simultaneously achieve isotropic super-resolution, high signal-to-noise ratio and compatibility with living tissue. This allows dense deep-learning-based instance segmentation and 3D reconstruction at a synapse level, incorporating molecular, activity and morphodynamic information. LIONESS opens up avenues for studying the dynamic functional (nano-)architecture of living brain tissue.}, author = {Velicky, Philipp and Miguel Villalba, Eder and Michalska, Julia M and Lyudchik, Julia and Wei, Donglai and Lin, Zudi and Watson, Jake and Troidl, Jakob and Beyer, Johanna and Ben Simon, Yoav and Sommer, Christoph M and Jahr, Wiebke and Cenameri, Alban and Broichhagen, Johannes and Grant, Seth G.N. and Jonas, Peter M and Novarino, Gaia and Pfister, Hanspeter and Bickel, Bernd and Danzl, Johann G}, issn = {1548-7105}, journal = {Nature Methods}, pages = {1256--1265}, publisher = {Springer Nature}, title = {{Dense 4D nanoscale reconstruction of living brain tissue}}, doi = {10.1038/s41592-023-01936-6}, volume = {20}, year = {2023}, } @inproceedings{14798, abstract = {A faithful reproduction of gloss is inherently difficult because of the limited dynamic range, peak luminance, and 3D capabilities of display devices. This work investigates how the display capabilities affect gloss appearance with respect to a real-world reference object.
To this end, we employ an accurate imaging pipeline to achieve a perceptual gloss match between a virtual and real object presented side-by-side on an augmented-reality high-dynamic-range (HDR) stereoscopic display, which has not been previously attained to this extent. Based on this precise gloss reproduction, we conduct a series of gloss matching experiments to study how gloss perception degrades based on individual factors: object albedo, display luminance, dynamic range, stereopsis, and tone mapping. We support the study with a detailed analysis of individual factors, followed by an in-depth discussion on the observed perceptual effects. Our experiments demonstrate that stereoscopic presentation has a limited effect on the gloss matching task on our HDR display. However, both reduced luminance and dynamic range of the display reduce the perceived gloss. This means that the visual system cannot compensate for the changes in gloss appearance across luminance (lack of gloss constancy), and the tone mapping operator should be carefully selected when reproducing gloss on a low dynamic range (LDR) display.}, author = {Chen, Bin and Jindal, Akshay and Piovarci, Michael and Wang, Chao and Seidel, Hans Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana and Mantiuk, Rafał K.}, booktitle = {Proceedings of the SIGGRAPH Asia 2023 Conference}, isbn = {9798400703157}, location = {Sydney, Australia}, publisher = {Association for Computing Machinery}, title = {{The effect of display capabilities on the gloss consistency between real and virtual objects}}, doi = {10.1145/3610548.3618226}, year = {2023}, } @article{13049, abstract = {We propose a computational design approach for covering a surface with individually addressable RGB LEDs, effectively forming a low-resolution surface screen. To achieve a low-cost and scalable approach, we propose creating designs from flat PCB panels bent in-place along the surface of a 3D printed core. Working with standard rigid PCBs enables the use of established PCB manufacturing services, allowing the fabrication of designs with several hundred LEDs. Our approach optimizes the PCB geometry for folding, and then jointly optimizes the LED packing, circuit and routing, solving a challenging layout problem under strict manufacturing requirements. Unlike paper, PCBs cannot bend beyond a certain point without breaking. Therefore, we introduce parametric cut patterns acting as hinges, designed to allow bending while remaining compact. To tackle the joint optimization of placement, circuit and routing, we propose a specialized algorithm that splits the global problem into one sub-problem per triangle, which is then individually solved. Our technique generates PCB blueprints in a completely automated way. After being fabricated by a PCB manufacturing service, the boards are bent and glued by the user onto the 3D printed support. 
We demonstrate our technique on a range of physical models and virtual examples, creating intricate surface light patterns from hundreds of LEDs.}, author = {Freire, Marco and Bhargava, Manas and Schreck, Camille and Hugron, Pierre-Alexandre and Bickel, Bernd and Lefebvre, Sylvain}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, keywords = {PCB design and layout, Mesh geometry models}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {Association for Computing Machinery}, title = {{PCBend: Light up your 3D shapes with foldable circuit boards}}, doi = {10.1145/3592411}, volume = {42}, year = {2023}, } @article{12984, abstract = {Tattoos are a highly popular medium, with both artistic and medical applications. Although the mechanical process of tattoo application has evolved historically, the results are reliant on the artisanal skill of the artist. This can be especially challenging for some skin tones, or in cases where artists lack experience. We provide the first systematic overview of tattooing as a computational fabrication technique. We built an automated tattooing rig and a recipe for the creation of silicone sheets mimicking realistic skin tones, which allowed us to create an accurate model predicting tattoo appearance. This enables several exciting applications including tattoo previewing, color retargeting, novel ink spectra optimization, color-accurate prosthetics, and more.}, author = {Piovarci, Michael and Chapiro, Alexandre and Bickel, Bernd}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, keywords = {appearance, modeling, reproduction, tattoo, skin color, gamut mapping, ink-optimization, prosthetic}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {Association for Computing Machinery}, title = {{Skin-Screen: A computational fabrication framework for color tattoos}}, doi = {10.1145/3592432}, volume = {42}, year = {2023}, } @inproceedings{12979, abstract = {Color and gloss are fundamental aspects of surface appearance. State-of-the-art fabrication techniques can manipulate both properties of the printed 3D objects. However, in the context of appearance reproduction, perceptual aspects of color and gloss are usually handled separately, even though previous perceptual studies suggest their interaction. Our work is motivated by previous studies demonstrating a perceived color shift due to a change in the object’s gloss, i.e., two samples with the same color but different surface gloss appear as if they had different colors. In this paper, we conduct new experiments which support this observation and provide insights into the magnitude and direction of the perceived color change. We use the observations as guidance to design a new method that estimates and corrects the color shift, enabling the fabrication of objects with the same perceived color but different surface gloss. We formulate the problem as an optimization procedure solved using differentiable rendering. We evaluate the effectiveness of our method in perceptual experiments with 3D objects fabricated using a multi-material 3D printer and demonstrate potential applications.
}, author = {Condor, Jorge and Piovarci, Michael and Bickel, Bernd and Didyk, Piotr}, booktitle = {SIGGRAPH ’23 Conference Proceedings}, isbn = {9798400701597}, keywords = {color, gloss, perception, color compensation, color management}, location = {Los Angeles, CA, United States}, publisher = {Association for Computing Machinery}, title = {{Gloss-aware color correction for 3D printing}}, doi = {10.1145/3588432.3591546}, year = {2023}, } @phdthesis{12897, abstract = {Inverse design problems in fabrication-aware shape optimization are typically solved on discrete representations such as polygonal meshes. This thesis argues that there are benefits to treating these problems in the same domain as human designers, namely, the parametric one. One reason is that discretizing a parametric model usually removes the capability of making further manual changes to the design, because the human intent is captured by the shape parameters. Beyond this, knowledge about a design problem can sometimes reveal a structure that is present in a smooth representation, but is fundamentally altered by discretizing. In this case, working in the parametric domain may even simplify the optimization task. We present two lines of research that explore both of these aspects of fabrication-aware shape optimization on parametric representations. The first project studies the design of plane elastic curves and Kirchhoff rods, which are common mathematical models for describing the deformation of thin elastic rods such as beams, ribbons, cables, and hair. Our main contribution is a characterization of all curved shapes that can be attained by bending and twisting elastic rods having a stiffness that is allowed to vary across the length. Elements like these can be manufactured using digital fabrication devices such as 3d printers and digital cutters, and have applications in free-form architecture and soft robotics. We show that the family of curved shapes that can be produced this way admits a geometric description that is concise and computationally convenient. In the case of plane curves, the geometric description is intuitive enough to allow a designer to determine whether a curved shape is physically achievable by visual inspection alone. We also present shape optimization algorithms that convert a user-defined curve in the plane or in three dimensions into the geometry of an elastic rod that will naturally deform to follow this curve when its endpoints are attached to a support structure. Implemented in an interactive software design tool, the rod geometry is generated in real time as the user edits a curve, enabling fast prototyping. The second project tackles the problem of general-purpose shape optimization on CAD models using a novel variant of the extended finite element method (XFEM). Our goal is to decouple the simulation mesh from the CAD model, so that no geometry-dependent meshing or remeshing needs to be performed when the CAD parameters change during optimization. This is achieved by discretizing the embedding space of the CAD model, and using a new high-accuracy numerical integration method to enable XFEM on free-form elements bounded by the parametric surface patches of the model. Our simulation is differentiable from the CAD parameters to the simulation output, which enables us to use off-the-shelf gradient-based optimization procedures.
The result is a method that fits seamlessly into the CAD workflow because it works on the same representation as the designer, enabling the alternation of manual editing and fabrication-aware optimization at will.}, author = {Hafner, Christian}, isbn = {978-3-99078-031-2}, issn = {2663-337X}, pages = {180}, publisher = {Institute of Science and Technology Austria}, title = {{Inverse shape design with parametric representations: Kirchhoff Rods and parametric surface models}}, doi = {10.15479/at:ista:12897}, year = {2023}, } @article{13188, abstract = {The Kirchhoff rod model describes the bending and twisting of slender elastic rods in three dimensions, and has been widely studied to enable the prediction of how a rod will deform, given its geometry and boundary conditions. In this work, we study a number of inverse problems with the goal of computing the geometry of a straight rod that will automatically deform to match a curved target shape after attaching its endpoints to a support structure. Our solution lets us finely control the static equilibrium state of a rod by varying the cross-sectional profiles along its length. We also show that the set of physically realizable equilibrium states admits a concise geometric description in terms of linear line complexes, which leads to very efficient computational design algorithms. Implemented in an interactive software tool, they allow us to convert three-dimensional hand-drawn spline curves to elastic rods, and give feedback about the feasibility and practicality of a design in real time. We demonstrate the efficacy of our method by designing and manufacturing several physical prototypes with applications to interior design and soft robotics.}, author = {Hafner, Christian and Bickel, Bernd}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, keywords = {Computer Graphics, Computational Design, Computational Geometry, Shape Modeling}, number = {5}, publisher = {Association for Computing Machinery}, title = {{The design space of Kirchhoff rods}}, doi = {10.1145/3606033}, volume = {42}, year = {2023}, } @inproceedings{12135, abstract = {A good match of material appearance between real-world objects and their digital on-screen representations is critical for many applications such as fabrication, design, and e-commerce. However, faithful appearance reproduction is challenging, especially for complex phenomena, such as gloss. In most cases, the view-dependent nature of gloss and the range of luminance values required for reproducing glossy materials exceed the current capabilities of display devices. As a result, appearance reproduction poses significant problems even with accurately rendered images. This paper studies the gap between the gloss perceived from real-world objects and their digital counterparts. Based on our psychophysical experiments on a wide range of 3D printed samples and their corresponding photographs, we derive insights on the influence of geometry, illumination, and the display’s brightness, and measure the change in gloss appearance due to the display limitations.
Our evaluation experiments demonstrate that using the prediction to correct material parameters in a rendering system improves the match of gloss appearance between real objects and their visualization on a display device.}, author = {Chen, Bin and Piovarci, Michael and Wang, Chao and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana}, booktitle = {SIGGRAPH Asia 2022 Conference Papers}, isbn = {9781450394703}, location = {Daegu, South Korea}, publisher = {Association for Computing Machinery}, title = {{Gloss management for consistent reproduction of real and virtual objects}}, doi = {10.1145/3550469.3555406}, volume = {2022}, year = {2022}, } @article{11442, abstract = {Enabling additive manufacturing to employ a wide range of novel, functional materials can be a major boost to this technology. However, making such materials printable requires painstaking trial-and-error by an expert operator, as they typically tend to exhibit peculiar rheological or hysteresis properties. Even in the case of successfully finding the process parameters, there is no guarantee of print-to-print consistency due to material differences between batches. These challenges make closed-loop feedback an attractive option where the process parameters are adjusted on-the-fly. There are several challenges for designing an efficient controller: the deposition parameters are complex and highly coupled, artifacts occur after long time horizons, simulating the deposition is computationally costly, and learning on hardware is intractable. In this work, we demonstrate the feasibility of learning a closed-loop control policy for additive manufacturing using reinforcement learning. We show that approximate, but efficient, numerical simulation is sufficient as long as it allows learning the behavioral patterns of deposition that translate to real-world experiences. In combination with reinforcement learning, our model can be used to discover control policies that outperform baseline controllers. Furthermore, the recovered policies have a minimal sim-to-real gap. We showcase this by applying our control policy in-vivo on a single-layer, direct ink writing printer. }, author = {Piovarci, Michael and Foshey, Michael and Xu, Jie and Erps, Timothy and Babaei, Vahid and Didyk, Piotr and Rusinkiewicz, Szymon and Matusik, Wojciech and Bickel, Bernd}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {Association for Computing Machinery}, title = {{Closed-loop control of direct ink writing via reinforcement learning}}, doi = {10.1145/3528223.3530144}, volume = {41}, year = {2022}, } @article{10922, abstract = {We study structural rigidity for assemblies with mechanical joints. Existing methods identify whether an assembly is structurally rigid by assuming parts are perfectly rigid. Yet, an assembly identified as rigid may not be that “rigid” in practice, and existing methods cannot quantify how rigid an assembly is. We address this limitation by developing a new measure, worst-case rigidity, to quantify the rigidity of an assembly as the largest possible deformation that the assembly undergoes for arbitrary external loads of fixed magnitude. Computing worst-case rigidity is non-trivial due to non-rigid parts and different joint types. We thus formulate a new computational approach by encoding parts and their connections into a stiffness matrix, in which parts are modeled as deformable objects and joints as soft constraints. 
Based on this, we formulate worst-case rigidity analysis as an optimization that seeks the worst-case deformation of an assembly for arbitrary external loads, and solve the optimization problem via an eigenanalysis. Furthermore, we present methods to optimize the geometry and topology of various assemblies to enhance their rigidity, as guided by our rigidity measure. In the end, we validate our method on a variety of assembly structures with physical experiments and demonstrate its effectiveness by designing and fabricating several structurally rigid assemblies.}, author = {Liu, Zhenyuan and Hu, Jingyu and Xu, Hao and Song, Peng and Zhang, Ran and Bickel, Bernd and Fu, Chi-Wing}, issn = {1467-8659}, journal = {Computer Graphics Forum}, number = {2}, pages = {507--519}, publisher = {Wiley}, title = {{Worst-case rigidity analysis and optimization for assemblies with mechanical joints}}, doi = {10.1111/cgf.14490}, volume = {41}, year = {2022}, } @article{11735, abstract = {Interlocking puzzles are intriguing geometric games where the puzzle pieces are held together based on their geometric arrangement, preventing the puzzle from falling apart. High-level-of-difficulty, or simply high-level, interlocking puzzles are a subclass of interlocking puzzles that require multiple moves to take out the first subassembly from the puzzle. Solving a high-level interlocking puzzle is a challenging task since one has to explore many different configurations of the puzzle pieces until reaching a configuration where the first subassembly can be taken out. Designing a high-level interlocking puzzle with a user-specified level of difficulty is even harder since the puzzle pieces have to be interlocking in all the configurations before the first subassembly is taken out. In this paper, we present a computational approach to design high-level interlocking puzzles. The core idea is to represent all possible configurations of an interlocking puzzle as well as transitions among these configurations using a rooted, undirected graph called a disassembly graph and leverage this graph to find a disassembly plan that requires a minimal number of moves to take out the first subassembly from the puzzle. At the design stage, our algorithm iteratively constructs the geometry of each puzzle piece to expand the disassembly graph incrementally, aiming to achieve a user-specified level of difficulty. We show that our approach allows efficient generation of high-level interlocking puzzles of various shape complexities, including new solutions not attainable by state-of-the-art approaches.}, author = {Chen, Rulin and Wang, Ziqi and Song, Peng and Bickel, Bernd}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {Association for Computing Machinery}, title = {{Computational design of high-level interlocking puzzles}}, doi = {10.1145/3528223.3530071}, volume = {41}, year = {2022}, } @article{11993, abstract = {Moulding refers to a set of manufacturing techniques in which a mould, usually a cavity or a solid frame, is used to shape a liquid or pliable material into an object of the desired shape. The popularity of moulding comes from its effectiveness, scalability and versatility in terms of employed materials. Its relevance as a fabrication process is demonstrated by the extensive literature covering different aspects related to mould design, from material flow simulation to the automation of mould geometry design. 
In this state-of-the-art report, we provide an extensive review of the automatic methods for the design of moulds, focusing on contributions from a geometric perspective. We classify existing mould design methods based on their computational approach and the nature of their target moulding process. We summarize the relationships between computational approaches and moulding techniques, highlighting their strengths and limitations. Finally, we discuss potential future research directions.}, author = {Alderighi, Thomas and Malomo, Luigi and Auzinger, Thomas and Bickel, Bernd and Cignoni, Paolo and Pietroni, Nico}, issn = {1467-8659}, journal = {Computer Graphics Forum}, keywords = {Computer Graphics and Computer-Aided Design}, number = {6}, pages = {435--452}, publisher = {Wiley}, title = {{State of the art in computational mould design}}, doi = {10.1111/cgf.14581}, volume = {41}, year = {2022}, } @inproceedings{12452, abstract = {Portrait viewpoint and illumination editing is an important problem with several applications in VR/AR, movies, and photography. Comprehensive knowledge of geometry and illumination is critical for obtaining photorealistic results. Current methods are unable to explicitly model in 3D while handling both viewpoint and illumination editing from a single image. In this paper, we propose VoRF, a novel approach that can take even a single portrait image as input and relight human heads under novel illuminations that can be viewed from arbitrary viewpoints. VoRF represents a human head as a continuous volumetric field and learns a prior model of human heads using a coordinate-based MLP with separate latent spaces for identity and illumination. The prior model is learnt in an auto-decoder manner over a diverse class of head shapes and appearances, allowing VoRF to generalize to novel test identities from a single input image. Additionally, VoRF has a reflectance MLP that uses the intermediate features of the prior model for rendering One-Light-at-A-Time (OLAT) images under novel views. We synthesize novel illuminations by combining these OLAT images with target environment maps. Qualitative and quantitative evaluations demonstrate the effectiveness of VoRF for relighting and novel view synthesis even when applied to unseen subjects under uncontrolled illuminations.}, author = {Rao, Pramod and B R, Mallikarjun and Fox, Gereon and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Tewari, Ayush and Theobalt, Christian and Elgharib, Mohamed}, booktitle = {33rd British Machine Vision Conference}, location = {London, United Kingdom}, publisher = {British Machine Vision Association and Society for Pattern Recognition}, title = {{VoRF: Volumetric Relightable Faces}}, year = {2022}, } @unpublished{11943, abstract = {Complex wiring between neurons underlies the information-processing network enabling all brain functions, including cognition and memory. For understanding how the network is structured, processes information, and changes over time, comprehensive visualization of the architecture of living brain tissue with its cellular and molecular components would open up major opportunities. However, electron microscopy (EM) provides the nanometre-scale resolution required for full in-silico reconstruction [1–5], yet is limited to fixed specimens and static representations.
Light microscopy allows live observation, with super-resolution approaches [6–12] facilitating nanoscale visualization, but comprehensive 3D-reconstruction of living brain tissue has been hindered by tissue photo-burden, photobleaching, insufficient 3D-resolution, and inadequate signal-to-noise ratio (SNR). Here we demonstrate saturated reconstruction of living brain tissue. We developed an integrated imaging and analysis technology, adapting stimulated emission depletion (STED) microscopy [6,13] in extracellularly labelled tissue [14] for high SNR and near-isotropic resolution. Centrally, a two-stage deep-learning approach leveraged previously obtained information on sample structure to drastically reduce photo-burden and enable automated volumetric reconstruction down to single synapse level. Live reconstruction provides unbiased analysis of tissue architecture across time in relation to functional activity and targeted activation, and contextual understanding of molecular labelling. This adoptable technology will facilitate novel insights into the dynamic functional architecture of living brain tissue.}, author = {Velicky, Philipp and Miguel Villalba, Eder and Michalska, Julia M and Wei, Donglai and Lin, Zudi and Watson, Jake and Troidl, Jakob and Beyer, Johanna and Ben Simon, Yoav and Sommer, Christoph M and Jahr, Wiebke and Cenameri, Alban and Broichhagen, Johannes and Grant, Seth G. N. and Jonas, Peter M and Novarino, Gaia and Pfister, Hanspeter and Bickel, Bernd and Danzl, Johann G}, booktitle = {bioRxiv}, publisher = {Cold Spring Harbor Laboratory}, title = {{Saturated reconstruction of living brain tissue}}, doi = {10.1101/2022.03.16.484431}, year = {2022}, } @inproceedings{10148, abstract = {Tactile feedback of an object’s surface enables us to discern its material properties and affordances. This understanding is used in digital fabrication processes by creating objects with high-resolution surface variations to influence a user’s tactile perception. As the design of such surface haptics commonly relies on knowledge from real-life experiences, it is unclear how to adapt this information for digital design methods. In this work, we investigate replicating the haptics of real materials. Using an existing process for capturing an object’s microgeometry, we digitize and reproduce the stable surface information of a set of 15 fabric samples. In a psychophysical experiment, we evaluate the tactile qualities of our set of original samples and their replicas. From our results, we see that direct reproduction of surface variations is able to influence different psychophysical dimensions of the tactile perception of surface textures. While the fabrication process did not preserve all properties, our approach underlines that replication of surface microgeometries benefits fabrication methods in terms of haptic perception by covering a large range of tactile variations. Moreover, by changing the surface structure of a single fabricated material, its material perception can be influenced.
We conclude by proposing strategies for capturing and reproducing digitized textures to better resemble the perceived haptics of the originals.}, author = {Degraen, Donald and Piovarci, Michael and Bickel, Bernd and Kruger, Antonio}, booktitle = {The 34th Annual ACM Symposium on User Interface Software and Technology}, isbn = {978-1-4503-8635-7}, location = {Virtual}, pages = {954--971}, publisher = {Association for Computing Machinery}, title = {{Capturing tactile properties of real surfaces for haptic reproduction}}, doi = {10.1145/3472749.3474798}, year = {2021}, } @article{9241, abstract = {Volumetric light transport is a pervasive physical phenomenon, and therefore its accurate simulation is important for a broad array of disciplines. While suitable mathematical models for computing the transport are now available, obtaining the necessary material parameters needed to drive such simulations is a challenging task: direct measurements of these parameters from material samples are seldom possible. Building on the inverse scattering paradigm, we present a novel measurement approach which indirectly infers the transport parameters from extrinsic observations of multiple-scattered radiance. The novelty of the proposed approach lies in replacing structured illumination with a structured reflector bonded to the sample, and a robust fitting procedure that largely compensates for potential systematic errors in the calibration of the setup. We show the feasibility of our approach by validating simulations of complex 3D compositions of the measured materials against physical prints, using photo-polymer resins. As presented in this paper, our technique yields colorspace data suitable for accurate appearance reproduction in the area of 3D printing. Beyond that, and without fundamental changes to the basic measurement methodology, it could equally well be used to obtain spectral measurements that are useful for other application areas.}, author = {Elek, Oskar and Zhang, Ran and Sumin, Denis and Myszkowski, Karol and Bickel, Bernd and Wilkie, Alexander and Křivánek, Jaroslav and Weyrich, Tim}, issn = {1094-4087}, journal = {Optics Express}, number = {5}, pages = {7568--7588}, publisher = {The Optical Society}, title = {{Robust and practical measurement of volume transport parameters in solid photo-polymer materials for 3D printing}}, doi = {10.1364/OE.406095}, volume = {29}, year = {2021}, } @article{9376, abstract = {This paper presents a method for designing planar multistable compliant structures. Given a sequence of desired stable states and the corresponding poses of the structure, we identify the topology and geometric realization of a mechanism—consisting of bars and joints—that is able to physically reproduce the desired multistable behavior. In order to solve this problem efficiently, we build on insights from minimally rigid graph theory to identify simple but effective topologies for the mechanism. We then optimize its geometric parameters, such as joint positions and bar lengths, to obtain correct transitions between the given poses. Simultaneously, we ensure adequate stability of each pose based on an effective approximate error metric related to the elastic energy Hessian of the bars in the mechanism. As demonstrated by our results, we obtain functional multistable mechanisms of manageable complexity that can be fabricated using 3D printing.
Further, we evaluated the effectiveness of our method on a large number of examples in simulation and fabricated several physical prototypes.}, author = {Zhang, Ran and Auzinger, Thomas and Bickel, Bernd}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, keywords = {multistability, mechanism, computational design, rigidity}, number = {5}, publisher = {Association for Computing Machinery}, title = {{Computational design of planar multistable compliant structures}}, doi = {10.1145/3453477}, volume = {40}, year = {2021}, } @article{9408, abstract = {We present a computational design system that assists users in modeling, optimizing, and fabricating quad-robots with soft skins. Our system addresses the challenging task of predicting their physical behavior by fully integrating the multibody dynamics of the mechanical skeleton and the elastic behavior of the soft skin. The developed motion control strategy uses an alternating optimization scheme to avoid expensive full space-time optimization, interleaving space-time optimization for the skeleton and frame-by-frame optimization for the full dynamics. The output is a set of motor torques that drive the robot to achieve a user-prescribed motion trajectory. We also provide a collection of convenient engineering tools and empirical manufacturing guidance to support the fabrication of the designed quad-robot. We validate the feasibility of designs generated with our system through physics simulations and with a physically-fabricated prototype.}, author = {Feng, Xudong and Liu, Jiafeng and Wang, Huamin and Yang, Yin and Bao, Hujun and Bickel, Bernd and Xu, Weiwei}, issn = {10772626}, journal = {IEEE Transactions on Visualization and Computer Graphics}, number = {6}, publisher = {IEEE}, title = {{Computational design of skinned Quad-Robots}}, doi = {10.1109/TVCG.2019.2957218}, volume = {27}, year = {2021}, } @article{9819, abstract = {Photorealistic editing of head portraits is a challenging task as humans are very sensitive to inconsistencies in faces. We present an approach for high-quality intuitive editing of the camera viewpoint and scene illumination (parameterised with an environment map) in a portrait image. This requires our method to capture and control the full reflectance field of the person in the image. Most editing approaches rely on supervised learning using training data captured with setups such as light and camera stages. Such datasets are expensive to acquire, not readily available and do not capture all the rich variations of in-the-wild portrait images. In addition, most supervised approaches only focus on relighting, and do not allow camera viewpoint editing. Thus, they only capture and control a subset of the reflectance field. Recently, portrait editing has been demonstrated by operating in the generative model space of StyleGAN. While such approaches do not require direct supervision, there is a significant loss of quality when compared to the supervised approaches. In this paper, we present a method which learns from limited supervised training data. The training images only include people in a fixed neutral expression with eyes closed, without much hair or background variations. Each person is captured under 150 one-light-at-a-time conditions and under 8 camera poses. Instead of training directly in the image space, we design a supervised problem which learns transformations in the latent space of StyleGAN. This combines the best of supervised learning and generative adversarial modeling.
We show that the StyleGAN prior allows for generalisation to different expressions, hairstyles and backgrounds. This produces high-quality photorealistic results for in-the-wild images and significantly outperforms existing methods. Our approach can edit the illumination and pose simultaneously, and runs at interactive rates.}, author = {Mallikarjun, B. R. and Tewari, Ayush and Dib, Abdallah and Weyrich, Tim and Bickel, Bernd and Seidel, Hans Peter and Pfister, Hanspeter and Matusik, Wojciech and Chevallier, Louis and Elgharib, Mohamed A. and Theobalt, Christian}, issn = {15577368}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {Association for Computing Machinery}, title = {{PhotoApp: Photorealistic appearance editing of head portraits}}, doi = {10.1145/3450626.3459765}, volume = {40}, year = {2021}, } @article{9820, abstract = {Material appearance hinges on material reflectance properties but also surface geometry and illumination. The unlimited number of potential combinations between these factors makes understanding and predicting material appearance a very challenging task. In this work, we collect a large-scale dataset of perceptual ratings of appearance attributes with more than 215,680 responses for 42,120 distinct combinations of material, shape, and illumination. The goal of this dataset is twofold. First, we analyze for the first time the effects of illumination and geometry in material perception across such a large collection of varied appearances. We connect our findings to those of the literature, discussing how previous knowledge generalizes across very diverse materials, shapes, and illuminations. Second, we use the collected dataset to train a deep learning architecture for predicting perceptual attributes that correlate with human judgments. We demonstrate the consistent and robust behavior of our predictor in various challenging scenarios, which, for the first time, enables estimating perceived material attributes from general 2D images. Since our predictor relies on the final appearance in an image, it can compare appearance properties across different geometries and illumination conditions. Finally, we demonstrate several applications that use our predictor, including appearance reproduction using 3D printing, BRDF editing by integrating our predictor in a differentiable renderer, illumination design, or material recommendations for scene design.}, author = {Serrano, Ana and Chen, Bin and Wang, Chao and Piovarci, Michael and Seidel, Hans Peter and Didyk, Piotr and Myszkowski, Karol}, issn = {15577368}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {Association for Computing Machinery}, title = {{The effect of shape and illumination on material perception: Model and applications}}, doi = {10.1145/3450626.3459813}, volume = {40}, year = {2021}, } @inproceedings{9957, abstract = {The reflectance field of a face describes the reflectance properties responsible for complex lighting effects including diffuse, specular, inter-reflection and self shadowing. Most existing methods for estimating the face reflectance from a monocular image assume faces to be diffuse with very few approaches adding a specular component. This still leaves out important perceptual aspects of reflectance as higher-order global illumination effects and self-shadowing are not modeled. 
We present a new neural representation for face reflectance where we can estimate all components of the reflectance responsible for the final appearance from a single monocular image. Instead of modeling each component of the reflectance separately using parametric models, our neural representation allows us to generate a basis set of faces in a geometric deformation-invariant space, parameterized by the input light direction, viewpoint and face geometry. We learn to reconstruct this reflectance field of a face just from a monocular image, which can be used to render the face from any viewpoint in any light condition. Our method is trained on a light-stage training dataset, which captures 300 people illuminated with 150 light conditions from 8 viewpoints. We show that our method outperforms existing monocular reflectance reconstruction methods in terms of photorealism, due to better capture of physical primitives such as sub-surface scattering, specularities, self-shadows and other higher-order effects.}, author = {B R, Mallikarjun and Tewari, Ayush and Oh, Tae-Hyun and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Elgharib, Mohamed and Theobalt, Christian}, booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition}, isbn = {978-166544509-2}, issn = {1063-6919}, location = {Nashville, TN, United States; Virtual}, pages = {4791--4800}, publisher = {IEEE}, title = {{Monocular reconstruction of neural face reflectance fields}}, doi = {10.1109/CVPR46437.2021.00476}, year = {2021}, } @article{9547, abstract = {With the wider availability of full-color 3D printers, color-accurate 3D-print preparation has received increased attention. A key challenge lies in the inherent translucency of commonly used print materials that blurs out details of the color texture. Previous work tries to compensate for these scattering effects through strategic assignment of colored primary materials to printer voxels. To date, the highest-quality approach uses iterative optimization that relies on computationally expensive Monte Carlo light transport simulation to predict the surface appearance from subsurface scattering within a given print material distribution; that optimization, however, takes on the order of days on a single machine. In our work, we dramatically speed up the process by replacing the light transport simulation with a data-driven approach. Leveraging a deep neural network to predict the scattering within a highly heterogeneous medium, our method performs around two orders of magnitude faster than Monte Carlo rendering while yielding optimization results of similar quality level. The network is based on an established method from atmospheric cloud rendering, adapted to our domain and extended by a physically motivated weight sharing scheme that substantially reduces the network size. We analyze its performance in an end-to-end print preparation pipeline and compare quality and runtime to alternative approaches, and demonstrate its generalization to unseen geometry and material values.
This enables, for the first time, full heterogeneous material optimization for 3D-print preparation within time frames on the order of the actual printing time.}, author = {Rittig, Tobias and Sumin, Denis and Babaei, Vahid and Didyk, Piotr and Voloboy, Alexey and Wilkie, Alexander and Bickel, Bernd and Myszkowski, Karol and Weyrich, Tim and Křivánek, Jaroslav}, issn = {1467-8659}, journal = {Computer Graphics Forum}, number = {2}, pages = {205--219}, publisher = {Wiley}, title = {{Neural acceleration of scattering-aware color 3D printing}}, doi = {10.1111/cgf.142626}, volume = {40}, year = {2021}, } @article{10574, abstract = {The understanding of material appearance perception is a complex problem due to interactions between material reflectance, surface geometry, and illumination. Recently, Serrano et al. collected the largest dataset to date with subjective ratings of material appearance attributes, including glossiness, metallicness, sharpness and contrast of reflections. In this work, we make use of their dataset to investigate for the first time the impact of the interactions between illumination, geometry, and eight different material categories on perceived appearance attributes. After an initial analysis, we select for further analysis the four material categories that cover the largest range for all perceptual attributes: fabric, plastic, ceramic, and metal. Using a cumulative link mixed model (CLMM) for robust regression, we discover interactions between these material categories and four representative illuminations and object geometries. We believe that our findings contribute to expanding the knowledge on material appearance perception and can be useful for many applications, such as scene design, where any particular material in a given shape can be aligned with dominant classes of illumination, so that a desired strength of appearance attributes can be achieved.}, author = {Chen, Bin and Wang, Chao and Piovarci, Michael and Seidel, Hans Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana}, issn = {1432-2315}, journal = {Visual Computer}, number = {12}, pages = {2975--2987}, publisher = {Springer Nature}, title = {{The effect of geometry and illumination on appearance perception of different material categories}}, doi = {10.1007/s00371-021-02227-x}, volume = {37}, year = {2021}, } @article{10184, abstract = {We introduce a novel technique to automatically decompose an input object’s volume into a set of parts that can be represented by two opposite height fields. Such decomposition enables the manufacturing of individual parts using two-piece reusable rigid molds. Our decomposition strategy relies on a new energy formulation that utilizes a pre-computed signal on the mesh volume representing the accessibility for a predefined set of extraction directions. Thanks to this novel formulation, our method allows for efficient optimization of a fabrication-aware partitioning of volumes in a completely automatic way.
We demonstrate the efficacy of our approach by generating valid volume partitionings for a wide range of complex objects and physically reproducing several of them.}, author = {Alderighi, Thomas and Malomo, Luigi and Bickel, Bernd and Cignoni, Paolo and Pietroni, Nico}, issn = {1557-7368 }, journal = {ACM Transactions on Graphics}, number = {6}, publisher = {Association for Computing Machinery}, title = {{Volume decomposition for two-piece rigid casting}}, doi = {10.1145/3478513.3480555}, volume = {40}, year = {2021}, } @article{9817, abstract = {Elastic bending of initially flat slender elements allows the realization and economic fabrication of intriguing curved shapes. In this work, we derive an intuitive but rigorous geometric characterization of the design space of plane elastic rods with variable stiffness. It enables designers to determine which shapes are physically viable with active bending by visual inspection alone. Building on these insights, we propose a method for efficiently designing the geometry of a flat elastic rod that realizes a target equilibrium curve, which only requires solving a linear program. We implement this method in an interactive computational design tool that gives feedback about the feasibility of a design, and computes the geometry of the structural elements necessary to realize it within an instant. The tool also offers an iterative optimization routine that improves the fabricability of a model while modifying it as little as possible. In addition, we use our geometric characterization to derive an algorithm for analyzing and recovering the stability of elastic curves that would otherwise snap out of their unstable equilibrium shapes by buckling. We show the efficacy of our approach by designing and manufacturing several physical models that are assembled from flat elements.}, author = {Hafner, Christian and Bickel, Bernd}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, keywords = {Computing methodologies, shape modeling, modeling and simulation, theory of computation, computational geometry, mathematics of computing, mathematical optimization}, location = {Virtual}, number = {4}, publisher = {Association for Computing Machinery}, title = {{The design space of plane elastic curves}}, doi = {10.1145/3450626.3459800}, volume = {40}, year = {2021}, } @article{9208, abstract = {Bending-active structures are able to efficiently produce complex curved shapes from flat panels. The desired deformation of the panels derives from the proper selection of their elastic properties. Optimized panels, called FlexMaps, are designed such that, once they are bent and assembled, the resulting static equilibrium configuration matches a desired input 3D shape. The FlexMaps elastic properties are controlled by locally varying spiraling geometric mesostructures, which are optimized in size and shape to match specific bending requests, namely the global curvature of the target shape. The design pipeline starts from a quad mesh representing the input 3D shape, which defines the edge size and the total amount of spirals: every quad will embed one spiral. Then, an optimization algorithm tunes the geometry of the spirals by using a simplified pre-computed rod model. This rod model is derived from a non-linear regression algorithm which approximates the non-linear behavior of solid FEM spiral models subject to hundreds of load combinations. 
This innovative pipeline has been applied to the project of a lightweight plywood pavilion named FlexMaps Pavilion, which is a single-layer piecewise twisted arch that fits a bounding box of 3.90x3.96x3.25 meters. This case study serves to test the applicability of this methodology at the architectural scale. The structure is validated via FE analyses and the fabrication of the full-scale prototype.}, author = {Laccone, Francesco and Malomo, Luigi and Perez Rodriguez, Jesus and Pietroni, Nico and Ponchio, Federico and Bickel, Bernd and Cignoni, Paolo}, issn = {2523-3971}, journal = {SN Applied Sciences}, number = {9}, publisher = {Springer Nature}, title = {{A bending-active twisted-arch plywood structure: Computational design and fabrication of the FlexMaps Pavilion}}, doi = {10.1007/s42452-020-03305-w}, volume = {2}, year = {2020}, } @article{7220, abstract = {BACKGROUND: The introduction of image-guided methods to bypass surgery has resulted in optimized preoperative identification of the recipients and excellent patency rates. However, the recently presented methods have also been resource-consuming. In the present study, we have reported a cost-efficient planning workflow for extracranial-intracranial (EC-IC) revascularization combined with transdural indocyanine green videoangiography (tICG-VA). METHODS: We performed a retrospective review at a single tertiary referral center from 2011 to 2018. A novel software-derived workflow was applied for 25 of 92 bypass procedures during the study period. The precision and accuracy were assessed using tICG-VA identification of the cortical recipients and a comparison of the virtual and actual data. The data from a control group of 25 traditionally planned procedures were also matched. RESULTS: The intraoperative transfer time of the calculated coordinates averaged 0.8 minute (range, 0.4-1.9 minutes). The definitive recipients matched the targeted branches in 80%, and a neighboring branch was used in 16%. Our workflow led to a significant craniotomy size reduction in the study group compared with that in the control group (P = 0.005). tICG-VA was successfully applied in 19 cases. An average of 2 potential recipient arteries were identified transdurally, resulting in tailored durotomy and 3 craniotomy adjustments. Follow-up patency results were available for 49 bypass surgeries, comprising 54 grafts. The overall patency rate was 91% at a median follow-up period of 26 months. No significant difference was found in the patency rate between the study and control groups (P = 0.317). CONCLUSIONS: Our clinical results have validated the presented planning and surgical workflow and support the routine implementation of tICG-VA for recipient identification before durotomy.}, author = {Dodier, Philippe and Auzinger, Thomas and Mistelbauer, Gabriel and Wang, Wei Te and Ferraz-Leite, Heber and Gruber, Andreas and Marik, Wolfgang and Winter, Fabian and Fischer, Gerrit and Frischer, Josa M. and Bavinzski, Gerhard}, issn = {1878-8769}, journal = {World Neurosurgery}, number = {2}, pages = {e892--e902}, publisher = {Elsevier}, title = {{Novel software-derived workflow in extracranial–intracranial bypass surgery validated by transdural indocyanine green videoangiography}}, doi = {10.1016/j.wneu.2019.11.038}, volume = {134}, year = {2020}, } @article{7218, abstract = {The combined resection of skull-infiltrating tumours and immediate cranioplastic reconstruction predominantly relies on freehand-moulded solutions.
Techniques that enable this procedure to be performed easily in routine clinical practice would be useful. A cadaveric study was developed in which a new software tool was used to perform single-stage reconstructions with prefabricated implants after the resection of skull-infiltrating pathologies. A novel 3D visualization and interaction framework was developed to create 10 virtual craniotomies in five cadaveric specimens. Polyether ether ketone (PEEK) implants were manufactured according to the bone defects. The image-guided craniotomy was reconstructed with PEEK and compared to polymethyl methacrylate (PMMA). Navigational accuracy and surgical precision were assessed. The PEEK workflow resulted in up to 10-fold shorter reconstruction times than the standard technique. Surgical precision was reflected by the mean 1.1 ± 0.29 mm distance between the virtual and real craniotomy, with submillimetre precision in 50% of cases. Assessment of the global offset between virtual and actual craniotomy revealed an average shift of 4.5 ± 3.6 mm. The results validated the ‘elective single-stage cranioplasty’ technique as a state-of-the-art virtual planning method and surgical workflow. This patient-tailored workflow could significantly reduce surgical times compared to the traditional, intraoperative acrylic moulding method and may be an option for the reconstruction of bone defects in the craniofacial region.}, author = {Dodier, Philippe and Winter, Fabian and Auzinger, Thomas and Mistelbauer, Gabriel and Frischer, Josa M. and Wang, Wei Te and Mallouhi, Ammar and Marik, Wolfgang and Wolfsberger, Stefan and Reissig, Lukas and Hammadi, Firas and Matula, Christian and Baumann, Arnulf and Bavinzski, Gerhard}, issn = {1399-0020}, journal = {International Journal of Oral and Maxillofacial Surgery}, number = {8}, pages = {P1007--1015}, publisher = {Elsevier}, title = {{Single-stage bone resection and cranioplastic reconstruction: Comparison of a novel software-derived PEEK workflow with the standard reconstructive method}}, doi = {10.1016/j.ijom.2019.11.011}, volume = {49}, year = {2020}, } @phdthesis{8386, abstract = {Form versus function is a long-standing debate in various design-related fields, such as architecture as well as graphic and industrial design. A good design that balances form and function often requires considerable human effort and collaboration among experts from different professional fields. Computational design tools provide a new paradigm for designing functional objects. In computational design, form and function are represented as mathematical quantities; with the help of numerical and combinatorial algorithms, these tools can assist even novice users in designing versatile models that exhibit their desired functionality. This thesis presents three disparate research studies on the computational design of functional objects: The appearance of 3D prints—we optimize the volumetric material distribution for faithfully replicating colored surface texture in 3D printing; the dynamic motion of mechanical structures—our design system helps the novice user to retarget various mechanical templates with different functionality to complex 3D shapes; and a more abstract functionality, multistability—our algorithm automatically generates models that exhibit multiple stable target poses. For each of these cases, our computational design tools not only ensure the functionality of the results but also permit the user aesthetic freedom over the form.
Moreover, fabrication constraints are taken into account, allowing physical realizations to be created immediately via 3D printing or laser cutting.}, author = {Zhang, Ran}, issn = {2663-337X}, pages = {148}, publisher = {Institute of Science and Technology Austria}, title = {{Structure-aware computational design and its application to 3D printable volume scattering, mechanism, and multistability}}, doi = {10.15479/AT:ISTA:8386}, year = {2020}, } @phdthesis{8366, abstract = {Fabrication of curved shells plays an important role in modern design, industry, and science. Among their remarkable properties are, for example, the aesthetics of organic shapes, the ability to evenly distribute loads, and efficient flow separation. They find applications across vast length scales ranging from skyscraper architecture to microscopic devices. But, at the same time, the design of curved shells and their manufacturing process pose a variety of challenges. In this thesis, they are addressed from several perspectives. In particular, this thesis presents approaches based on the transformation of initially flat sheets into the target curved surfaces. This involves problems of interactive design of shells with nontrivial mechanical constraints, inverse design of complex structural materials, and data-driven modeling of delicate and time-dependent physical properties. At the same time, two newly developed self-morphing mechanisms targeting flat-to-curved transformation are presented. In architecture, doubly curved surfaces can be realized as cold bent glass panelizations. Originally flat glass panels are bent into frames and remain stressed. This is a cost-efficient fabrication approach compared to hot bending, in which glass panels are shaped plastically. However, such constructions are prone to breaking during bending, and it is highly nontrivial to navigate the design space, keeping the panels fabricable and aesthetically pleasing at the same time. We introduce an interactive design system for cold bent glass façades, whereas previously even offline optimization for such scenarios had not been sufficiently developed. Our method is based on a deep learning approach providing quick and high-precision estimation of glass panel shape and stress while handling the shape multimodality. Fabrication of smaller objects at scales below 1 m can also greatly benefit from shaping originally flat sheets. In this respect, we designed new self-morphing shell mechanisms transforming from an initial flat state to a doubly curved state with high precision and detail. Our so-called CurveUps demonstrate the encoding of geometric information into the shell. Furthermore, we explored the frontiers of programmable materials and showed how temporal information can additionally be encoded into a flat shell. This allows prescribing deformation sequences for doubly curved surfaces and, thus, facilitates self-collision avoidance, enabling complex shapes and functionalities otherwise impossible.
Both of these methods include inverse design tools that keep the user in the design loop.}, author = {Guseinov, Ruslan}, isbn = {978-3-99078-010-7}, issn = {2663-337X}, keywords = {computer-aided design, shape modeling, self-morphing, mechanical engineering}, pages = {118}, publisher = {Institute of Science and Technology Austria}, title = {{Computational design of curved thin shells: From glass façades to programmable matter}}, doi = {10.15479/AT:ISTA:8366}, year = {2020}, } @article{8562, abstract = {Cold bent glass is a promising and cost-efficient method for realizing doubly curved glass facades. Such facades are produced by attaching planar glass sheets to curved frames and require keeping the occurring stress within safe limits. However, it is very challenging to navigate the design space of cold bent glass panels due to the fragility of the material, which impedes form-finding for practically feasible and aesthetically pleasing cold bent glass facades. We propose an interactive, data-driven approach for designing cold bent glass facades that can be seamlessly integrated into a typical architectural design pipeline. Our method allows non-expert users to interactively edit a parametric surface while providing real-time feedback on the deformed shape and maximum stress of cold bent glass panels. Designs are automatically refined to minimize several fairness criteria while maximal stresses are kept within glass limits. We achieve interactive frame rates by using a differentiable Mixture Density Network trained from more than a million simulations. Given a curved boundary, our regression model is capable of handling multistable configurations and accurately predicting the equilibrium shape of the panel and its corresponding maximal stress. We show that predictions are highly accurate and validate our results with a physical realization of a cold bent glass surface.}, author = {Gavriil, Konstantinos and Guseinov, Ruslan and Perez Rodriguez, Jesus and Pellis, Davide and Henderson, Paul M and Rist, Florian and Pottmann, Helmut and Bickel, Bernd}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, number = {6}, publisher = {Association for Computing Machinery}, title = {{Computational design of cold bent glass façades}}, doi = {10.1145/3414685.3417843}, volume = {39}, year = {2020}, } @misc{8375, abstract = {Supplementary movies showing the following sequences for spatio-temporally programmed shells: input geometry and actuation time landscape; comparison of morphing processes from a camera recording and a simulation; final actuated shape.}, author = {Guseinov, Ruslan}, publisher = {Institute of Science and Technology Austria}, title = {{Supplementary data for "Computational design of curved thin shells: from glass façades to programmable matter"}}, doi = {10.15479/AT:ISTA:8375}, year = {2020}, } @misc{8761, author = {Guseinov, Ruslan}, publisher = {Institute of Science and Technology Austria}, title = {{Supplementary data for "Computational design of cold bent glass façades"}}, doi = {10.15479/AT:ISTA:8761}, year = {2020}, } @article{7262, abstract = {Advances in shape-morphing materials, such as hydrogels, shape-memory polymers, and light-responsive polymers, have enabled prescribing self-directed deformations of initially flat geometries. However, most proposed solutions evolve towards a target geometry without considering time-dependent actuation paths.
To achieve more complex geometries and avoid self-collisions, it is critical to encode a spatial and temporal shape evolution within the initially flat shell. Recent realizations of time-dependent morphing are limited to the actuation of a few discrete hinges and cannot form doubly curved surfaces. Here, we demonstrate a method for encoding temporal shape evolution in architected shells that assume complex shapes and doubly curved geometries. The shells are non-periodic tessellations of pre-stressed contractile unit cells that soften in water at rates prescribed locally by mesostructure geometry. The ensuing midplane contraction is coupled to the formation of encoded curvatures. We propose an inverse design tool based on a data-driven model for unit cells’ temporal responses.}, author = {Guseinov, Ruslan and McMahan, Connor and Perez Rodriguez, Jesus and Daraio, Chiara and Bickel, Bernd}, issn = {2041-1723}, journal = {Nature Communications}, keywords = {Design, Synthesis and processing, Mechanical engineering, Polymers}, publisher = {Springer Nature}, title = {{Programming temporal morphing of self-actuated shells}}, doi = {10.1038/s41467-019-14015-2}, volume = {11}, year = {2020}, } @article{8766, abstract = {The “procedural” approach to animating ocean waves is the dominant algorithm for animating larger bodies of water in interactive applications as well as in off-line productions — it provides high visual quality with a low computational demand. In this paper, we widen the applicability of procedural water wave animation with an extension that guarantees the satisfaction of boundary conditions imposed by terrain while still approximating physical wave behavior. In combination with a particle system that models wave breaking, foam, and spray, this allows us to naturally model waves interacting with beaches and rocks. Our system is able to animate waves at large scales at interactive frame rates on a commodity PC.}, author = {Jeschke, Stefan and Hafner, Christian and Chentanez, Nuttapong and Macklin, Miles and Müller-Fischer, Matthias and Wojtan, Christopher J}, journal = {Computer Graphics Forum}, location = {Online Symposium}, number = {8}, pages = {47--54}, publisher = {Wiley}, title = {{Making procedural water waves boundary-aware}}, doi = {10.1111/cgf.14100}, volume = {39}, year = {2020}, } @article{6660, abstract = {Commercially available full-color 3D printing allows for detailed control of material deposition in a volume, but an exact reproduction of a target surface appearance is hampered by the strong subsurface scattering that causes nontrivial volumetric cross-talk at the print surface. Previous work showed how an iterative optimization scheme based on accumulating absorptive materials at the surface can be used to find a volumetric distribution of print materials that closely approximates a given target appearance. In this work, we first revisit the assumption that pushing the absorptive materials to the surface results in minimal volumetric cross-talk. We design a full-fledged optimization on a small domain for this task and confirm this previously reported heuristic. Then, we extend the above approach, which is critically limited to color reproduction on planar surfaces, to arbitrary 3D shapes. Our method enables high-fidelity color texture reproduction on 3D prints by effectively compensating for internal light scattering within arbitrarily shaped objects.
In addition, we propose a content-aware gamut mapping that significantly improves color reproduction for the pathological case of thin geometric features. Using a wide range of sample objects with complex textures and geometries, we demonstrate color reproduction whose fidelity is superior to state-of-the-art drivers for color 3D printers.}, author = {Sumin, Denis and Weyrich, Tim and Rittig, Tobias and Babaei, Vahid and Nindel, Thomas and Wilkie, Alexander and Didyk, Piotr and Bickel, Bernd and Křivánek, Jaroslav and Myszkowski, Karol}, issn = {0730-0301}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {ACM}, title = {{Geometry-aware scattering compensation for 3D printing}}, doi = {10.1145/3306346.3322992}, volume = {38}, year = {2019}, } @article{6650, abstract = {We propose a novel technique for the automatic design of molds to cast highly complex shapes. The technique generates composite, two-piece molds. Each mold piece is made up of a hard plastic shell and a flexible silicone part. Thanks to the thin, soft, and smartly shaped silicone part, which is kept in place by a hard plastic shell, we can cast objects of unprecedented complexity. An innovative algorithm based on a volumetric analysis defines the layout of the internal cuts in the silicone mold part. Our approach can robustly handle thin protruding features and intertwined topologies that have caused previous methods to fail. We compare our results with state of the art techniques, and we demonstrate the casting of shapes with extremely complex geometry.}, author = {Alderighi, Thomas and Malomo, Luigi and Giorgi, Daniela and Bickel, Bernd and Cignoni, Paolo and Pietroni, Nico}, issn = {0730-0301}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {ACM}, title = {{Volume-aware design of composite molds}}, doi = {10.1145/3306346.3322981}, volume = {38}, year = {2019}, } @inproceedings{9261, abstract = {Bending-active structures are able to efficiently produce complex curved shapes starting from flat panels. The desired deformation of the panels derives from the proper selection of their elastic properties. Optimized panels, called FlexMaps, are designed such that, once they are bent and assembled, the resulting static equilibrium configuration matches a desired input 3D shape. The FlexMaps elastic properties are controlled by locally varying spiraling geometric mesostructures, which are optimized in size and shape to match the global curvature (i.e., bending requests) of the target shape. The design pipeline starts from a quad mesh representing the input 3D shape, which defines the edge size and the total amount of spirals: every quad will embed one spiral. Then, an optimization algorithm tunes the geometry of the spirals by using a simplified pre-computed rod model. This rod model is derived from a non-linear regression algorithm which approximates the non-linear behavior of solid FEM spiral models subject to hundreds of load combinations. 
This innovative pipeline has been applied to the project of a lightweight plywood pavilion named FlexMaps Pavilion, which is a single-layer piecewise twisted arc that fits a bounding box of 3.90x3.96x3.25 meters.}, author = {Laccone, Francesco and Malomo, Luigi and Perez Rodriguez, Jesus and Pietroni, Nico and Ponchio, Federico and Bickel, Bernd and Cignoni, Paolo}, booktitle = {IASS Symposium 2019 - 60th Anniversary Symposium of the International Association for Shell and Spatial Structures; Structural Membranes 2019 - 9th International Conference on Textile Composites and Inflatable Structures, FORM and FORCE}, isbn = {9788412110104}, issn = {2518-6582}, location = {Barcelona, Spain}, pages = {509--515}, publisher = {International Center for Numerical Methods in Engineering}, title = {{FlexMaps Pavilion: A twisted arc made of mesostructured flat flexible panels}}, year = {2019}, } @misc{7154, author = {Guseinov, Ruslan}, publisher = {Institute of Science and Technology Austria}, title = {{Supplementary data for "Programming temporal morphing of self-actuated shells"}}, doi = {10.15479/AT:ISTA:7154}, year = {2019}, } @article{7117, abstract = {We propose a novel generic shape optimization method for CAD models based on the eXtended Finite Element Method (XFEM). Our method works directly on the intersection between the model and a regular simulation grid, without the need to mesh or remesh, thus removing a bottleneck of classical shape optimization strategies. This is made possible by a novel hierarchical integration scheme that accurately integrates finite element quantities with sub-element precision. For optimization, we efficiently compute analytical shape derivatives of the entire framework, from model intersection to integration rule generation and XFEM simulation. Moreover, we describe a differentiable projection of shape parameters onto a constraint manifold spanned by user-specified shape preservation, consistency, and manufacturability constraints. We demonstrate the utility of our approach by optimizing mass distribution, strength-to-weight ratio, and inverse elastic shape design objectives directly on parameterized 3D CAD models.}, author = {Hafner, Christian and Schumacher, Christian and Knoop, Espen and Auzinger, Thomas and Bickel, Bernd and Bächer, Moritz}, issn = {0730-0301}, journal = {ACM Transactions on Graphics}, number = {6}, publisher = {ACM}, title = {{X-CAD: Optimizing CAD Models with Extended Finite Elements}}, doi = {10.1145/3355089.3356576}, volume = {38}, year = {2019}, } @article{304, abstract = {Additive manufacturing has recently seen drastic improvements in resolution, making it now possible to fabricate features at scales of hundreds or even dozens of nanometers, which previously required very expensive lithographic methods. As a result, additive manufacturing now seems poised for optical applications, including those relevant to computer graphics, such as material design, as well as display and imaging applications. In this work, we explore the use of additive manufacturing for generating structural colors, where the structures are designed using a fabrication-aware optimization process. This requires a combination of full-wave simulation, a feasible parameterization of the design space, and a tailored optimization procedure. Many of these components should be re-usable for the design of other optical structures at this scale. We show initial results of material samples fabricated based on our designs. 
While these samples suffer from the prototype character of state-of-the-art fabrication hardware, we believe they clearly demonstrate the potential of additive nanofabrication for structural colors and other graphics applications.}, author = {Auzinger, Thomas and Heidrich, Wolfgang and Bickel, Bernd}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {ACM}, title = {{Computational design of nanostructural color for additive manufacturing}}, doi = {10.1145/3197517.3201376}, volume = {37}, year = {2018}, } @article{12, abstract = {Molding is a popular mass production method, in which the initial expenses for the mold are offset by the low per-unit production cost. However, the physical fabrication constraints of the molding technique commonly restrict the shape of moldable objects. For a complex shape, a decomposition of the object into moldable parts is a common strategy to address these constraints, with plastic model kits being a popular and illustrative example. However, conducting such a decomposition requires considerable expertise, and it depends on the technical aspects of the fabrication technique, as well as aesthetic considerations. We present an interactive technique to create such decompositions for two-piece molding, in which each part of the object is cast between two rigid mold pieces. Given the surface description of an object, we decompose its thin-shell equivalent into moldable parts by first performing a coarse decomposition and then utilizing an active contour model for the boundaries between individual parts. Formulated as an optimization problem, the movement of the contours is guided by an energy reflecting fabrication constraints to ensure the moldability of each part. Simultaneously, the user is provided with editing capabilities to enforce aesthetic guidelines. Our interactive interface provides control of the contour positions by allowing, for example, the alignment of part boundaries with object features. Our technique enables a novel workflow, as it empowers novice users to explore the design space, and it generates fabrication-ready two-piece molds that can be used either for casting or industrial injection molding of free-form objects.}, author = {Nakashima, Kazutaka and Auzinger, Thomas and Iarussi, Emmanuel and Zhang, Ran and Igarashi, Takeo and Bickel, Bernd}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {ACM}, title = {{CoreCavity: Interactive shell decomposition for fabrication with two-piece rigid molds}}, doi = {10.1145/3197517.3201341}, volume = {37}, year = {2018}, } @article{398, abstract = {Objective: To report long-term results after Pipeline Embolization Device (PED) implantation, characterize complex and standard aneurysms comprehensively, and introduce a modified flow disruption scale. Methods: We retrospectively reviewed a consecutive series of 40 patients harboring 59 aneurysms treated with 54 PEDs. Aneurysm complexity was assessed using our proposed classification. Immediate angiographic results were analyzed using previously published grading scales and our novel flow disruption scale. Results: According to our new definition, 46 (78%) aneurysms were classified as complex. Most PED interventions were performed in the paraophthalmic and cavernous internal carotid artery segments. Excellent neurologic outcome (modified Rankin Scale 0 and 1) was observed in 94% of patients. Our data showed low permanent procedure-related mortality (0%) and morbidity (3%) rates.
Long-term angiographic follow-up showed complete occlusion in 81% and near-total obliteration in a further 14%. Complete obliteration after deployment of a single PED was achieved in all standard aneurysms with 1-year follow-up. Our new scale was an independent predictor of aneurysm occlusion in a multivariable analysis. All aneurysms with a high flow disruption grade showed complete occlusion at follow-up regardless of PED number or aneurysm complexity. Conclusions: Treatment with the PED should be recognized as a primary management strategy for a highly selected cohort with predominantly complex intracranial aneurysms. We further show that a priori assessment of aneurysm complexity and our new postinterventional angiographic flow disruption scale predict occlusion probability and may help to determine the adequate number of per-aneurysm devices.}, author = {Dodier, Philippe and Frischer, Josa and Wang, Wei and Auzinger, Thomas and Mallouhi, Ammar and Serles, Wolfgang and Gruber, Andreas and Knosp, Engelbert and Bavinzski, Gerhard}, journal = {World Neurosurgery}, pages = {e568--e578}, publisher = {Elsevier}, title = {{Immediate flow disruption as a prognostic factor after flow diverter treatment: Long-term experience with the pipeline embolization device}}, doi = {10.1016/j.wneu.2018.02.096}, volume = {13}, year = {2018}, } @article{4, abstract = {We present a data-driven technique to instantly predict how fluid flows around various three-dimensional objects. Such simulation is useful for computational fabrication and engineering, but is usually computationally expensive since it requires solving the Navier-Stokes equation for many time steps. To accelerate the process, we propose a machine learning framework which predicts aerodynamic forces and velocity and pressure fields given a three-dimensional shape input. Handling detailed free-form three-dimensional shapes in a data-driven framework is challenging because machine learning approaches usually require a consistent parametrization of input and output. We present a novel PolyCube maps-based parametrization that can be computed for three-dimensional shapes at interactive rates. This allows us to efficiently learn the nonlinear response of the flow using a Gaussian process regression. We demonstrate the effectiveness of our approach for the interactive design and optimization of a car body.}, author = {Umetani, Nobuyuki and Bickel, Bernd}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {ACM}, title = {{Learning three-dimensional flow for interactive aerodynamic design}}, doi = {10.1145/3197517.3201325}, volume = {37}, year = {2018}, } @article{13, abstract = {We propose a new method for fabricating digital objects through reusable silicone molds. Molds are generated by casting liquid silicone into custom 3D printed containers called metamolds. Metamolds automatically define the cuts that are needed to extract the cast object from the silicone mold. The shape of metamolds is designed through a novel segmentation technique, which takes into account both geometric and topological constraints involved in the process of mold casting. Our technique is simple, does not require changing the shape or topology of the input objects, and only requires off-the-shelf materials and technologies. We successfully tested our method on a set of challenging examples with complex shapes and rich geometric detail.
}, author = {Alderighi, Thomas and Malomo, Luigi and Giorgi, Daniela and Pietroni, Nico and Bickel, Bernd and Cignoni, Paolo}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {ACM}, title = {{Metamolds: Computational design of silicone molds}}, doi = {10.1145/3197517.3201381}, volume = {37}, year = {2018}, } @article{5976, abstract = {We propose FlexMaps, a novel framework for fabricating smooth shapes out of flat, flexible panels with tailored mechanical properties. We start by mapping the 3D surface onto a 2D domain as in traditional UV mapping to design a set of deformable flat panels called FlexMaps. For these panels, we design and obtain specific mechanical properties such that, once they are assembled, the static equilibrium configuration matches the desired 3D shape. FlexMaps can be fabricated from an almost rigid material, such as wood or plastic, and are made flexible in a controlled way by using computationally designed spiraling microstructures.}, author = {Malomo, Luigi and Perez Rodriguez, Jesus and Iarussi, Emmanuel and Pietroni, Nico and Miguel, Eder and Cignoni, Paolo and Bickel, Bernd}, issn = {0730-0301}, journal = {ACM Transactions on Graphics}, number = {6}, publisher = {Association for Computing Machinery (ACM)}, title = {{FlexMaps: Computational design of flat flexible shells for shaping 3D objects}}, doi = {10.1145/3272127.3275076}, volume = {37}, year = {2018}, } @article{6003, abstract = {Digital fabrication devices are powerful tools for creating tangible reproductions of 3D digital models. Most available printing technologies aim at producing an accurate copy of a tridimensional shape. However, fabrication technologies can also be used to create a stylistic representation of a digital shape. We refer to this class of methods as ‘stylized fabrication methods’. These methods abstract geometric and physical features of a given shape to create an unconventional representation, to produce an optical illusion or to devise a particular interaction with the fabricated model. In this state-of-the-art report, we classify and overview this broad and emerging class of approaches and also propose possible directions for future research.}, author = {Bickel, Bernd and Cignoni, Paolo and Malomo, Luigi and Pietroni, Nico}, issn = {0167-7055}, journal = {Computer Graphics Forum}, number = {6}, pages = {325--342}, publisher = {Wiley}, title = {{State of the art on stylized fabrication}}, doi = {10.1111/cgf.13327}, volume = {37}, year = {2018}, } @inproceedings{6195, abstract = {In the context of robotic manipulation and grasping, the shift from a view that is static (force closure of a single posture) and contact-deprived (only contact for force closure is allowed, everything else is an obstacle) towards a view that is dynamic and contact-rich (soft manipulation) has led to an increased interest in soft hands. These hands can easily exploit environmental constraints and object surfaces without risk, and safely interact with humans, but also present some challenges. Designing them is difficult, as is predicting, modelling, and “programming” their interactions with the objects and the environment. This paper tackles the problem of simulating them in a fast and effective way, leveraging novel and existing simulation technologies.
We present a triple-layered simulation framework where dynamic properties such as stiffness are determined from slow but accurate FEM simulation data once, and then condensed into a lumped parameter model that can be used to fast simulate soft fingers and soft hands. We apply our approach to the simulation of soft pneumatic fingers.}, author = {Pozzi, Maria and Miguel Villalba, Eder and Deimel, Raphael and Malvezzi, Monica and Bickel, Bernd and Brock, Oliver and Prattichizzo, Domenico}, isbn = {9781538630815}, location = {Brisbane, Australia}, publisher = {IEEE}, title = {{Efficient FEM-based simulation of soft robots modeled as kinematic chains}}, doi = {10.1109/icra.2018.8461106}, year = {2018}, } @article{486, abstract = {Color texture reproduction in 3D printing commonly ignores volumetric light transport (cross-talk) between surface points on a 3D print. Such light diffusion leads to significant blur of details and color bleeding, and is particularly severe for highly translucent resin-based print materials. Given their widely varying scattering properties, this cross-talk between surface points strongly depends on the internal structure of the volume surrounding each surface point. Existing scattering-aware methods use simplified models for light diffusion, and often accept the visual blur as an immutable property of the print medium. In contrast, our work counteracts heterogeneous scattering to obtain the impression of a crisp albedo texture on top of the 3D print, by optimizing for a fully volumetric material distribution that preserves the target appearance. Our method employs an efficient numerical optimizer on top of a general Monte-Carlo simulation of heterogeneous scattering, supported by a practical calibration procedure to obtain scattering parameters from a given set of printer materials. Despite the inherent translucency of the medium, we reproduce detailed surface textures on 3D prints. We evaluate our system using a commercial, five-tone 3D print process and compare against the printer’s native color texturing mode, demonstrating that our method preserves high-frequency features well without having to compromise on color gamut.}, author = {Elek, Oskar and Sumin, Denis and Zhang, Ran and Weyrich, Tim and Myszkowski, Karol and Bickel, Bernd and Wilkie, Alexander and Krivanek, Jaroslav}, issn = {07300301}, journal = {ACM Transactions on Graphics}, number = {6}, publisher = {ACM}, title = {{Scattering-aware texture reproduction for 3D printing}}, doi = {10.1145/3130800.3130890}, volume = {36}, year = {2017}, } @inproceedings{1002, abstract = { We present an interactive design system to create functional mechanical objects. Our computational approach allows novice users to retarget an existing mechanical template to a user-specified input shape. Our proposed representation for a mechanical template encodes a parameterized mechanism, mechanical constraints that ensure a physically valid configuration, spatial relationships of mechanical parts to the user-provided shape, and functional constraints that specify an intended functionality. We provide an intuitive interface and optimization-in-the-loop approach for finding a valid configuration of the mechanism and the shape to ensure that higher-level functional goals are met. Our algorithm interactively optimizes the mechanism while the user manipulates the placement of mechanical components and the shape. 
Our system allows users to efficiently explore various design choices and to synthesize customized mechanical objects that can be fabricated with rapid prototyping technologies. We demonstrate the efficacy of our approach by retargeting various mechanical templates to different shapes and fabricating the resulting functional mechanical objects. }, author = {Zhang, Ran and Auzinger, Thomas and Ceylan, Duygu and Li, Wilmot and Bickel, Bernd}, issn = {07300301}, location = {Los Angeles, CA, United States }, number = {4}, publisher = {ACM}, title = {{Functionality-aware retargeting of mechanisms to 3D shapes}}, doi = {10.1145/3072959.3073710}, volume = {36}, year = {2017}, } @inproceedings{1001, abstract = {We present a computational approach for designing CurveUps, curvy shells that form from an initially flat state. They consist of small rigid tiles that are tightly held together by two pre-stretched elastic sheets attached to them. Our method allows the realization of smooth, doubly curved surfaces that can be fabricated as a flat piece. Once released, the restoring forces of the pre-stretched sheets support the object to take shape in 3D. CurveUps are structurally stable in their target configuration. The design process starts with a target surface. Our method generates a tile layout in 2D and optimizes the distribution, shape, and attachment areas of the tiles to obtain a configuration that is fabricable and in which the curved up state closely matches the target. Our approach is based on an efficient approximate model and a local optimization strategy for an otherwise intractable nonlinear optimization problem. We demonstrate the effectiveness of our approach for a wide range of shapes, all realized as physical prototypes.}, author = {Guseinov, Ruslan and Miguel, Eder and Bickel, Bernd}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {ACM}, title = {{CurveUps: Shaping objects from flat plates with tension-actuated curvature}}, doi = {10.1145/3072959.3073709}, volume = {36}, year = {2017}, } @inproceedings{1097, abstract = {We present an interactive system for computational design, optimization, and fabrication of multicopters. Our computational approach allows non-experts to design, explore, and evaluate a wide range of different multicopters. We provide users with an intuitive interface for assembling a multicopter from a collection of components (e.g., propellers, motors, and carbon fiber rods). Our algorithm interactively optimizes shape and controller parameters of the current design to ensure its proper operation. In addition, we allow incorporating a variety of other metrics (such as payload, battery usage, size, and cost) into the design process and exploring tradeoffs between them. We show the efficacy of our method and system by designing, optimizing, fabricating, and operating multicopters with complex geometries and propeller configurations. 
We also demonstrate the ability of our optimization algorithm to improve the multicopter performance under different metrics.}, author = {Du, Tao and Schulz, Adriana and Zhu, Bo and Bickel, Bernd and Matusik, Wojciech}, location = {Macao, China}, number = {6}, publisher = {ACM}, title = {{Computational multicopter design}}, doi = {10.1145/2980179.2982427}, volume = {35}, year = {2016}, } @inproceedings{1099, abstract = {We present FlexMolds, a novel computational approach to automatically design flexible, reusable molds that, once 3D printed, allow us to physically fabricate, by means of liquid casting, multiple copies of complex shapes with rich surface details and complex topology. The approach to design such flexible molds is based on a greedy bottom-up search of possible cuts over an object, evaluating for each possible cut the feasibility of the resulting mold. We use a dynamic simulation approach to evaluate candidate molds, providing a heuristic to generate forces that are able to open, detach, and remove a complex mold from the object it surrounds. We have tested the approach with a number of objects with nontrivial shapes and topologies.}, author = {Malomo, Luigi and Pietroni, Nico and Bickel, Bernd and Cignoni, Paolo}, location = {Macao, China}, number = {6}, publisher = {ACM}, title = {{FlexMolds: Automatic design of flexible shells for molding}}, doi = {10.1145/2980179.2982397}, volume = {35}, year = {2016}, } @inproceedings{1319, abstract = {We present a novel optimization-based algorithm for the design and fabrication of customized, deformable input devices, capable of continuously sensing their deformation. We propose to embed piezoresistive sensing elements into flexible 3D printed objects. These sensing elements are then utilized to recover rich and natural user interactions at runtime. Designing such objects is a challenging and hard problem if attempted manually for all but the simplest geometries and deformations. Our method simultaneously optimizes the internal routing of the sensing elements and computes a mapping from low-level sensor readings to user-specified outputs in order to minimize reconstruction error. We demonstrate the power and flexibility of the approach by designing and fabricating a set of flexible input devices. Our results indicate that the optimization-based design greatly outperforms manual routings in terms of reconstruction accuracy and thus interaction fidelity.}, author = {Bächer, Moritz and Hepp, Benjamin and Pece, Fabrizio and Kry, Paul and Bickel, Bernd and Thomaszewski, Bernhard and Hilliges, Otmar}, location = {San Jose, California, USA}, pages = {3806 -- 3816}, publisher = {ACM}, title = {{DefSense: computational design of customized deformable input devices}}, doi = {10.1145/2858036.2858354}, year = {2016}, } @inproceedings{1364, abstract = {We present a computational method for designing wire sculptures consisting of interlocking wires. Our method allows the computation of aesthetically pleasing structures that are structurally stable, efficiently fabricatable with a 2D wire bending machine, and assemblable without the need of additional connectors. Starting from a set of planar contours provided by the user, our method automatically tests for the feasibility of a design, determines a discrete ordering of wires at intersection points, and optimizes for the rest shape of the individual wires to maximize structural stability under frictional contact. 
In addition to their application to art, wire sculptures present an extremely efficient and fast alternative for low-fidelity rapid prototyping because manufacturing time and required material scale linearly with the physical size of objects. We demonstrate the effectiveness of our approach on a varied set of examples, all of which we fabricated.}, author = {Miguel Villalba, Eder and Lepoutre, Mathias and Bickel, Bernd}, location = {Anaheim, CA, USA}, number = {4}, publisher = {ACM}, title = {{Computational design of stable planar-rod structures}}, doi = {10.1145/2897824.2925978}, volume = {35}, year = {2016}, } @article{1414, abstract = {In this paper, we present a method to model hyperelasticity that is well suited for representing the nonlinearity of real-world objects, as well as for estimating it from deformation examples. Previous approaches suffer several limitations, such as lack of integrability of elastic forces, failure to enforce energy convexity, lack of robustness of parameter estimation, or difficulty in modeling cross-modal effects. Our method avoids these problems by relying on a general energy-based definition of elastic properties. The accuracy of the resulting elastic model is maximized by defining an additive model of separable energy terms, which allow progressive parameter estimation. In addition, our method supports efficient modeling of extreme nonlinearities thanks to energy-limiting constraints. We combine our energy-based model with an optimization method to estimate model parameters from force-deformation examples, and we show successful modeling of diverse deformable objects, including cloth, human finger skin, and internal human anatomy in a medical imaging application.}, author = {Miguel Villalba, Eder and Miraut, David and Otaduy, Miguel}, journal = {Computer Graphics Forum}, number = {2}, pages = {385--396}, publisher = {Wiley-Blackwell}, title = {{Modeling and estimation of energy-based hyperelastic objects}}, doi = {10.1111/cgf.12840}, volume = {35}, year = {2016}, } @article{1446, abstract = {The accuracy of interdisciplinarity measurements is directly related to the quality of the underlying bibliographic data. Existing indicators of interdisciplinarity are not capable of reflecting the inaccuracies introduced by incorrect and incomplete records because correct and complete bibliographic data can rarely be obtained. This is the case for the Rao–Stirling index, which cannot handle references that are not categorized into disciplinary fields. We introduce a method that addresses this problem. It extends the Rao–Stirling index to acknowledge missing data by calculating its interval of uncertainty using computational optimization. The evaluation of our method indicates that the uncertainty interval is not only useful for estimating the inaccuracy of interdisciplinarity measurements, but it also delivers slightly more accurate aggregated interdisciplinarity measurements than the Rao–Stirling index.}, author = {Calatrava Moreno, Maria and Auzinger, Thomas and Werthner, Hannes}, journal = {Scientometrics}, number = {1}, pages = {213--232}, publisher = {Springer}, title = {{On the uncertainty of interdisciplinarity measurements due to incomplete bibliographic data}}, doi = {10.1007/s11192-016-1842-4}, volume = {107}, year = {2016}, } @inproceedings{1520, abstract = {Creating mechanical automata that can walk in stable and pleasing manners is a challenging task that requires both skill and expertise.
We propose to use computational design to offset the technical difficulties of this process. A simple drag-and-drop interface allows casual users to create personalized walking toys from a library of pre-defined template mechanisms. Provided with this input, our method leverages physical simulation and evolutionary optimization to refine the mechanical designs such that the resulting toys are able to walk. The optimization process is guided by an intuitive set of objectives that measure the quality of the walking motions. We demonstrate our approach on a set of simulated mechanical toys with different numbers of legs and various distinct gaits. Two fabricated prototypes showcase the feasibility of our designs.}, author = {Bharaj, Gaurav and Coros, Stelian and Thomaszewski, Bernhard and Tompkin, James and Bickel, Bernd and Pfister, Hanspeter}, isbn = {978-1-4503-3496-9}, location = {Los Angeles, CA, United States}, pages = {93--100}, publisher = {ACM}, title = {{Computational design of walking automata}}, doi = {10.1145/2786784.2786803}, year = {2015}, } @inproceedings{1625, abstract = {In recent years we have seen numerous improvements in 3D scanning and tracking of human faces, greatly advancing the creation of digital doubles for film and video games. However, despite the high-resolution quality of the reconstruction approaches available, current methods are unable to capture one of the most important regions of the face: the eye region. In this work, we present the first method for detailed spatio-temporal reconstruction of eyelids. Tracking and reconstructing eyelids is extremely challenging, as this region exhibits very complex and unique skin deformation where skin is folded under while opening the eye. Furthermore, eyelids are often only partially visible and obstructed due to self-occlusion and eyelashes. Our approach is to combine a geometric deformation model with image data, leveraging multi-view stereo, optical flow, contour tracking and wrinkle detection from local skin appearance. Our deformation model serves as a prior that enables reconstruction of eyelids even under strong self-occlusions caused by rolling and folding skin as the eye opens and closes. The output is a person-specific, time-varying eyelid reconstruction with anatomically plausible deformations. Our high-resolution detailed eyelids couple naturally with current facial performance capture approaches. As a result, our method can largely increase the fidelity of facial capture and the creation of digital doubles.}, author = {Bermano, Amit and Beeler, Thabo and Kozlov, Yeara and Bradley, Derek and Bickel, Bernd and Gross, Markus}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {ACM}, title = {{Detailed spatio-temporal reconstruction of eyelids}}, doi = {10.1145/2766924}, volume = {34}, year = {2015}, } @inproceedings{1626, abstract = {This paper introduces "OmniAD," a novel data-driven pipeline to model and acquire the aerodynamics of three-dimensional rigid objects. Traditionally, aerodynamics are examined through elaborate wind tunnel experiments or expensive fluid dynamics computations, and are only measured for a small number of discrete wind directions. OmniAD allows the evaluation of aerodynamic forces, such as drag and lift, for any incoming wind direction using a novel representation based on spherical harmonics. Our data-driven technique acquires the aerodynamic properties of an object simply by capturing its falling motion using a single camera.
Once model parameters are estimated, OmniAD enables realistic realtime simulation of rigid bodies, such as the tumbling and gliding of leaves, without simulating the surrounding air. In addition, we propose an intuitive user interface based on OmniAD to interactively design three-dimensional kites that actually fly. Various nontraditional kites were designed to demonstrate the physical validity of our model.}, author = {Martin, Tobias and Umetani, Nobuyuki and Bickel, Bernd}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {ACM}, title = {{OmniAD: Data-driven omni-directional aerodynamics}}, doi = {10.1145/2766919}, volume = {34}, year = {2015}, } @inproceedings{1628, abstract = {We propose a method for fabricating deformable objects with spatially varying elasticity using 3D printing. Using a single, relatively stiff printer material, our method designs an assembly of smallscale microstructures that have the effect of a softer material at the object scale, with properties depending on the microstructure used in each part of the object. We build on work in the area of metamaterials, using numerical optimization to design tiled microstructures with desired properties, but with the key difference that our method designs families of related structures that can be interpolated to smoothly vary the material properties over a wide range. To create an object with spatially varying elastic properties, we tile the object's interior with microstructures drawn from these families, generating a different microstructure for each cell using an efficient algorithm to select compatible structures for neighboring cells. We show results computed for both 2D and 3D objects, validating several 2D and 3D printed structures using standard material tests as well as demonstrating various example applications.}, author = {Schumacher, Christian and Bickel, Bernd and Rys, Jan and Marschner, Steve and Daraio, Chiara and Gross, Markus}, location = {Los Angeles, CA, USA}, number = {4}, publisher = {ACM}, title = {{Microstructures to control elasticity in 3D printing}}, doi = {10.1145/2766926}, volume = {34}, year = {2015}, } @inproceedings{1627, abstract = {We present a computational tool for fabrication-oriented design of flexible rod meshes. Given a deformable surface and a set of deformed poses as input, our method automatically computes a printable rod mesh that, once manufactured, closely matches the input poses under the same boundary conditions. The core of our method is formed by an optimization scheme that adjusts the cross-sectional profiles of the rods and their rest centerline in order to best approximate the target deformations. This approach allows us to locally control the bending and stretching resistance of the surface with a single material, yielding high design flexibility and low fabrication cost.}, author = {Pérez, Jesús and Thomaszewski, Bernhard and Coros, Stelian and Bickel, Bernd and Canabal, José and Sumner, Robert and Otaduy, Miguel}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {ACM}, title = {{Design and fabrication of flexible rod meshes}}, doi = {10.1145/2766998}, volume = {34}, year = {2015}, } @article{1734, abstract = {Facial appearance capture is now firmly established within academic research and used extensively across various application domains, perhaps most prominently in the entertainment industry through the design of virtual characters in video games and films. 
While significant progress has occurred over the last two decades, no single survey currently exists that discusses the similarities, differences, and practical considerations of the available appearance capture techniques as applied to human faces. A central difficulty of facial appearance capture is the way light interacts with skin, which has a complex multi-layered structure; the interactions that occur below the skin surface can, by definition, only be observed indirectly. In this report, we distinguish between two broad strategies for dealing with this complexity. "Image-based methods" try to exhaustively capture the exact face appearance under different lighting and viewing conditions, and then render the face through weighted image combinations. "Parametric methods" instead fit the captured reflectance data to some parametric appearance model used during rendering, allowing for a more lightweight and flexible representation but at the cost of potentially increased rendering complexity or inexact reproduction. The goal of this report is to provide an overview that can guide practitioners and researchers in assessing the tradeoffs between current approaches and identifying directions for future advances in facial appearance capture.}, author = {Klehm, Oliver and Rousselle, Fabrice and Papas, Marios and Bradley, Derek and Hery, Christophe and Bickel, Bernd and Jarosz, Wojciech and Beeler, Thabo}, journal = {Computer Graphics Forum}, number = {2}, pages = {709--733}, publisher = {Wiley-Blackwell}, title = {{Recent advances in facial appearance capture}}, doi = {10.1111/cgf.12594}, volume = {34}, year = {2015}, }