@article{1099,
  abstract     = {We present FlexMolds, a novel computational approach to automatically design flexible, reusable molds that, once 3D printed, allow us to physically fabricate, by means of liquid casting, multiple copies of complex shapes with rich surface details and complex topology. The approach to design such flexible molds is based on a greedy bottom-up search of possible cuts over an object, evaluating for each possible cut the feasibility of the resulting mold. We use a dynamic simulation approach to evaluate candidate molds, providing a heuristic to generate forces that are able to open, detach, and remove a complex mold from the object it surrounds. We have tested the approach with a number of objects with nontrivial shapes and topologies.},
  author       = {Malomo, Luigi and Pietroni, Nico and Bickel, Bernd and Cignoni, Paolo},
  journal      = {ACM Transactions on Graphics},
  number       = {6},
  publisher    = {ACM},
  title        = {{FlexMolds: Automatic design of flexible shells for molding}},
  doi          = {10.1145/2980179.2982397},
  volume       = {35},
  year         = {2016},
}

@inproceedings{1520,
  abstract     = {Creating mechanical automata that can walk in stable and pleasing manners is a challenging task that requires both skill and expertise. We propose to use computational design to offset the technical difficulties of this process. A simple drag-and-drop interface allows casual users to create personalized walking toys from a library of pre-defined template mechanisms. Provided with this input, our method leverages physical simulation and evolutionary optimization to refine the mechanical designs such that the resulting toys are able to walk. The optimization process is guided by an intuitive set of objectives that measure the quality of the walking motions. We demonstrate our approach on a set of simulated mechanical toys with different numbers of legs and various distinct gaits. Two fabricated prototypes showcase the feasibility of our designs.},
  author       = {Bharaj, Gaurav and Coros, Stelian and Thomaszewski, Bernhard and Tompkin, James and Bickel, Bernd and Pfister, Hanspeter},
  booktitle    = {Proceedings of the 14th {ACM} {SIGGRAPH}/Eurographics Symposium on Computer Animation},
  isbn         = {978-1-4503-3496-9},
  location     = {Los Angeles, CA, United States},
  pages        = {93--100},
  publisher    = {ACM},
  title        = {{Computational design of walking automata}},
  doi          = {10.1145/2786784.2786803},
  year         = {2015},
}

@article{1625,
  abstract     = {In recent years we have seen numerous improvements on 3D scanning and tracking of human faces, greatly advancing the creation of digital doubles for film and video games. However, despite the high-resolution quality of the reconstruction approaches available, current methods are unable to capture one of the most important regions of the face - the eye region. In this work we present the first method for detailed spatio-temporal reconstruction of eyelids. Tracking and reconstructing eyelids is extremely challenging, as this region exhibits very complex and unique skin deformation where skin is folded under while opening the eye. Furthermore, eyelids are often only partially visible and obstructed due to selfocclusion and eyelashes. Our approach is to combine a geometric deformation model with image data, leveraging multi-view stereo, optical flow, contour tracking and wrinkle detection from local skin appearance. Our deformation model serves as a prior that enables reconstruction of eyelids even under strong self-occlusions caused by rolling and folding skin as the eye opens and closes. The output is a person-specific, time-varying eyelid reconstruction with anatomically plausible deformations. Our high-resolution detailed eyelids couple naturally with current facial performance capture approaches. As a result, our method can largely increase the fidelity of facial capture and the creation of digital doubles.},
  author       = {Bermano, Amit and Beeler, Thabo and Kozlov, Yeara and Bradley, Derek and Bickel, Bernd and Gross, Markus},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {ACM},
  title        = {{Detailed spatio-temporal reconstruction of eyelids}},
  doi          = {10.1145/2766924},
  volume       = {34},
  year         = {2015},
}

@article{1626,
  abstract     = {This paper introduces ``OmniAD,'' a novel data-driven pipeline to model and acquire the aerodynamics of three-dimensional rigid objects. Traditionally, aerodynamics are examined through elaborate wind tunnel experiments or expensive fluid dynamics computations, and are only measured for a small number of discrete wind directions. OmniAD allows the evaluation of aerodynamic forces, such as drag and lift, for any incoming wind direction using a novel representation based on spherical harmonics. Our datadriven technique acquires the aerodynamic properties of an object simply by capturing its falling motion using a single camera. Once model parameters are estimated, OmniAD enables realistic realtime simulation of rigid bodies, such as the tumbling and gliding of leaves, without simulating the surrounding air. In addition, we propose an intuitive user interface based on OmniAD to interactively design three-dimensional kites that actually fly. Various nontraditional kites were designed to demonstrate the physical validity of our model.},
  author       = {Martin, Tobias and Umetani, Nobuyuki and Bickel, Bernd},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {ACM},
  title        = {{OmniAD: Data-driven omni-directional aerodynamics}},
  doi          = {10.1145/2766919},
  volume       = {34},
  year         = {2015},
}

@article{1627,
  abstract     = {We present a computational tool for fabrication-oriented design of flexible rod meshes. Given a deformable surface and a set of deformed poses as input, our method automatically computes a printable rod mesh that, once manufactured, closely matches the input poses under the same boundary conditions. The core of our method is formed by an optimization scheme that adjusts the cross-sectional profiles of the rods and their rest centerline in order to best approximate the target deformations. This approach allows us to locally control the bending and stretching resistance of the surface with a single material, yielding high design flexibility and low fabrication cost.},
  author       = {Pérez, Jesús and Thomaszewski, Bernhard and Coros, Stelian and Bickel, Bernd and Canabal, José and Sumner, Robert and Otaduy, Miguel},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {ACM},
  title        = {{Design and fabrication of flexible rod meshes}},
  doi          = {10.1145/2766998},
  volume       = {34},
  year         = {2015},
}

@article{1628,
  abstract     = {We propose a method for fabricating deformable objects with spatially varying elasticity using 3D printing. Using a single, relatively stiff printer material, our method designs an assembly of smallscale microstructures that have the effect of a softer material at the object scale, with properties depending on the microstructure used in each part of the object. We build on work in the area of metamaterials, using numerical optimization to design tiled microstructures with desired properties, but with the key difference that our method designs families of related structures that can be interpolated to smoothly vary the material properties over a wide range. To create an object with spatially varying elastic properties, we tile the object's interior with microstructures drawn from these families, generating a different microstructure for each cell using an efficient algorithm to select compatible structures for neighboring cells. We show results computed for both 2D and 3D objects, validating several 2D and 3D printed structures using standard material tests as well as demonstrating various example applications.},
  author       = {Schumacher, Christian and Bickel, Bernd and Rys, Jan and Marschner, Steve and Daraio, Chiara and Gross, Markus},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  publisher    = {ACM},
  title        = {{Microstructures to control elasticity in 3D printing}},
  doi          = {10.1145/2766926},
  volume       = {34},
  year         = {2015},
}

@article{1734,
  abstract     = {Facial appearance capture is now firmly established within academic research and used extensively across various application domains, perhaps most prominently in the entertainment industry through the design of virtual characters in video games and films. While significant progress has occurred over the last two decades, no single survey currently exists that discusses the similarities, differences, and practical considerations of the available appearance capture techniques as applied to human faces. A central difficulty of facial appearance capture is the way light interacts with skin-which has a complex multi-layered structure-and the interactions that occur below the skin surface can, by definition, only be observed indirectly. In this report, we distinguish between two broad strategies for dealing with this complexity. ``Image-based methods'' try to exhaustively capture the exact face appearance under different lighting and viewing conditions, and then render the face through weighted image combinations. ``Parametric methods'' instead fit the captured reflectance data to some parametric appearance model used during rendering, allowing for a more lightweight and flexible representation but at the cost of potentially increased rendering complexity or inexact reproduction. The goal of this report is to provide an overview that can guide practitioners and researchers in assessing the tradeoffs between current approaches and identifying directions for future advances in facial appearance capture.},
  author       = {Klehm, Oliver and Rousselle, Fabrice and Papas, Marios and Bradley, Derek and Hery, Christophe and Bickel, Bernd and Jarosz, Wojciech and Beeler, Thabo},
  journal      = {Computer Graphics Forum},
  number       = {2},
  pages        = {709--733},
  publisher    = {Wiley-Blackwell},
  title        = {{Recent advances in facial appearance capture}},
  doi          = {10.1111/cgf.12594},
  volume       = {34},
  year         = {2015},
}

