@inproceedings{14190,
  abstract     = {Learning meaningful and compact representations with disentangled semantic
aspects is considered to be of key importance in representation learning. Since
real-world data is notoriously costly to collect, many recent state-of-the-art
disentanglement models have heavily relied on synthetic toy data-sets. In this
paper, we propose a novel data-set which consists of over one million images of
physical 3D objects with seven factors of variation, such as object color,
shape, size and position. In order to be able to control all the factors of
variation precisely, we built an experimental platform where the objects are
being moved by a robotic arm. In addition, we provide two more datasets which
consist of simulations of the experimental setup. These datasets provide for
the first time the possibility to systematically investigate how well different
disentanglement methods perform on real data in comparison to simulation, and
how simulated data can be leveraged to build better representations of the real
world. We provide a first experimental study of these questions and our results
indicate that learned models transfer poorly, but that model and hyperparameter
selection is an effective means of transferring information to the real world.},
  author       = {Gondal, Muhammad Waleed and Wüthrich, Manuel and Miladinović, Đorđe and Locatello, Francesco and Breidt, Martin and Volchkov, Valentin and Akpo, Joel and Bachem, Olivier and Schölkopf, Bernhard and Bauer, Stefan},
  booktitle    = {Advances in Neural Information Processing Systems},
  isbn         = {9781713807933},
  location     = {Vancouver, Canada},
  publisher    = {Curran Associates, Inc.},
  title        = {On the Transfer of Inductive Bias from Simulation to the Real World: A New Disentanglement Dataset},
  volume       = {32},
  year         = {2019},
}

@inproceedings{14191,
  abstract     = {A broad class of convex optimization problems can be formulated as a semidefinite program (SDP), minimization of a convex function over the positive-semidefinite cone subject to some affine constraints. The majority of classical SDP solvers are designed for the deterministic setting where problem data is readily available. In this setting, generalized conditional gradient methods (aka Frank-Wolfe-type methods) provide scalable solutions by leveraging the so-called linear minimization oracle instead of the projection onto the semidefinite cone. Most problems in machine learning and modern engineering applications, however, contain some degree of stochasticity. In this work, we propose the first conditional-gradient-type method for solving stochastic optimization problems under affine constraints. Our method guarantees {$O(k^{-1/3})$} convergence rate in expectation on the objective residual and {$O(k^{-5/12})$} on the feasibility gap.},
  author       = {Locatello, Francesco and Yurtsever, Alp and Fercoq, Olivier and Cevher, Volkan},
  booktitle    = {Advances in Neural Information Processing Systems},
  isbn         = {9781713807933},
  location     = {Vancouver, Canada},
  pages        = {14291--14301},
  publisher    = {Curran Associates, Inc.},
  title        = {Stochastic {Frank-Wolfe} for Composite Convex Minimization},
  volume       = {32},
  year         = {2019},
}

@inproceedings{14193,
  abstract     = {A disentangled representation encodes information about the salient factors
of variation in the data independently. Although it is often argued that this
representational format is useful in learning to solve many real-world
down-stream tasks, there is little empirical evidence that supports this claim.
In this paper, we conduct a large-scale study that investigates whether
disentangled representations are more suitable for abstract reasoning tasks.
Using two new tasks similar to Raven's Progressive Matrices, we evaluate the
usefulness of the representations learned by 360 state-of-the-art unsupervised
disentanglement models. Based on these representations, we train 3600 abstract
reasoning models and observe that disentangled representations do in fact lead
to better down-stream performance. In particular, they enable quicker learning
using fewer samples.},
  author       = {van Steenkiste, Sjoerd and Locatello, Francesco and Schmidhuber, Jürgen and Bachem, Olivier},
  booktitle    = {Advances in Neural Information Processing Systems},
  isbn         = {9781713807933},
  location     = {Vancouver, Canada},
  publisher    = {Curran Associates, Inc.},
  title        = {Are Disentangled Representations Helpful for Abstract Visual Reasoning?},
  volume       = {32},
  year         = {2019},
}

@inproceedings{14197,
  abstract     = {Recently there has been a significant interest in learning disentangled
representations, as they promise increased interpretability, generalization to
unseen scenarios and faster learning on downstream tasks. In this paper, we
investigate the usefulness of different notions of disentanglement for
improving the fairness of downstream prediction tasks based on representations.
We consider the setting where the goal is to predict a target variable based on
the learned representation of high-dimensional observations (such as images)
that depend on both the target variable and an \emph{unobserved} sensitive
variable. We show that in this setting both the optimal and empirical
predictions can be unfair, even if the target variable and the sensitive
variable are independent. Analyzing the representations of more than
12,600 trained state-of-the-art disentangled models, we observe that
several disentanglement scores are consistently correlated with increased
fairness, suggesting that disentanglement may be a useful property to encourage
fairness when sensitive variables are not observed.},
  author       = {Locatello, Francesco and Abbati, Gabriele and Rainforth, Tom and Bauer, Stefan and Schölkopf, Bernhard and Bachem, Olivier},
  booktitle    = {Advances in Neural Information Processing Systems},
  isbn         = {9781713807933},
  location     = {Vancouver, Canada},
  pages        = {14611--14624},
  publisher    = {Curran Associates, Inc.},
  title        = {On the Fairness of Disentangled Representations},
  volume       = {32},
  year         = {2019},
}

