@inproceedings{6558,
  abstract  = {This paper studies the problem of distributed stochastic optimization in an adversarial setting where, out of $m$ machines which allegedly compute stochastic gradients every iteration, an $\alpha$-fraction are Byzantine, and may behave adversarially. Our main result is a variant of stochastic gradient descent (SGD) which finds $\varepsilon$-approximate minimizers of convex functions in $T = \tilde{O}\bigl(\tfrac{1}{\varepsilon^2 m} + \tfrac{\alpha^2}{\varepsilon^2}\bigr)$ iterations. In contrast, traditional mini-batch SGD needs $T = O\bigl(\tfrac{1}{\varepsilon^2 m}\bigr)$ iterations, but cannot tolerate Byzantine failures. Further, we provide a lower bound showing that, up to logarithmic factors, our algorithm is information-theoretically optimal both in terms of sample complexity and time complexity.},
  author    = {Alistarh, Dan-Adrian and Allen-Zhu, Zeyuan and Li, Jerry},
  booktitle = {Advances in Neural Information Processing Systems},
  location  = {Montreal, Canada},
  pages     = {4613--4623},
  publisher = {Neural Information Processing Systems Foundation},
  title     = {{Byzantine stochastic gradient descent}},
  volume    = {2018},
  year      = {2018},
}