An All-MLP solution for Vision, from Google AI, in Pytorch.
No convolutions or attention needed!
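For intuition, here is a minimal sketch of what a single Mixer layer does: a token-mixing MLP applied across patches, followed by a channel-mixing MLP applied across features, each with a residual connection. This is an illustrative sketch only; the class name MixerLayer and the hidden sizes are assumptions, not the library's internal API.

import torch
from torch import nn

class MixerLayer(nn.Module):
    def __init__(self, num_tokens, dim, token_hidden = 256, channel_hidden = 2048):
        super().__init__()
        self.token_norm = nn.LayerNorm(dim)
        self.token_mlp = nn.Sequential(        # mixes information across patches (tokens)
            nn.Linear(num_tokens, token_hidden),
            nn.GELU(),
            nn.Linear(token_hidden, num_tokens)
        )
        self.channel_norm = nn.LayerNorm(dim)
        self.channel_mlp = nn.Sequential(      # mixes information across channels (features)
            nn.Linear(dim, channel_hidden),
            nn.GELU(),
            nn.Linear(channel_hidden, dim)
        )

    def forward(self, x):                               # x: (batch, num_tokens, dim)
        y = self.token_norm(x).transpose(1, 2)          # (batch, dim, num_tokens)
        x = x + self.token_mlp(y).transpose(1, 2)       # token mixing + residual
        x = x + self.channel_mlp(self.channel_norm(x))  # channel mixing + residual
        return x

tokens = torch.randn(1, 256, 512)                       # 256 patch embeddings of dimension 512
out = MixerLayer(num_tokens = 256, dim = 512)(tokens)   # (1, 256, 512)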
$ pip install mlp-mixer-pytorch
import torch
from mlp_mixer_pytorch import MLPMixer
model = MLPMixer(
    image_size = 256,
    channels = 3,
    patch_size = 16,
    dim = 512,
    depth = 12,
    num_classes = 1000
)
img = torch.randn(1, 3, 256, 256) # (batch, channels, height, width)
pred = model(img) # (1, 1000)
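As a rough sanity check (assuming non-overlapping patches, which is how Mixer-style models are typically described), image_size should be divisible by patch_size, and the settings above work out to the following number of tokens per image:

image_size, patch_size = 256, 16
assert image_size % patch_size == 0          # assumed requirement: patches tile the image exactly
num_tokens = (image_size // patch_size) ** 2
print(num_tokens)                            # 256 patch tokens, each projected to dim = 512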
Rectangular image
import torch
from mlp_mixer_pytorch import MLPMixer
model = MLPMixer(
    image_size = (256, 128),
    channels = 3,
    patch_size = 16,
    dim = 512,
    depth = 12,
    num_classes = 1000
)
img = torch.randn(1, 3, 256, 128)
pred = model(img) # (1, 1000)
Video
import torch
from mlp_mixer_pytorch import MLPMixer3D
model = MLPMixer3D(
    image_size = (256, 128),
    time_size = 4,
    time_patch_size = 2,
    channels = 3,
    patch_size = 16,
    dim = 512,
    depth = 12,
    num_classes = 1000
)
video = torch.randn(1, 3, 4, 256, 128) # (batch, channels, time, height, width)
pred = model(video) # (1, 1000)
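The same patching arithmetic extends to the 3D variant (again an assumption about non-overlapping patches rather than a documented guarantee): the time axis should be divisible by time_patch_size and each spatial axis by patch_size, giving for the example above:

time_size, time_patch_size = 4, 2
height, width, patch_size = 256, 128, 16
assert time_size % time_patch_size == 0                  # assumed requirement on the time axis
assert height % patch_size == 0 and width % patch_size == 0
num_tokens = (time_size // time_patch_size) * (height // patch_size) * (width // patch_size)
print(num_tokens)                                        # 2 * 16 * 8 = 256 spatio-temporal tokens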
@misc{tolstikhin2021mlpmixer,
title = {MLP-Mixer: An all-MLP Architecture for Vision},
author = {Ilya Tolstikhin and Neil Houlsby and Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Thomas Unterthiner and Jessica Yung and Daniel Keysers and Jakob Uszkoreit and Mario Lucic and Alexey Dosovitskiy},
year = {2021},
eprint = {2105.01601},
archivePrefix = {arXiv},
primaryClass = {cs.CV}
}
@misc{hou2021vision,
title = {Vision Permutator: A Permutable MLP-Like Architecture for Visual Recognition},
author = {Qibin Hou and Zihang Jiang and Li Yuan and Ming-Ming Cheng and Shuicheng Yan and Jiashi Feng},
year = {2021},
eprint = {2106.12368},
archivePrefix = {arXiv},
primaryClass = {cs.CV}
}