added config files for much lighter yolov5 models #11812

Closed
wants to merge 26 commits
Changes from all commits (26 commits)
988dfbe
first commit
AyushExel May 29, 2023
8ee268e
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 29, 2023
3e8191f
update
AyushExel May 29, 2023
1aae676
Merge branch 'ultralytics_cleanup' of https://github.com/ultralytics/…
AyushExel May 29, 2023
9171b2d
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 29, 2023
e979c12
update
AyushExel May 29, 2023
f54a59d
Merge branch 'ultralytics_cleanup' of https://github.com/ultralytics/…
AyushExel May 29, 2023
28c8092
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 29, 2023
f21523f
update
AyushExel May 29, 2023
9018227
update
AyushExel May 29, 2023
8509d81
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 29, 2023
c2c3a39
Merge branch 'master' into ultralytics_cleanup
AyushExel Jun 1, 2023
49faf87
use autobatch
AyushExel Jun 1, 2023
3873325
Merge branch 'ultralytics_cleanup' of https://github.com/ultralytics/…
AyushExel Jun 1, 2023
0355343
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 1, 2023
9946c68
update
AyushExel Jun 1, 2023
b8ac927
Merge branch 'ultralytics_cleanup' of https://github.com/ultralytics/…
AyushExel Jun 1, 2023
9f32732
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 1, 2023
16d32a7
udpate
AyushExel Jun 1, 2023
97fbbac
Merge branch 'ultralytics_cleanup' of https://github.com/ultralytics/…
AyushExel Jun 1, 2023
9c358a5
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 1, 2023
7dc495f
Merge branch 'master' into ultralytics_cleanup
AyushExel Jun 2, 2023
934e881
Merge branch 'master' into ultralytics_cleanup
AyushExel Jun 6, 2023
f91d6b1
Merge branch 'master' into ultralytics_cleanup
glenn-jocher Jun 17, 2023
a614091
Merge branch 'ultralytics_cleanup' into master
glenn-jocher Jul 4, 2023
d05a086
added lighter yolov5 models
jere357 Jul 4, 2023
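The lighter model YAMLs added in the final commit are not part of the diff shown below. For context only, here is a rough sketch of how a lighter YOLOv5 variant is usually defined — by shrinking the depth/width multipliers of an existing config and rebuilding the model from it. The file names and multiplier values are hypothetical, not taken from this PR, and the snippet assumes it is run from the YOLOv5 repo root:

```python
# Hypothetical sketch (not from this PR): derive a lighter YOLOv5 config by
# shrinking the depth/width multipliers of yolov5s.yaml, then rebuild the model.
import yaml

from models.yolo import DetectionModel  # YOLOv5 repo class

with open('models/yolov5s.yaml') as f:
    cfg = yaml.safe_load(f)                 # start from the standard small config
cfg['depth_multiple'] = 0.17                # hypothetical: fewer repeats per C3 block
cfg['width_multiple'] = 0.25                # hypothetical: narrower convolution channels

with open('models/yolov5-lighter.yaml', 'w') as f:  # hypothetical output path
    yaml.dump(cfg, f, sort_keys=False)

model = DetectionModel(cfg='models/yolov5-lighter.yaml', ch=3, nc=80)
model.info()                                # layer/parameter summary confirms the smaller size
```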
9 changes: 5 additions & 4 deletions benchmarks.py
@@ -32,6 +32,10 @@
from pathlib import Path

import pandas as pd
from ultralytics.yolo.utils.checks import check_yaml, print_args
from ultralytics.yolo.utils import LOGGER, file_size
from ultralytics.nn.tasks import attempt_load_weights
from ultralytics.yolo.utils.torch_utils import select_device

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
@@ -40,12 +44,9 @@
# ROOT = ROOT.relative_to(Path.cwd()) # relative

import export
from models.experimental import attempt_load
from models.yolo import SegmentationModel
from segment.val import run as val_seg
from utils import notebook_init
from utils.general import LOGGER, check_yaml, file_size, print_args
from utils.torch_utils import select_device
from val import run as val_det


@@ -62,7 +63,7 @@ def run(
):
y, t = [], time.time()
device = select_device(device)
model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc.
model_type = type(attempt_load_weights(weights, fuse=False)) # DetectionModel, SegmentationModel, etc.
for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU)
try:
assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported
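This file shows the pattern repeated throughout the PR: the repo-local `models.experimental.attempt_load` is replaced by `ultralytics.nn.tasks.attempt_load_weights`. A minimal sketch of the new loader call, assuming an `ultralytics` release that exposes this function and a checkpoint whose pickled modules resolve (i.e. run from inside the YOLOv5 repo):

```python
# Minimal sketch of the replacement loader (assumes an ultralytics release that
# exposes attempt_load_weights and a checkpoint readable from the YOLOv5 repo).
from ultralytics.nn.tasks import attempt_load_weights

weights = 'yolov5s-seg.pt'                         # hypothetical checkpoint path
model = attempt_load_weights(weights, fuse=False)  # load without fusing Conv+BN, as benchmarks.py does
print(type(model).__name__)                        # e.g. DetectionModel or SegmentationModel
```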
8 changes: 4 additions & 4 deletions classify/predict.py
@@ -36,6 +36,8 @@

import torch
import torch.nn.functional as F
from ultralytics.yolo.utils.checks import check_imgsz, check_imshow, check_requirements, print_args
from ultralytics.yolo.utils.torch_utils import select_device, smart_inference_mode, strip_optimizer

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
@@ -46,10 +48,8 @@
from models.common import DetectMultiBackend
from utils.augmentations import classify_transforms
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
increment_path, print_args, strip_optimizer)
from utils.general import LOGGER, Profile, check_file, colorstr, cv2, increment_path
from utils.plots import Annotator
from utils.torch_utils import select_device, smart_inference_mode


@smart_inference_mode()
@@ -89,7 +89,7 @@ def run(
device = select_device(device)
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
stride, names, pt = model.stride, model.names, model.pt
imgsz = check_img_size(imgsz, s=stride) # check image size
imgsz = check_imgsz(imgsz, s=stride) # check image size

# Dataloader
bs = 1 # batch_size
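Besides the loader, the size check is renamed: `check_img_size` from `utils.general` becomes `check_imgsz` from the ultralytics checks module. Note the diff keeps the old `s=` keyword; depending on the installed ultralytics version the parameter may be named `stride`, so passing it positionally (as in the sketch below) sidesteps the difference:

```python
# Sketch of the renamed image-size check (assumes ultralytics.yolo.utils.checks
# is available). The stride is passed positionally to avoid the s=/stride=
# keyword mismatch between the old YOLOv5 helper and the ultralytics one.
from ultralytics.yolo.utils.checks import check_imgsz

stride = 32                        # model stride reported by DetectMultiBackend
imgsz = check_imgsz(636, stride)   # rounds 636 up to the next multiple of 32, i.e. 640
print(imgsz)
```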
11 changes: 6 additions & 5 deletions classify/train.py
@@ -29,6 +29,9 @@
import torchvision
from torch.cuda import amp
from tqdm import tqdm
from ultralytics.nn.tasks import attempt_load_weights
from ultralytics.yolo.utils.torch_utils import (ModelEMA, de_parallel, model_info, select_device,
torch_distributed_zero_first)

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
@@ -37,15 +40,13 @@
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative

from classify import val as validate
from models.experimental import attempt_load
from models.yolo import ClassificationModel, DetectionModel
from utils.dataloaders import create_classification_dataloader
from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status,
check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save)
from utils.loggers import GenericLogger
from utils.plots import imshow_cls
from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP,
smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first)
from utils.torch_utils import reshape_classifier_output, smart_DDP, smart_optimizer, smartCrossEntropyLoss

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
@@ -108,7 +109,7 @@ def train(opt, device):
# Model
with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
if Path(opt.model).is_file() or opt.model.endswith('.pt'):
model = attempt_load(opt.model, device='cpu', fuse=False)
model = attempt_load_weights(opt.model, device='cpu', fuse=False)
elif opt.model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0
model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None)
else:
@@ -303,7 +304,7 @@ def main(opt):
check_requirements(ROOT / 'requirements.txt')

# DDP mode
device = select_device(opt.device, batch_size=opt.batch_size)
device = select_device(opt.device, batch=opt.batch_size)
if LOCAL_RANK != -1:
assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size'
assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
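The device-selection keyword also changes: YOLOv5's `select_device` accepted `batch_size=`, while the ultralytics helper imported here takes `batch=`. A small sketch under the same module-layout assumption:

```python
# Sketch of the changed device-selection call (assumes the ultralytics.yolo.*
# layout imported in this diff); the keyword moves from batch_size= to batch=.
from ultralytics.yolo.utils.torch_utils import select_device

device = select_device('', batch=16)  # '' = auto; also accepts 'cpu', '0', '0,1', ...
print(device)                         # torch.device chosen for training
```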
11 changes: 6 additions & 5 deletions classify/val.py
@@ -27,6 +27,9 @@

import torch
from tqdm import tqdm
from ultralytics.yolo.utils import colorstr
from ultralytics.yolo.utils.checks import check_imgsz, check_requirements, print_args
from ultralytics.yolo.utils.torch_utils import select_device, smart_inference_mode

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
@@ -36,9 +39,7 @@

from models.common import DetectMultiBackend
from utils.dataloaders import create_classification_dataloader
from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr,
increment_path, print_args)
from utils.torch_utils import select_device, smart_inference_mode
from utils.general import LOGGER, TQDM_BAR_FORMAT, Profile, increment_path


@smart_inference_mode()
@@ -67,7 +68,7 @@ def run(
half &= device.type != 'cpu' # half precision only supported on CUDA
model.half() if half else model.float()
else: # called directly
device = select_device(device, batch_size=batch_size)
device = select_device(device, batch=batch_size)

# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
@@ -76,7 +77,7 @@
# Load model
model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
imgsz = check_img_size(imgsz, s=stride) # check image size
imgsz = check_imgsz(imgsz, s=stride) # check image size
half = model.fp16 # FP16 supported on limited backends with CUDA
if engine:
batch_size = model.batch_size
10 changes: 6 additions & 4 deletions detect.py
@@ -35,6 +35,9 @@
from pathlib import Path

import torch
from ultralytics.yolo.utils import colorstr
from ultralytics.yolo.utils.checks import check_imgsz, check_imshow, check_requirements, print_args
from ultralytics.yolo.utils.torch_utils import select_device, smart_inference_mode

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
@@ -44,10 +47,9 @@

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
from utils.general import (LOGGER, Profile, check_file, cv2, increment_path, non_max_suppression, scale_boxes,
strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode


@smart_inference_mode()
@@ -97,7 +99,7 @@ def run(
device = select_device(device)
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
stride, names, pt = model.stride, model.names, model.pt
imgsz = check_img_size(imgsz, s=stride) # check image size
imgsz = check_imgsz(imgsz, s=stride) # check image size

# Dataloader
bs = 1 # batch_size
13 changes: 7 additions & 6 deletions export.py
@@ -59,6 +59,10 @@
import pandas as pd
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
from ultralytics.yolo.utils import check_requirements, colorstr, file_size
from ultralytics.nn.tasks import attempt_load_weights
from ultralytics.yolo.utils.checks import check_img_size, check_imgsz, check_version, print_args
from ultralytics.yolo.utils.torch_utils import select_device, smart_inference_mode

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
@@ -67,12 +71,9 @@
if platform.system() != 'Windows':
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative

from models.experimental import attempt_load
from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
from utils.dataloaders import LoadImages
from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version,
check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save)
from utils.torch_utils import select_device, smart_inference_mode
from utils.general import LOGGER, Profile, check_dataset, check_yaml, get_default_args, url2file, yaml_save

MACOS = platform.system() == 'Darwin' # macOS environment

@@ -733,7 +734,7 @@ def run(
if half:
assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0'
assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both'
model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model
model = attempt_load_weights(weights, device=device, inplace=True, fuse=True) # load FP32 model

# Checks
imgsz *= 2 if len(imgsz) == 1 else 1 # expand
@@ -742,7 +743,7 @@

# Input
gs = int(max(model.stride)) # grid size (max stride)
imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples
imgsz = [check_imgsz(x, gs) for x in imgsz] # verify img_size are gs-multiples
im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection

# Update model
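For reference, the export-time size handling around the updated call works like this: the requested size is expanded to `[h, w]` and each dimension is rounded up to a multiple of the model's maximum stride before the dummy input is built. A worked sketch (values illustrative):

```python
# Worked sketch of the export-time size handling (values illustrative; assumes
# ultralytics.yolo.utils.checks is available).
import torch

from ultralytics.yolo.utils.checks import check_imgsz

imgsz = [640]                                 # e.g. a single --imgsz value
imgsz *= 2 if len(imgsz) == 1 else 1          # expand to [640, 640]
gs = 32                                       # grid size = max stride of a typical YOLOv5 head
imgsz = [check_imgsz(x, gs) for x in imgsz]   # each dimension padded up to a multiple of gs
im = torch.zeros(1, 3, *imgsz)                # BCHW dummy image used to trace/export the model
print(imgsz, tuple(im.shape))
```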
7 changes: 4 additions & 3 deletions hubconf.py
@@ -30,12 +30,13 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
"""
from pathlib import Path

from ultralytics.nn.tasks import attempt_load_weights
from ultralytics.yolo.utils.torch_utils import select_device, smart_inference_mode

from models.common import AutoShape, DetectMultiBackend
from models.experimental import attempt_load
from models.yolo import ClassificationModel, DetectionModel, SegmentationModel
from utils.downloads import attempt_download
from utils.general import LOGGER, ROOT, check_requirements, intersect_dicts, logging
from utils.torch_utils import select_device

if not verbose:
LOGGER.setLevel(logging.WARNING)
@@ -57,7 +58,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
else:
model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS
except Exception:
model = attempt_load(path, device=device, fuse=False) # arbitrary model
model = attempt_load_weights(path, device=device, fuse=False) # arbitrary model
else:
cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path
model = DetectionModel(cfg, channels, classes) # create model
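Since hubconf.py is the torch.hub entry point, this is the path a hub user exercises; the fallback order (DetectMultiBackend + AutoShape first, raw `attempt_load_weights` for arbitrary checkpoints) is unchanged. A usage sketch — note that torch.hub pulls the repo's default branch, so whether a given branch includes this PR is an assumption:

```python
# Usage sketch: loading YOLOv5 through torch.hub exercises the patched _create()
# path above. Whether the fetched branch includes this PR is an assumption.
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
results = model('https://ultralytics.com/images/zidane.jpg')  # AutoShape accepts URL/PIL/ndarray inputs
results.print()                                               # summary of detections per image
```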
15 changes: 10 additions & 5 deletions models/common.py
@@ -23,12 +23,14 @@
import torch.nn as nn
from PIL import Image
from torch.cuda import amp
from ultralytics.nn.tasks import attempt_load_weights
from ultralytics.yolo.utils import colorstr
from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_version

from utils import TryExcept
from utils.dataloaders import exif_transpose, letterbox
from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
xyxy2xywh, yaml_load)
from utils.general import (LOGGER, ROOT, Profile, increment_path, is_jupyter, make_divisible, non_max_suppression,
scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import copy_attr, smart_inference_mode

@@ -328,7 +330,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False,
# TensorFlow Lite: *.tflite
# TensorFlow Edge TPU: *_edgetpu.tflite
# PaddlePaddle: *_paddle_model
from models.experimental import attempt_download, attempt_load # scoped to avoid circular import
from models.experimental import attempt_download # scoped to avoid circular import

super().__init__()
w = str(weights[0] if isinstance(weights, list) else weights)
@@ -341,7 +343,10 @@
w = attempt_download(w) # download if not local

if pt: # PyTorch
model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
model = attempt_load_weights(weights if isinstance(weights, list) else w,
device=device,
inplace=True,
fuse=fuse)
stride = max(int(model.stride.max()), 32) # model stride
names = model.module.names if hasattr(model, 'module') else model.names # get class names
model.half() if fp16 else model.float()
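For the PyTorch backend, `DetectMultiBackend` now delegates loading to `attempt_load_weights` and then derives its stride and class names exactly as before. A condensed sketch mirroring the lines above (weights path hypothetical):

```python
# Condensed sketch of DetectMultiBackend's PyTorch branch after this change,
# mirroring the diff above (weights path is hypothetical).
import torch

from ultralytics.nn.tasks import attempt_load_weights

w, fp16 = 'yolov5s.pt', False
model = attempt_load_weights(w, device=torch.device('cpu'), inplace=True, fuse=True)
stride = max(int(model.stride.max()), 32)                                # model stride, floored at 32
names = model.module.names if hasattr(model, 'module') else model.names  # class-index -> name mapping
model.half() if fp16 else model.float()
print(stride, len(names))
```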
41 changes: 0 additions & 41 deletions models/experimental.py
@@ -68,44 +68,3 @@ def forward(self, x, augment=False, profile=False, visualize=False):
# y = torch.stack(y).mean(0) # mean ensemble
y = torch.cat(y, 1) # nms ensemble
return y, None # inference, train output


def attempt_load(weights, device=None, inplace=True, fuse=True):
# Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
from models.yolo import Detect, Model

model = Ensemble()
for w in weights if isinstance(weights, list) else [weights]:
ckpt = torch.load(attempt_download(w), map_location='cpu') # load
ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model

# Model compatibility updates
if not hasattr(ckpt, 'stride'):
ckpt.stride = torch.tensor([32.])
if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
ckpt.names = dict(enumerate(ckpt.names)) # convert to dict

model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode

# Module compatibility updates
for m in model.modules():
t = type(m)
if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
m.inplace = inplace # torch 1.7.0 compatibility
if t is Detect and not isinstance(m.anchor_grid, list):
delattr(m, 'anchor_grid')
setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
m.recompute_scale_factor = None # torch 1.11.0 compatibility

# Return model
if len(model) == 1:
return model[-1]

# Return detection ensemble
print(f'Ensemble created with {weights}\n')
for k in 'names', 'nc', 'yaml':
setattr(model, k, getattr(model[0], k))
model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride
assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
return model
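Deleting `models.experimental.attempt_load` outright is a breaking change for downstream code that still imports it. If backwards compatibility were wanted, a thin shim along these lines (purely hypothetical, not part of this PR) could keep the old name alive:

```python
# Hypothetical compatibility shim (not part of this PR): keep the old
# attempt_load name but delegate to the ultralytics loader.
from ultralytics.nn.tasks import attempt_load_weights


def attempt_load(weights, device=None, inplace=True, fuse=True):
    # Same signature as the removed YOLOv5 helper; ensemble handling, Conv+BN
    # fusing and device placement are deferred to attempt_load_weights.
    return attempt_load_weights(weights, device=device, inplace=inplace, fuse=fuse)
```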
5 changes: 3 additions & 2 deletions models/tf.py
@@ -26,10 +26,11 @@
import torch
import torch.nn as nn
from tensorflow import keras
from ultralytics.nn.tasks import attempt_load_weights

from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv,
DWConvTranspose2d, Focus, autopad)
from models.experimental import MixConv2d, attempt_load
from models.experimental import MixConv2d
from models.yolo import Detect, Segment
from utils.activations import SiLU
from utils.general import LOGGER, make_divisible, print_args
@@ -570,7 +571,7 @@ def run(
):
# PyTorch model
im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image
model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False)
model = attempt_load_weights(weights, device=torch.device('cpu'), inplace=True, fuse=False)
_ = model(im) # inference
model.info()

8 changes: 5 additions & 3 deletions models/yolo.py
@@ -14,6 +14,9 @@
from copy import deepcopy
from pathlib import Path

from ultralytics.yolo.utils.checks import check_version, print_args
from ultralytics.yolo.utils.torch_utils import select_device

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
if str(ROOT) not in sys.path:
@@ -24,10 +27,9 @@
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
from utils.general import LOGGER, check_yaml, make_divisible
from utils.plots import feature_visualization
from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
time_sync)
from utils.torch_utils import fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, time_sync

try:
import thop # for FLOPs computation