diff --git a/utils/aws/__init__.py b/__init__.py
similarity index 100%
rename from utils/aws/__init__.py
rename to __init__.py
diff --git a/benchmarks.py b/benchmarks.py
deleted file mode 100644
index 03d7d693a936..000000000000
--- a/benchmarks.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Run YOLOv5 benchmarks on all supported export formats
-
-Format | `export.py --include` | Model
---- | --- | ---
-PyTorch | - | yolov5s.pt
-TorchScript | `torchscript` | yolov5s.torchscript
-ONNX | `onnx` | yolov5s.onnx
-OpenVINO | `openvino` | yolov5s_openvino_model/
-TensorRT | `engine` | yolov5s.engine
-CoreML | `coreml` | yolov5s.mlmodel
-TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
-TensorFlow GraphDef | `pb` | yolov5s.pb
-TensorFlow Lite | `tflite` | yolov5s.tflite
-TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
-TensorFlow.js | `tfjs` | yolov5s_web_model/
-
-Requirements:
-    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU
-    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU
-    $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT
-
-Usage:
-    $ python benchmarks.py --weights yolov5s.pt --img 640
-"""
-
-import argparse
-import platform
-import sys
-import time
-from pathlib import Path
-
-import pandas as pd
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[0]  # YOLOv5 root directory
-if str(ROOT) not in sys.path:
-    sys.path.append(str(ROOT))  # add ROOT to PATH
-# ROOT = ROOT.relative_to(Path.cwd())  # relative
-
-import export
-from models.experimental import attempt_load
-from models.yolo import SegmentationModel
-from segment.val import run as val_seg
-from utils import notebook_init
-from utils.general import LOGGER, check_yaml, file_size, print_args
-from utils.torch_utils import select_device
-from val import run as val_det
-
-
-def run(
-        weights=ROOT / 'yolov5s.pt',  # weights path
-        imgsz=640,  # inference size (pixels)
-        batch_size=1,  # batch size
-        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
-        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
-        half=False,  # use FP16 half-precision inference
-        test=False,  # test exports only
-        pt_only=False,  # test PyTorch only
-        hard_fail=False,  # throw error on benchmark failure
-):
-    y, t = [], time.time()
-    device = select_device(device)
-    model_type = type(attempt_load(weights, fuse=False))  # DetectionModel, SegmentationModel, etc.
-    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
-        try:
-            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
-            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML
-            if 'cpu' in device.type:
-                assert cpu, 'inference not supported on CPU'
-            if 'cuda' in device.type:
-                assert gpu, 'inference not supported on GPU'
-
-            # Export
-            if f == '-':
-                w = weights  # PyTorch format
-            else:
-                w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # all others
-            assert suffix in str(w), 'export failed'
-
-            # Validate
-            if model_type == SegmentationModel:
-                result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
-                metric = result[0][7]  # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))
-            else:  # DetectionModel
-                result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
-                metric = result[0][3]  # (p, r, map50, map, *loss(box, obj, cls))
-            speed = result[2][1]  # times (preprocess, inference, postprocess)
-            y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)])  # MB, mAP, t_inference
-        except Exception as e:
-            if hard_fail:
-                assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
-            LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}')
-            y.append([name, None, None, None])  # mAP, t_inference
-        if pt_only and i == 0:
-            break  # break after PyTorch
-
-    # Print results
-    LOGGER.info('\n')
-    parse_opt()
-    notebook_init()  # print system info
-    c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)']  # metrics are computed for every format benchmarked
-    py = pd.DataFrame(y, columns=c)
-    LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
-    LOGGER.info(str(py))
-    if hard_fail and isinstance(hard_fail, str):
-        metrics = py['mAP50-95'].array  # values to compare to floor
-        floor = eval(hard_fail)  # minimum metric floor to pass, i.e. 0.29 mAP for YOLOv5n
-        assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'
-    return py
-
-
-def test(
-        weights=ROOT / 'yolov5s.pt',  # weights path
-        imgsz=640,  # inference size (pixels)
-        batch_size=1,  # batch size
-        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
-        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
-        half=False,  # use FP16 half-precision inference
-        test=False,  # test exports only
-        pt_only=False,  # test PyTorch only
-        hard_fail=False,  # throw error on benchmark failure
-):
-    y, t = [], time.time()
-    device = select_device(device)
-    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
-        try:
-            w = weights if f == '-' else \
-                export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # weights
-            assert suffix in str(w), 'export failed'
-            y.append([name, True])
-        except Exception:
-            y.append([name, False])  # mAP, t_inference
-
-    # Print results
-    LOGGER.info('\n')
-    parse_opt()
-    notebook_init()  # print system info
-    py = pd.DataFrame(y, columns=['Format', 'Export'])
-    LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)')
-    LOGGER.info(str(py))
-    return py
-
-
-def parse_opt():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
-    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
-    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
-    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
-    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
-    parser.add_argument('--test', action='store_true', help='test exports only')
-    parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')
-    parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric')
-    opt = parser.parse_args()
-    opt.data = check_yaml(opt.data)  # check YAML
-    print_args(vars(opt))
-    return opt
-
-
-def main(opt):
-    test(**vars(opt)) if opt.test else run(**vars(opt))
-
-
-if __name__ == "__main__":
-    opt = parse_opt()
-    main(opt)
diff --git a/classify/predict.py b/classify/predict.py
deleted file mode 100644
index 5a5edabda42c..000000000000
--- a/classify/predict.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
- -Usage - sources: - $ python classify/predict.py --weights yolov5s-cls.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - screen # screenshot - path/ # directory - list.txt # list of images - list.streams # list of streams - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream - -Usage - formats: - $ python classify/predict.py --weights yolov5s-cls.pt # PyTorch - yolov5s-cls.torchscript # TorchScript - yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-cls_openvino_model # OpenVINO - yolov5s-cls.engine # TensorRT - yolov5s-cls.mlmodel # CoreML (macOS-only) - yolov5s-cls_saved_model # TensorFlow SavedModel - yolov5s-cls.pb # TensorFlow GraphDef - yolov5s-cls.tflite # TensorFlow Lite - yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU - yolov5s-cls_paddle_model # PaddlePaddle -""" - -import argparse -import os -import platform -import sys -from pathlib import Path - -import torch -import torch.nn.functional as F - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import DetectMultiBackend -from utils.augmentations import classify_transforms -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams -from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, print_args, strip_optimizer) -from utils.plots import Annotator -from utils.torch_utils import select_device, smart_inference_mode - - -@smart_inference_mode() -def run( - weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - imgsz=(224, 224), # inference size (height, width) - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - view_img=False, # show results - save_txt=False, # save results to *.txt - nosave=False, # do not save images/videos - augment=False, # augmented inference - visualize=False, # visualize features - update=False, # update all models - project=ROOT / 'runs/predict-cls', # save results to project/name - name='exp', # save results to project/name - exist_ok=False, # existing project/name ok, do not increment - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - vid_stride=1, # video frame-rate stride -): - source = str(source) - save_img = not nosave and not source.endswith('.txt') # save inference images - is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) - is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) - screenshot = source.lower().startswith('screen') - if is_url and is_file: - source = check_file(source) # download - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Load model - device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) - stride, names, pt = model.stride, model.names, model.pt - imgsz = check_img_size(imgsz, s=stride) # check image size - - # Dataloader - bs = 1 # batch_size - if webcam: - view_img = check_imshow(warn=True) - dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = len(dataset) - elif screenshot: - dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) - else: - dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - vid_path, vid_writer = [None] * bs, [None] * bs - - # Run inference - model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) - for path, im, im0s, vid_cap, s in dataset: - with dt[0]: - im = torch.Tensor(im).to(model.device) - im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 - if len(im.shape) == 3: - im = im[None] # expand for batch dim - - # Inference - with dt[1]: - results = model(im) - - # Post-process - with dt[2]: - pred = F.softmax(results, dim=1) # probabilities - - # Process predictions - for i, prob in enumerate(pred): # per image - seen += 1 - if webcam: # batch_size >= 1 - p, im0, frame = path[i], im0s[i].copy(), dataset.count - s += f'{i}: ' - else: - p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) - - p = Path(p) # to Path - save_path = str(save_dir / p.name) # im.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt - - s += '%gx%g ' % im.shape[2:] # print string - annotator = Annotator(im0, example=str(names), pil=True) - - # Print results - top5i = prob.argsort(0, descending=True)[:5].tolist() # top 5 indices - s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, " - - # Write results - text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) - if save_img or view_img: # Add bbox to image - annotator.text((32, 32), text, txt_color=(255, 255, 255)) - if save_txt: # Write to file - with open(f'{txt_path}.txt', 'a') as f: - f.write(text + '\n') - - # Stream results - im0 = annotator.result() - if view_img: - if 
platform.system() == 'Linux' and p not in windows: - windows.append(p) - cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) - cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) - cv2.imshow(str(p), im0) - cv2.waitKey(1) # 1 millisecond - - # Save results (image with detections) - if save_img: - if dataset.mode == 'image': - cv2.imwrite(save_path, im0) - else: # 'video' or 'stream' - if vid_path[i] != save_path: # new video - vid_path[i] = save_path - if isinstance(vid_writer[i], cv2.VideoWriter): - vid_writer[i].release() # release previous video writer - if vid_cap: # video - fps = vid_cap.get(cv2.CAP_PROP_FPS) - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - else: # stream - fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos - vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - vid_writer[i].write(im0) - - # Print time (inference-only) - LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms") - - # Print results - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) - if save_txt or save_img: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") - if update: - strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--visualize', action='store_true', help='visualize features') - parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save results to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') - opt = parser.parse_args() - opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand - print_args(vars(opt)) - return opt - - -def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/classify/train.py b/classify/train.py deleted file mode 100644 index 4767be77bd61..000000000000 --- a/classify/train.py +++ /dev/null @@ -1,333 +0,0 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license -""" -Train a YOLOv5 classifier model on a classification dataset - -Usage - Single-GPU training: - $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 - -Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 - -Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data' -YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt -Torchvision models: --model resnet50, efficientnet_b0, etc. 
See https://pytorch.org/vision/stable/models.html -""" - -import argparse -import os -import subprocess -import sys -import time -from copy import deepcopy -from datetime import datetime -from pathlib import Path - -import torch -import torch.distributed as dist -import torch.hub as hub -import torch.optim.lr_scheduler as lr_scheduler -import torchvision -from torch.cuda import amp -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from classify import val as validate -from models.experimental import attempt_load -from models.yolo import ClassificationModel, DetectionModel -from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status, - check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) -from utils.loggers import GenericLogger -from utils.plots import imshow_cls -from utils.torch_utils import (ModelEMA, model_info, reshape_classifier_output, select_device, smart_DDP, - smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first) - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) -GIT_INFO = check_git_info() - - -def train(opt, device): - init_seeds(opt.seed + 1 + RANK, deterministic=True) - save_dir, data, bs, epochs, nw, imgsz, pretrained = \ - opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \ - opt.imgsz, str(opt.pretrained).lower() == 'true' - cuda = device.type != 'cpu' - - # Directories - wdir = save_dir / 'weights' - wdir.mkdir(parents=True, exist_ok=True) # make dir - last, best = wdir / 'last.pt', wdir / 'best.pt' - - # Save run settings - yaml_save(save_dir / 'opt.yaml', vars(opt)) - - # Logger - logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None - - # Download Dataset - with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): - data_dir = data if data.is_dir() else (DATASETS_DIR / data) - if not data_dir.is_dir(): - LOGGER.info(f'\nDataset not found โš ๏ธ, missing path {data_dir}, attempting download...') - t = time.time() - if str(data) == 'imagenet': - subprocess.run(f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", shell=True, check=True) - else: - url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' - download(url, dir=data_dir.parent) - s = f"Dataset download success โœ… ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n" - LOGGER.info(s) - - # Dataloaders - nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()]) # number of classes - trainloader = create_classification_dataloader(path=data_dir / 'train', - imgsz=imgsz, - batch_size=bs // WORLD_SIZE, - augment=True, - cache=opt.cache, - rank=LOCAL_RANK, - workers=nw) - - test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val' # data/test or data/val - if RANK in {-1, 0}: - testloader = create_classification_dataloader(path=test_dir, - imgsz=imgsz, - batch_size=bs // WORLD_SIZE * 2, - augment=False, - cache=opt.cache, - rank=-1, - workers=nw) - - # Model - with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): - if Path(opt.model).is_file() or 
opt.model.endswith('.pt'): - model = attempt_load(opt.model, device='cpu', fuse=False) - elif opt.model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0 - model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None) - else: - m = hub.list('ultralytics/yolov5') # + hub.list('pytorch/vision') # models - raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m)) - if isinstance(model, DetectionModel): - LOGGER.warning("WARNING โš ๏ธ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'") - model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model - reshape_classifier_output(model, nc) # update class count - for m in model.modules(): - if not pretrained and hasattr(m, 'reset_parameters'): - m.reset_parameters() - if isinstance(m, torch.nn.Dropout) and opt.dropout is not None: - m.p = opt.dropout # set dropout - for p in model.parameters(): - p.requires_grad = True # for training - model = model.to(device) - - # Info - if RANK in {-1, 0}: - model.names = trainloader.dataset.classes # attach class names - model.transforms = testloader.dataset.torch_transforms # attach inference transforms - model_info(model) - if opt.verbose: - LOGGER.info(model) - images, labels = next(iter(trainloader)) - file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg') - logger.log_images(file, name='Train Examples') - logger.log_graph(model, imgsz) # log model - - # Optimizer - optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay) - - # Scheduler - lrf = 0.01 # final lr (fraction of lr0) - # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine - lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf # linear - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) - # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1, - # final_div_factor=1 / 25 / lrf) - - # EMA - ema = ModelEMA(model) if RANK in {-1, 0} else None - - # DDP mode - if cuda and RANK != -1: - model = smart_DDP(model) - - # Train - t0 = time.time() - criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing) # loss function - best_fitness = 0.0 - scaler = amp.GradScaler(enabled=cuda) - val = test_dir.stem # 'val' or 'test' - LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n' - f'Using {nw * WORLD_SIZE} dataloader workers\n' - f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n' - f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}") - for epoch in range(epochs): # loop over the dataset multiple times - tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness - model.train() - if RANK != -1: - trainloader.sampler.set_epoch(epoch) - pbar = enumerate(trainloader) - if RANK in {-1, 0}: - pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT) - for i, (images, labels) in pbar: # progress bar - images, labels = images.to(device, non_blocking=True), labels.to(device) - - # Forward - with amp.autocast(enabled=cuda): # stability issues when enabled - loss = criterion(model(images), labels) - - # Backward - scaler.scale(loss).backward() - - # Optimize - scaler.unscale_(optimizer) # unscale gradients - 
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients - scaler.step(optimizer) - scaler.update() - optimizer.zero_grad() - if ema: - ema.update(model) - - if RANK in {-1, 0}: - # Print - tloss = (tloss * i + loss.item()) / (i + 1) # update mean losses - mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) - pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36 - - # Test - if i == len(pbar) - 1: # last batch - top1, top5, vloss = validate.run(model=ema.ema, - dataloader=testloader, - criterion=criterion, - pbar=pbar) # test accuracy, loss - fitness = top1 # define fitness as top1 accuracy - - # Scheduler - scheduler.step() - - # Log metrics - if RANK in {-1, 0}: - # Best fitness - if fitness > best_fitness: - best_fitness = fitness - - # Log - metrics = { - "train/loss": tloss, - f"{val}/loss": vloss, - "metrics/accuracy_top1": top1, - "metrics/accuracy_top5": top5, - "lr/0": optimizer.param_groups[0]['lr']} # learning rate - logger.log_metrics(metrics, epoch) - - # Save model - final_epoch = epoch + 1 == epochs - if (not opt.nosave) or final_epoch: - ckpt = { - 'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(ema.ema).half(), # deepcopy(de_parallel(model)).half(), - 'ema': None, # deepcopy(ema.ema).half(), - 'updates': ema.updates, - 'optimizer': None, # optimizer.state_dict(), - 'opt': vars(opt), - 'git': GIT_INFO, # {remote, branch, commit} if a git repo - 'date': datetime.now().isoformat()} - - # Save last, best and delete - torch.save(ckpt, last) - if best_fitness == fitness: - torch.save(ckpt, best) - del ckpt - - # Train complete - if RANK in {-1, 0} and final_epoch: - LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' - f"\nResults saved to {colorstr('bold', save_dir)}" - f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" - f"\nValidate: python classify/val.py --weights {best} --data {data_dir}" - f"\nExport: python export.py --weights {best} --include onnx" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" - f"\nVisualize: https://netron.app\n") - - # Plot examples - images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels - pred = torch.max(ema.ema(images.to(device)), 1)[1] - file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') - - # Log results - meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} - logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch) - logger.log_model(best, epochs, metadata=meta) - - -def parse_opt(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path') - parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...') - parser.add_argument('--epochs', type=int, default=10, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False') - parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer') - parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate') - parser.add_argument('--decay', type=float, default=5e-5, help='weight decay') - parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon') - parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head') - parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)') - parser.add_argument('--verbose', action='store_true', help='Verbose mode') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def main(opt): - # Checks - if RANK in {-1, 0}: - print_args(vars(opt)) - check_git_status() - check_requirements() - - # DDP mode - device = select_device(opt.device, batch_size=opt.batch_size) - if LOCAL_RANK != -1: - assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size' - assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - torch.cuda.set_device(LOCAL_RANK) - device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") - - # Parameters - opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run - - # Train - train(opt, device) - - -def run(**kwargs): - # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m') - opt = parse_opt(True) - for k, v in kwargs.items(): - setattr(opt, k, v) - main(opt) - return opt - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/classify/val.py b/classify/val.py deleted file mode 100644 index 8657036fb2a2..000000000000 --- a/classify/val.py +++ /dev/null @@ -1,170 +0,0 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license -""" -Validate a trained YOLOv5 classification model on a classification dataset - -Usage: - $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) - $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ImageNet - -Usage - formats: - $ python classify/val.py --weights yolov5s-cls.pt # PyTorch - yolov5s-cls.torchscript # TorchScript - yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-cls_openvino_model # OpenVINO - yolov5s-cls.engine # TensorRT - yolov5s-cls.mlmodel # CoreML (macOS-only) - yolov5s-cls_saved_model # TensorFlow SavedModel - yolov5s-cls.pb # TensorFlow GraphDef - yolov5s-cls.tflite # TensorFlow Lite - yolov5s-cls_edgetpu.tflite # 
TensorFlow Edge TPU - yolov5s-cls_paddle_model # PaddlePaddle -""" - -import argparse -import os -import sys -from pathlib import Path - -import torch -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import DetectMultiBackend -from utils.dataloaders import create_classification_dataloader -from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr, - increment_path, print_args) -from utils.torch_utils import select_device, smart_inference_mode - - -@smart_inference_mode() -def run( - data=ROOT / '../datasets/mnist', # dataset dir - weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - batch_size=128, # batch size - imgsz=224, # inference size (pixels) - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - workers=8, # max dataloader workers (per RANK in DDP mode) - verbose=False, # verbose output - project=ROOT / 'runs/val-cls', # save to project/name - name='exp', # save to project/name - exist_ok=False, # existing project/name ok, do not increment - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - model=None, - dataloader=None, - criterion=None, - pbar=None, -): - # Initialize/load model and set device - training = model is not None - if training: # called by train.py - device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model - half &= device.type != 'cpu' # half precision only supported on CUDA - model.half() if half else model.float() - else: # called directly - device = select_device(device, batch_size=batch_size) - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - save_dir.mkdir(parents=True, exist_ok=True) # make dir - - # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) - stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine - imgsz = check_img_size(imgsz, s=stride) # check image size - half = model.fp16 # FP16 supported on limited backends with CUDA - if engine: - batch_size = model.batch_size - else: - device = model.device - if not (pt or jit): - batch_size = 1 # export.py models default to batch-size 1 - LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') - - # Dataloader - data = Path(data) - test_dir = data / 'test' if (data / 'test').exists() else data / 'val' # data/test or data/val - dataloader = create_classification_dataloader(path=test_dir, - imgsz=imgsz, - batch_size=batch_size, - augment=False, - rank=-1, - workers=workers) - - model.eval() - pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) - n = len(dataloader) # number of batches - action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' - desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" - bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) - with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): - for images, labels in bar: - with dt[0]: - images, labels = images.to(device, non_blocking=True), labels.to(device) - - with dt[1]: - y = model(images) - - with dt[2]: - pred.append(y.argsort(1, descending=True)[:, :5]) - targets.append(labels) - if criterion: - loss += criterion(y, labels) - - loss /= n - 
pred, targets = torch.cat(pred), torch.cat(targets)
-    correct = (targets[:, None] == pred).float()
-    acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) accuracy
-    top1, top5 = acc.mean(0).tolist()
-
-    if pbar:
-        pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}"
-    if verbose:  # all classes
-        LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
-        LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
-        for i, c in model.names.items():
-            aci = acc[targets == i]
-            top1i, top5i = aci.mean(0).tolist()
-            LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}")
-
-    # Print results
-    t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt)  # speeds per image
-    shape = (1, 3, imgsz, imgsz)
-    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t)
-    LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
-
-    return top1, top5, loss
-
-
-def parse_opt():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path')
-    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)')
-    parser.add_argument('--batch-size', type=int, default=128, help='batch size')
-    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)')
-    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
-    parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output')
-    parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name')
-    parser.add_argument('--name', default='exp', help='save to project/name')
-    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
-    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
-    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
-    opt = parser.parse_args()
-    print_args(vars(opt))
-    return opt
-
-
-def main(opt):
-    check_requirements(exclude=('tensorboard', 'thop'))
-    run(**vars(opt))
-
-
-if __name__ == "__main__":
-    opt = parse_opt()
-    main(opt)
diff --git a/detect.py b/detect.py
deleted file mode 100644
index 2d13401f78bd..000000000000
--- a/detect.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
- -Usage - sources: - $ python detect.py --weights yolov5s.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - screen # screenshot - path/ # directory - list.txt # list of images - list.streams # list of streams - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream - -Usage - formats: - $ python detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s_openvino_model # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s_paddle_model # PaddlePaddle -""" - -import argparse -import os -import platform -import sys -from pathlib import Path - -import torch - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import DetectMultiBackend -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams -from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) -from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import select_device, smart_inference_mode - - -@smart_inference_mode() -def run( - weights=ROOT / 'yolov5s.pt', # model path or triton URL - source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - imgsz=(640, 640), # inference size (height, width) - conf_thres=0.25, # confidence threshold - iou_thres=0.45, # NMS IOU threshold - max_det=1000, # maximum detections per image - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - view_img=False, # show results - save_txt=False, # save results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_crop=False, # save cropped prediction boxes - nosave=False, # do not save images/videos - classes=None, # filter by class: --class 0, or --class 0 2 3 - agnostic_nms=False, # class-agnostic NMS - augment=False, # augmented inference - visualize=False, # visualize features - update=False, # update all models - project=ROOT / 'runs/detect', # save results to project/name - name='exp', # save results to project/name - exist_ok=False, # existing project/name ok, do not increment - line_thickness=3, # bounding box thickness (pixels) - hide_labels=False, # hide labels - hide_conf=False, # hide confidences - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - vid_stride=1, # video frame-rate stride -): - source = str(source) - save_img = not nosave and not source.endswith('.txt') # save inference images - is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) - is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) - screenshot = source.lower().startswith('screen') - if is_url and is_file: - source = check_file(source) # download - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Load model - device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) - stride, names, pt = model.stride, model.names, model.pt - imgsz = check_img_size(imgsz, s=stride) # check image size - - # Dataloader - bs = 1 # batch_size - if webcam: - view_img = check_imshow(warn=True) - dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) - elif screenshot: - dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) - else: - dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - vid_path, vid_writer = [None] * bs, [None] * bs - - # Run inference - model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) - for path, im, im0s, vid_cap, s in dataset: - with dt[0]: - im = torch.from_numpy(im).to(model.device) - im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - if len(im.shape) == 3: - im = im[None] # expand for batch dim - - # Inference - with dt[1]: - visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False - pred = model(im, augment=augment, visualize=visualize) - - # NMS - with dt[2]: - pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) - - # Second-stage classifier (optional) - # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) - - # Process predictions - for i, det in enumerate(pred): # per image - seen += 1 - if webcam: # batch_size >= 1 - p, im0, frame = path[i], im0s[i].copy(), dataset.count - s += f'{i}: ' - else: - p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) - - p = Path(p) # to Path - save_path = str(save_dir / p.name) # im.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') 
# im.txt - s += '%gx%g ' % im.shape[2:] # print string - gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh - imc = im0.copy() if save_crop else im0 # for save_crop - annotator = Annotator(im0, line_width=line_thickness, example=str(names)) - if len(det): - # Rescale boxes from img_size to im0 size - det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() - - # Print results - for c in det[:, 5].unique(): - n = (det[:, 5] == c).sum() # detections per class - s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string - - # Write results - for *xyxy, conf, cls in reversed(det): - if save_txt: # Write to file - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(f'{txt_path}.txt', 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - if save_img or save_crop or view_img: # Add bbox to image - c = int(cls) # integer class - label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') - annotator.box_label(xyxy, label, color=colors(c, True)) - if save_crop: - save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) - - # Stream results - im0 = annotator.result() - if view_img: - if platform.system() == 'Linux' and p not in windows: - windows.append(p) - cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) - cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) - cv2.imshow(str(p), im0) - cv2.waitKey(1) # 1 millisecond - - # Save results (image with detections) - if save_img: - if dataset.mode == 'image': - cv2.imwrite(save_path, im0) - else: # 'video' or 'stream' - if vid_path[i] != save_path: # new video - vid_path[i] = save_path - if isinstance(vid_writer[i], cv2.VideoWriter): - vid_writer[i].release() # release previous video writer - if vid_cap: # video - fps = vid_cap.get(cv2.CAP_PROP_FPS) - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - else: # stream - fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos - vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - vid_writer[i].write(im0) - - # Print time (inference-only) - LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") - - # Print results - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) - if save_txt or save_img: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") - if update: - strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') - parser.add_argument('--conf-thres', type=float, 
default=0.25, help='confidence threshold')
-    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
-    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
-    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-    parser.add_argument('--view-img', action='store_true', help='show results')
-    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
-    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
-    parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
-    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
-    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
-    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
-    parser.add_argument('--augment', action='store_true', help='augmented inference')
-    parser.add_argument('--visualize', action='store_true', help='visualize features')
-    parser.add_argument('--update', action='store_true', help='update all models')
-    parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
-    parser.add_argument('--name', default='exp', help='save results to project/name')
-    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
-    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
-    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
-    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
-    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
-    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
-    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
-    opt = parser.parse_args()
-    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
-    print_args(vars(opt))
-    return opt
-
-
-def main(opt):
-    check_requirements(exclude=('tensorboard', 'thop'))
-    run(**vars(opt))
-
-
-if __name__ == "__main__":
-    opt = parse_opt()
-    main(opt)
diff --git a/export.py b/export.py
deleted file mode 100644
index 7910178b2338..000000000000
--- a/export.py
+++ /dev/null
@@ -1,653 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Export a YOLOv5 PyTorch model to other formats.
TensorFlow exports authored by https://github.com/zldrobit - -Format | `export.py --include` | Model ---- | --- | --- -PyTorch | - | yolov5s.pt -TorchScript | `torchscript` | yolov5s.torchscript -ONNX | `onnx` | yolov5s.onnx -OpenVINO | `openvino` | yolov5s_openvino_model/ -TensorRT | `engine` | yolov5s.engine -CoreML | `coreml` | yolov5s.mlmodel -TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ -TensorFlow GraphDef | `pb` | yolov5s.pb -TensorFlow Lite | `tflite` | yolov5s.tflite -TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite -TensorFlow.js | `tfjs` | yolov5s_web_model/ -PaddlePaddle | `paddle` | yolov5s_paddle_model/ - -Requirements: - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU - -Usage: - $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... - -Inference: - $ python detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s_openvino_model # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s_paddle_model # PaddlePaddle - -TensorFlow.js: - $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example - $ npm install - $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model - $ npm start -""" - -import argparse -import contextlib -import json -import os -import platform -import re -import subprocess -import sys -import time -import warnings -from pathlib import Path - -import pandas as pd -import torch -from torch.utils.mobile_optimizer import optimize_for_mobile - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -if platform.system() != 'Windows': - ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.experimental import attempt_load -from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel -from utils.dataloaders import LoadImages -from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, - check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) -from utils.torch_utils import select_device, smart_inference_mode - -MACOS = platform.system() == 'Darwin' # macOS environment - - -def export_formats(): - # YOLOv5 export formats - x = [ - ['PyTorch', '-', '.pt', True, True], - ['TorchScript', 'torchscript', '.torchscript', True, True], - ['ONNX', 'onnx', '.onnx', True, True], - ['OpenVINO', 'openvino', '_openvino_model', True, False], - ['TensorRT', 'engine', '.engine', False, True], - ['CoreML', 'coreml', '.mlmodel', True, False], - ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], - ['TensorFlow GraphDef', 'pb', '.pb', True, True], - ['TensorFlow Lite', 'tflite', '.tflite', True, False], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], - ['TensorFlow.js', 'tfjs', '_web_model', False, False], - ['PaddlePaddle', 'paddle', '_paddle_model', True, True],] - return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) - - -def 
try_export(inner_func):
-    # YOLOv5 export decorator, i.e. @try_export
-    inner_args = get_default_args(inner_func)
-
-    def outer_func(*args, **kwargs):
-        prefix = inner_args['prefix']
-        try:
-            with Profile() as dt:
-                f, model = inner_func(*args, **kwargs)
-            LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
-            return f, model
-        except Exception as e:
-            LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
-            return None, None
-
-    return outer_func
-
-
-@try_export
-def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
-    # YOLOv5 TorchScript model export
-    LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
-    f = file.with_suffix('.torchscript')
-
-    ts = torch.jit.trace(model, im, strict=False)
-    d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
-    extra_files = {'config.txt': json.dumps(d)}  # torch._C.ExtraFilesMap()
-    if optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
-        optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
-    else:
-        ts.save(str(f), _extra_files=extra_files)
-    return f, None
-
-
-@try_export
-def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
-    # YOLOv5 ONNX export
-    check_requirements('onnx>=1.12.0')
-    import onnx
-
-    LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
-    f = file.with_suffix('.onnx')
-
-    output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
-    if dynamic:
-        dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}}  # shape(1,3,640,640)
-        if isinstance(model, SegmentationModel):
-            dynamic['output0'] = {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)
-            dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'}  # shape(1,32,160,160)
-        elif isinstance(model, DetectionModel):
-            dynamic['output0'] = {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)
-
-    torch.onnx.export(
-        model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
-        im.cpu() if dynamic else im,
-        f,
-        verbose=False,
-        opset_version=opset,
-        do_constant_folding=True,  # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
-        input_names=['images'],
-        output_names=output_names,
-        dynamic_axes=dynamic or None)
-
-    # Checks
-    model_onnx = onnx.load(f)  # load onnx model
-    onnx.checker.check_model(model_onnx)  # check onnx model
-
-    # Metadata
-    d = {'stride': int(max(model.stride)), 'names': model.names}
-    for k, v in d.items():
-        meta = model_onnx.metadata_props.add()
-        meta.key, meta.value = k, str(v)
-    onnx.save(model_onnx, f)
-
-    # Simplify
-    if simplify:
-        try:
-            cuda = torch.cuda.is_available()
-            check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
-            import onnxsim
-
-            LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
-            model_onnx, check = onnxsim.simplify(model_onnx)
-            assert check, 'assert check failed'
-            onnx.save(model_onnx, f)
-        except Exception as e:
-            LOGGER.info(f'{prefix} simplifier failure: {e}')
-    return f, model_onnx
-
-
-@try_export
-def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
-    # YOLOv5 OpenVINO export
-    check_requirements('openvino-dev')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-    import openvino.inference_engine as ie
-
-    LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
-    f = str(file).replace('.pt', 
f'_openvino_model{os.sep}') - - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.run(cmd.split(), check=True, env=os.environ) # export - yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml - return f, None - - -@try_export -def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): - # YOLOv5 Paddle export - check_requirements(('paddlepaddle', 'x2paddle')) - import x2paddle - from x2paddle.convert import pytorch2paddle - - LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') - f = str(file).replace('.pt', f'_paddle_model{os.sep}') - - pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export - yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml - return f, None - - -@try_export -def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): - # YOLOv5 CoreML export - check_requirements('coremltools') - import coremltools as ct - - LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') - f = file.with_suffix('.mlmodel') - - ts = torch.jit.trace(model, im, strict=False) # TorchScript model - ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) - bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) - if bits < 32: - if MACOS: # quantization only supported on macOS - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning - ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) - else: - print(f'{prefix} quantization only supported on macOS, skipping...') - ct_model.save(f) - return f, ct_model - - -@try_export -def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): - # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt - assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. 
`python export.py --device 0`' - try: - import tensorrt as trt - except Exception: - if platform.system() == 'Linux': - check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') - import tensorrt as trt - - if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 - grid = model.model[-1].anchor_grid - model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 - model.model[-1].anchor_grid = grid - else: # TensorRT >= 8 - check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 - onnx = file.with_suffix('.onnx') - - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' - f = file.with_suffix('.engine') # TensorRT engine file - logger = trt.Logger(trt.Logger.INFO) - if verbose: - logger.min_severity = trt.Logger.Severity.VERBOSE - - builder = trt.Builder(logger) - config = builder.create_builder_config() - config.max_workspace_size = workspace * 1 << 30 - # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice - - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) - network = builder.create_network(flag) - parser = trt.OnnxParser(network, logger) - if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') - - inputs = [network.get_input(i) for i in range(network.num_inputs)] - outputs = [network.get_output(i) for i in range(network.num_outputs)] - for inp in inputs: - LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}') - for out in outputs: - LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}') - - if dynamic: - if im.shape[0] <= 1: - LOGGER.warning(f"{prefix} WARNING โš ๏ธ --dynamic model requires maximum --batch-size argument") - profile = builder.create_optimization_profile() - for inp in inputs: - profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) - config.add_optimization_profile(profile) - - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}') - if builder.platform_has_fast_fp16 and half: - config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, 'wb') as t: - t.write(engine.serialize()) - return f, None - - -@try_export -def export_saved_model(model, - im, - file, - dynamic, - tf_nms=False, - agnostic_nms=False, - topk_per_class=100, - topk_all=100, - iou_thres=0.45, - conf_thres=0.25, - keras=False, - prefix=colorstr('TensorFlow SavedModel:')): - # YOLOv5 TensorFlow SavedModel export - try: - import tensorflow as tf - except Exception: - check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}") - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - from models.tf import TFModel - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = str(file).replace('.pt', '_saved_model') - batch_size, ch, *imgsz = list(im.shape) # BCHW - - tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) - im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow - _ = tf_model.predict(im, tf_nms, agnostic_nms, 
topk_per_class, topk_all, iou_thres, conf_thres) - inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) - outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) - keras_model.trainable = False - keras_model.summary() - if keras: - keras_model.save(f, save_format='tf') - else: - spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(spec) - frozen_func = convert_variables_to_constants_v2(m) - tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec]) - tfm.__call__(im) - tf.saved_model.save(tfm, - f, - options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version( - tf.__version__, '2.6') else tf.saved_model.SaveOptions()) - return f, keras_model - - -@try_export -def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): - # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = file.with_suffix('.pb') - - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) - frozen_func = convert_variables_to_constants_v2(m) - frozen_func.graph.as_graph_def() - tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) - return f, None - - -@try_export -def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): - # YOLOv5 TensorFlow Lite export - import tensorflow as tf - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - batch_size, ch, *imgsz = list(im.shape) # BCHW - f = str(file).replace('.pt', '-fp16.tflite') - - converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - converter.target_spec.supported_types = [tf.float16] - converter.optimizations = [tf.lite.Optimize.DEFAULT] - if int8: - from models.tf import representative_dataset_gen - dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False) - converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.target_spec.supported_types = [] - converter.inference_input_type = tf.uint8 # or tf.int8 - converter.inference_output_type = tf.uint8 # or tf.int8 - converter.experimental_new_quantizer = True - f = str(file).replace('.pt', '-int8.tflite') - if nms or agnostic_nms: - converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) - - tflite_model = converter.convert() - open(f, "wb").write(tflite_model) - return f, None - - -@try_export -def export_edgetpu(file, prefix=colorstr('Edge TPU:')): - # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ - cmd = 'edgetpu_compiler --version' - help_url = 'https://coral.ai/docs/edgetpu/compiler/' - assert platform.system() == 'Linux', f'export only supported on Linux. 
See {help_url}' - if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: - LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') - sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system - for c in ( - 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', - 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', - 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): - subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) - ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] - - LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') - f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model - f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - - cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" - subprocess.run(cmd.split(), check=True) - return f, None - - -@try_export -def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): - # YOLOv5 TensorFlow.js export - check_requirements('tensorflowjs') - import tensorflowjs as tfjs - - LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') - f = str(file).replace('.pt', '_web_model') # js dir - f_pb = file.with_suffix('.pb') # *.pb path - f_json = f'{f}/model.json' # *.json path - - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ - f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) - - json = Path(f_json).read_text() - with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order - subst = re.sub( - r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' - r'"Identity_1": {"name": "Identity_1"}, ' - r'"Identity_2": {"name": "Identity_2"}, ' - r'"Identity_3": {"name": "Identity_3"}}}', json) - j.write(subst) - return f, None - - -def add_tflite_metadata(file, metadata, num_outputs): - # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata - with contextlib.suppress(ImportError): - # check_requirements('tflite_support') - from tflite_support import flatbuffers - from tflite_support import metadata as _metadata - from tflite_support import metadata_schema_py_generated as _metadata_fb - - tmp_file = Path('/tmp/meta.txt') - with open(tmp_file, 'w') as meta_f: - meta_f.write(str(metadata)) - - model_meta = _metadata_fb.ModelMetadataT() - label_file = _metadata_fb.AssociatedFileT() - label_file.name = tmp_file.name - model_meta.associatedFiles = [label_file] - - subgraph = _metadata_fb.SubGraphMetadataT() - subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()] - subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs - model_meta.subgraphMetadata = [subgraph] - - b = flatbuffers.Builder(0) - b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER) - metadata_buf = b.Output() - - populator = _metadata.MetadataPopulator.with_model_file(file) - populator.load_metadata_buffer(metadata_buf) - populator.load_associated_files([str(tmp_file)]) - populator.populate() - tmp_file.unlink() - - 
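For reference, a minimal sketch (not part of this diff) of how the metadata written by add_tflite_metadata() above could be read back for verification. It assumes tflite_support is installed; the model path is a hypothetical exported file:

# Read back TFLite metadata written by add_tflite_metadata() (sketch, hypothetical path)
from tflite_support import metadata as _metadata

displayer = _metadata.MetadataDisplayer.with_model_file('yolov5s-fp16.tflite')  # hypothetical exported model
print(displayer.get_metadata_json())  # model metadata flatbuffer as JSON, referencing the packed meta.txt
print(displayer.get_packed_associated_file_list())  # e.g. ['meta.txt'] containing {'stride': ..., 'names': ...}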
-@smart_inference_mode() -def run( - data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=(640, 640), # image (height, width) - batch_size=1, # batch size - device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx'), # include formats - half=False, # FP16 half-precision export - inplace=False, # set YOLOv5 Detect() inplace=True - keras=False, # use Keras - optimize=False, # TorchScript: optimize for mobile - int8=False, # CoreML/TF INT8 quantization - dynamic=False, # ONNX/TF/TensorRT: dynamic axes - simplify=False, # ONNX: simplify model - opset=12, # ONNX: opset version - verbose=False, # TensorRT: verbose log - workspace=4, # TensorRT: workspace size (GB) - nms=False, # TF: add NMS to model - agnostic_nms=False, # TF: add agnostic NMS to model - topk_per_class=100, # TF.js NMS: topk per class to keep - topk_all=100, # TF.js NMS: topk for all classes to keep - iou_thres=0.45, # TF.js NMS: IoU threshold - conf_thres=0.25, # TF.js NMS: confidence threshold -): - t = time.time() - include = [x.lower() for x in include] # to lowercase - fmts = tuple(export_formats()['Argument'][1:]) # --include arguments - flags = [x in include for x in fmts] - assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' - jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans - file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights - - # Load PyTorch model - device = select_device(device) - if half: - assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' - assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both' - model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model - - # Checks - imgsz *= 2 if len(imgsz) == 1 else 1 # expand - if optimize: - assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. 
use --device cpu' - - # Input - gs = int(max(model.stride)) # grid size (max stride) - imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples - im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection - - # Update model - model.eval() - for k, m in model.named_modules(): - if isinstance(m, Detect): - m.inplace = inplace - m.dynamic = dynamic - m.export = True - - for _ in range(2): - y = model(im) # dry runs - if half and not coreml: - im, model = im.half(), model.half() # to FP16 - shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape - metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata - LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") - - # Exports - f = [''] * len(fmts) # exported filenames - warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning - if jit: # TorchScript - f[0], _ = export_torchscript(model, im, file, optimize) - if engine: # TensorRT required before ONNX - f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) - if onnx or xml: # OpenVINO requires ONNX - f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify) - if xml: # OpenVINO - f[3], _ = export_openvino(file, metadata, half) - if coreml: # CoreML - f[4], _ = export_coreml(model, im, file, int8, half) - if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats - assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' - assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' - f[5], s_model = export_saved_model(model.cpu(), - im, - file, - dynamic, - tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, - topk_per_class=topk_per_class, - topk_all=topk_all, - iou_thres=iou_thres, - conf_thres=conf_thres, - keras=keras) - if pb or tfjs: # pb prerequisite to tfjs - f[6], _ = export_pb(s_model, file) - if tflite or edgetpu: - f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) - if edgetpu: - f[8], _ = export_edgetpu(file) - add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) - if tfjs: - f[9], _ = export_tfjs(file) - if paddle: # PaddlePaddle - f[10], _ = export_paddle(model, im, file, metadata) - - # Finish - f = [str(x) for x in f if x] # filter out '' and None - if any(f): - cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type - det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel) - dir = Path('segment' if seg else 'classify' if cls else '') - h = '--half' if half else '' # --half FP16 inference arg - s = "# WARNING โš ๏ธ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ - "# WARNING โš ๏ธ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else '' - LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' - f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" - f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" - f"\nVisualize: https://netron.app") - return f # return 
list of exported files/dirs - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--half', action='store_true', help='FP16 half-precision export') - parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--keras', action='store_true', help='TF: use Keras') - parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') - parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') - parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') - parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version') - parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') - parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') - parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') - parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') - parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') - parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') - parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') - parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') - parser.add_argument( - '--include', - nargs='+', - default=['torchscript'], - help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') - opt = parser.parse_args() - print_args(vars(opt)) - return opt - - -def main(opt): - for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/hubconf.py b/hubconf.py deleted file mode 100644 index 41af8e39d14d..000000000000 --- a/hubconf.py +++ /dev/null @@ -1,169 +0,0 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license -""" -PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 - -Usage: - import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model - model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch - model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model - model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo -""" - -import torch - - -def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): - """Creates or loads a YOLOv5 model - - Arguments: - name (str): model name 'yolov5s' or path 'path/to/best.pt' - pretrained (bool): load pretrained weights into the model - channels (int): number of input channels - classes (int): number of model classes - autoshape (bool): apply YOLOv5 
.autoshape() wrapper to model - verbose (bool): print all information to screen - device (str, torch.device, None): device to use for model parameters - - Returns: - YOLOv5 model - """ - from pathlib import Path - - from models.common import AutoShape, DetectMultiBackend - from models.experimental import attempt_load - from models.yolo import ClassificationModel, DetectionModel, SegmentationModel - from utils.downloads import attempt_download - from utils.general import LOGGER, check_requirements, intersect_dicts, logging - from utils.torch_utils import select_device - - if not verbose: - LOGGER.setLevel(logging.WARNING) - check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) - name = Path(name) - path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path - try: - device = select_device(device) - if pretrained and channels == 3 and classes == 80: - try: - model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model - if autoshape: - if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING โš ๏ธ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' - 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') - elif model.pt and isinstance(model.model, SegmentationModel): - LOGGER.warning('WARNING โš ๏ธ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' - 'You will not be able to run inference with this model.') - else: - model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS - except Exception: - model = attempt_load(path, device=device, fuse=False) # arbitrary model - else: - cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path - model = DetectionModel(cfg, channels, classes) # create model - if pretrained: - ckpt = torch.load(attempt_download(path), map_location=device) # load - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect - model.load_state_dict(csd, strict=False) # load - if len(ckpt['model'].names) == classes: - model.names = ckpt['model'].names # set class names attribute - if not verbose: - LOGGER.setLevel(logging.INFO) # reset to default - return model.to(device) - - except Exception as e: - help_url = 'https://github.com/ultralytics/yolov5/issues/36' - s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' 
- raise Exception(s) from e - - -def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None): - # YOLOv5 custom or local model - return _create(path, autoshape=autoshape, verbose=_verbose, device=device) - - -def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-nano model https://github.com/ultralytics/yolov5 - return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-small model https://github.com/ultralytics/yolov5 - return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-large model https://github.com/ultralytics/yolov5 - return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device) - - -if __name__ == '__main__': - import argparse - from pathlib import Path - - import numpy as np - from PIL import Image - - from utils.general import cv2, print_args - - # Argparser - parser = argparse.ArgumentParser() - parser.add_argument('--model', type=str, default='yolov5s', help='model name') - opt = parser.parse_args() - print_args(vars(opt)) - - # Model - model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) - # model = custom(path='path/to/model.pt') # custom - - # Images - imgs = [ - 'data/images/zidane.jpg', # filename - Path('data/images/zidane.jpg'), # Path - 'https://ultralytics.com/images/zidane.jpg', # URI - cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV - Image.open('data/images/bus.jpg'), # PIL 
- np.zeros((320, 640, 3))] # numpy - - # Inference - results = model(imgs, size=320) # batched inference - - # Results - results.print() - results.save() diff --git a/models/common.py b/models/common.py index 8b5ec1c786d8..c89996c0148c 100644 --- a/models/common.py +++ b/models/common.py @@ -25,13 +25,13 @@ from PIL import Image from torch.cuda import amp -from utils import TryExcept -from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, +from ..utils import TryExcept +from ..utils.dataloaders import exif_transpose, letterbox +from ..utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, increment_path, is_notebook, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load) -from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import copy_attr, smart_inference_mode +from ..utils.plots import Annotator, colors, save_one_box +from ..utils.torch_utils import copy_attr, smart_inference_mode def autopad(k, p=None, d=1): # kernel, padding, dilation @@ -489,7 +489,7 @@ def gd_outputs(gd): elif triton: # NVIDIA Triton Inference Server LOGGER.info(f'Using {w} as Triton Inference Server...') check_requirements('tritonclient[all]') - from utils.triton import TritonRemoteModel + from ..utils.triton import TritonRemoteModel model = TritonRemoteModel(url=w) nhwc = model.runtime.startswith("tensorflow") else: @@ -601,7 +601,7 @@ def _model_type(p='path/to/model.pt'): # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle] from export import export_formats - from utils.downloads import is_url + from ..utils.downloads import is_url sf = list(export_formats().Suffix) # export suffixes if not is_url(p, check=False): check_suffix(p, sf) # checks diff --git a/models/experimental.py b/models/experimental.py index 02d35b9ebd11..ad7033a53980 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -8,7 +8,7 @@ import torch import torch.nn as nn -from utils.downloads import attempt_download +from ..utils.downloads import attempt_download class Sum(nn.Module): diff --git a/models/tf.py b/models/tf.py index 3f3dc8dbe7e7..abcc9a1bcf86 100644 --- a/models/tf.py +++ b/models/tf.py @@ -27,12 +27,12 @@ import torch.nn as nn from tensorflow import keras -from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, +from .common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, DWConvTranspose2d, Focus, autopad) -from models.experimental import MixConv2d, attempt_load -from models.yolo import Detect, Segment -from utils.activations import SiLU -from utils.general import LOGGER, make_divisible, print_args +from .experimental import MixConv2d, attempt_load +from .yolo import Detect, Segment +from ..utils.activations import SiLU +from ..utils.general import LOGGER, make_divisible, print_args class TFBN(keras.layers.Layer): diff --git a/models/yolo.py b/models/yolo.py index ed21c067ee93..96e070a09483 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -21,12 +21,12 @@ if platform.system() != 'Windows': ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from models.common import * -from models.experimental import * -from utils.autoanchor import check_anchor_order -from utils.general import LOGGER, 
check_version, check_yaml, make_divisible, print_args -from utils.plots import feature_visualization -from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, +from .common import * +from .experimental import * +from ..utils.autoanchor import check_anchor_order +from ..utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args +from ..utils.plots import feature_visualization +from ..utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, time_sync) try: diff --git a/segment/predict.py b/segment/predict.py deleted file mode 100644 index e9093baa1cc7..000000000000 --- a/segment/predict.py +++ /dev/null @@ -1,284 +0,0 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license -""" -Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. - -Usage - sources: - $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - screen # screenshot - path/ # directory - list.txt # list of images - list.streams # list of streams - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream - -Usage - formats: - $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch - yolov5s-seg.torchscript # TorchScript - yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-seg_openvino_model # OpenVINO - yolov5s-seg.engine # TensorRT - yolov5s-seg.mlmodel # CoreML (macOS-only) - yolov5s-seg_saved_model # TensorFlow SavedModel - yolov5s-seg.pb # TensorFlow GraphDef - yolov5s-seg.tflite # TensorFlow Lite - yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU - yolov5s-seg_paddle_model # PaddlePaddle -""" - -import argparse -import os -import platform -import sys -from pathlib import Path - -import torch - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import DetectMultiBackend -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams -from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, - strip_optimizer) -from utils.plots import Annotator, colors, save_one_box -from utils.segment.general import masks2segments, process_mask, process_mask_native -from utils.torch_utils import select_device, smart_inference_mode - - -@smart_inference_mode() -def run( - weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - imgsz=(640, 640), # inference size (height, width) - conf_thres=0.25, # confidence threshold - iou_thres=0.45, # NMS IOU threshold - max_det=1000, # maximum detections per image - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - view_img=False, # show results - save_txt=False, # save results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_crop=False, # save cropped prediction boxes - nosave=False, # do not save images/videos - classes=None, # filter by class: --class 0, or --class 0 2 3 - agnostic_nms=False, # class-agnostic NMS - augment=False, # augmented inference - visualize=False, # visualize features - update=False, # update all models - project=ROOT / 'runs/predict-seg', # save results to project/name - name='exp', # save results to project/name - exist_ok=False, # existing project/name ok, do not increment - line_thickness=3, # bounding box thickness (pixels) - hide_labels=False, # hide labels - hide_conf=False, # hide confidences - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - vid_stride=1, # video frame-rate stride - retina_masks=False, -): - source = str(source) - save_img = not nosave and not source.endswith('.txt') # save inference images - is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) - is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) - screenshot = source.lower().startswith('screen') - if is_url and is_file: - source = check_file(source) # download - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Load model - device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) - stride, names, pt = model.stride, model.names, model.pt - imgsz = check_img_size(imgsz, s=stride) # check image size - - # Dataloader - bs = 1 # batch_size - if webcam: - view_img = check_imshow(warn=True) - dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) - elif screenshot: - dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) - else: - dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - vid_path, vid_writer = [None] * bs, [None] * bs - - # Run inference - model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) - for path, im, im0s, vid_cap, s in dataset: - with dt[0]: - im = torch.from_numpy(im).to(model.device) - im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - if len(im.shape) == 3: - im = im[None] # expand for batch dim - - # Inference - with dt[1]: - visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False - pred, proto = model(im, augment=augment, visualize=visualize)[:2] - - # NMS - with dt[2]: - pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) - - # Second-stage classifier (optional) - # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) - - # Process predictions - for i, det in enumerate(pred): # per image - seen += 1 - if webcam: # batch_size >= 1 - p, im0, frame = path[i], im0s[i].copy(), dataset.count - s += f'{i}: ' - else: - p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) - - p = Path(p) # to Path - save_path = str(save_dir / p.name) # im.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode 
== 'image' else f'_{frame}')  # im.txt
-            s += '%gx%g ' % im.shape[2:]  # print string
-            imc = im0.copy() if save_crop else im0  # for save_crop
-            annotator = Annotator(im0, line_width=line_thickness, example=str(names))
-            if len(det):
-                if retina_masks:
-                    # scale bbox first, then crop masks
-                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size
-                    masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2])  # HWC
-                else:
-                    masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True)  # HWC
-                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size
-
-                # Segments
-                if save_txt:
-                    segments = [
-                        scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True)
-                        for x in reversed(masks2segments(masks))]
-
-                # Print results
-                for c in det[:, 5].unique():
-                    n = (det[:, 5] == c).sum()  # detections per class
-                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
-
-                # Mask plotting
-                annotator.masks(
-                    masks,
-                    colors=[colors(x, True) for x in det[:, 5]],
-                    im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() /
-                    255 if retina_masks else im[i])
-
-                # Write results
-                for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])):
-                    if save_txt:  # Write to file
-                        seg = segments[j].reshape(-1)  # (n,2) to (n*2)
-                        line = (cls, *seg, conf) if save_conf else (cls, *seg)  # label format
-                        with open(f'{txt_path}.txt', 'a') as f:
-                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
-
-                    if save_img or save_crop or view_img:  # Add bbox to image
-                        c = int(cls)  # integer class
-                        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
-                        annotator.box_label(xyxy, label, color=colors(c, True))
-                        # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3)
-                    if save_crop:
-                        save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
-
-            # Stream results
-            im0 = annotator.result()
-            if view_img:
-                if platform.system() == 'Linux' and p not in windows:
-                    windows.append(p)
-                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
-                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
-                cv2.imshow(str(p), im0)
-                if cv2.waitKey(1) == ord('q'):  # 1 millisecond
-                    exit()
-
-            # Save results (image with detections)
-            if save_img:
-                if dataset.mode == 'image':
-                    cv2.imwrite(save_path, im0)
-                else:  # 'video' or 'stream'
-                    if vid_path[i] != save_path:  # new video
-                        vid_path[i] = save_path
-                        if isinstance(vid_writer[i], cv2.VideoWriter):
-                            vid_writer[i].release()  # release previous video writer
-                        if vid_cap:  # video
-                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
-                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-                        else:  # stream
-                            fps, w, h = 30, im0.shape[1], im0.shape[0]
-                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
-                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
-                    vid_writer[i].write(im0)
-
-        # Print time (inference-only)
-        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
-
-    # Print results
-    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
-    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
-    if save_txt or save_img:
-        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir
/ 'labels'}" if save_txt else '' - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") - if update: - strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') - parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') - parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') - parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') - parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--visualize', action='store_true', help='visualize features') - parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save results to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') - parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') - parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') - parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution') - opt = parser.parse_args() - opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand - print_args(vars(opt)) - return opt - - -def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/segment/train.py b/segment/train.py deleted file mode 100644 index 3f32d2100a75..000000000000 --- a/segment/train.py +++ /dev/null @@ -1,658 +0,0 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license -""" -Train a YOLOv5 segment model on a segment dataset -Models and datasets download 
automatically from the latest YOLOv5 release. - -Usage - Single-GPU training: - $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # from pretrained (recommended) - $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640 # from scratch - -Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 - -Models: https://github.com/ultralytics/yolov5/tree/master/models -Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data -""" - -import argparse -import math -import os -import random -import sys -import time -from copy import deepcopy -from datetime import datetime -from pathlib import Path - -import numpy as np -import torch -import torch.distributed as dist -import torch.nn as nn -import yaml -from torch.optim import lr_scheduler -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -import segment.val as validate # for end-of-epoch mAP -from models.experimental import attempt_load -from models.yolo import SegmentationModel -from utils.autoanchor import check_anchors -from utils.autobatch import check_train_batch_size -from utils.callbacks import Callbacks -from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, - check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, - get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, - labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) -from utils.loggers import GenericLogger -from utils.plots import plot_evolve, plot_labels -from utils.segment.dataloaders import create_dataloader -from utils.segment.loss import ComputeLoss -from utils.segment.metrics import KEYS, fitness -from utils.segment.plots import plot_images_and_masks, plot_results_with_masks -from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, - smart_resume, torch_distributed_zero_first) - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) -GIT_INFO = check_git_info() - - -def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary - save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ - opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio - # callbacks.run('on_pretrain_routine_start') - - # Directories - w = save_dir / 'weights' # weights dir - (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir - last, best = w / 'last.pt', w / 'best.pt' - - # Hyperparameters - if isinstance(hyp, str): - with open(hyp, errors='ignore') as f: - hyp = yaml.safe_load(f) # load hyps dict - LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) - opt.hyp = 
hyp.copy() # for saving hyps to checkpoints - - # Save run settings - if not evolve: - yaml_save(save_dir / 'hyp.yaml', hyp) - yaml_save(save_dir / 'opt.yaml', vars(opt)) - - # Loggers - data_dict = None - if RANK in {-1, 0}: - logger = GenericLogger(opt=opt, console_logger=LOGGER) - - # Config - plots = not evolve and not opt.noplots # create plots - overlap = not opt.no_overlap - cuda = device.type != 'cpu' - init_seeds(opt.seed + 1 + RANK, deterministic=True) - with torch_distributed_zero_first(LOCAL_RANK): - data_dict = data_dict or check_dataset(data) # check if None - train_path, val_path = data_dict['train'], data_dict['val'] - nc = 1 if single_cls else int(data_dict['nc']) # number of classes - names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset - - # Model - check_suffix(weights, '.pt') # check weights - pretrained = weights.endswith('.pt') - if pretrained: - with torch_distributed_zero_first(LOCAL_RANK): - weights = attempt_download(weights) # download if not found locally - ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak - model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) - exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect - model.load_state_dict(csd, strict=False) # load - LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report - else: - model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - amp = check_amp(model) # check AMP - - # Freeze - freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze - for k, v in model.named_parameters(): - v.requires_grad = True # train all layers - # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) - if any(x in k for x in freeze): - LOGGER.info(f'freezing {k}') - v.requires_grad = False - - # Image size - gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple - - # Batch size - if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size - batch_size = check_train_batch_size(model, imgsz, amp) - logger.update_params({"batch_size": batch_size}) - # loggers.on_params_update({"batch_size": batch_size}) - - # Optimizer - nbs = 64 # nominal batch size - accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing - hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay - optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) - - # Scheduler - if opt.cos_lr: - lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] - else: - lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) - - # EMA - ema = ModelEMA(model) if RANK in {-1, 0} else None - - # Resume - best_fitness, start_epoch = 0.0, 0 - if pretrained: - if resume: - best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) - del ckpt, csd - - # DP mode - if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING โš ๏ธ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') - model = torch.nn.DataParallel(model) - - # SyncBatchNorm - if opt.sync_bn and cuda and RANK != -1: - model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - LOGGER.info('Using SyncBatchNorm()') - - # Trainloader - train_loader, dataset = create_dataloader( - train_path, - imgsz, - batch_size // WORLD_SIZE, - gs, - single_cls, - hyp=hyp, - augment=True, - cache=None if opt.cache == 'val' else opt.cache, - rect=opt.rect, - rank=LOCAL_RANK, - workers=workers, - image_weights=opt.image_weights, - quad=opt.quad, - prefix=colorstr('train: '), - shuffle=True, - mask_downsample_ratio=mask_ratio, - overlap_mask=overlap, - ) - labels = np.concatenate(dataset.labels, 0) - mlc = int(labels[:, 0].max()) # max label class - assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' - - # Process 0 - if RANK in {-1, 0}: - val_loader = create_dataloader(val_path, - imgsz, - batch_size // WORLD_SIZE * 2, - gs, - single_cls, - hyp=hyp, - cache=None if noval else opt.cache, - rect=True, - rank=-1, - workers=workers * 2, - pad=0.5, - mask_downsample_ratio=mask_ratio, - overlap_mask=overlap, - prefix=colorstr('val: '))[0] - - if not resume: - if not opt.noautoanchor: - check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor - model.half().float() # pre-reduce anchor precision - - if plots: - plot_labels(labels, names, save_dir) - # callbacks.run('on_pretrain_routine_end', labels, names) - - # DDP mode - if cuda and RANK != -1: - model = smart_DDP(model) - - # Model attributes - nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) - hyp['box'] *= 3 / nl # scale to layers - hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers - hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers - hyp['label_smoothing'] = opt.label_smoothing - model.nc = nc # attach number of classes to model - model.hyp = hyp # attach hyperparameters to model - model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights - model.names = names - - # Start training - t0 = time.time() - nb = len(train_loader) # number of batches - nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) - # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training - last_opt_step = -1 - maps = np.zeros(nc) # mAP per class - results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) - scheduler.last_epoch = start_epoch - 1 # do not move - scaler = torch.cuda.amp.GradScaler(enabled=amp) - stopper, stop = EarlyStopping(patience=opt.patience), False - compute_loss = ComputeLoss(model, overlap=overlap) # init loss class - # callbacks.run('on_train_start') - LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' - f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' - f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting training for {epochs} epochs...') - for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ - # callbacks.run('on_train_epoch_start') - model.train() - - # Update image weights (optional, single-GPU only) - if opt.image_weights: - cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights - iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights - dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx - - # Update mosaic border (optional) - # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) - # dataset.mosaic_border = [b - imgsz, -b] # height, width borders - - mloss = torch.zeros(4, device=device) # mean losses - if RANK != -1: - train_loader.sampler.set_epoch(epoch) - pbar = enumerate(train_loader) - LOGGER.info(('\n' + '%11s' * 8) % - ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) - if RANK in {-1, 0}: - pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar - optimizer.zero_grad() - for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------ - # callbacks.run('on_train_batch_start') - ni = i + nb * epoch # number integrated 
batches (since train start)
-            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0
-
-            # Warmup
-            if ni <= nw:
-                xi = [0, nw]  # x interp
-                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
-                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
-                for j, x in enumerate(optimizer.param_groups):
-                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
-                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
-                    if 'momentum' in x:
-                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
-
-            # Multi-scale
-            if opt.multi_scale:
-                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size (randrange needs ints)
-                sf = sz / max(imgs.shape[2:])  # scale factor
-                if sf != 1:
-                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
-                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
-
-            # Forward
-            with torch.cuda.amp.autocast(amp):
-                pred = model(imgs)  # forward
-                loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())
-                if RANK != -1:
-                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
-                if opt.quad:
-                    loss *= 4.
-
-            # Backward
-            scaler.scale(loss).backward()
-
-            # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
-            if ni - last_opt_step >= accumulate:
-                scaler.unscale_(optimizer)  # unscale gradients
-                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients
-                scaler.step(optimizer)  # optimizer.step
-                scaler.update()
-                optimizer.zero_grad()
-                if ema:
-                    ema.update(model)
-                last_opt_step = ni
-
-            # Log
-            if RANK in {-1, 0}:
-                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
-                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
-                pbar.set_description(('%11s' * 2 + '%11.4g' * 6) %
-                                     (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
-                # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths)
-                # if callbacks.stop_training:
-                #    return
-
-                # Mosaic plots
-                if plots:
-                    if ni < 3:
-                        plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg")
-                    if ni == 10:
-                        files = sorted(save_dir.glob('train*.jpg'))
-                        logger.log_images(files, "Mosaics", epoch)
-            # end batch ------------------------------------------------------------------------------------------------
-
-        # Scheduler
-        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
-        scheduler.step()
-
-        if RANK in {-1, 0}:
-            # mAP
-            # callbacks.run('on_train_epoch_end', epoch=epoch)
-            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
-            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
-            if not noval or final_epoch:  # Calculate mAP
-                results, maps, _ = validate.run(data_dict,
-                                                batch_size=batch_size // WORLD_SIZE * 2,
-                                                imgsz=imgsz,
-                                                half=amp,
-                                                model=ema.ema,
-                                                single_cls=single_cls,
-                                                dataloader=val_loader,
-                                                save_dir=save_dir,
-                                                plots=False,
-                                                callbacks=callbacks,
-                                                compute_loss=compute_loss,
-                                                mask_downsample_ratio=mask_ratio,
-                                                overlap=overlap)
-
-            # Update best mAP
-            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
-            stop = stopper(epoch=epoch, fitness=fi)  # early stop check
-            if fi > best_fitness:
-                best_fitness = fi
-            log_vals = list(mloss) + list(results) + lr
-            # callbacks.run('on_fit_epoch_end',
log_vals, epoch, best_fitness, fi) - # Log val metrics and media - metrics_dict = dict(zip(KEYS, log_vals)) - logger.log_metrics(metrics_dict, epoch) - - # Save model - if (not nosave) or (final_epoch and not evolve): # if save - ckpt = { - 'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(de_parallel(model)).half(), - 'ema': deepcopy(ema.ema).half(), - 'updates': ema.updates, - 'optimizer': optimizer.state_dict(), - 'opt': vars(opt), - 'git': GIT_INFO, # {remote, branch, commit} if a git repo - 'date': datetime.now().isoformat()} - - # Save last, best and delete - torch.save(ckpt, last) - if best_fitness == fi: - torch.save(ckpt, best) - if opt.save_period > 0 and epoch % opt.save_period == 0: - torch.save(ckpt, w / f'epoch{epoch}.pt') - logger.log_model(w / f'epoch{epoch}.pt') - del ckpt - # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) - - # EarlyStopping - if RANK != -1: # if DDP training - broadcast_list = [stop if RANK == 0 else None] - dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks - if RANK != 0: - stop = broadcast_list[0] - if stop: - break # must break all DDP ranks - - # end epoch ---------------------------------------------------------------------------------------------------- - # end training ----------------------------------------------------------------------------------------------------- - if RANK in {-1, 0}: - LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') - for f in last, best: - if f.exists(): - strip_optimizer(f) # strip optimizers - if f is best: - LOGGER.info(f'\nValidating {f}...') - results, _, _ = validate.run( - data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - model=attempt_load(f, device).half(), - iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - save_json=is_coco, - verbose=True, - plots=plots, - callbacks=callbacks, - compute_loss=compute_loss, - mask_downsample_ratio=mask_ratio, - overlap=overlap) # val best model with plots - if is_coco: - # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) - metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr)) - logger.log_metrics(metrics_dict, epoch) - - # callbacks.run('on_train_end', last, best, epoch, results) - # on train end callback using genericLogger - logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs) - if not opt.evolve: - logger.log_model(best, epoch) - if plots: - plot_results_with_masks(file=save_dir / 'results.csv') # save results.png - files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] - files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - logger.log_images(files, "Results", epoch + 1) - logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) - torch.cuda.empty_cache() - return results - - -def parse_opt(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', 
help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=100, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', action='store_true', help='save no plot files') - parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - - # Instance Segmentation Args - parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to saving memory') - parser.add_argument('--no-overlap', action='store_true', help='Overlap masks train faster at slightly less mAP') - - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def main(opt, callbacks=Callbacks()): - # Checks - if RANK in {-1, 0}: - print_args(vars(opt)) - check_git_status() - check_requirements() - - # Resume - if opt.resume and not opt.evolve: # resume from 
specified or most recent last.pt - last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) - opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml - opt_data = opt.data # original dataset - if opt_yaml.is_file(): - with open(opt_yaml, errors='ignore') as f: - d = yaml.safe_load(f) - else: - d = torch.load(last, map_location='cpu')['opt'] - opt = argparse.Namespace(**d) # replace - opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate - if is_url(opt_data): - opt.data = check_file(opt_data) # avoid HUB resume auth timeout - else: - opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ - check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks - assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - if opt.evolve: - if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve - opt.project = str(ROOT / 'runs/evolve') - opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume - if opt.name == 'cfg': - opt.name = Path(opt.cfg).stem # use model.yaml as name - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) - - # DDP mode - device = select_device(opt.device, batch_size=opt.batch_size) - if LOCAL_RANK != -1: - msg = 'is not compatible with YOLOv5 Multi-GPU DDP training' - assert not opt.image_weights, f'--image-weights {msg}' - assert not opt.evolve, f'--evolve {msg}' - assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' - assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - torch.cuda.set_device(LOCAL_RANK) - device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") - - # Train - if not opt.evolve: - train(opt.hyp, opt, device, callbacks) - - # Evolve hyperparameters (optional) - else: - # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) - meta = { - 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) - 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) - 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 - 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay - 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) - 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum - 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr - 'box': (1, 0.02, 0.2), # box loss gain - 'cls': (1, 0.2, 4.0), # cls loss gain - 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight - 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) - 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight - 'iou_t': (0, 0.1, 0.7), # IoU training threshold - 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold - 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) - 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) - 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) - 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) - 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) - 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) - 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) - 'scale': (1, 0.0, 0.9), # image scale (+/- 
gain) - 'shear': (1, 0.0, 10.0), # image shear (+/- deg) - 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 - 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) - 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) - 'mosaic': (1, 0.0, 1.0), # image mixup (probability) - 'mixup': (1, 0.0, 1.0), # image mixup (probability) - 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) - - with open(opt.hyp, errors='ignore') as f: - hyp = yaml.safe_load(f) # load hyps dict - if 'anchors' not in hyp: # anchors commented in hyp.yaml - hyp['anchors'] = 3 - if opt.noautoanchor: - del hyp['anchors'], meta['anchors'] - opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch - # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' - if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists - - for _ in range(opt.evolve): # generations to evolve - if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate - # Select parent(s) - parent = 'single' # parent selection method: 'single' or 'weighted' - x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) - n = min(5, len(x)) # number of previous results to consider - x = x[np.argsort(-fitness(x))][:n] # top n mutations - w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) - if parent == 'single' or len(x) == 1: - # x = x[random.randint(0, n - 1)] # random selection - x = x[random.choices(range(n), weights=w)[0]] # weighted selection - elif parent == 'weighted': - x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination - - # Mutate - mp, s = 0.8, 0.2 # mutation probability, sigma - npr = np.random - npr.seed(int(time.time())) - g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 - ng = len(meta) - v = np.ones(ng) - while all(v == 1): # mutate until a change occurs (prevent duplicates) - v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) - for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) - hyp[k] = float(x[i + 7] * v[i]) # mutate - - # Constrain to limits - for k, v in meta.items(): - hyp[k] = max(hyp[k], v[1]) # lower limit - hyp[k] = min(hyp[k], v[2]) # upper limit - hyp[k] = round(hyp[k], 5) # significant digits - - # Train mutation - results = train(hyp.copy(), opt, device, callbacks) - callbacks = Callbacks() - # Write mutation results - print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket) - - # Plot results - plot_evolve(evolve_csv) - LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' - f"Results saved to {colorstr('bold', save_dir)}\n" - f'Usage example: $ python train.py --hyp {evolve_yaml}') - - -def run(**kwargs): - # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') - opt = parse_opt(True) - for k, v in kwargs.items(): - setattr(opt, k, v) - main(opt) - return opt - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb deleted file mode 100644 index cb1af34d9f17..000000000000 --- a/segment/tutorial.ipynb +++ /dev/null @@ -1,594 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "t6MPjfT5NrKQ" - }, - "source": [ - "
\n", - "\n", - " \n", - " \n", - "\n", - "\n", - "
\n", - " \"Run\n", - " \"Open\n", - " \"Open\n", - "
\n", - "\n", - "This YOLOv5 ๐Ÿš€ notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7mGmQbAO5pQb" - }, - "source": [ - "# Setup\n", - "\n", - "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "wbvMlHd_QwMG", - "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "YOLOv5 ๐Ÿš€ v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" - ] - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Setup complete โœ… (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" - ] - } - ], - "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone\n", - "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", - "\n", - "import torch\n", - "import utils\n", - "display = utils.notebook_init() # checks" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4JnkELT0cIJg" - }, - "source": [ - "# 1. Predict\n", - "\n", - "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", - "\n", - "```shell\n", - "python segment/predict.py --source 0 # webcam\n", - " img.jpg # image \n", - " vid.mp4 # video\n", - " screen # screenshot\n", - " path/ # directory\n", - " 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "zR9ZbuQCH7FX", - "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", - "YOLOv5 ๐Ÿš€ v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", - "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n", - "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n", - "\n", - "Fusing layers... 
\n", - "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n", - "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" - ] - } - ], - "source": [ - "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n", - "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hkAzDWJ7cWTr" - }, - "source": [ - "        \n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0eq1SMWl6Sfn" - }, - "source": [ - "# 2. Validate\n", - "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "WQPtK1QYVaD_", - "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels-segments.zip ...\n", - "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n", - "######################################################################## 100.0%\n", - "######################################################################## 100.0%\n" - ] - } - ], - "source": [ - "# Download COCO val\n", - "!bash data/scripts/get_coco.sh --val --segments # download (780M - 5000 images)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "X58w8JLpMnjH", - "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 ๐Ÿš€ v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", - "\n", - "Fusing layers... \n", - "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", - " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", - "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" - ] - } - ], - "source": [ - "# Validate YOLOv5s-seg on COCO val\n", - "!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZY2VXXXu74w5" - }, - "source": [ - "# 3. Train\n", - "\n", - "
\n", - "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", - "
\n", - "\n", - "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", - "\n", - "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", - "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", - "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", - "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", - "
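For example, a short 3-epoch fine-tune on COCO128-seg (the same settings used in the training cell below) can be run with:\n",
- "\n",
- "```shell\n",
- "python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt\n",
- "```\n",
- "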
\n", - "\n", - "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", - "\n", - "## Train on Custom Data with Roboflow ๐ŸŒŸ NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", - "
\n", - "\n", - "
Label images lightning fast (including with model-assisted labeling)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "i3oKtE4g-aNn" - }, - "outputs": [], - "source": [ - "#@title Select YOLOv5 ๐Ÿš€ logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", - "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train-seg\n", - "elif logger == 'Comet':\n", - " %pip install -q comet_ml\n", - " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " import clearml; clearml.browser_login()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "1NcFxRcFdJ_O", - "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", - "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 โœ…\n", - "YOLOv5 ๐Ÿš€ v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", - "\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", - "\n", - "Dataset not found โš ๏ธ, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", - "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", - "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n", - "Dataset download success โœ… (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", - "\n", - " from n params module arguments \n", - " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", - " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", - " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", - " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", - " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", - " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", - " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", - " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", - " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", - " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", - " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", - " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 12 [-1, 6] 1 0 models.common.Concat [1] \n", - " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", - " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", - " 15 -1 1 0 
torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 16 [-1, 4] 1 0 models.common.Concat [1] \n", - " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", - " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", - " 19 [-1, 14] 1 0 models.common.Concat [1] \n", - " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", - " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", - " 22 [-1, 10] 1 0 models.common.Concat [1] \n", - " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", - " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", - "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", - "\n", - "Transferred 367/367 items from yolov5s-seg.pt\n", - "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed โœ…\n", - "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", - "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 # 2. paste API key\n", - "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", - "```\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", - "\n", - "\n", - "\"Comet" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Lay2WsTjNJzP" - }, - "source": [ - "## ClearML Logging and Automation ๐ŸŒŸ NEW\n", - "\n", - "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", - "\n", - "- `pip install clearml`\n", - "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", - "\n", - "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! 
With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", - "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", - "\n", - "\n", - "\"ClearML" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-WPvRbS5Swl6" - }, - "source": [ - "## Local Logging\n", - "\n", - "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", - "\n", - "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", - "\n", - "\"Local\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Zelyeqbyt3GD" - }, - "source": [ - "# Environments\n", - "\n", - "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", - "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6Qu7Iesl0p54" - }, - "source": [ - "# Status\n", - "\n", - "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", - "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GMusP4OAxFu6" - }, - "outputs": [], - "source": [ - "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", - "import torch\n", - "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", - "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", - "results = model(im) # inference\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
- ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "name": "YOLOv5 Segmentation Tutorial", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.12" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file diff --git a/segment/val.py b/segment/val.py deleted file mode 100644 index 248d2bee9be1..000000000000 --- a/segment/val.py +++ /dev/null @@ -1,472 +0,0 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license -""" -Validate a trained YOLOv5 segment model on a segment dataset - -Usage: - $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) - $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments - -Usage - formats: - $ python segment/val.py --weights yolov5s-seg.pt # PyTorch - yolov5s-seg.torchscript # TorchScript - yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-seg_openvino_label # OpenVINO - yolov5s-seg.engine # TensorRT - yolov5s-seg.mlmodel # CoreML (macOS-only) - yolov5s-seg_saved_model # TensorFlow SavedModel - yolov5s-seg.pb # TensorFlow GraphDef - yolov5s-seg.tflite # TensorFlow Lite - yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU - yolov5s-seg_paddle_model # PaddlePaddle -""" - -import argparse -import json -import os -import sys -from multiprocessing.pool import ThreadPool -from pathlib import Path - -import numpy as np -import torch -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -import torch.nn.functional as F - -from models.common import DetectMultiBackend -from models.yolo import SegmentationModel -from utils.callbacks import Callbacks -from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, - check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, - non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) -from utils.metrics import ConfusionMatrix, box_iou -from utils.plots import output_to_target, plot_val_study -from utils.segment.dataloaders import create_dataloader -from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image -from utils.segment.metrics import Metrics, ap_per_class_box_and_mask -from utils.segment.plots import plot_images_and_masks -from utils.torch_utils import de_parallel, select_device, smart_inference_mode - - -def save_one_txt(predn, save_conf, shape, file): - # Save one txt result - gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh - for *xyxy, conf, cls in predn.tolist(): - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(file, 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - -def save_one_json(predn, jdict, path, class_map, pred_masks): - # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} - from pycocotools.mask import 
encode - - def single_encode(x): - rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] - rle["counts"] = rle["counts"].decode("utf-8") - return rle - - image_id = int(path.stem) if path.stem.isnumeric() else path.stem - box = xyxy2xywh(predn[:, :4]) # xywh - box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner - pred_masks = np.transpose(pred_masks, (2, 0, 1)) - with ThreadPool(NUM_THREADS) as pool: - rles = pool.map(single_encode, pred_masks) - for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): - jdict.append({ - 'image_id': image_id, - 'category_id': class_map[int(p[5])], - 'bbox': [round(x, 3) for x in b], - 'score': round(p[4], 5), - 'segmentation': rles[i]}) - - -def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): - """ - Return correct prediction matrix - Arguments: - detections (array[N, 6]), x1, y1, x2, y2, conf, class - labels (array[M, 5]), class, x1, y1, x2, y2 - Returns: - correct (array[N, 10]), for 10 IoU levels - """ - if masks: - if overlap: - nl = len(labels) - index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 - gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) - gt_masks = torch.where(gt_masks == index, 1.0, 0.0) - if gt_masks.shape[1:] != pred_masks.shape[1:]: - gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] - gt_masks = gt_masks.gt_(0.5) - iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) - else: # boxes - iou = box_iou(labels[:, 1:], detections[:, :4]) - - correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) - correct_class = labels[:, 0:1] == detections[:, 5] - for i in range(len(iouv)): - x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match - if x[0].shape[0]: - matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] - if x[0].shape[0] > 1: - matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 1], return_index=True)[1]] - # matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 0], return_index=True)[1]] - correct[matches[:, 1].astype(int), i] = True - return torch.tensor(correct, dtype=torch.bool, device=iouv.device) - - -@smart_inference_mode() -def run( - data, - weights=None, # model.pt path(s) - batch_size=32, # batch size - imgsz=640, # inference size (pixels) - conf_thres=0.001, # confidence threshold - iou_thres=0.6, # NMS IoU threshold - max_det=300, # maximum detections per image - task='val', # train, val, test, speed or study - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - workers=8, # max dataloader workers (per RANK in DDP mode) - single_cls=False, # treat as single-class dataset - augment=False, # augmented inference - verbose=False, # verbose output - save_txt=False, # save results to *.txt - save_hybrid=False, # save label+prediction hybrid results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_json=False, # save a COCO-JSON results file - project=ROOT / 'runs/val-seg', # save to project/name - name='exp', # save to project/name - exist_ok=False, # existing project/name ok, do not increment - half=True, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - model=None, - dataloader=None, - save_dir=Path(''), - plots=True, - overlap=False, - mask_downsample_ratio=1, - compute_loss=None, - callbacks=Callbacks(), -): - if save_json: - check_requirements('pycocotools>=2.0.6') - process = process_mask_native # more accurate - else: - process = process_mask # faster - - # Initialize/load model and set device - training = model is not None - if training: # called by train.py - device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model - half &= device.type != 'cpu' # half precision only supported on CUDA - model.half() if half else model.float() - nm = de_parallel(model).model[-1].nm # number of masks - else: # called directly - device = select_device(device, batch_size=batch_size) - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) - stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine - imgsz = check_img_size(imgsz, s=stride) # check image size - half = model.fp16 # FP16 supported on limited backends with CUDA - nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks - if engine: - batch_size = model.batch_size - else: - device = model.device - if not (pt or jit): - batch_size = 1 # export.py models default to batch-size 1 - LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') - - # Data - data = check_dataset(data) # check - - # Configure - model.eval() - cuda = device.type != 'cpu' - is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset - nc = 1 if single_cls else int(data['nc']) # number of classes - iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 - niou = iouv.numel() - - # Dataloader - if not training: - if pt and not single_cls: # check --weights are trained on --data - ncm = model.model.nc - assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ - f'classes). Pass correct combination of --weights and --data that are trained together.' 
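- # warmup: a dummy forward pass initializes the inference backend so first-batch overhead does not skew the timed runs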
- model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup - pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks - task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images - dataloader = create_dataloader(data[task], - imgsz, - batch_size, - stride, - single_cls, - pad=pad, - rect=rect, - workers=workers, - prefix=colorstr(f'{task}: '), - overlap_mask=overlap, - mask_downsample_ratio=mask_downsample_ratio)[0] - - seen = 0 - confusion_matrix = ConfusionMatrix(nc=nc) - names = model.names if hasattr(model, 'names') else model.module.names # get class names - if isinstance(names, (list, tuple)): # old format - names = dict(enumerate(names)) - class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", - "mAP50", "mAP50-95)") - dt = Profile(), Profile(), Profile() - metrics = Metrics() - loss = torch.zeros(4, device=device) - jdict, stats = [], [] - # callbacks.run('on_val_start') - pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar - for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): - # callbacks.run('on_val_batch_start') - with dt[0]: - if cuda: - im = im.to(device, non_blocking=True) - targets = targets.to(device) - masks = masks.to(device) - masks = masks.float() - im = im.half() if half else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - nb, _, height, width = im.shape # batch size, channels, height, width - - # Inference - with dt[1]: - preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) - - # Loss - if compute_loss: - loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls - - # NMS - targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels - lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling - with dt[2]: - preds = non_max_suppression(preds, - conf_thres, - iou_thres, - labels=lb, - multi_label=True, - agnostic=single_cls, - max_det=max_det, - nm=nm) - - # Metrics - plot_masks = [] # masks for plotting - for si, (pred, proto) in enumerate(zip(preds, protos)): - labels = targets[targets[:, 0] == si, 1:] - nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions - path, shape = Path(paths[si]), shapes[si][0] - correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init - correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init - seen += 1 - - if npr == 0: - if nl: - stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) - if plots: - confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) - continue - - # Masks - midx = [si] if overlap else targets[:, 0] == si - gt_masks = masks[midx] - pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) - - # Predictions - if single_cls: - pred[:, 5] = 0 - predn = pred.clone() - scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred - - # Evaluate - if nl: - tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels - labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels - correct_bboxes = process_batch(predn, labelsn, iouv) - correct_masks = process_batch(predn, labelsn, iouv, 
pred_masks, gt_masks, overlap=overlap, masks=True) - if plots: - confusion_matrix.process_batch(predn, labelsn) - stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) - - pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) - if plots and batch_i < 3: - plot_masks.append(pred_masks[:15]) # filter top 15 to plot - - # Save/log - if save_txt: - save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') - if save_json: - pred_masks = scale_image(im[si].shape[1:], - pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) - save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary - # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) - - # Plot images - if plots and batch_i < 3: - if len(plot_masks): - plot_masks = torch.cat(plot_masks, dim=0) - plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) - plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, - save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred - - # callbacks.run('on_val_batch_end') - - # Compute metrics - stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy - if len(stats) and stats[0].any(): - results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) - metrics.update(results) - nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class - - # Print results - pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format - LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) - if nt.sum() == 0: - LOGGER.warning(f'WARNING โš ๏ธ no labels found in {task} set, can not compute metrics without labels') - - # Print results per class - if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): - for i, c in enumerate(metrics.ap_class_index): - LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) - - # Print speeds - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image - if not training: - shape = (batch_size, 3, imgsz, imgsz) - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) - - # Plots - if plots: - confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - # callbacks.run('on_val_end') - - mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() - - # Save JSON - if save_json and len(jdict): - w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f"{w}_predictions.json") # predictions - LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') - with open(pred_json, 'w') as f: - json.dump(jdict, f) - - try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - from pycocotools.coco import COCO - from pycocotools.cocoeval import COCOeval - - anno = COCO(anno_json) # init annotations api - pred = anno.loadRes(pred_json) # init predictions api - results = [] - for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): - if is_coco: - eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate - eval.evaluate() - eval.accumulate() - eval.summarize() - results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) - map_bbox, map50_bbox, map_mask, map50_mask = results - except Exception as e: - LOGGER.info(f'pycocotools unable to run: {e}') - - # Return results - model.float() # for training - if not training: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") - final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask - return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') - parser.add_argument('--batch-size', type=int, default=32, help='batch size') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') - parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') - parser.add_argument('--task', default='val', help='train, val, test, speed or study') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--verbose', action='store_true', help='report mAP by class') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') - parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - opt = parser.parse_args() - opt.data = check_yaml(opt.data) # check YAML - # opt.save_json |= opt.data.endswith('coco.yaml') - opt.save_txt |= opt.save_hybrid - print_args(vars(opt)) - return opt - - -def main(opt): - check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) - - if opt.task in ('train', 'val', 'test'): # run normally - if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 - LOGGER.warning(f'WARNING โš ๏ธ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') - if opt.save_hybrid: - LOGGER.warning('WARNING โš ๏ธ --save-hybrid returns high mAP from hybrid labels, not from predictions alone') - run(**vars(opt)) - - else: - weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] - opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results - if opt.task == 'speed': # speed benchmarks - # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... - opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False - for opt.weights in weights: - run(**vars(opt), plots=False) - - elif opt.task == 'study': # speed vs mAP benchmarks - # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... - for opt.weights in weights: - f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to - x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis - for opt.imgsz in x: # img-size - LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') - r, _, t = run(**vars(opt), plots=False) - y.append(r + t) # results and times - np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') - plot_val_study(x=x) # plot - else: - raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/train.py b/train.py deleted file mode 100644 index 5d75f22b6335..000000000000 --- a/train.py +++ /dev/null @@ -1,634 +0,0 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license -""" -Train a YOLOv5 model on a custom dataset. -Models and datasets download automatically from the latest YOLOv5 release. 
- -Usage - Single-GPU training: - $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (recommended) - $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch - -Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights yolov5s.pt --img 640 --device 0,1,2,3 - -Models: https://github.com/ultralytics/yolov5/tree/master/models -Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data -""" - -import argparse -import math -import os -import random -import sys -import time -from copy import deepcopy -from datetime import datetime -from pathlib import Path - -import numpy as np -import torch -import torch.distributed as dist -import torch.nn as nn -import yaml -from torch.optim import lr_scheduler -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -import val as validate # for end-of-epoch mAP -from models.experimental import attempt_load -from models.yolo import Model -from utils.autoanchor import check_anchors -from utils.autobatch import check_train_batch_size -from utils.callbacks import Callbacks -from utils.dataloaders import create_dataloader -from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, - check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, - get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, - labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, - yaml_save) -from utils.loggers import Loggers -from utils.loggers.comet.comet_utils import check_comet_resume -from utils.loss import ComputeLoss -from utils.metrics import fitness -from utils.plots import plot_evolve -from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, - smart_resume, torch_distributed_zero_first) - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) -GIT_INFO = check_git_info() - - -def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary - save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ - opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze - callbacks.run('on_pretrain_routine_start') - - # Directories - w = save_dir / 'weights' # weights dir - (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir - last, best = w / 'last.pt', w / 'best.pt' - - # Hyperparameters - if isinstance(hyp, str): - with open(hyp, errors='ignore') as f: - hyp = yaml.safe_load(f) # load hyps dict - LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) - opt.hyp = hyp.copy() # for saving hyps to checkpoints - - # Save run settings - if not evolve: - yaml_save(save_dir / 'hyp.yaml', hyp) - yaml_save(save_dir / 'opt.yaml', vars(opt)) - - # Loggers - data_dict = None 
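- # data_dict may be populated from a remote dataset artifact by the loggers (main process only); check_dataset() below is the fallback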
- if RANK in {-1, 0}: - loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance - - # Register actions - for k in methods(loggers): - callbacks.register_action(k, callback=getattr(loggers, k)) - - # Process custom dataset artifact link - data_dict = loggers.remote_dataset - if resume: # If resuming runs from remote artifact - weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size - - # Config - plots = not evolve and not opt.noplots # create plots - cuda = device.type != 'cpu' - init_seeds(opt.seed + 1 + RANK, deterministic=True) - with torch_distributed_zero_first(LOCAL_RANK): - data_dict = data_dict or check_dataset(data) # check if None - train_path, val_path = data_dict['train'], data_dict['val'] - nc = 1 if single_cls else int(data_dict['nc']) # number of classes - names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset - - # Model - check_suffix(weights, '.pt') # check weights - pretrained = weights.endswith('.pt') - if pretrained: - with torch_distributed_zero_first(LOCAL_RANK): - weights = attempt_download(weights) # download if not found locally - ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak - model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect - model.load_state_dict(csd, strict=False) # load - LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report - else: - model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - amp = check_amp(model) # check AMP - - # Freeze - freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze - for k, v in model.named_parameters(): - v.requires_grad = True # train all layers - # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) - if any(x in k for x in freeze): - LOGGER.info(f'freezing {k}') - v.requires_grad = False - - # Image size - gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple - - # Batch size - if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size - batch_size = check_train_batch_size(model, imgsz, amp) - loggers.on_params_update({"batch_size": batch_size}) - - # Optimizer - nbs = 64 # nominal batch size - accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing - hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay - optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) - - # Scheduler - if opt.cos_lr: - lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] - else: - lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) - - # EMA - ema = ModelEMA(model) if RANK in {-1, 0} else None - - # Resume - best_fitness, start_epoch = 0.0, 0 - if pretrained: - if resume: - best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) - del ckpt, csd - - # DP mode - if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING โš ๏ธ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') - model = torch.nn.DataParallel(model) - - # SyncBatchNorm - if opt.sync_bn and cuda and RANK != -1: - model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - LOGGER.info('Using SyncBatchNorm()') - - # Trainloader - train_loader, dataset = create_dataloader(train_path, - imgsz, - batch_size // WORLD_SIZE, - gs, - single_cls, - hyp=hyp, - augment=True, - cache=None if opt.cache == 'val' else opt.cache, - rect=opt.rect, - rank=LOCAL_RANK, - workers=workers, - image_weights=opt.image_weights, - quad=opt.quad, - prefix=colorstr('train: '), - shuffle=True, - seed=opt.seed) - labels = np.concatenate(dataset.labels, 0) - mlc = int(labels[:, 0].max()) # max label class - assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' - - # Process 0 - if RANK in {-1, 0}: - val_loader = create_dataloader(val_path, - imgsz, - batch_size // WORLD_SIZE * 2, - gs, - single_cls, - hyp=hyp, - cache=None if noval else opt.cache, - rect=True, - rank=-1, - workers=workers * 2, - pad=0.5, - prefix=colorstr('val: '))[0] - - if not resume: - if not opt.noautoanchor: - check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor - model.half().float() # pre-reduce anchor precision - - callbacks.run('on_pretrain_routine_end', labels, names) - - # DDP mode - if cuda and RANK != -1: - model = smart_DDP(model) - - # Model attributes - nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) - hyp['box'] *= 3 / nl # scale to layers - hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers - hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers - hyp['label_smoothing'] = opt.label_smoothing - model.nc = nc # attach number of classes to model - model.hyp = hyp # attach hyperparameters to model - model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights - model.names = names - - # Start training - t0 = time.time() - nb = len(train_loader) # number of batches - nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) - # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training - last_opt_step = -1 - maps = np.zeros(nc) # mAP per class - results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) - scheduler.last_epoch = start_epoch - 1 # do not move - scaler = torch.cuda.amp.GradScaler(enabled=amp) - stopper, stop = EarlyStopping(patience=opt.patience), False - compute_loss = ComputeLoss(model) # init loss class - callbacks.run('on_train_start') - LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' - f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' - f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting training for {epochs} epochs...') - for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ - callbacks.run('on_train_epoch_start') - model.train() - - # Update image weights (optional, single-GPU only) - if opt.image_weights: - cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights - iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights - dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx - - # Update mosaic border (optional) - # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) - # dataset.mosaic_border = [b - imgsz, -b] # height, width borders - - mloss = torch.zeros(3, device=device) # mean losses - if RANK != -1: - train_loader.sampler.set_epoch(epoch) - pbar = enumerate(train_loader) - LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) - if RANK in {-1, 0}: - pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar - optimizer.zero_grad() - for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- - callbacks.run('on_train_batch_start') - ni = i + nb * epoch # number integrated batches (since train start) - imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 - - # Warmup - if ni <= nw: - xi = [0, nw] 
# x interp - # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) - accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) - for j, x in enumerate(optimizer.param_groups): - # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) - if 'momentum' in x: - x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) - - # Multi-scale - if opt.multi_scale: - sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size - sf = sz / max(imgs.shape[2:]) # scale factor - if sf != 1: - ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) - imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) - - # Forward - with torch.cuda.amp.autocast(amp): - pred = model(imgs) # forward - loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size - if RANK != -1: - loss *= WORLD_SIZE # gradient averaged between devices in DDP mode - if opt.quad: - loss *= 4. - - # Backward - scaler.scale(loss).backward() - - # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html - if ni - last_opt_step >= accumulate: - scaler.unscale_(optimizer) # unscale gradients - torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients - scaler.step(optimizer) # optimizer.step - scaler.update() - optimizer.zero_grad() - if ema: - ema.update(model) - last_opt_step = ni - - # Log - if RANK in {-1, 0}: - mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) - pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % - (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss)) - if callbacks.stop_training: - return - # end batch ------------------------------------------------------------------------------------------------ - - # Scheduler - lr = [x['lr'] for x in optimizer.param_groups] # for loggers - scheduler.step() - - if RANK in {-1, 0}: - # mAP - callbacks.run('on_train_epoch_end', epoch=epoch) - ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) - final_epoch = (epoch + 1 == epochs) or stopper.possible_stop - if not noval or final_epoch: # Calculate mAP - results, maps, _ = validate.run(data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - half=amp, - model=ema.ema, - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - plots=False, - callbacks=callbacks, - compute_loss=compute_loss) - - # Update best mAP - fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - stop = stopper(epoch=epoch, fitness=fi) # early stop check - if fi > best_fitness: - best_fitness = fi - log_vals = list(mloss) + list(results) + lr - callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) - - # Save model - if (not nosave) or (final_epoch and not evolve): # if save - ckpt = { - 'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(de_parallel(model)).half(), - 'ema': deepcopy(ema.ema).half(), - 'updates': ema.updates, - 'optimizer': optimizer.state_dict(), - 'opt': vars(opt), - 'git': GIT_INFO, # {remote, branch, commit} if a git repo - 'date': datetime.now().isoformat()} - - # Save last, 
best and delete - torch.save(ckpt, last) - if best_fitness == fi: - torch.save(ckpt, best) - if opt.save_period > 0 and epoch % opt.save_period == 0: - torch.save(ckpt, w / f'epoch{epoch}.pt') - del ckpt - callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) - - # EarlyStopping - if RANK != -1: # if DDP training - broadcast_list = [stop if RANK == 0 else None] - dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks - if RANK != 0: - stop = broadcast_list[0] - if stop: - break # must break all DDP ranks - - # end epoch ---------------------------------------------------------------------------------------------------- - # end training ----------------------------------------------------------------------------------------------------- - if RANK in {-1, 0}: - LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') - for f in last, best: - if f.exists(): - strip_optimizer(f) # strip optimizers - if f is best: - LOGGER.info(f'\nValidating {f}...') - results, _, _ = validate.run( - data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - model=attempt_load(f, device).half(), - iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - save_json=is_coco, - verbose=True, - plots=plots, - callbacks=callbacks, - compute_loss=compute_loss) # val best model with plots - if is_coco: - callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) - - callbacks.run('on_train_end', last, best, epoch, results) - - torch.cuda.empty_cache() - return results - - -def parse_opt(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=100, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', action='store_true', help='save no plot files') - parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - - # Logger arguments - parser.add_argument('--entity', default=None, help='Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval') - parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use') - - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def main(opt, callbacks=Callbacks()): - # Checks - if RANK in {-1, 0}: - print_args(vars(opt)) - check_git_status() - check_requirements() - - # Resume (from specified or most recent last.pt) - if opt.resume and not check_comet_resume(opt) and not opt.evolve: - last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) - opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml - opt_data = opt.data # original dataset - if opt_yaml.is_file(): - with open(opt_yaml, errors='ignore') as f: - d = yaml.safe_load(f) - else: - d = torch.load(last, map_location='cpu')['opt'] - opt = argparse.Namespace(**d) # replace - opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate - if is_url(opt_data): - opt.data = check_file(opt_data) # avoid HUB resume auth timeout - else: - opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ - check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks - assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - if opt.evolve: - if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve - opt.project = str(ROOT / 'runs/evolve') - opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume - if opt.name == 'cfg': - opt.name = Path(opt.cfg).stem # use model.yaml as name - 
opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
-
-    # DDP mode
-    device = select_device(opt.device, batch_size=opt.batch_size)
-    if LOCAL_RANK != -1:
-        msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
-        assert not opt.image_weights, f'--image-weights {msg}'
-        assert not opt.evolve, f'--evolve {msg}'
-        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
-        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
-        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
-        torch.cuda.set_device(LOCAL_RANK)
-        device = torch.device('cuda', LOCAL_RANK)
-        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
-
-    # Train
-    if not opt.evolve:
-        train(opt.hyp, opt, device, callbacks)
-
-    # Evolve hyperparameters (optional)
-    else:
-        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
-        meta = {
-            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
-            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
-            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
-            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
-            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
-            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
-            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
-            'box': (1, 0.02, 0.2),  # box loss gain
-            'cls': (1, 0.2, 4.0),  # cls loss gain
-            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
-            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
-            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
-            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
-            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
-            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
-            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (EfficientDet default gamma=1.5)
-            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
-            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
-            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
-            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
-            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
-            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
-            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
-            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
-            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
-            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
-            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
-            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
-            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)
-
-        with open(opt.hyp, errors='ignore') as f:
-            hyp = yaml.safe_load(f)  # load hyps dict
-            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
-                hyp['anchors'] = 3
-        if opt.noautoanchor:
-            del hyp['anchors'], meta['anchors']
-        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
-        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
-        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
-        if opt.bucket:
-            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}')  # download evolve.csv if exists
-
-        for _ in range(opt.evolve):  # generations to evolve
-            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
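-                # Each generation: select a parent from the top-n rows of
-                # evolve.csv (weighted by fitness), mutate each gene with
-                # probability mp=0.8 by a factor clipped to [0.3, 3.0], clamp to
-                # the meta limits above, retrain, and append the new row to
-                # evolve.csv. The first 7 CSV columns are result metrics, which
-                # is why genes are read from x[i + 7] below.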
- # Select parent(s) - parent = 'single' # parent selection method: 'single' or 'weighted' - x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) - n = min(5, len(x)) # number of previous results to consider - x = x[np.argsort(-fitness(x))][:n] # top n mutations - w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) - if parent == 'single' or len(x) == 1: - # x = x[random.randint(0, n - 1)] # random selection - x = x[random.choices(range(n), weights=w)[0]] # weighted selection - elif parent == 'weighted': - x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination - - # Mutate - mp, s = 0.8, 0.2 # mutation probability, sigma - npr = np.random - npr.seed(int(time.time())) - g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 - ng = len(meta) - v = np.ones(ng) - while all(v == 1): # mutate until a change occurs (prevent duplicates) - v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) - for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) - hyp[k] = float(x[i + 7] * v[i]) # mutate - - # Constrain to limits - for k, v in meta.items(): - hyp[k] = max(hyp[k], v[1]) # lower limit - hyp[k] = min(hyp[k], v[2]) # upper limit - hyp[k] = round(hyp[k], 5) # significant digits - - # Train mutation - results = train(hyp.copy(), opt, device, callbacks) - callbacks = Callbacks() - # Write mutation results - keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', - 'val/obj_loss', 'val/cls_loss') - print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket) - - # Plot results - plot_evolve(evolve_csv) - LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' - f"Results saved to {colorstr('bold', save_dir)}\n" - f'Usage example: $ python train.py --hyp {evolve_yaml}') - - -def run(**kwargs): - # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') - opt = parse_opt(True) - for k, v in kwargs.items(): - setattr(opt, k, v) - main(opt) - return opt - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/tutorial.ipynb b/tutorial.ipynb deleted file mode 100644 index c320d699a940..000000000000 --- a/tutorial.ipynb +++ /dev/null @@ -1,976 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "YOLOv5 Tutorial", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "accelerator": "GPU", - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "1f7df330663048998adcf8a45bc8f69b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_e896e6096dd244c59d7955e2035cd729", - "IPY_MODEL_a6ff238c29984b24bf6d0bd175c19430", - "IPY_MODEL_3c085ba3f3fd4c3c8a6bb41b41ce1479" - ], - "layout": "IPY_MODEL_16b0c8aa6e0f427e8a54d3791abb7504" - } - }, - "e896e6096dd244c59d7955e2035cd729": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", 
- "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_c7b2dd0f78384cad8e400b282996cdf5", - "placeholder": "โ€‹", - "style": "IPY_MODEL_6a27e43b0e434edd82ee63f0a91036ca", - "value": "100%" - } - }, - "a6ff238c29984b24bf6d0bd175c19430": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_cce0e6c0c4ec442cb47e65c674e02e92", - "max": 818322941, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_c5b9f38e2f0d4f9aa97fe87265263743", - "value": 818322941 - } - }, - "3c085ba3f3fd4c3c8a6bb41b41ce1479": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_df554fb955c7454696beac5a82889386", - "placeholder": "โ€‹", - "style": "IPY_MODEL_74e9112a87a242f4831b7d68c7da6333", - "value": " 780M/780M [00:05<00:00, 126MB/s]" - } - }, - "16b0c8aa6e0f427e8a54d3791abb7504": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "c7b2dd0f78384cad8e400b282996cdf5": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - 
"flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "6a27e43b0e434edd82ee63f0a91036ca": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "cce0e6c0c4ec442cb47e65c674e02e92": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "c5b9f38e2f0d4f9aa97fe87265263743": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "df554fb955c7454696beac5a82889386": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - 
"grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "74e9112a87a242f4831b7d68c7da6333": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - } - } - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "t6MPjfT5NrKQ" - }, - "source": [ - "
[Ultralytics YOLOv5 banner]\n",
- "\n",
- "Run on Gradient | Open In Colab | Open In Kaggle\n",
- "\n",
- "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure. See GitHub for community support or contact us for professional support.\n",
- "\n",
- "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7mGmQbAO5pQb" - }, - "source": [ - "# Setup\n", - "\n", - "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "wbvMlHd_QwMG", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "f9f016ad-3dcf-4bd2-e1c3-d5b79efc6f32" - }, - "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone\n", - "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", - "\n", - "import torch\n", - "import utils\n", - "display = utils.notebook_init() # checks" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "YOLOv5 ๐Ÿš€ v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" - ] - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Setup complete โœ… (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4JnkELT0cIJg" - }, - "source": [ - "# 1. Detect\n", - "\n", - "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n", - "\n", - "```shell\n", - "python detect.py --source 0 # webcam\n", - " img.jpg # image \n", - " vid.mp4 # video\n", - " screen # screenshot\n", - " path/ # directory\n", - " 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", - "```" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "zR9ZbuQCH7FX", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "b4db5c49-f501-4505-cf0d-a1d35236c485" - }, - "source": [ - "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", - "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 ๐Ÿš€ v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", - "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 116MB/s] \n", - "\n", - "Fusing layers... 
\n", - "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.0ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 14.3ms\n", - "Speed: 0.5ms pre-process, 15.7ms inference, 18.6ms NMS per image at shape (1, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hkAzDWJ7cWTr" - }, - "source": [ - "        \n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0eq1SMWl6Sfn" - }, - "source": [ - "# 2. Validate\n", - "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "WQPtK1QYVaD_", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 49, - "referenced_widgets": [ - "1f7df330663048998adcf8a45bc8f69b", - "e896e6096dd244c59d7955e2035cd729", - "a6ff238c29984b24bf6d0bd175c19430", - "3c085ba3f3fd4c3c8a6bb41b41ce1479", - "16b0c8aa6e0f427e8a54d3791abb7504", - "c7b2dd0f78384cad8e400b282996cdf5", - "6a27e43b0e434edd82ee63f0a91036ca", - "cce0e6c0c4ec442cb47e65c674e02e92", - "c5b9f38e2f0d4f9aa97fe87265263743", - "df554fb955c7454696beac5a82889386", - "74e9112a87a242f4831b7d68c7da6333" - ] - }, - "outputId": "c7d0a0d2-abfb-44c3-d60d-f99d0e7aabad" - }, - "source": [ - "# Download COCO val\n", - "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", - "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - " 0%| | 0.00/780M [00:00

\n",
- "# 3. Train\n",
- "\n",
- "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n",
- "
\n", - "\n", - "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", - "\n", - "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", - "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", - "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", - "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", - "

\n", - "\n", - "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", - "\n", - "## Train on Custom Data with Roboflow ๐ŸŒŸ NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n", - "
\n", - "\n", - "

Label images lightning fast (including with model-assisted labeling)" - ] - }, - { - "cell_type": "code", - "source": [ - "#@title Select YOLOv5 ๐Ÿš€ logger {run: 'auto'}\n", - "logger = 'ClearML' #@param ['ClearML', 'Comet', 'TensorBoard']\n", - "\n", - "if logger == 'ClearML':\n", - " %pip install -q clearml\n", - " import clearml; clearml.browser_login()\n", - "elif logger == 'Comet':\n", - " %pip install -q comet_ml\n", - " import comet_ml; comet_ml.init()\n", - "elif logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train" - ], - "metadata": { - "id": "i3oKtE4g-aNn" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "1NcFxRcFdJ_O", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "721b9028-767f-4a05-c964-692c245f7398" - }, - "source": [ - "# Train YOLOv5s on COCO128 for 3 epochs\n", - "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", - "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 โœ…\n", - "YOLOv5 ๐Ÿš€ v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", - "\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 ๐Ÿš€ in ClearML\n", - "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 ๐Ÿš€ runs in Comet\n", - "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", - "\n", - "Dataset not found โš ๏ธ, missing paths ['/content/datasets/coco128/images/train2017']\n", - "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 261MB/s]\n", - "Dataset download success โœ… (0.3s), saved to \u001b[1m/content/datasets\u001b[0m\n", - "\n", - " from n params module arguments \n", - " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", - " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", - " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", - " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", - " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", - " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", - " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", - " 7 -1 1 1180672 
models.common.Conv [256, 512, 3, 2] \n", - " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", - " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", - " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", - " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 12 [-1, 6] 1 0 models.common.Concat [1] \n", - " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", - " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", - " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 16 [-1, 4] 1 0 models.common.Concat [1] \n", - " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", - " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", - " 19 [-1, 14] 1 0 models.common.Concat [1] \n", - " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", - " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", - " 22 [-1, 10] 1 0 models.common.Concat [1] \n", - " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", - " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model summary: 214 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\n", - "\n", - "Transferred 349/349 items from yolov5s.pt\n", - "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed โœ…\n", - "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", - "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1911.57it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 229.69it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 # 2. paste API key\n", - "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", - "```\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", - "\n", - "\n", - "\"Comet" - ], - "metadata": { - "id": "nWOsI5wJR1o3" - } - }, - { - "cell_type": "markdown", - "source": [ - "## ClearML Logging and Automation ๐ŸŒŸ NEW\n", - "\n", - "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n",
- "\n",
- "- `pip install clearml`\n",
- "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
- "\n",
- "You'll get all the expected features from an experiment manager: live updates, model upload, experiment comparison, etc., but ClearML also tracks uncommitted changes and installed packages, for example. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
- "\n",
- "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n",
- "\n",
- "[ClearML experiment management UI screenshot]"
- ],
- "metadata": {
- "id": "Lay2WsTjNJzP"
- }
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "-WPvRbS5Swl6"
- },
- "source": [
- "## Local Logging\n",
- "\n",
- "Training results are automatically logged with [TensorBoard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
- "\n",
- "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices.\n",
- "\n",
- "[Local logging results screenshot]\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Zelyeqbyt3GD"
- },
- "source": [
- "# Environments\n",
- "\n",
- "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
- "\n",
- "- **Notebooks** with free GPU: Run on Gradient | Open In Colab | Open In Kaggle\n",
- "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n",
- "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n",
- "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart)\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "6Qu7Iesl0p54"
- },
- "source": [
- "# Status\n",
- "\n",
- "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n",
- "\n",
- "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing.
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "GMusP4OAxFu6" - }, - "source": [ - "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", - "import torch\n", - "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True) # yolov5n - yolov5x6 or custom\n", - "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", - "results = model(im) # inference\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." - ], - "execution_count": null, - "outputs": [] - } - ] -} \ No newline at end of file diff --git a/utils/__init__.py b/utils/__init__.py index 7bf3efe6b8c7..ca58fb1eaa1f 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -54,8 +54,8 @@ def notebook_init(verbose=True): import os import shutil - from utils.general import check_font, check_requirements, is_colab - from utils.torch_utils import select_device # imports + from .general import check_font, check_requirements, is_colab + from .torch_utils import select_device # imports check_font() diff --git a/utils/augmentations.py b/utils/augmentations.py index 1eae5db8f816..c60bb6dd4145 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -12,8 +12,8 @@ import torchvision.transforms as T import torchvision.transforms.functional as TF -from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy -from utils.metrics import bbox_ioa +from .general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy +from .metrics import bbox_ioa IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation diff --git a/utils/autoanchor.py b/utils/autoanchor.py index bb5cf6e6965e..fabad6fbe1c6 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -10,8 +10,8 @@ import yaml from tqdm import tqdm -from utils import TryExcept -from utils.general import LOGGER, TQDM_BAR_FORMAT, colorstr +from . 
import TryExcept +from .general import LOGGER, TQDM_BAR_FORMAT, colorstr PREFIX = colorstr('AutoAnchor: ') @@ -112,7 +112,7 @@ def print_results(k, verbose=True): if isinstance(dataset, str): # *.yaml file with open(dataset, errors='ignore') as f: data_dict = yaml.safe_load(f) # model dict - from utils.dataloaders import LoadImagesAndLabels + from .dataloaders import LoadImagesAndLabels dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) # Get label wh diff --git a/utils/autobatch.py b/utils/autobatch.py index bdeb91c3d2bd..64d99b5c5873 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -8,8 +8,8 @@ import numpy as np import torch -from utils.general import LOGGER, colorstr -from utils.torch_utils import profile +from .general import LOGGER, colorstr +from .torch_utils import profile def check_train_batch_size(model, imgsz=640, amp=True): diff --git a/utils/aws/mime.sh b/utils/aws/mime.sh deleted file mode 100644 index c319a83cfbdf..000000000000 --- a/utils/aws/mime.sh +++ /dev/null @@ -1,26 +0,0 @@ -# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ -# This script will run on every instance restart, not only on first start -# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- - -Content-Type: multipart/mixed; boundary="//" -MIME-Version: 1.0 - ---// -Content-Type: text/cloud-config; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="cloud-config.txt" - -#cloud-config -cloud_final_modules: -- [scripts-user, always] - ---// -Content-Type: text/x-shellscript; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="userdata.txt" - -#!/bin/bash -# --- paste contents of userdata.sh here --- ---// diff --git a/utils/aws/resume.py b/utils/aws/resume.py deleted file mode 100644 index b21731c979a1..000000000000 --- a/utils/aws/resume.py +++ /dev/null @@ -1,40 +0,0 @@ -# Resume all interrupted trainings in yolov5/ dir including DDP trainings -# Usage: $ python utils/aws/resume.py - -import os -import sys -from pathlib import Path - -import torch -import yaml - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[2] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -port = 0 # --master_port -path = Path('').resolve() -for last in path.rglob('*/**/last.pt'): - ckpt = torch.load(last) - if ckpt['optimizer'] is None: - continue - - # Load opt.yaml - with open(last.parent.parent / 'opt.yaml', errors='ignore') as f: - opt = yaml.safe_load(f) - - # Get device count - d = opt['device'].split(',') # devices - nd = len(d) # number of devices - ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel - - if ddp: # multi-GPU - port += 1 - cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' - else: # single-GPU - cmd = f'python train.py --resume {last}' - - cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread - print(cmd) - os.system(cmd) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh deleted file mode 100644 index 5fc1332ac1b0..000000000000 --- a/utils/aws/userdata.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html -# This script will run only once on first instance start (for a re-start script see 
mime.sh) -# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir -# Use >300 GB SSD - -cd home/ubuntu -if [ ! -d yolov5 ]; then - echo "Running first-time script." # install dependencies, download COCO, pull Docker - git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 - cd yolov5 - bash data/scripts/get_coco.sh && echo "COCO done." & - sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & - python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & - wait && echo "All tasks done." # finish background tasks -else - echo "Running re-start script." # resume interrupted runs - i=0 - list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' - while IFS= read -r id; do - ((i++)) - echo "restarting container $i: $id" - sudo docker start $id - # sudo docker exec -it $id python train.py --resume # single-GPU - sudo docker exec -d $id python utils/aws/resume.py # multi-scenario - done <<<"$list" -fi diff --git a/utils/dataloaders.py b/utils/dataloaders.py index cbb3114e94d8..2ff5ae75cedc 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -28,12 +28,12 @@ from torch.utils.data import DataLoader, Dataset, dataloader, distributed from tqdm import tqdm -from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, +from .augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, letterbox, mixup, random_perspective) -from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements, +from .general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements, check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) -from utils.torch_utils import torch_distributed_zero_first +from .torch_utils import torch_distributed_zero_first # Parameters HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile deleted file mode 100644 index c8b88357cb6d..000000000000 --- a/utils/docker/Dockerfile +++ /dev/null @@ -1,66 +0,0 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license -# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 -# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference - -# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.12-py3 -RUN rm -rf /opt/pytorch # remove 1.2GB dir - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ - -# Install linux packages -RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx - -# Install pip packages (uninstall torch nightly in favor of stable) -COPY requirements.txt . 
-RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -U pycocotools # install --upgrade -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook 'opencv-python<4.6.0.66' \ - Pillow>=9.1.0 ultralytics \ - --extra-index-url https://download.pytorch.org/whl/cu113 - -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app - -# Set environment variables -ENV OMP_NUM_THREADS=1 - - -# Usage Examples ------------------------------------------------------------------------------------------------------- - -# Build and Push -# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t - -# Pull and Run -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t - -# Pull and Run with local directory access -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t - -# Kill all -# sudo docker kill $(sudo docker ps -q) - -# Kill all image-based -# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) - -# DockerHub tag update -# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew - -# Clean up -# sudo docker system prune -a --volumes - -# Update Ubuntu drivers -# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ - -# DDP test -# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 - -# GCP VM from Image -# docker.io/ultralytics/yolov5:latest diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 deleted file mode 100644 index eed1410793a1..000000000000 --- a/utils/docker/Dockerfile-arm64 +++ /dev/null @@ -1,43 +0,0 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license -# Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 -# Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi - -# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:20.04 - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ - -# Install linux packages -ENV DEBIAN_FRONTEND noninteractive -RUN apt update -RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev -# RUN alias python=python3 - -# Install pip packages -COPY requirements.txt . -RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt ultralytics gsutil notebook \ - tensorflow-aarch64 - # tensorflowjs \ - # onnx onnx-simplifier onnxruntime \ - # coremltools openvino-dev \ - -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . 
/usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app -ENV DEBIAN_FRONTEND teletype - - -# Usage Examples ------------------------------------------------------------------------------------------------------- - -# Build and Push -# t=ultralytics/yolov5:latest-arm64 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t - -# Pull and Run -# t=ultralytics/yolov5:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu deleted file mode 100644 index 558f81f00584..000000000000 --- a/utils/docker/Dockerfile-cpu +++ /dev/null @@ -1,42 +0,0 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license -# Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 -# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments - -# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:20.04 - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ - -# Install linux packages -ENV DEBIAN_FRONTEND noninteractive -RUN apt update -RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg -# RUN alias python=python3 - -# Install pip packages -COPY requirements.txt . -RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt ultralytics albumentations gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ - # openvino-dev \ - --extra-index-url https://download.pytorch.org/whl/cpu - -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app -ENV DEBIAN_FRONTEND teletype - - -# Usage Examples ------------------------------------------------------------------------------------------------------- - -# Build and Push -# t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . && sudo docker push $t - -# Pull and Run -# t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/utils/downloads.py b/utils/downloads.py index 72ea87340eb9..c70afb654fc9 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -38,7 +38,7 @@ def url_getsize(url='https://ultralytics.com/images/bus.jpg'): def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes - from utils.general import LOGGER + from .general import LOGGER file = Path(file) assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" @@ -61,7 +61,7 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'): # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v7.0', etc. - from utils.general import LOGGER + from .general import LOGGER def github_assets(repository, version='latest'): # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. 
['yolov5s.pt', 'yolov5m.pt', ...])
diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md
deleted file mode 100644
index a726acbd9204..000000000000
--- a/utils/flask_rest_api/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# Flask REST API
-
-[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are
-commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API
-created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
-
-## Requirements
-
-[Flask](https://palletsprojects.com/p/flask/) is required. Install with:
-
-```shell
-$ pip install Flask
-```
-
-## Run
-
-After Flask installation run:
-
-```shell
-$ python3 restapi.py --port 5000
-```
-
-Then use [curl](https://curl.se/) to perform a request:
-
-```shell
-$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
-```
-
-The model inference results are returned as a JSON response:
-
-```json
-[
-  {
-    "class": 0,
-    "confidence": 0.8900438547,
-    "height": 0.9318675399,
-    "name": "person",
-    "width": 0.3264600933,
-    "xcenter": 0.7438579798,
-    "ycenter": 0.5207948685
-  },
-  {
-    "class": 0,
-    "confidence": 0.8440024257,
-    "height": 0.7155083418,
-    "name": "person",
-    "width": 0.6546785235,
-    "xcenter": 0.427829951,
-    "ycenter": 0.6334488392
-  },
-  {
-    "class": 27,
-    "confidence": 0.3771208823,
-    "height": 0.3902671337,
-    "name": "tie",
-    "width": 0.0696444362,
-    "xcenter": 0.3675483763,
-    "ycenter": 0.7991207838
-  },
-  {
-    "class": 27,
-    "confidence": 0.3527112305,
-    "height": 0.1540903747,
-    "name": "tie",
-    "width": 0.0336618312,
-    "xcenter": 0.7814827561,
-    "ycenter": 0.5065554976
-  }
-]
-```
-
-An example Python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given
-in `example_request.py`.
diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py
deleted file mode 100644
index 773ad8932967..000000000000
--- a/utils/flask_rest_api/example_request.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Perform test request
-"""
-
-import pprint
-
-import requests
-
-DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
-IMAGE = "zidane.jpg"
-
-# Read image
-with open(IMAGE, "rb") as f:
-    image_data = f.read()
-
-response = requests.post(DETECTION_URL, files={"image": image_data}).json()
-
-pprint.pprint(response)
diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py
deleted file mode 100644
index 8482435c861e..000000000000
--- a/utils/flask_rest_api/restapi.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Run a Flask REST API exposing one or more YOLOv5s models
-"""
-
-import argparse
-import io
-
-import torch
-from flask import Flask, request
-from PIL import Image
-
-app = Flask(__name__)
-models = {}
-
-DETECTION_URL = "/v1/object-detection/<model>"
-
-
-@app.route(DETECTION_URL, methods=["POST"])
-def predict(model):
-    if request.method != "POST":
-        return
-
-    if request.files.get("image"):
-        # Method 1
-        # with request.files["image"] as f:
-        #     im = Image.open(io.BytesIO(f.read()))
-
-        # Method 2
-        im_file = request.files["image"]
-        im_bytes = im_file.read()
-        im = Image.open(io.BytesIO(im_bytes))
-
-        if model in models:
-            results = models[model](im, size=640)  # reduce size=320 for faster inference
-            return results.pandas().xyxy[0].to_json(orient="records")
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
-    parser.add_argument("--port", default=5000, type=int, help="port number")
-    parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s')
-    opt = parser.parse_args()
-
-    for m in opt.model:
-        models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True)
-
-    app.run(host="0.0.0.0", port=opt.port)  # debug=True causes Restarting with stat
diff --git a/utils/general.py b/utils/general.py
index 0bbcb6e7334c..24937f5faf13 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -36,9 +36,9 @@
 import torchvision
 import yaml
 
-from utils import TryExcept, emojis
-from utils.downloads import gsutil_getsize
-from utils.metrics import box_iou, fitness
+from . import TryExcept, emojis
+from .downloads import gsutil_getsize
+from .metrics import box_iou, fitness
 
 FILE = Path(__file__).resolve()
 ROOT = FILE.parents[1]  # YOLOv5 root directory
diff --git a/utils/google_app_engine/Dockerfile b/utils/google_app_engine/Dockerfile
deleted file mode 100644
index 0155618f4751..000000000000
--- a/utils/google_app_engine/Dockerfile
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM gcr.io/google-appengine/python
-
-# Create a virtualenv for dependencies. This isolates these packages from
-# system-level packages.
-# Use -p python3 or -p python3.7 to select Python version. Default is version 2.
-RUN virtualenv /env -p python3
-
-# Setting these environment variables is the same as running
-# source /env/bin/activate.
-ENV VIRTUAL_ENV /env
-ENV PATH /env/bin:$PATH
-
-RUN apt-get update && apt-get install -y python-opencv
-
-# Copy the application's requirements.txt and run pip to install all
-# dependencies into the virtualenv.
-ADD requirements.txt /app/requirements.txt
-RUN pip install -r /app/requirements.txt
-
-# Add the application source code.
-ADD . /app
-
-# Run a WSGI server to serve the application. gunicorn must be declared as
-# a dependency in requirements.txt.
-CMD gunicorn -b :$PORT main:app diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt deleted file mode 100644 index 42d7ffc0eed8..000000000000 --- a/utils/google_app_engine/additional_requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -# add these requirements in your app on top of the existing ones -pip==21.1 -Flask==1.0.2 -gunicorn==19.9.0 diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml deleted file mode 100644 index 5056b7c1186d..000000000000 --- a/utils/google_app_engine/app.yaml +++ /dev/null @@ -1,14 +0,0 @@ -runtime: custom -env: flex - -service: yolov5app - -liveness_check: - initial_delay_sec: 600 - -manual_scaling: - instances: 1 -resources: - cpu: 1 - memory_gb: 4 - disk_size_gb: 20 diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py deleted file mode 100644 index 22da87034f24..000000000000 --- a/utils/loggers/__init__.py +++ /dev/null @@ -1,411 +0,0 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license -""" -Logging utils -""" - -import os -import warnings -from pathlib import Path - -import pkg_resources as pkg -import torch -from torch.utils.tensorboard import SummaryWriter - -from utils.general import LOGGER, colorstr, cv2 -from utils.loggers.clearml.clearml_utils import ClearmlLogger -from utils.loggers.wandb.wandb_utils import WandbLogger -from utils.plots import plot_images, plot_labels, plot_results -from utils.torch_utils import de_parallel - -LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML -RANK = int(os.getenv('RANK', -1)) - -try: - import wandb - - assert hasattr(wandb, '__version__') # verify package import not local dir - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}: - try: - wandb_login_success = wandb.login(timeout=30) - except wandb.errors.UsageError: # known non-TTY terminal issue - wandb_login_success = False - if not wandb_login_success: - wandb = None -except (ImportError, AssertionError): - wandb = None - -try: - import clearml - - assert hasattr(clearml, '__version__') # verify package import not local dir -except (ImportError, AssertionError): - clearml = None - -try: - if RANK not in [0, -1]: - comet_ml = None - else: - import comet_ml - - assert hasattr(comet_ml, '__version__') # verify package import not local dir - from utils.loggers.comet import CometLogger - -except (ModuleNotFoundError, ImportError, AssertionError): - comet_ml = None - - -class Loggers(): - # YOLOv5 Loggers class - def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): - self.save_dir = save_dir - self.weights = weights - self.opt = opt - self.hyp = hyp - self.plots = not opt.noplots # plot results - self.logger = logger # for printing results to console - self.include = include - self.keys = [ - 'train/box_loss', - 'train/obj_loss', - 'train/cls_loss', # train loss - 'metrics/precision', - 'metrics/recall', - 'metrics/mAP_0.5', - 'metrics/mAP_0.5:0.95', # metrics - 'val/box_loss', - 'val/obj_loss', - 'val/cls_loss', # val loss - 'x/lr0', - 'x/lr1', - 'x/lr2'] # params - self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] - for k in LOGGERS: - setattr(self, k, None) # init empty logger dictionary - self.csv = True # always log to csv - - # Messages - # if not wandb: - # prefix = colorstr('Weights & Biases: ') - # s = f"{prefix}run 'pip install wandb' to automatically track and visualize 
YOLOv5 ๐Ÿš€ runs in Weights & Biases" - # self.logger.info(s) - if not clearml: - prefix = colorstr('ClearML: ') - s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 ๐Ÿš€ in ClearML" - self.logger.info(s) - if not comet_ml: - prefix = colorstr('Comet: ') - s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 ๐Ÿš€ runs in Comet" - self.logger.info(s) - # TensorBoard - s = self.save_dir - if 'tb' in self.include and not self.opt.evolve: - prefix = colorstr('TensorBoard: ') - self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") - self.tb = SummaryWriter(str(s)) - - # W&B - if wandb and 'wandb' in self.include: - wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') - run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None - self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, run_id) - # temp warn. because nested artifacts not supported after 0.12.10 - # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." - # self.logger.warning(s) - else: - self.wandb = None - - # ClearML - if clearml and 'clearml' in self.include: - try: - self.clearml = ClearmlLogger(self.opt, self.hyp) - except Exception: - self.clearml = None - prefix = colorstr('ClearML: ') - LOGGER.warning(f'{prefix}WARNING โš ๏ธ ClearML is installed but not configured, skipping ClearML logging.' - f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme') - - else: - self.clearml = None - - # Comet - if comet_ml and 'comet' in self.include: - if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): - run_id = self.opt.resume.split("/")[-1] - self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) - - else: - self.comet_logger = CometLogger(self.opt, self.hyp) - - else: - self.comet_logger = None - - @property - def remote_dataset(self): - # Get data_dict if custom dataset artifact link is provided - data_dict = None - if self.clearml: - data_dict = self.clearml.data_dict - if self.wandb: - data_dict = self.wandb.data_dict - if self.comet_logger: - data_dict = self.comet_logger.data_dict - - return data_dict - - def on_train_start(self): - if self.comet_logger: - self.comet_logger.on_train_start() - - def on_pretrain_routine_start(self): - if self.comet_logger: - self.comet_logger.on_pretrain_routine_start() - - def on_pretrain_routine_end(self, labels, names): - # Callback runs on pre-train routine end - if self.plots: - plot_labels(labels, names, self.save_dir) - paths = self.save_dir.glob('*labels*.jpg') # training labels - if self.wandb: - self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) - # if self.clearml: - # pass # ClearML saves these images automatically using hooks - if self.comet_logger: - self.comet_logger.on_pretrain_routine_end(paths) - - def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): - log_dict = dict(zip(self.keys[0:3], vals)) - # Callback runs on train batch end - # ni: number integrated batches (since train start) - if self.plots: - if ni < 3: - f = self.save_dir / f'train_batch{ni}.jpg' # filename - plot_images(imgs, targets, paths, f) - if ni == 0 and self.tb and not self.opt.sync_bn: - 
log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz)) - if ni == 10 and (self.wandb or self.clearml): - files = sorted(self.save_dir.glob('train*.jpg')) - if self.wandb: - self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Mosaics') - - if self.comet_logger: - self.comet_logger.on_train_batch_end(log_dict, step=ni) - - def on_train_epoch_end(self, epoch): - # Callback runs on train epoch end - if self.wandb: - self.wandb.current_epoch = epoch + 1 - - if self.comet_logger: - self.comet_logger.on_train_epoch_end(epoch) - - def on_val_start(self): - if self.comet_logger: - self.comet_logger.on_val_start() - - def on_val_image_end(self, pred, predn, path, names, im): - # Callback runs on val image end - if self.wandb: - self.wandb.val_one_image(pred, predn, path, names, im) - if self.clearml: - self.clearml.log_image_with_boxes(path, pred, names, im) - - def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out): - if self.comet_logger: - self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out) - - def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): - # Callback runs on val end - if self.wandb or self.clearml: - files = sorted(self.save_dir.glob('val*.jpg')) - if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Validation') - - if self.comet_logger: - self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) - - def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): - # Callback runs at the end of each fit (train+val) epoch - x = dict(zip(self.keys, vals)) - if self.csv: - file = self.save_dir / 'results.csv' - n = len(x) + 1 # number of cols - s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header - with open(file, 'a') as f: - f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') - - if self.tb: - for k, v in x.items(): - self.tb.add_scalar(k, v, epoch) - elif self.clearml: # log to ClearML if TensorBoard not used - for k, v in x.items(): - title, series = k.split('/') - self.clearml.task.get_logger().report_scalar(title, series, v, epoch) - - if self.wandb: - if best_fitness == fi: - best_results = [epoch] + vals[3:7] - for i, name in enumerate(self.best_keys): - self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary - self.wandb.log(x) - self.wandb.end_epoch(best_result=best_fitness == fi) - - if self.clearml: - self.clearml.current_epoch_logged_images = set() # reset epoch image limit - self.clearml.current_epoch += 1 - - if self.comet_logger: - self.comet_logger.on_fit_epoch_end(x, epoch=epoch) - - def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): - # Callback runs on model save event - if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: - if self.wandb: - self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - if self.clearml: - self.clearml.task.update_output_model(model_path=str(last), - model_name='Latest Model', - auto_delete_file=False) - - if self.comet_logger: - self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi) - - def on_train_end(self, last, best, epoch, results): - # Callback runs on training end, i.e. 
saving best model - if self.plots: - plot_results(file=self.save_dir / 'results.csv') # save results.png - files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] - files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter - self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}") - - if self.tb and not self.clearml: # These images are already captured by ClearML by now, we don't want doubles - for f in files: - self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') - - if self.wandb: - self.wandb.log(dict(zip(self.keys[3:10], results))) - self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) - # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model - if not self.opt.evolve: - wandb.log_artifact(str(best if best.exists() else last), - type='model', - name=f'run_{self.wandb.wandb_run.id}_model', - aliases=['latest', 'best', 'stripped']) - self.wandb.finish_run() - - if self.clearml and not self.opt.evolve: - self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), - name='Best Model', - auto_delete_file=False) - - if self.comet_logger: - final_results = dict(zip(self.keys[3:10], results)) - self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results) - - def on_params_update(self, params: dict): - # Update hyperparams or configs of the experiment - if self.wandb: - self.wandb.wandb_run.config.update(params, allow_val_change=True) - if self.comet_logger: - self.comet_logger.on_params_update(params) - - -class GenericLogger: - """ - YOLOv5 General purpose logger for non-task specific logging - Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) 
-    Arguments
-        opt:             Run arguments
-        console_logger:  Console logger
-        include:         loggers to include
-    """
-
-    def __init__(self, opt, console_logger, include=('tb', 'wandb')):
-        # init default loggers
-        self.save_dir = Path(opt.save_dir)
-        self.include = include
-        self.console_logger = console_logger
-        self.csv = self.save_dir / 'results.csv'  # CSV logger
-        if 'tb' in self.include:
-            prefix = colorstr('TensorBoard: ')
-            self.console_logger.info(
-                f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/")
-            self.tb = SummaryWriter(str(self.save_dir))
-
-        if wandb and 'wandb' in self.include:
-            self.wandb = wandb.init(project=web_project_name(str(opt.project)),
-                                    name=None if opt.name == "exp" else opt.name,
-                                    config=opt)
-        else:
-            self.wandb = None
-
-    def log_metrics(self, metrics, epoch):
-        # Log metrics dictionary to all loggers
-        if self.csv:
-            keys, vals = list(metrics.keys()), list(metrics.values())
-            n = len(metrics) + 1  # number of cols
-            s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n')  # header
-            with open(self.csv, 'a') as f:
-                f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
-
-        if self.tb:
-            for k, v in metrics.items():
-                self.tb.add_scalar(k, v, epoch)
-
-        if self.wandb:
-            self.wandb.log(metrics, step=epoch)
-
-    def log_images(self, files, name='Images', epoch=0):
-        # Log images to all loggers
-        files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])]  # to Path
-        files = [f for f in files if f.exists()]  # filter by exists
-
-        if self.tb:
-            for f in files:
-                self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
-
-        if self.wandb:
-            self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch)
-
-    def log_graph(self, model, imgsz=(640, 640)):
-        # Log model graph to all loggers
-        if self.tb:
-            log_tensorboard_graph(self.tb, model, imgsz)
-
-    def log_model(self, model_path, epoch=0, metadata={}):
-        # Log model to all loggers
-        if self.wandb:
-            art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata)
-            art.add_file(str(model_path))
-            wandb.log_artifact(art)
-
-    def update_params(self, params):
-        # Update the parameters logged
-        if self.wandb:
-            wandb.run.config.update(params, allow_val_change=True)
-
-
-def log_tensorboard_graph(tb, model, imgsz=(640, 640)):
-    # Log model graph to TensorBoard
-    try:
-        p = next(model.parameters())  # for device, type
-        imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz  # expand
-        im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p)  # input image (WARNING: must be zeros, not empty)
-        with warnings.catch_warnings():
-            warnings.simplefilter('ignore')  # suppress jit trace warning
-            tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), [])
-    except Exception as e:
-        LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}')
-
-
-def web_project_name(project):
-    # Convert local project name to web project name
-    if not project.startswith('runs/train'):
-        return project
-    suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else ''
-    return f'YOLOv5{suffix}'
diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md
deleted file mode 100644
index 3cf4c268583f..000000000000
--- a/utils/loggers/clearml/README.md
+++ /dev/null
@@ -1,230 +0,0 @@
-# ClearML Integration
-
-## About ClearML
-
-[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time โฑ๏ธ. - -๐Ÿ”จ Track every YOLOv5 training run in the experiment manager - -๐Ÿ”ง Version and easily access your custom training data with the integrated ClearML Data Versioning Tool - -๐Ÿ”ฆ Remotely train and monitor your YOLOv5 training runs using ClearML Agent - -๐Ÿ”ฌ Get the very best mAP using ClearML Hyperparameter Optimization - -๐Ÿ”ญ Turn your newly trained YOLOv5 model into an API with just a few commands using ClearML Serving - -
-And so much more. It's up to you how many of these tools you want to use: you can stick to the experiment manager, or chain them all together into an impressive pipeline!
-
-
- -![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) - - -
-
-
-## 🦾 Setting Things Up
-
-To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have two options to get one:
-
-Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml), or set up your own server ([instructions here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server)). The server itself is open-source too, so even if you're dealing with sensitive data, you should be good to go!
-
-1. Install the `clearml` python package:
-
-    ```bash
-    pip install clearml
-    ```
-
-1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (top right: Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions:
-
-    ```bash
-    clearml-init
-    ```
-
-That's it! You're done 😎
-
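-If you'd rather sanity-check the connection from Python as well, here is a minimal sketch (the project and task names are just examples, nothing YOLOv5-specific):
-
-```python
-from clearml import Task
-
-# Uses the credentials written by `clearml-init`; creates a throwaway experiment on the server
-task = Task.init(project_name='YOLOv5', task_name='connection-test')  # example names only
-task.close()
-```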
-
-## 🚀 Training YOLOv5 With ClearML
-
-To enable ClearML experiment tracking, simply install the ClearML pip package.
-
-```bash
-pip install clearml>=1.2.0
-```
-
-This will enable integration with the YOLOv5 training script. Every training run from now on will be captured and stored by the ClearML experiment manager.
-
-If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script. By default, the project will be called `YOLOv5` and the task `Training`.
-PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name!
-
-```bash
-python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
-```
-
-or with custom project and task name:
-```bash
-python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
-```
-
-This will capture:
-- Source code + uncommitted changes
-- Installed packages
-- (Hyper)parameters
-- Model files (use `--save-period n` to save a checkpoint every n epochs)
-- Console output
-- Scalars (mAP_0.5, mAP_0.5:0.95, precision, recall, losses, learning rates, ...)
-- General info such as machine details, runtime, creation date etc.
-- All produced plots such as label correlogram and confusion matrix
-- Images with bounding boxes per epoch
-- Mosaic per epoch
-- Validation images per epoch
-- ...
-
-That's a lot, right? 🤯
-Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (e.g. mAP_0.5) so you can easily sort on the best performing model. Or select multiple experiments and directly compare them!
-
-There's even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works!
-
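-By the way, everything the experiment manager captures can also be read back from code. A rough sketch (assuming the default `YOLOv5`/`Training` names from above; the scalar keys shown are illustrative):
-
-```python
-from clearml import Task
-
-# Fetch the finished run and read back its logged scalars (illustrative only)
-task = Task.get_task(project_name='YOLOv5', task_name='Training')
-scalars = task.get_last_scalar_metrics()  # nested dict, e.g. scalars['metrics']['mAP_0.5']
-print(scalars)
-```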
-
-## 🔗 Dataset Version Management
-
-Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment!
-
-![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif)
-
-### Prepare Your Dataset
-
-The YOLOv5 repository supports a number of different datasets by using yaml files containing their information. By default, datasets are downloaded to the `../datasets` folder relative to the repository root folder. So if you downloaded the `coco128` dataset using the link in the yaml or with the scripts provided by yolov5, you get this folder structure:
-
-```
-..
-|_ yolov5
-|_ datasets
-    |_ coco128
-        |_ images
-        |_ labels
-        |_ LICENSE
-        |_ README.txt
-```
-But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure.
-
-Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml file contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls.
-
-Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `names`.
-
-```
-..
-|_ yolov5
-|_ datasets
-    |_ coco128
-        |_ images
-        |_ labels
-        |_ coco128.yaml  # <---- HERE!
-        |_ LICENSE
-        |_ README.txt
-```
-
-### Upload Your Dataset
-
-To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command:
-```bash
-cd coco128
-clearml-data sync --project YOLOv5 --name coco128 --folder .
-```
-
-The command `clearml-data sync` is actually a shorthand command. You could also run these commands one after the other:
-```bash
-# Optionally add --parent <parent_dataset_id> if you want to base
-# this version on another dataset version, so no duplicate files are uploaded!
-clearml-data create --name coco128 --project YOLOv5
-clearml-data add --files .
-clearml-data close
-```
-
-### Run Training Using A ClearML Dataset
-
-Now that you have a ClearML dataset, you can very simply use it to train custom YOLOv5 🚀 models!
-
-```bash
-python train.py --img 640 --batch 16 --epochs 3 --data clearml://<your_dataset_id> --weights yolov5s.pt --cache
-```
-
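-Under the hood, the repository resolves a `clearml://` ID roughly like this (a trimmed-down sketch of what `utils/loggers/clearml/clearml_utils.py` does; the dataset ID is a placeholder):
-
-```python
-from pathlib import Path
-
-from clearml import Dataset
-
-# Download (or reuse a cached copy of) the versioned dataset
-dataset = Dataset.get(dataset_id='<your_dataset_id>')  # placeholder ID
-dataset_root = Path(dataset.get_local_copy())
-print(list(dataset_root.glob('*.yaml')))  # the dataset definition yaml copied to the root
-```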
-
-## 👀 Hyperparameter Optimization
-
-Now that we have our experiments and data versioned, it's time to take a look at what we can build on top!
-
-Using the code information, installed packages and environment details, the experiment itself is now **completely reproducible**. In fact, ClearML allows you to clone an experiment and even change its parameters. We can then just rerun it with these new parameters automatically; this is basically what HPO does!
-
-To **run hyperparameter optimization locally**, we've included a pre-made script for you. Just make sure a training task has been run at least once, so it is in the ClearML experiment manager; we will essentially clone it and change its hyperparameters.
-
-You'll need to fill in the ID of this `template task` in the script found at `utils/loggers/clearml/hpo.py` and then just run it :) You can change `task.execute_locally()` to `task.execute()` to put it in a ClearML queue and have a remote agent work on it instead.
-
-```bash
-# To use optuna, install it first, otherwise you can change the optimizer to just be RandomSearch
-pip install optuna
-python utils/loggers/clearml/hpo.py
-```
-
-![HPO](https://github.com/thepycoder/clearml_screenshots/raw/main/hpo.png)
-
-## 🤯 Remote Execution (advanced)
-
-Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs.
-This is where the ClearML Agent comes into play. Check out what the agent can do here:
-
-- [YouTube video](https://youtu.be/MX3BrXnaULs)
-- [Documentation](https://clear.ml/docs/latest/docs/clearml_agent)
-
-In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks, and when it finds one, it recreates the environment and runs it, while still reporting scalars, plots etc. to the experiment manager.
-
-You can turn any machine (a cloud VM, a local GPU machine, your own laptop ...) into a ClearML agent by simply running:
-```bash
-clearml-agent daemon --queue <queues_to_listen_to> [--docker]
-```
-
-### Cloning, Editing And Enqueuing
-
-With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too!
-
-🪄 Clone the experiment by right-clicking it
-
-🎯 Edit the hyperparameters to what you wish them to be
-
-⏳ Enqueue the task to any of the queues by right-clicking it
-
-![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif)
-
-### Executing A Task Remotely
-
-Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()`, and on execution it will be put into a queue for the agent to start working on!
-
-To run the YOLOv5 training script remotely, all you have to do is add this line to the train.py script after the clearml logger has been instantiated:
-```python
-# ...
-# Loggers
-data_dict = None
-if RANK in {-1, 0}:
-    loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance
-    if loggers.clearml:
-        loggers.clearml.task.execute_remotely(queue='my_queue')  # <------ ADD THIS LINE
-        # data_dict is either None if the user did not choose a ClearML dataset, or is filled in by ClearML
-        data_dict = loggers.clearml.data_dict
-# ...
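-# (note: python only runs the script up to the execute_remotely() call; the rest is packaged for the agent)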
-``` -When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead! - -### Autoscaling workers - -ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines and you stop paying! - -Check out the autoscalers getting started video below. - -[![Watch the video](https://img.youtube.com/vi/j4XVMAaUt3E/0.jpg)](https://youtu.be/j4XVMAaUt3E) diff --git a/utils/loggers/clearml/__init__.py b/utils/loggers/clearml/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py deleted file mode 100644 index 3457727a96a4..000000000000 --- a/utils/loggers/clearml/clearml_utils.py +++ /dev/null @@ -1,164 +0,0 @@ -"""Main Logger class for ClearML experiment tracking.""" -import glob -import re -from pathlib import Path - -import numpy as np -import yaml - -from utils.plots import Annotator, colors - -try: - import clearml - from clearml import Dataset, Task - - assert hasattr(clearml, '__version__') # verify package import not local dir -except (ImportError, AssertionError): - clearml = None - - -def construct_dataset(clearml_info_string): - """Load in a clearml dataset and fill the internal data_dict with its contents. - """ - dataset_id = clearml_info_string.replace('clearml://', '') - dataset = Dataset.get(dataset_id=dataset_id) - dataset_root_path = Path(dataset.get_local_copy()) - - # We'll search for the yaml file definition in the dataset - yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml"))) - if len(yaml_filenames) > 1: - raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' - 'the dataset definition this way.') - elif len(yaml_filenames) == 0: - raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file ' - 'inside the dataset root path.') - with open(yaml_filenames[0]) as f: - dataset_definition = yaml.safe_load(f) - - assert set(dataset_definition.keys()).issuperset( - {'train', 'test', 'val', 'nc', 'names'} - ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" - - data_dict = dict() - data_dict['train'] = str( - (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None - data_dict['test'] = str( - (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None - data_dict['val'] = str( - (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None - data_dict['nc'] = dataset_definition['nc'] - data_dict['names'] = dataset_definition['names'] - - return data_dict - - -class ClearmlLogger: - """Log training runs, datasets, models, and predictions to ClearML. - - This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default, - this information includes hyperparameters, system configuration and metrics, model metrics, code information and - basic data metrics and analyses. 
-
-    By providing additional command line arguments to train.py, datasets,
-    models and predictions can also be logged.
-    """
-
-    def __init__(self, opt, hyp):
-        """
-        - Initialize ClearML Task; this object will capture the experiment
-        - Upload dataset version to ClearML Data if opt.upload_dataset is True
-
-        arguments:
-        opt (namespace) -- Commandline arguments for this run
-        hyp (dict) -- Hyperparameters for this run
-
-        """
-        self.current_epoch = 0
-        # Keep track of the number of logged images to enforce a limit
-        self.current_epoch_logged_images = set()
-        # Maximum number of images to log to ClearML per epoch
-        self.max_imgs_to_log_per_epoch = 16
-        # Get the interval of epochs when bounding box images should be logged
-        self.bbox_interval = opt.bbox_interval
-        self.clearml = clearml
-        self.task = None
-        self.data_dict = None
-        if self.clearml:
-            self.task = Task.init(
-                project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5',
-                task_name=opt.name if opt.name != 'exp' else 'Training',
-                tags=['YOLOv5'],
-                output_uri=True,
-                reuse_last_task_id=opt.exist_ok,
-                auto_connect_frameworks={'pytorch': False}
-                # We disconnect pytorch auto-detection, because we added manual model save points in the code
-            )
-            # ClearML's hooks will already grab all general parameters
-            # Only the hyperparameters coming from the yaml config file
-            # will have to be added manually!
-            self.task.connect(hyp, name='Hyperparameters')
-            self.task.connect(opt, name='Args')
-
-            # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent
-            self.task.set_base_docker("ultralytics/yolov5:latest",
-                                      docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"',
-                                      docker_setup_bash_script='pip install clearml')
-
-            # Get ClearML Dataset Version if requested
-            if opt.data.startswith('clearml://'):
-                # data_dict should have the following keys:
-                # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)
-                self.data_dict = construct_dataset(opt.data)
-                # Set data to data_dict because wandb will crash without this information and opt is the best way
-                # to give it to them
-                opt.data = self.data_dict
-
-    def log_debug_samples(self, files, title='Debug Samples'):
-        """
-        Log files (images) as debug samples in the ClearML task.
-
-        arguments:
-        files (List(PosixPath)) a list of file paths in PosixPath format
-        title (str) A title that groups together images with the same values
-        """
-        for f in files:
-            if f.exists():
-                it = re.search(r'_batch(\d+)', f.name)
-                iteration = int(it.groups()[0]) if it else 0
-                self.task.get_logger().report_image(title=title,
-                                                    series=f.name.replace(it.group(), ''),
-                                                    local_path=str(f),
-                                                    iteration=iteration)
-
-    def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):
-        """
-        Draw the bounding boxes on a single image and report the result as a ClearML debug sample.
-
-        arguments:
-        image_path (PosixPath) the path to the original image file
-        boxes (list): list of scaled predictions in the format
-            [xmin, ymin, xmax, ymax, confidence, class]
-        class_names (dict): dict containing mapping of class int to class name
-        image (Tensor): A torch tensor containing the actual image data
-        """
-        if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:
-            # Log every bbox_interval times and deduplicate for any intermittent extra eval runs
-            if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:
-                im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))
-                annotator = Annotator(im=im, pil=True)
-                for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):
-                    color = colors(i)
-
-                    class_name = class_names[int(class_nr)]
-                    confidence_percentage = round(float(conf) * 100, 2)
-                    label = f"{class_name}: {confidence_percentage}%"
-
-                    if conf > conf_threshold:
-                        annotator.rectangle(box.cpu().numpy(), outline=color)
-                        annotator.box_label(box.cpu().numpy(), label=label, color=color)
-
-                annotated_image = annotator.result()
-                self.task.get_logger().report_image(title='Bounding Boxes',
-                                                    series=image_path.name,
-                                                    iteration=self.current_epoch,
-                                                    image=annotated_image)
-                self.current_epoch_logged_images.add(image_path)
diff --git a/utils/loggers/clearml/hpo.py b/utils/loggers/clearml/hpo.py
deleted file mode 100644
index ee518b0fbfc8..000000000000
--- a/utils/loggers/clearml/hpo.py
+++ /dev/null
@@ -1,84 +0,0 @@
-from clearml import Task
-# Connecting ClearML with the current process,
-# from here on everything is logged automatically
-from clearml.automation import HyperParameterOptimizer, UniformParameterRange
-from clearml.automation.optuna import OptimizerOptuna
-
-task = Task.init(project_name='Hyper-Parameter Optimization',
-                 task_name='YOLOv5',
-                 task_type=Task.TaskTypes.optimizer,
-                 reuse_last_task_id=False)
-
-# Example use case:
-optimizer = HyperParameterOptimizer(
-    # This is the experiment we want to optimize
-    base_task_id='<your_template_task_id>',
-    # here we define the hyper-parameters to optimize
-    # Notice: The parameter name should exactly match what you see in the UI: <section_name>/<parameter>
-    # For Example, here we see in the base experiment a section Named: "General"
-    # under it a parameter named "batch_size", this becomes "General/batch_size"
-    # If you have `argparse` for example, then arguments will appear under the "Args" section,
-    # and you should instead pass "Args/batch_size"
-    hyper_parameters=[
-        UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1),
-        UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0),
-        UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98),
-        UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001),
-        UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0),
-        UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95),
-        UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2),
-        UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2),
-        UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0),
-        UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0),
-        UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0),
-        UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0),
-        UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7),
-        UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0),
-        UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0),
-        UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1),
-        UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9),
-        UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9),
-        UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0),
-        UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9),
-        UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9),
-        UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0),
-        UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001),
-        UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0),
-        UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0),
-        UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0),
-        UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0),
-        UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)],
-    # this is the objective metric we want to maximize/minimize
-    objective_metric_title='metrics',
-    objective_metric_series='mAP_0.5',
-    # now we decide if we want to maximize it or minimize it (accuracy we maximize)
-    objective_metric_sign='max',
-    # let us limit the number of concurrent experiments;
-    # this in turn will make sure we don't bombard the scheduler with experiments.
-    # if we have an auto-scaler connected, this, by proxy, will limit the number of machines
-    max_number_of_concurrent_tasks=1,
-    # this is the optimizer class (actually doing the optimization)
-    # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band)
-    optimizer_class=OptimizerOptuna,
-    # If specified only the top K performing Tasks will be kept, the others will be automatically archived
-    save_top_k_tasks_only=5,  # 5,
-    compute_time_limit=None,
-    total_max_jobs=20,
-    min_iteration_per_job=None,
-    max_iteration_per_job=None,
-)
-
-# report every 10 seconds, this is way too often, but we are testing here
-optimizer.set_report_period(10 / 60)
-# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent
-# an_optimizer.start_locally(job_complete_callback=job_complete_callback)
-# set the time limit for the optimization process (2 hours)
-optimizer.set_time_limit(in_minutes=120.0)
-# Start the optimization process in the local environment
-optimizer.start_locally()
-# wait until process is done (notice we are controlling the optimization process in the background)
-optimizer.wait()
-# make sure background optimization stopped
-optimizer.stop()
-
-print('We are done, good bye')
diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md
deleted file mode 100644
index 8a361e2b211d..000000000000
--- a/utils/loggers/comet/README.md
+++ /dev/null
@@ -1,256 +0,0 @@
-
-
-# YOLOv5 with Comet
-
-This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet2).
-
-# About Comet
-
-Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models.
-
-Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)!
-Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!
-
-# Getting Started
-
-## Install Comet
-
-```shell
-pip install comet_ml
-```
-
-## Configure Comet Credentials
-
-There are two ways to configure Comet with YOLOv5.
-
-You can either set your credentials through environment variables:
-
-**Environment Variables**
-
-```shell
-export COMET_API_KEY=<Your Comet API Key>
-export COMET_PROJECT_NAME=<Your Comet Project Name> # This will default to 'yolov5'
-```
-
-Or create a `.comet.config` file in your working directory and set your credentials there.
-
-**Comet Configuration File**
-
-```
-[comet]
-api_key=<Your Comet API Key>
-project_name=<Your Comet Project Name> # This will default to 'yolov5'
-```
-
-## Run the Training Script
-
-```shell
-# Train YOLOv5s on COCO128 for 5 epochs
-python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt
-```
-
-That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI.
-
-# Try out an Example!
-
-Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)
-
-Or better yet, try it out yourself in this Colab Notebook
-
-[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)
-
-# Log automatically
-
-By default, Comet will log the following items:
-
-## Metrics
-
-- Box Loss, Object Loss, Classification Loss for the training and validation data
-- mAP_0.5, mAP_0.5:0.95 metrics for the validation data.
-- Precision and Recall for the validation data
-
-## Parameters
-
-- Model Hyperparameters
-- All parameters passed through the command line options
-
-## Visualizations
-
-- Confusion Matrix of the model predictions on the validation data
-- Plots for the PR and F1 curves across all classes
-- Correlogram of the Class Labels
-
-# Configure Comet Logging
-
-Comet can be configured to log additional data either through command line flags passed to the training script
-or through environment variables.
-
-```shell
-export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online
-export COMET_MODEL_NAME=<your model name> # Set the name for the saved model. Defaults to yolov5
-export COMET_LOG_CONFUSION_MATRIX=false # Set to disable logging a Comet Confusion Matrix. Defaults to true
-export COMET_MAX_IMAGE_UPLOADS=<number of images> # Controls how many total image predictions to log to Comet. Defaults to 100.
-export COMET_LOG_PER_CLASS_METRICS=true # Set to log evaluation metrics for each detected class at the end of training. Defaults to false
-export COMET_DEFAULT_CHECKPOINT_FILENAME=<your checkpoint filename> # Set this if you would like to resume training from a different checkpoint. Defaults to 'last.pt'
-export COMET_LOG_BATCH_LEVEL_METRICS=true # Set this if you would like to log training metrics at the batch level. Defaults to false.
-export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model predictions -``` - -## Logging Checkpoints with Comet - -Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the -logged checkpoints to Comet based on the interval value provided by `save-period`. - -```shell -python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data coco128.yaml \ ---weights yolov5s.pt \ ---save-period 1 -``` - -## Logging Model Predictions - -By default, model predictions (images, ground truth labels and bounding boxes) will be logged to Comet. - -You can control the frequency of logged predictions and the associated images by passing the `bbox_interval` command line argument. Predictions can be visualized using Comet's Object Detection Custom Panel. This frequency corresponds to every Nth batch of data per epoch. In the example below, we are logging every 2nd batch of data for each epoch. - -**Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly. - -Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) - - -```shell -python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data coco128.yaml \ ---weights yolov5s.pt \ ---bbox_interval 2 -``` - -### Controlling the number of Prediction Images logged to Comet - -When logging predictions from YOLOv5, Comet will log the images associated with each set of predictions. By default, a maximum of 100 validation images are logged. You can increase or decrease this number using the `COMET_MAX_IMAGE_UPLOADS` environment variable. - -```shell -env COMET_MAX_IMAGE_UPLOADS=200 python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data coco128.yaml \ ---weights yolov5s.pt \ ---bbox_interval 1 -``` - -### Logging Class Level Metrics - -Use the `COMET_LOG_PER_CLASS_METRICS` environment variable to log mAP, precision, recall, and f1 for each class. - -```shell -env COMET_LOG_PER_CLASS_METRICS=true python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data coco128.yaml \ ---weights yolov5s.pt -``` - -## Uploading a Dataset to Comet Artifacts - -If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag. - -The dataset must be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. - -```shell -python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data coco128.yaml \ ---weights yolov5s.pt \ ---upload_dataset -``` - -You can find the uploaded dataset in the Artifacts tab in your Comet Workspace -artifact-1 - -You can preview the data directly in the Comet UI. -artifact-2 - -Artifacts are versioned and also support adding metadata about the dataset.
Comet will automatically log the metadata from your dataset `yaml` file -artifact-3 - -### Using a saved Artifact - -If you would like to use a dataset from Comet Artifacts, set the `path` variable in your dataset `yaml` file to point to the following Artifact resource URL. - -``` -# contents of artifact.yaml file -path: "comet://<workspace name>/<artifact name>:<artifact version or alias>" -``` -Then pass this file to your training script in the following way: - -```shell -python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data artifact.yaml \ ---weights yolov5s.pt -``` - -Artifacts also allow you to track the lineage of data as it flows through your Experimentation workflow. Here you can see a graph that shows you all the experiments that have used your uploaded dataset. -artifact-4 - -## Resuming a Training Run - -If your training run is interrupted for any reason, e.g. a disrupted internet connection, you can resume the run using the `resume` flag and the Comet Run Path. - -The Run Path has the following format: `comet://<your workspace name>/<your project name>/<experiment id>`. - -This will restore the run to its state before the interruption, which includes restoring the model from a checkpoint, restoring all hyperparameters and training arguments, and downloading Comet dataset Artifacts if they were used in the original run. The resumed run will continue logging to the existing Experiment in the Comet UI. - -```shell -python train.py \ ---resume "comet://<your run path>" -``` - -## Hyperparameter Search with the Comet Optimizer - -YOLOv5 is also integrated with Comet's Optimizer, making it simple to visualize hyperparameter sweeps in the Comet UI. - -### Configuring an Optimizer Sweep - -To configure the Comet Optimizer, you will have to create a JSON file with the information about the sweep. An example file has been provided in `utils/loggers/comet/optimizer_config.json`. - -```shell -python utils/loggers/comet/hpo.py \ - --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" -``` - -The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep, simply add them after -the script. - -```shell -python utils/loggers/comet/hpo.py \ - --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" \ - --save-period 1 \ - --bbox_interval 1 -``` - -### Running a Sweep in Parallel - -```shell -comet optimizer -j <number of workers> utils/loggers/comet/hpo.py \ - utils/loggers/comet/optimizer_config.json -``` - -### Visualizing Results - -Comet provides a number of ways to visualize the results of your sweep.
Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) - -hyperparameter-yolo diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py deleted file mode 100644 index b0318f88d6a6..000000000000 --- a/utils/loggers/comet/__init__.py +++ /dev/null @@ -1,508 +0,0 @@ -import glob -import json -import logging -import os -import sys -from pathlib import Path - -logger = logging.getLogger(__name__) - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -try: - import comet_ml - - # Project Configuration - config = comet_ml.config.get_config() - COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") -except (ModuleNotFoundError, ImportError): - comet_ml = None - COMET_PROJECT_NAME = None - -import PIL -import torch -import torchvision.transforms as T -import yaml - -from utils.dataloaders import img2label_paths -from utils.general import check_dataset, scale_boxes, xywh2xyxy -from utils.metrics import box_iou - -COMET_PREFIX = "comet://" - -COMET_MODE = os.getenv("COMET_MODE", "online") - -# Model Saving Settings -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") - -# Dataset Artifact Settings -COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" - -# Evaluation Settings -COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" -COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" -COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) - -# Confusion Matrix Settings -CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) -IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) - -# Batch Logging Settings -COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" -COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) -COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) -COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" - -RANK = int(os.getenv("RANK", -1)) - -to_pil = T.ToPILImage() - - -class CometLogger: - """Log metrics, parameters, source code, models and much more - with Comet - """ - - def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: - self.job_type = job_type - self.opt = opt - self.hyp = hyp - - # Comet Flags - self.comet_mode = COMET_MODE - - self.save_model = opt.save_period > -1 - self.model_name = COMET_MODEL_NAME - - # Batch Logging Settings - self.log_batch_metrics = COMET_LOG_BATCH_METRICS - self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL - - # Dataset Artifact Settings - self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET - self.resume = self.opt.resume - - # Default parameters to pass to Experiment objects - self.default_experiment_kwargs = { - "log_code": False, - "log_env_gpu": True, - "log_env_cpu": True, - "project_name": COMET_PROJECT_NAME,} - self.default_experiment_kwargs.update(experiment_kwargs) - self.experiment = self._get_experiment(self.comet_mode, run_id) - - self.data_dict = self.check_dataset(self.opt.data) - self.class_names = self.data_dict["names"] - 
self.num_classes = self.data_dict["nc"] - - self.logged_images_count = 0 - self.max_images = COMET_MAX_IMAGE_UPLOADS - - if run_id is None: - self.experiment.log_other("Created from", "YOLOv5") - if not isinstance(self.experiment, comet_ml.OfflineExperiment): - workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] - self.experiment.log_other( - "Run Path", - f"{workspace}/{project_name}/{experiment_id}", - ) - self.log_parameters(vars(opt)) - self.log_parameters(self.opt.hyp) - self.log_asset_data( - self.opt.hyp, - name="hyperparameters.json", - metadata={"type": "hyp-config-file"}, - ) - self.log_asset( - f"{self.opt.save_dir}/opt.yaml", - metadata={"type": "opt-config-file"}, - ) - - self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX - - if hasattr(self.opt, "conf_thres"): - self.conf_thres = self.opt.conf_thres - else: - self.conf_thres = CONF_THRES - if hasattr(self.opt, "iou_thres"): - self.iou_thres = self.opt.iou_thres - else: - self.iou_thres = IOU_THRES - - self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) - - self.comet_log_predictions = COMET_LOG_PREDICTIONS - if self.opt.bbox_interval == -1: - self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 - else: - self.comet_log_prediction_interval = self.opt.bbox_interval - - if self.comet_log_predictions: - self.metadata_dict = {} - self.logged_image_names = [] - - self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS - - self.experiment.log_others({ - "comet_mode": COMET_MODE, - "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, - "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, - "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, - "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, - "comet_model_name": COMET_MODEL_NAME,}) - - # Check if running the Experiment with the Comet Optimizer - if hasattr(self.opt, "comet_optimizer_id"): - self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) - self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) - self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) - self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) - - def _get_experiment(self, mode, experiment_id=None): - if mode == "offline": - if experiment_id is not None: - return comet_ml.ExistingOfflineExperiment( - previous_experiment=experiment_id, - **self.default_experiment_kwargs, - ) - - return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,) - - else: - try: - if experiment_id is not None: - return comet_ml.ExistingExperiment( - previous_experiment=experiment_id, - **self.default_experiment_kwargs, - ) - - return comet_ml.Experiment(**self.default_experiment_kwargs) - - except ValueError: - logger.warning("COMET WARNING: " - "Comet credentials have not been set. " - "Comet will default to offline logging. 
" - "Please set your credentials to enable online logging.") - return self._get_experiment("offline", experiment_id) - - return - - def log_metrics(self, log_dict, **kwargs): - self.experiment.log_metrics(log_dict, **kwargs) - - def log_parameters(self, log_dict, **kwargs): - self.experiment.log_parameters(log_dict, **kwargs) - - def log_asset(self, asset_path, **kwargs): - self.experiment.log_asset(asset_path, **kwargs) - - def log_asset_data(self, asset, **kwargs): - self.experiment.log_asset_data(asset, **kwargs) - - def log_image(self, img, **kwargs): - self.experiment.log_image(img, **kwargs) - - def log_model(self, path, opt, epoch, fitness_score, best_model=False): - if not self.save_model: - return - - model_metadata = { - "fitness_score": fitness_score[-1], - "epochs_trained": epoch + 1, - "save_period": opt.save_period, - "total_epochs": opt.epochs,} - - model_files = glob.glob(f"{path}/*.pt") - for model_path in model_files: - name = Path(model_path).name - - self.experiment.log_model( - self.model_name, - file_or_folder=model_path, - file_name=name, - metadata=model_metadata, - overwrite=True, - ) - - def check_dataset(self, data_file): - with open(data_file) as f: - data_config = yaml.safe_load(f) - - if data_config['path'].startswith(COMET_PREFIX): - path = data_config['path'].replace(COMET_PREFIX, "") - data_dict = self.download_dataset_artifact(path) - - return data_dict - - self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) - - return check_dataset(data_file) - - def log_predictions(self, image, labelsn, path, shape, predn): - if self.logged_images_count >= self.max_images: - return - detections = predn[predn[:, 4] > self.conf_thres] - iou = box_iou(labelsn[:, 1:], detections[:, :4]) - mask, _ = torch.where(iou > self.iou_thres) - if len(mask) == 0: - return - - filtered_detections = detections[mask] - filtered_labels = labelsn[mask] - - image_id = path.split("/")[-1].split(".")[0] - image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" - if image_name not in self.logged_image_names: - native_scale_image = PIL.Image.open(path) - self.log_image(native_scale_image, name=image_name) - self.logged_image_names.append(image_name) - - metadata = [] - for cls, *xyxy in filtered_labels.tolist(): - metadata.append({ - "label": f"{self.class_names[int(cls)]}-gt", - "score": 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) - for *xyxy, conf, cls in filtered_detections.tolist(): - metadata.append({ - "label": f"{self.class_names[int(cls)]}", - "score": conf * 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) - - self.metadata_dict[image_name] = metadata - self.logged_images_count += 1 - - return - - def preprocess_prediction(self, image, labels, shape, pred): - nl, _ = labels.shape[0], pred.shape[0] - - # Predictions - if self.opt.single_cls: - pred[:, 5] = 0 - - predn = pred.clone() - scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) - - labelsn = None - if nl: - tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_boxes(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels - labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels - scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred - - return predn, labelsn - - def add_assets_to_artifact(self, artifact, path, asset_path, split): - img_paths = sorted(glob.glob(f"{asset_path}/*")) - label_paths = img2label_paths(img_paths) - - for image_file, label_file in 
zip(img_paths, label_paths): - image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) - - try: - artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) - artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) - except ValueError as e: - logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') - logger.error(f"COMET ERROR: {e}") - continue - - return artifact - - def upload_dataset_artifact(self): - dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") - path = str((ROOT / Path(self.data_dict["path"])).resolve()) - - metadata = self.data_dict.copy() - for key in ["train", "val", "test"]: - split_path = metadata.get(key) - if split_path is not None: - metadata[key] = split_path.replace(path, "") - - artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) - for key in metadata.keys(): - if key in ["train", "val", "test"]: - if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): - continue - - asset_path = self.data_dict.get(key) - if asset_path is not None: - artifact = self.add_assets_to_artifact(artifact, path, asset_path, key) - - self.experiment.log_artifact(artifact) - - return - - def download_dataset_artifact(self, artifact_path): - logged_artifact = self.experiment.get_artifact(artifact_path) - artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name) - logged_artifact.download(artifact_save_dir) - - metadata = logged_artifact.metadata - data_dict = metadata.copy() - data_dict["path"] = artifact_save_dir - - metadata_names = metadata.get("names") - if type(metadata_names) == dict: - data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} - elif type(metadata_names) == list: - data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} - else: - raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" - - data_dict = self.update_data_paths(data_dict) - return data_dict - - def update_data_paths(self, data_dict): - path = data_dict.get("path", "") - - for split in ["train", "val", "test"]: - if data_dict.get(split): - split_path = data_dict.get(split) - data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ - f"{path}/{x}" for x in split_path]) - - return data_dict - - def on_pretrain_routine_end(self, paths): - if self.opt.resume: - return - - for path in paths: - self.log_asset(str(path)) - - if self.upload_dataset: - if not self.resume: - self.upload_dataset_artifact() - - return - - def on_train_start(self): - self.log_parameters(self.hyp) - - def on_train_epoch_start(self): - return - - def on_train_epoch_end(self, epoch): - self.experiment.curr_epoch = epoch - - return - - def on_train_batch_start(self): - return - - def on_train_batch_end(self, log_dict, step): - self.experiment.curr_step = step - if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0): - self.log_metrics(log_dict, step=step) - - return - - def on_train_end(self, files, save_dir, last, best, epoch, results): - if self.comet_log_predictions: - curr_epoch = self.experiment.curr_epoch - self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) - - for f in files: - self.log_asset(f, metadata={"epoch": epoch}) - self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) - - if not self.opt.evolve: - model_path = str(best if best.exists() else last) - name = Path(model_path).name - if self.save_model: - self.experiment.log_model( - self.model_name, - file_or_folder=model_path, - file_name=name, - overwrite=True, - ) - - # Check if running Experiment with Comet Optimizer - if hasattr(self.opt, 'comet_optimizer_id'): - metric = results.get(self.opt.comet_optimizer_metric) - self.experiment.log_other('optimizer_metric_value', metric) - - self.finish_run() - - def on_val_start(self): - return - - def on_val_batch_start(self): - return - - def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs): - if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)): - return - - for si, pred in enumerate(outputs): - if len(pred) == 0: - continue - - image = images[si] - labels = targets[targets[:, 0] == si, 1:] - shape = shapes[si] - path = paths[si] - predn, labelsn = self.preprocess_prediction(image, labels, shape, pred) - if labelsn is not None: - self.log_predictions(image, labelsn, path, shape, predn) - - return - - def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): - if self.comet_log_per_class_metrics: - if self.num_classes > 1: - for i, c in enumerate(ap_class): - class_name = self.class_names[c] - self.experiment.log_metrics( - { - 'mAP@.5': ap50[i], - 'mAP@.5:.95': ap[i], - 'precision': p[i], - 'recall': r[i], - 'f1': f1[i], - 'true_positives': tp[i], - 'false_positives': fp[i], - 'support': nt[c]}, - prefix=class_name) - - if self.comet_log_confusion_matrix: - epoch = self.experiment.curr_epoch - class_names = list(self.class_names.values()) - class_names.append("background") - num_classes = len(class_names) - - self.experiment.log_confusion_matrix( - matrix=confusion_matrix.matrix, - max_categories=num_classes, - labels=class_names, - epoch=epoch, - column_label='Actual Category', - row_label='Predicted Category', - file_name=f"confusion-matrix-epoch-{epoch}.json", - ) - - def on_fit_epoch_end(self, result, epoch): - 
self.log_metrics(result, epoch=epoch) - - def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): - if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: - self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - - def on_params_update(self, params): - self.log_parameters(params) - - def finish_run(self): - self.experiment.end() diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py deleted file mode 100644 index 3cbd45156b57..000000000000 --- a/utils/loggers/comet/comet_utils.py +++ /dev/null @@ -1,150 +0,0 @@ -import logging -import os -from urllib.parse import urlparse - -try: - import comet_ml -except (ModuleNotFoundError, ImportError): - comet_ml = None - -import yaml - -logger = logging.getLogger(__name__) - -COMET_PREFIX = "comet://" -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") -COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") - - -def download_model_checkpoint(opt, experiment): - model_dir = f"{opt.project}/{experiment.name}" - os.makedirs(model_dir, exist_ok=True) - - model_name = COMET_MODEL_NAME - model_asset_list = experiment.get_model_asset_list(model_name) - - if len(model_asset_list) == 0: - logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") - return - - model_asset_list = sorted( - model_asset_list, - key=lambda x: x["step"], - reverse=True, - ) - logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} - - resource_url = urlparse(opt.weights) - checkpoint_filename = resource_url.query - - if checkpoint_filename: - asset_id = logged_checkpoint_map.get(checkpoint_filename) - else: - asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME) - checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME - - if asset_id is None: - logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") - return - - try: - logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") - asset_filename = checkpoint_filename - - model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) - model_download_path = f"{model_dir}/{asset_filename}" - with open(model_download_path, "wb") as f: - f.write(model_binary) - - opt.weights = model_download_path - - except Exception as e: - logger.warning("COMET WARNING: Unable to download checkpoint from Comet") - logger.exception(e) - - -def set_opt_parameters(opt, experiment): - """Update the opts Namespace with parameters - from Comet's ExistingExperiment when resuming a run - - Args: - opt (argparse.Namespace): Namespace of command line options - experiment (comet_ml.APIExperiment): Comet API Experiment object - """ - asset_list = experiment.get_asset_list() - resume_string = opt.resume - - for asset in asset_list: - if asset["fileName"] == "opt.yaml": - asset_id = asset["assetId"] - asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) - opt_dict = yaml.safe_load(asset_binary) - for key, value in opt_dict.items(): - setattr(opt, key, value) - opt.resume = resume_string - - # Save hyperparameters to YAML file - # Necessary to pass checks in training script - save_dir = f"{opt.project}/{experiment.name}" - os.makedirs(save_dir, exist_ok=True) - - hyp_yaml_path = f"{save_dir}/hyp.yaml" - with open(hyp_yaml_path, "w") as f: - yaml.dump(opt.hyp, f) - opt.hyp = hyp_yaml_path - - -def check_comet_weights(opt): - 
"""Downloads model weights from Comet and updates the - weights path to point to saved weights location - - Args: - opt (argparse.Namespace): Command Line arguments passed - to YOLOv5 training script - - Returns: - None/bool: Return True if weights are successfully downloaded - else return None - """ - if comet_ml is None: - return - - if isinstance(opt.weights, str): - if opt.weights.startswith(COMET_PREFIX): - api = comet_ml.API() - resource = urlparse(opt.weights) - experiment_path = f"{resource.netloc}{resource.path}" - experiment = api.get(experiment_path) - download_model_checkpoint(opt, experiment) - return True - - return None - - -def check_comet_resume(opt): - """Restores run parameters to its original state based on the model checkpoint - and logged Experiment parameters. - - Args: - opt (argparse.Namespace): Command Line arguments passed - to YOLOv5 training script - - Returns: - None/bool: Return True if the run is restored successfully - else return None - """ - if comet_ml is None: - return - - if isinstance(opt.resume, str): - if opt.resume.startswith(COMET_PREFIX): - api = comet_ml.API() - resource = urlparse(opt.resume) - experiment_path = f"{resource.netloc}{resource.path}" - experiment = api.get(experiment_path) - set_opt_parameters(opt, experiment) - download_model_checkpoint(opt, experiment) - - return True - - return None diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py deleted file mode 100644 index 7dd5c92e8de1..000000000000 --- a/utils/loggers/comet/hpo.py +++ /dev/null @@ -1,118 +0,0 @@ -import argparse -import json -import logging -import os -import sys -from pathlib import Path - -import comet_ml - -logger = logging.getLogger(__name__) - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - -# Project Configuration -config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") - - -def get_args(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', action='store_true', help='save no plot files') - 
parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - - # Weights & Biases arguments - parser.add_argument('--entity', default=None, help='W&B: Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') - - # Comet Arguments - parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") - parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") - parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") - parser.add_argument("--comet_optimizer_workers", - type=int, - default=1, - help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") - - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} - - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = parameters.get("batch_size") - opt.epochs = 
parameters.get("epochs") - - device = select_device(opt.device, batch_size=opt.batch_size) - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - opt = get_args(known=True) - - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.project = str(opt.project) - - optimizer_id = os.getenv("COMET_OPTIMIZER_ID") - if optimizer_id is None: - with open(opt.comet_optimizer_config) as f: - optimizer_config = json.load(f) - optimizer = comet_ml.Optimizer(optimizer_config) - else: - optimizer = comet_ml.Optimizer(optimizer_id) - - opt.comet_optimizer_id = optimizer.id - status = optimizer.status() - - opt.comet_optimizer_objective = status["spec"]["objective"] - opt.comet_optimizer_metric = status["spec"]["metric"] - - logger.info("COMET INFO: Starting Hyperparameter Sweep") - for parameter in optimizer.get_parameters(): - run(parameter["parameters"], opt) diff --git a/utils/loggers/comet/optimizer_config.json b/utils/loggers/comet/optimizer_config.json deleted file mode 100644 index 83ddddab6f20..000000000000 --- a/utils/loggers/comet/optimizer_config.json +++ /dev/null @@ -1,209 +0,0 @@ -{ - "algorithm": "random", - "parameters": { - "anchor_t": { - "type": "discrete", - "values": [ - 2, - 8 - ] - }, - "batch_size": { - "type": "discrete", - "values": [ - 16, - 32, - 64 - ] - }, - "box": { - "type": "discrete", - "values": [ - 0.02, - 0.2 - ] - }, - "cls": { - "type": "discrete", - "values": [ - 0.2 - ] - }, - "cls_pw": { - "type": "discrete", - "values": [ - 0.5 - ] - }, - "copy_paste": { - "type": "discrete", - "values": [ - 1 - ] - }, - "degrees": { - "type": "discrete", - "values": [ - 0, - 45 - ] - }, - "epochs": { - "type": "discrete", - "values": [ - 5 - ] - }, - "fl_gamma": { - "type": "discrete", - "values": [ - 0 - ] - }, - "fliplr": { - "type": "discrete", - "values": [ - 0 - ] - }, - "flipud": { - "type": "discrete", - "values": [ - 0 - ] - }, - "hsv_h": { - "type": "discrete", - "values": [ - 0 - ] - }, - "hsv_s": { - "type": "discrete", - "values": [ - 0 - ] - }, - "hsv_v": { - "type": "discrete", - "values": [ - 0 - ] - }, - "iou_t": { - "type": "discrete", - "values": [ - 0.7 - ] - }, - "lr0": { - "type": "discrete", - "values": [ - 1e-05, - 0.1 - ] - }, - "lrf": { - "type": "discrete", - "values": [ - 0.01, - 1 - ] - }, - "mixup": { - "type": "discrete", - "values": [ - 1 - ] - }, - "momentum": { - "type": "discrete", - "values": [ - 0.6 - ] - }, - "mosaic": { - "type": "discrete", - "values": [ - 0 - ] - }, - "obj": { - "type": "discrete", - "values": [ - 0.2 - ] - }, - "obj_pw": { - "type": "discrete", - "values": [ - 0.5 - ] - }, - "optimizer": { - "type": "categorical", - "values": [ - "SGD", - "Adam", - "AdamW" - ] - }, - "perspective": { - "type": "discrete", - "values": [ - 0 - ] - }, - "scale": { - "type": "discrete", - "values": [ - 0 - ] - }, - "shear": { - "type": "discrete", - "values": [ - 0 - ] - }, - "translate": { - "type": "discrete", - "values": [ - 0 - ] - }, - "warmup_bias_lr": { - "type": "discrete", - "values": [ - 0, - 0.2 - ] - }, - "warmup_epochs": { - "type": "discrete", - "values": [ - 5 - ] - }, - "warmup_momentum": { - "type": "discrete", - "values": [ - 0, - 0.95 - ] - }, - "weight_decay": { - "type": "discrete", - "values": [ - 0, - 0.001 - ] - } - }, - "spec": { - "maxCombo": 0, - "metric": "metrics/mAP_0.5", - "objective": "maximize" - }, - "trials": 1 -} diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md deleted file mode 100644 index 
d78324b4c8e9..000000000000 --- a/utils/loggers/wandb/README.md +++ /dev/null @@ -1,162 +0,0 @@ -📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021. - -- [About Weights & Biases](#about-weights-&-biases) -- [First-Time Setup](#first-time-setup) -- [Viewing runs](#viewing-runs) -- [Disabling wandb](#disabling-wandb) -- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) -- [Reports: Share your work with the world!](#reports) - -## About Weights & Biases - -Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models: architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. - -Used by top researchers including teams at OpenAI, Lyft, GitHub, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows: - -- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time -- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically -- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization -- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators -- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently -- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models - -## First-Time Setup - -
- Toggle Details -When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device. - -W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as: - -```shell -$ python train.py --project ... --name ... -``` - -YOLOv5 notebook example: Open In Colab Open In Kaggle -Screen Shot 2021-09-29 at 10 23 13 PM - -
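If you prefer to authenticate before launching a run (for example on a remote machine), a minimal sketch using wandb's standard login API should work; it assumes `WANDB_API_KEY` may already be set in the environment:

```python
import wandb

# Reads WANDB_API_KEY if present, otherwise prompts for the key interactively.
wandb.login()
```

After this, `python train.py --project ... --name ...` logs to your account without prompting.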
- -## Viewing Runs - -
- Toggle Details -Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in real time. All important information is logged: - -- Training & Validation losses -- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95 -- Learning Rate over time -- A bounding box debugging panel, showing the training progress over time -- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage** -- System: Disk I/O, CPU utilization, RAM memory usage -- Your trained model as a W&B Artifact -- Environment: OS and Python versions, Git repository and state, **training command** -

Weights & Biases dashboard

-
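All of the above comes from ordinary `wandb.log` calls made by the integration during training. As a rough, hypothetical equivalent (metric names assumed from the sweep config that appears later in this diff; the values are placeholders):

```python
import wandb

run = wandb.init(project='YOLOv5')  # train.py does this for you via the integration
for epoch in range(3):
    # A real run streams losses, precision/recall, mAP and learning rates each epoch.
    wandb.log({'metrics/precision': 0.6, 'metrics/recall': 0.5, 'metrics/mAP_0.5': 0.55}, step=epoch)
run.finish()
```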
- -## Disabling wandb - -- Training after running `wandb disabled` inside that directory creates no wandb run. - ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png) - -- To enable wandb again, run `wandb online`. - ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png) - -## Advanced Usage - -You can leverage the W&B Artifacts and Tables integrations to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started. -
-

### 1: Train and Log Evaluation simultaneously

- This is an extension of the previous section, but it'll also start training after uploading the dataset, and log an Evaluation Table (a sketch of the table's schema follows this example). The Evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses references to the already uploaded datasets, - so no images will be uploaded from your system more than once. -
- Usage: `$ python train.py --upload_data val` - ![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png) -
- -
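Internally the Evaluation Table is an ordinary `wandb.Table`; a sketch of its schema, assuming the column layout used by `WandbLogger.setup_training` later in this diff (one column per class plus epoch, id, ground truth and prediction):

```python
import wandb

class_names = ['person', 'car']  # hypothetical classes from your data.yaml
columns = ['epoch', 'id', 'ground truth', 'prediction'] + class_names
result_table = wandb.Table(columns)  # one row is added per validation image per logged epoch
```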

### 2: Visualize and Version Datasets

- Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a `{dataset}_wandb.yaml` file which can be used to train from the dataset artifact (the naming rule is sketched after this example). -
- Usage: `$ python utils/loggers/wandb/log_dataset.py --project ... --name ... --data ..` - ![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png) -
- -
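The `_wandb.yaml` name is derived mechanically from your dataset config; this mirrors the `check_wandb_config_file` helper in `wandb_utils.py` later in this diff:

```python
# 'data/coco128.yaml' -> 'data/coco128_wandb.yaml'
data_config_file = 'data/coco128.yaml'
wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1))
print(wandb_config)  # data/coco128_wandb.yaml
```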

### 3: Train using dataset artifact

- When you upload a dataset as described in the first section, you get a new config file with `_wandb` added to its name. This file contains the information needed to - train a model directly from the dataset artifact, and it also logs an Evaluation Table (how the artifact-backed config is recognized is sketched after this example). -
- Usage: `$ python train.py --data {data}_wandb.yaml` - ![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png) -
- -
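What makes the `_wandb.yaml` file special is simply that its `train`/`val` entries point at W&B artifacts instead of local folders; a sketch of the check performed by `check_wandb_dataset` later in this diff (the artifact paths here are hypothetical):

```python
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'

data_dict = {
    'train': 'wandb-artifact://entity/project/coco128_train',  # hypothetical artifact paths
    'val': 'wandb-artifact://entity/project/coco128_val',
}
uses_artifacts = any(isinstance(v, str) and v.startswith(WANDB_ARTIFACT_PREFIX) for v in data_dict.values())
print(uses_artifacts)  # True -> training downloads the artifact instead of reading local folders
```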

### 4: Save model checkpoints as artifacts

- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` is the checkpoint interval in epochs. - You can also log both the dataset and model checkpoints simultaneously. If the flag is not passed, only the final model will be logged (a sketch of the interval check follows this example). - -
- Usage: `$ python train.py --save_period 1` - ![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png) -
- -
- -
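As a rough sketch of when a checkpoint actually gets logged for a given `--save_period`, this mirrors the condition used by the Comet logger removed in this same PR; the W&B logger is assumed to behave analogously:

```python
def should_log_checkpoint(epoch: int, save_period: int, final_epoch: bool) -> bool:
    # save_period == -1 disables checkpoint logging entirely
    return save_period != -1 and (epoch + 1) % save_period == 0 and not final_epoch

print([e for e in range(10) if should_log_checkpoint(e, 2, final_epoch=False)])  # [1, 3, 5, 7, 9]
```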

### 5: Resume runs from checkpoint artifacts

-Any run can be resumed using artifacts if the `--resume` argument starts with the `wandb-artifact://` prefix followed by the run path, i.e. `wandb-artifact://username/project/runid`. This doesn't require the model checkpoint to be present on the local system (the run path decomposition is sketched after this example). -
- Usage: `$ python train.py --resume wandb-artifact://{run_path}` - ![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) -
- -
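The run path after the prefix is decomposed into entity, project and run id, from which the checkpoint artifact name is built; this mirrors `get_run_info` in `wandb_utils.py` later in this diff:

```python
from pathlib import Path

run_path = Path('username/project/runid')  # the part after wandb-artifact://
run_id = run_path.stem
project = run_path.parent.stem
entity = run_path.parent.parent.stem
model_artifact_name = 'run_' + run_id + '_model'
print(entity, project, run_id, model_artifact_name)  # username project runid run_runid_model
```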

### 6: Resume runs from dataset artifact & checkpoint artifacts

- Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device. - The syntax is the same as in the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e. set both `--upload_dataset` (or - train from a `_wandb.yaml` file) and `--save_period`. - -
- Usage: `$ python train.py --resume wandb-artifact://{run_path}` - ![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) -
- - - -

## Reports

-W&B Reports can be created from your saved runs for sharing online. Once a report is created, you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). - -Weights & Biases Reports - -## Environments - -YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) -- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - -## Status - -![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.
diff --git a/utils/loggers/wandb/__init__.py b/utils/loggers/wandb/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py deleted file mode 100644 index 06e81fb69307..000000000000 --- a/utils/loggers/wandb/log_dataset.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse - -from wandb_utils import WandbLogger - -from utils.general import LOGGER - -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def create_dataset_artifact(opt): - logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused - if not logger.wandb: - LOGGER.info("install wandb using `pip install wandb` to log the dataset") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--entity', default=None, help='W&B entity') - parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') - - opt = parser.parse_args() - opt.resume = False # Explicitly disallow resume check for dataset upload job - - create_dataset_artifact(opt) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py deleted file mode 100644 index d49ea6f2778b..000000000000 --- a/utils/loggers/wandb/sweep.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys -from pathlib import Path - -import wandb - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import parse_opt, train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - - -def sweep(): - wandb.init() - # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb. - hyp_dict = vars(wandb.config).get("_items").copy() - - # Workaround: get necessary opt args - opt = parse_opt(known=True) - opt.batch_size = hyp_dict.get("batch_size") - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.epochs = hyp_dict.get("epochs") - opt.nosave = True - opt.data = hyp_dict.get("data") - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.hyp = str(opt.hyp) - opt.project = str(opt.project) - device = select_device(opt.device, batch_size=opt.batch_size) - - # train - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - sweep() diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml deleted file mode 100644 index 688b1ea0285f..000000000000 --- a/utils/loggers/wandb/sweep.yaml +++ /dev/null @@ -1,143 +0,0 @@ -# Hyperparameters for training -# To set range- -# Provide min and max values as: -# parameter: -# -# min: scalar -# max: scalar -# OR -# -# Set a specific list of search space- -# parameter: -# values: [scalar1, scalar2, scalar3...] 
-# -# You can use grid, bayesian and hyperopt search strategy -# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration - -program: utils/loggers/wandb/sweep.py -method: random -metric: - name: metrics/mAP_0.5 - goal: maximize - -parameters: - # hyperparameters: set either min, max range or values list - data: - value: "data/coco128.yaml" - batch_size: - values: [64] - epochs: - values: [10] - - lr0: - distribution: uniform - min: 1e-5 - max: 1e-1 - lrf: - distribution: uniform - min: 0.01 - max: 1.0 - momentum: - distribution: uniform - min: 0.6 - max: 0.98 - weight_decay: - distribution: uniform - min: 0.0 - max: 0.001 - warmup_epochs: - distribution: uniform - min: 0.0 - max: 5.0 - warmup_momentum: - distribution: uniform - min: 0.0 - max: 0.95 - warmup_bias_lr: - distribution: uniform - min: 0.0 - max: 0.2 - box: - distribution: uniform - min: 0.02 - max: 0.2 - cls: - distribution: uniform - min: 0.2 - max: 4.0 - cls_pw: - distribution: uniform - min: 0.5 - max: 2.0 - obj: - distribution: uniform - min: 0.2 - max: 4.0 - obj_pw: - distribution: uniform - min: 0.5 - max: 2.0 - iou_t: - distribution: uniform - min: 0.1 - max: 0.7 - anchor_t: - distribution: uniform - min: 2.0 - max: 8.0 - fl_gamma: - distribution: uniform - min: 0.0 - max: 4.0 - hsv_h: - distribution: uniform - min: 0.0 - max: 0.1 - hsv_s: - distribution: uniform - min: 0.0 - max: 0.9 - hsv_v: - distribution: uniform - min: 0.0 - max: 0.9 - degrees: - distribution: uniform - min: 0.0 - max: 45.0 - translate: - distribution: uniform - min: 0.0 - max: 0.9 - scale: - distribution: uniform - min: 0.0 - max: 0.9 - shear: - distribution: uniform - min: 0.0 - max: 10.0 - perspective: - distribution: uniform - min: 0.0 - max: 0.001 - flipud: - distribution: uniform - min: 0.0 - max: 1.0 - fliplr: - distribution: uniform - min: 0.0 - max: 1.0 - mosaic: - distribution: uniform - min: 0.0 - max: 1.0 - mixup: - distribution: uniform - min: 0.0 - max: 1.0 - copy_paste: - distribution: uniform - min: 0.0 - max: 1.0 diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py deleted file mode 100644 index 238f4edbf2a0..000000000000 --- a/utils/loggers/wandb/wandb_utils.py +++ /dev/null @@ -1,589 +0,0 @@ -"""Utilities and tools for tracking runs with Weights & Biases.""" - -import logging -import os -import sys -from contextlib import contextmanager -from pathlib import Path -from typing import Dict - -import yaml -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from utils.dataloaders import LoadImagesAndLabels, img2label_paths -from utils.general import LOGGER, check_dataset, check_file - -try: - import wandb - - assert hasattr(wandb, '__version__') # verify package import not local dir -except (ImportError, AssertionError): - wandb = None - -RANK = int(os.getenv('RANK', -1)) -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): - return from_string[len(prefix):] - - -def check_wandb_config_file(data_config_file): - wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path - if Path(wandb_config).is_file(): - return wandb_config - return data_config_file - - -def check_wandb_dataset(data_file): - is_trainset_wandb_artifact = False - is_valset_wandb_artifact = False - if isinstance(data_file, dict): - # In that case another dataset manager has already 
processed it and we don't have to - return data_file - if check_file(data_file) and data_file.endswith('.yaml'): - with open(data_file, errors='ignore') as f: - data_dict = yaml.safe_load(f) - is_trainset_wandb_artifact = isinstance(data_dict['train'], - str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) - is_valset_wandb_artifact = isinstance(data_dict['val'], - str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX) - if is_trainset_wandb_artifact or is_valset_wandb_artifact: - return data_dict - else: - return check_dataset(data_file) - - -def get_run_info(run_path): - run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) - run_id = run_path.stem - project = run_path.parent.stem - entity = run_path.parent.parent.stem - model_artifact_name = 'run_' + run_id + '_model' - return entity, project, run_id, model_artifact_name - - -def check_wandb_resume(opt): - process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None - if isinstance(opt.resume, str): - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if RANK not in [-1, 0]: # For resuming DDP runs - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - api = wandb.Api() - artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') - modeldir = artifact.download() - opt.weights = str(Path(modeldir) / "last.pt") - return True - return None - - -def process_wandb_config_ddp_mode(opt): - with open(check_file(opt.data), errors='ignore') as f: - data_dict = yaml.safe_load(f) # data dict - train_dir, val_dir = None, None - if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) - train_dir = train_artifact.download() - train_path = Path(train_dir) / 'data/images/' - data_dict['train'] = str(train_path) - - if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) - val_dir = val_artifact.download() - val_path = Path(val_dir) / 'data/images/' - data_dict['val'] = str(val_path) - if train_dir or val_dir: - ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') - with open(ddp_data_path, 'w') as f: - yaml.safe_dump(data_dict, f) - opt.data = ddp_data_path - - -class WandbLogger(): - """Log training runs, datasets, models, and predictions to Weights & Biases. - - This logger sends information to W&B at wandb.ai. By default, this information - includes hyperparameters, system configuration and metrics, model metrics, - and basic data metrics and analyses. - - By providing additional command line arguments to train.py, datasets, - models and predictions can also be logged. 
- - For more on how this logger is used, see the Weights & Biases documentation: - https://docs.wandb.com/guides/integrations/yolov5 - """ - - def __init__(self, opt, run_id=None, job_type='Training'): - """ - - Initialize WandbLogger instance - - Upload dataset if opt.upload_dataset is True - - Setup training processes if job_type is 'Training' - - arguments: - opt (namespace) -- Commandline arguments for this run - run_id (str) -- Run ID of W&B run to be resumed - job_type (str) -- To set the job_type for this run - - """ - # Temporary-fix - if opt.upload_dataset: - opt.upload_dataset = False - # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") - - # Pre-training routine -- - self.job_type = job_type - self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run - self.val_artifact, self.train_artifact = None, None - self.train_artifact_path, self.val_artifact_path = None, None - self.result_artifact = None - self.val_table, self.result_table = None, None - self.bbox_media_panel_images = [] - self.val_table_path_map = None - self.max_imgs_to_log = 16 - self.wandb_artifact_data_dict = None - self.data_dict = None - # It's more elegant to stick to 1 wandb.init call, - # but useful config data is overwritten in the WandbLogger's wandb.init call - if isinstance(opt.resume, str): # checks resume from artifact - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name - assert wandb, 'install wandb to resume wandb runs' - # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - self.wandb_run = wandb.init(id=run_id, - project=project, - entity=entity, - resume='allow', - allow_val_change=True) - opt.resume = model_artifact_name - elif self.wandb: - self.wandb_run = wandb.init(config=opt, - resume="allow", - project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, - entity=opt.entity, - name=opt.name if opt.name != 'exp' else None, - job_type=job_type, - id=run_id, - allow_val_change=True) if not wandb.run else wandb.run - if self.wandb_run: - if self.job_type == 'Training': - if opt.upload_dataset: - if not opt.resume: - self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) - - if isinstance(opt.data, dict): - # This means another dataset manager has already processed the dataset info (e.g. ClearML) - # and they will have stored the already processed dict in opt.data - self.data_dict = opt.data - elif opt.resume: - # resume from artifact - if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - self.data_dict = dict(self.wandb_run.config.data_dict) - else: # local resume - self.data_dict = check_wandb_dataset(opt.data) - else: - self.data_dict = check_wandb_dataset(opt.data) - self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict - - # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. 
-                    self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True)
-                self.setup_training(opt)
-
-            if self.job_type == 'Dataset Creation':
-                self.wandb_run.config.update({"upload_dataset": True})
-                self.data_dict = self.check_and_upload_dataset(opt)
-
-    def check_and_upload_dataset(self, opt):
-        """
-        Check if the dataset format is compatible and upload it as W&B artifact
-
-        arguments:
-        opt (namespace) -- Commandline arguments for current run
-
-        returns:
-        Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
-        """
-        assert wandb, 'Install wandb to upload dataset'
-        config_path = self.log_dataset_artifact(opt.data, opt.single_cls,
-                                                'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
-        with open(config_path, errors='ignore') as f:
-            wandb_data_dict = yaml.safe_load(f)
-        return wandb_data_dict
-
-    def setup_training(self, opt):
-        """
-        Setup the necessary processes for training YOLO models:
-        - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
-        - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
-        - Setup log_dict, initialize bbox_interval
-
-        arguments:
-        opt (namespace) -- commandline arguments for this run
-
-        """
-        self.log_dict, self.current_epoch = {}, 0
-        self.bbox_interval = opt.bbox_interval
-        if isinstance(opt.resume, str):
-            modeldir, _ = self.download_model_artifact(opt)
-            if modeldir:
-                self.weights = Path(modeldir) / "last.pt"
-                config = self.wandb_run.config
-                opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str(
-                    self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\
-                    config.hyp, config.imgsz
-        data_dict = self.data_dict
-        if self.val_artifact is None:  # If --upload_dataset is set, use the existing artifact, don't download
-            self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(
-                data_dict.get('train'), opt.artifact_alias)
-            self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(
-                data_dict.get('val'), opt.artifact_alias)
-
-        if self.train_artifact_path is not None:
-            train_path = Path(self.train_artifact_path) / 'data/images/'
-            data_dict['train'] = str(train_path)
-        if self.val_artifact_path is not None:
-            val_path = Path(self.val_artifact_path) / 'data/images/'
-            data_dict['val'] = str(val_path)
-
-        if self.val_artifact is not None:
-            self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
-            columns = ["epoch", "id", "ground truth", "prediction"]
-            columns.extend(self.data_dict['names'])
-            self.result_table = wandb.Table(columns)
-            self.val_table = self.val_artifact.get("val")
-            if self.val_table_path_map is None:
-                self.map_val_table_path()
-        if opt.bbox_interval == -1:
-            self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
-            if opt.evolve or opt.noplots:
-                self.bbox_interval = opt.bbox_interval = opt.epochs + 1  # disable bbox_interval
-        train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None
-        # Update the data_dict to point to local artifacts dir
-        if train_from_artifact:
-            self.data_dict = data_dict
-
-    def download_dataset_artifact(self, path, alias):
-        """
-        Download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX
-
-        arguments:
-        path -- path of the dataset to be used for training
-        alias (str) -- alias of the artifact to be downloaded/used for training
-
-        returns:
-        (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if the dataset
-        is found, otherwise returns (None, None)
-        """
-        if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
-            artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
-            dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/"))
-            assert dataset_artifact is not None, "Error: W&B dataset artifact doesn't exist"
-            datadir = dataset_artifact.download()
-            return datadir, dataset_artifact
-        return None, None
-
-    def download_model_artifact(self, opt):
-        """
-        Download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX
-
-        arguments:
-        opt (namespace) -- Commandline arguments for this run
-        """
-        if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
-            model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
-            assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
-            modeldir = model_artifact.download()
-            # epochs_trained = model_artifact.metadata.get('epochs_trained')
-            total_epochs = model_artifact.metadata.get('total_epochs')
-            is_finished = total_epochs is None
-            assert not is_finished, 'training is finished, can only resume incomplete runs.'
-            return modeldir, model_artifact
-        return None, None
-
-    def log_model(self, path, opt, epoch, fitness_score, best_model=False):
-        """
-        Log the model checkpoint as W&B artifact
-
-        arguments:
-        path (Path) -- Path of directory containing the checkpoints
-        opt (namespace) -- Command line arguments for this run
-        epoch (int) -- Current epoch number
-        fitness_score (float) -- fitness score for current epoch
-        best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
-        """
-        model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model',
-                                        type='model',
-                                        metadata={
-                                            'original_url': str(path),
-                                            'epochs_trained': epoch + 1,
-                                            'save period': opt.save_period,
-                                            'project': opt.project,
-                                            'total_epochs': opt.epochs,
-                                            'fitness_score': fitness_score})
-        model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
-        wandb.log_artifact(model_artifact,
-                           aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
-        LOGGER.info(f"Saving model artifact on epoch {epoch + 1}")
-
-    def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
-        """
-        Log the dataset as W&B artifact and return the new data file with W&B links
-
-        arguments:
-        data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
-        single_cls (boolean) -- train multi-class data as single-class
-        project (str) -- project name. Used to construct the artifact path
-        overwrite_config (boolean) -- overwrites the data.yaml file if set to True, otherwise creates a new
-        file with the _wandb postfix, e.g. data_wandb.yaml
-
-        returns:
-        the new .yaml file with artifact links.
-        It can be used to start training directly from artifacts
-        """
-        upload_dataset = self.wandb_run.config.upload_dataset
-        log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val'
-        self.data_dict = check_dataset(data_file)  # parse and check
-        data = dict(self.data_dict)
-        nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
-        names = {k: v for k, v in enumerate(names)}  # to index dictionary
-
-        # log train set
-        if not log_val_only:
-            self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1),
-                                                            names,
-                                                            name='train') if data.get('train') else None
-            if data.get('train'):
-                data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
-
-        self.val_artifact = self.create_dataset_table(
-            LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None
-        if data.get('val'):
-            data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
-
-        path = Path(data_file)
-        # create a _wandb.yaml file with artifact links if both the train and val sets are logged
-        if not log_val_only:
-            path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml'  # updated data.yaml path
-            path = ROOT / 'data' / path
-            data.pop('download', None)
-            data.pop('path', None)
-            with open(path, 'w') as f:
-                yaml.safe_dump(data, f)
-                LOGGER.info(f"Created dataset config file {path}")
-
-        if self.job_type == 'Training':  # builds correct artifact pipeline graph
-            if not log_val_only:
-                self.wandb_run.log_artifact(
-                    self.train_artifact)  # calling use_artifact downloads the dataset. NOT NEEDED!
-            self.wandb_run.use_artifact(self.val_artifact)
-            self.val_artifact.wait()
-            self.val_table = self.val_artifact.get('val')
-            self.map_val_table_path()
-        else:
-            self.wandb_run.log_artifact(self.train_artifact)
-            self.wandb_run.log_artifact(self.val_artifact)
-        return path
-
-    def map_val_table_path(self):
-        """
-        Map the validation dataset Table, i.e. name of file -> its id in the W&B Table.
-        Useful for referencing artifacts for evaluation.
-        """
-        self.val_table_path_map = {}
-        LOGGER.info("Mapping dataset")
-        for i, data in enumerate(tqdm(self.val_table.data)):
-            self.val_table_path_map[data[3]] = data[0]
-
-    def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'):
-        """
-        Create and return W&B artifact containing W&B Table of the dataset.
-
-        arguments:
-        dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
-        class_to_id -- hash map that maps class ids to labels
-        name -- name of the artifact
-
-        returns:
-        dataset artifact to be logged or used
-        """
-        # TODO: Explore multiprocessing to split this loop and run it in parallel; this is essential for speeding up the logging
-        artifact = wandb.Artifact(name=name, type="dataset")
-        img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
-        img_files = tqdm(dataset.im_files) if not img_files else img_files
-        for img_file in img_files:
-            if Path(img_file).is_dir():
-                artifact.add_dir(img_file, name='data/images')
-                labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
-                artifact.add_dir(labels_path, name='data/labels')
-            else:
-                artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
-                label_file = Path(img2label_paths([img_file])[0])
-                artifact.add_file(str(label_file), name='data/labels/' +
-                                  label_file.name) if label_file.exists() else None
-        table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
-        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
-        for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
-            box_data, img_classes = [], {}
-            for cls, *xywh in labels[:, 1:].tolist():
-                cls = int(cls)
-                box_data.append({
-                    "position": {
-                        "middle": [xywh[0], xywh[1]],
-                        "width": xywh[2],
-                        "height": xywh[3]},
-                    "class_id": cls,
-                    "box_caption": "%s" % (class_to_id[cls])})
-                img_classes[cls] = class_to_id[cls]
-            boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}}  # inference-space
-            table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()),
-                           Path(paths).name)
-        artifact.add(table, name)
-        return artifact
-
-    def log_training_progress(self, predn, path, names):
-        """
-        Build evaluation Table. Uses reference from validation dataset table.
-
-        arguments:
-        predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
-        path (str): local path of the current evaluation image
-        names (dict(int, str)): hash map that maps class ids to labels
-        """
-        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
-        box_data = []
-        avg_conf_per_class = [0] * len(self.data_dict['names'])
-        pred_class_count = {}
-        for *xyxy, conf, cls in predn.tolist():
-            if conf >= 0.25:
-                cls = int(cls)
-                box_data.append({
-                    "position": {
-                        "minX": xyxy[0],
-                        "minY": xyxy[1],
-                        "maxX": xyxy[2],
-                        "maxY": xyxy[3]},
-                    "class_id": cls,
-                    "box_caption": f"{names[cls]} {conf:.3f}",
-                    "scores": {
-                        "class_score": conf},
-                    "domain": "pixel"})
-                avg_conf_per_class[cls] += conf
-
-                if cls in pred_class_count:
-                    pred_class_count[cls] += 1
-                else:
-                    pred_class_count[cls] = 1
-
-        for pred_class in pred_class_count.keys():
-            avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class]
-
-        boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
-        id = self.val_table_path_map[Path(path).name]
-        self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1],
-                                   wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
-                                   *avg_conf_per_class)
-
-    def val_one_image(self, pred, predn, path, names, im):
-        """
-        Log validation data for one image.
-        Updates the result Table if the validation dataset is uploaded and logs the bbox media panel images.
-
-        arguments:
-        pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
-        predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
-        path (str): local path of the current evaluation image
-        """
-        if self.val_table and self.result_table:  # Log Table if Val dataset is uploaded as artifact
-            self.log_training_progress(predn, path, names)
-
-        if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
-            if self.current_epoch % self.bbox_interval == 0:
-                box_data = [{
-                    "position": {
-                        "minX": xyxy[0],
-                        "minY": xyxy[1],
-                        "maxX": xyxy[2],
-                        "maxY": xyxy[3]},
-                    "class_id": int(cls),
-                    "box_caption": f"{names[int(cls)]} {conf:.3f}",
-                    "scores": {
-                        "class_score": conf},
-                    "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
-                boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
-                self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))
-
-    def log(self, log_dict):
-        """
-        Save the metrics to the logging dictionary
-
-        arguments:
-        log_dict (Dict) -- metrics/media to be logged in current step
-        """
-        if self.wandb_run:
-            for key, value in log_dict.items():
-                self.log_dict[key] = value
-
-    def end_epoch(self, best_result=False):
-        """
-        Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
-
-        arguments:
-        best_result (boolean): Boolean representing if the result of this evaluation is best or not
-        """
-        if self.wandb_run:
-            with all_logging_disabled():
-                if self.bbox_media_panel_images:
-                    self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images
-                try:
-                    wandb.log(self.log_dict)
-                except BaseException as e:
-                    LOGGER.info(
-                        f"An error occurred in the wandb logger. Training will proceed without interruption. More info:\n{e}"
-                    )
-                    self.wandb_run.finish()
-                    self.wandb_run = None
-
-                self.log_dict = {}
-                self.bbox_media_panel_images = []
-            if self.result_artifact:
-                self.result_artifact.add(self.result_table, 'result')
-                wandb.log_artifact(self.result_artifact,
-                                   aliases=[
-                                       'latest', 'last', 'epoch ' + str(self.current_epoch),
-                                       ('best' if best_result else '')])
-
-                wandb.log({"evaluation": self.result_table})
-                columns = ["epoch", "id", "ground truth", "prediction"]
-                columns.extend(self.data_dict['names'])
-                self.result_table = wandb.Table(columns)
-                self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
-
-    def finish_run(self):
-        """
-        Log metrics if any and finish the current W&B run
-        """
-        if self.wandb_run:
-            if self.log_dict:
-                with all_logging_disabled():
-                    wandb.log(self.log_dict)
-            wandb.run.finish()
-
-
-@contextmanager
-def all_logging_disabled(highest_level=logging.CRITICAL):
-    """ source - https://gist.github.com/simon-weber/7853144
-    A context manager that will prevent any logging messages triggered during the body from being processed.
-    :param highest_level: the maximum logging level in use.
-      This would only need to be changed if a custom level greater than CRITICAL is defined.
-    """
-    previous_level = logging.root.manager.disable
-    logging.disable(highest_level)
-    try:
-        yield
-    finally:
-        logging.disable(previous_level)
diff --git a/utils/loss.py b/utils/loss.py
index 9b9c3d9f8018..05f840ca5270 100644
--- a/utils/loss.py
+++ b/utils/loss.py
@@ -6,8 +6,8 @@
 import torch
 import torch.nn as nn
 
-from utils.metrics import bbox_iou
-from utils.torch_utils import de_parallel
+from .metrics import bbox_iou
+from .torch_utils import de_parallel
 
 
 def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
diff --git a/utils/metrics.py b/utils/metrics.py
index 7fb077774384..db697846ccc0 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -11,7 +11,7 @@
 import numpy as np
 import torch
 
-from utils import TryExcept, threaded
+from . import TryExcept, threaded
 
 
 def fitness(x):
diff --git a/utils/plots.py b/utils/plots.py
index f84aed9fb5c7..7654b9beb174 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -19,11 +19,11 @@
 import torch
 from PIL import Image, ImageDraw, ImageFont
 
-from utils import TryExcept, threaded
-from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path,
+from . import TryExcept, threaded
+from .general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path,
                       is_ascii, xywh2xyxy, xyxy2xywh)
-from utils.metrics import fitness
-from utils.segment.general import scale_image
+from .metrics import fitness
+from .segment.general import scale_image
 
 # Settings
 RANK = int(os.getenv('RANK', -1))
@@ -431,7 +431,7 @@ def plot_labels(labels, names=(), save_dir=Path('')):
 
 def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')):
     # Show classification image grid with labels (optional) and predictions (optional)
-    from utils.augmentations import denormalize
+    from .augmentations import denormalize
 
     names = names or [f'class{i}' for i in range(1000)]
    blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im),
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 77549b005ceb..dd051b2a5e53 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -19,7 +19,7 @@
 import torch.nn.functional as F
 from torch.nn.parallel import DistributedDataParallel as DDP
 
-from utils.general import LOGGER, check_version, colorstr, file_date, git_describe
+from .general import LOGGER, check_version, colorstr, file_date, git_describe
 
 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
 RANK = int(os.getenv('RANK', -1))
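The hunks above switch the utils/* modules to package-relative imports. The absolute form (from utils.metrics import ...) resolves only when a top-level package literally named utils is importable, so it breaks as soon as the package is vendored under another name; the relative form resolves against the package itself. A small sketch of the failure mode this avoids (the vendored directory names are hypothetical):

# Hypothetical layout: the package vendored under a different top-level name.
#   vendor/
#       yolo_utils/          # this package, no longer importable as 'utils'
#           __init__.py
#           metrics.py
#           loss.py          # 'from utils.metrics import bbox_iou' -> ModuleNotFoundError
#                            # 'from .metrics import bbox_iou'      -> resolves regardless of the name
from vendor.yolo_utils import loss  # works only with the relative-import form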
diff --git a/val.py b/val.py
deleted file mode 100644
index 599aa1afdd4a..000000000000
--- a/val.py
+++ /dev/null
@@ -1,408 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Validate a trained YOLOv5 detection model on a detection dataset
-
-Usage:
-    $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640
-
-Usage - formats:
-    $ python val.py --weights yolov5s.pt                 # PyTorch
-                              yolov5s.torchscript        # TorchScript
-                              yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
-                              yolov5s_openvino_model     # OpenVINO
-                              yolov5s.engine             # TensorRT
-                              yolov5s.mlmodel            # CoreML (macOS-only)
-                              yolov5s_saved_model        # TensorFlow SavedModel
-                              yolov5s.pb                 # TensorFlow GraphDef
-                              yolov5s.tflite             # TensorFlow Lite
-                              yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
-                              yolov5s_paddle_model       # PaddlePaddle
-"""
-
-import argparse
-import json
-import os
-import sys
-from pathlib import Path
-
-import numpy as np
-import torch
-from tqdm import tqdm
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[0]  # YOLOv5 root directory
-if str(ROOT) not in sys.path:
-    sys.path.append(str(ROOT))  # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
-
-from models.common import DetectMultiBackend
-from utils.callbacks import Callbacks
-from utils.dataloaders import create_dataloader
-from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements,
-                           check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression,
-                           print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
-from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
-from utils.plots import output_to_target, plot_images, plot_val_study
-from utils.torch_utils import select_device, smart_inference_mode
-
-
-def save_one_txt(predn, save_conf, shape, file):
-    # Save one txt result
-    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
-    for *xyxy, conf, cls in predn.tolist():
-        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
-        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
-        with open(file, 'a') as f:
-            f.write(('%g ' * len(line)).rstrip() % line + '\n')
-
-
-def save_one_json(predn, jdict, path, class_map):
-    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
-    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
-    box = xyxy2xywh(predn[:, :4])  # xywh
-    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
-    for p, b in zip(predn.tolist(), box.tolist()):
-        jdict.append({
-            'image_id': image_id,
-            'category_id': class_map[int(p[5])],
-            'bbox': [round(x, 3) for x in b],
-            'score': round(p[4], 5)})
-
-
-def process_batch(detections, labels, iouv):
-    """
-    Return correct prediction matrix
-    Arguments:
-        detections (array[N, 6]), x1, y1, x2, y2, conf, class
-        labels (array[M, 5]), class, x1, y1, x2, y2
-    Returns:
-        correct (array[N, 10]), for 10 IoU levels
-    """
-    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
-    iou = box_iou(labels[:, 1:], detections[:, :4])
-    correct_class = labels[:, 0:1] == detections[:, 5]
-    for i in range(len(iouv)):
-        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
-        if x[0].shape[0]:
-            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
-            if x[0].shape[0] > 1:
-                matches = matches[matches[:, 2].argsort()[::-1]]
-                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
-                # matches = matches[matches[:, 2].argsort()[::-1]]
-                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
-            correct[matches[:, 1].astype(int), i] = True
-    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
-
-
-@smart_inference_mode()
-def run(
-        data,
-        weights=None,  # model.pt path(s)
-        batch_size=32,  # batch size
-        imgsz=640,  # inference size (pixels)
-        conf_thres=0.001,  # confidence threshold
-        iou_thres=0.6,  # NMS IoU threshold
-        max_det=300,  # maximum detections per image
-        task='val',  # train, val, test, speed or study
-        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
-        workers=8,  # max dataloader workers (per RANK in DDP mode)
-        single_cls=False,  # treat as single-class dataset
-        augment=False,  # augmented inference
-        verbose=False,  # verbose output
-        save_txt=False,  # save results to *.txt
-        save_hybrid=False,  # save label+prediction hybrid results to *.txt
-        save_conf=False,  # save confidences in --save-txt labels
-        save_json=False,  # save a COCO-JSON results file
-        project=ROOT / 'runs/val',  # save to project/name
-        name='exp',  # save to project/name
-        exist_ok=False,  # existing project/name ok, do not increment
-        half=True,  # use FP16 half-precision inference
-        dnn=False,  # use OpenCV DNN for ONNX inference
-        model=None,
-        dataloader=None,
-        save_dir=Path(''),
-        plots=True,
-        callbacks=Callbacks(),
-        compute_loss=None,
-):
-    # Initialize/load model and set device
-    training = model is not None
-    if training:  # called by train.py
-        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
-        half &= device.type != 'cpu'  # half precision only supported on CUDA
-        model.half() if half else model.float()
-    else:  # called directly
-        device = select_device(device, batch_size=batch_size)
-
-        # Directories
-        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
-        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
-
-        # Load model
-        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
-        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
-        imgsz = check_img_size(imgsz, s=stride)  # check image size
-        half = model.fp16  # FP16 supported on limited backends with CUDA
-        if engine:
-            batch_size = model.batch_size
-        else:
-            device = model.device
-            if not (pt or jit):
-                batch_size = 1  # export.py models default to batch-size 1
-                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
-
-        # Data
-        data = check_dataset(data)  # check
-
-    # Configure
-    model.eval()
-    cuda = device.type != 'cpu'
-    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
-    nc = 1 if single_cls else int(data['nc'])  # number of classes
-    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
-    niou = iouv.numel()
-
-    # Dataloader
-    if not training:
-        if pt and not single_cls:  # check --weights are trained on --data
-            ncm = model.model.nc
-            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
-                              f'classes). Pass correct combination of --weights and --data that are trained together.'
-        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
-        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
-        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
-        dataloader = create_dataloader(data[task],
-                                       imgsz,
-                                       batch_size,
-                                       stride,
-                                       single_cls,
-                                       pad=pad,
-                                       rect=rect,
-                                       workers=workers,
-                                       prefix=colorstr(f'{task}: '))[0]
-
-    seen = 0
-    confusion_matrix = ConfusionMatrix(nc=nc)
-    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
-    if isinstance(names, (list, tuple)):  # old format
-        names = dict(enumerate(names))
-    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
-    s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95')
-    tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
-    dt = Profile(), Profile(), Profile()  # profiling times
-    loss = torch.zeros(3, device=device)
-    jdict, stats, ap, ap_class = [], [], [], []
-    callbacks.run('on_val_start')
-    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
-    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
-        callbacks.run('on_val_batch_start')
-        with dt[0]:
-            if cuda:
-                im = im.to(device, non_blocking=True)
-                targets = targets.to(device)
-            im = im.half() if half else im.float()  # uint8 to fp16/32
-            im /= 255  # 0 - 255 to 0.0 - 1.0
-            nb, _, height, width = im.shape  # batch size, channels, height, width
-
-        # Inference
-        with dt[1]:
-            preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None)
-
-        # Loss
-        if compute_loss:
-            loss += compute_loss(train_out, targets)[1]  # box, obj, cls
-
-        # NMS
-        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
-        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
-        with dt[2]:
-            preds = non_max_suppression(preds,
-                                        conf_thres,
-                                        iou_thres,
-                                        labels=lb,
-                                        multi_label=True,
-                                        agnostic=single_cls,
-                                        max_det=max_det)
-
-        # Metrics
-        for si, pred in enumerate(preds):
-            labels = targets[targets[:, 0] == si, 1:]
-            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
-            path, shape = Path(paths[si]), shapes[si][0]
-            correct = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
-            seen += 1
-
-            if npr == 0:
-                if nl:
-                    stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0]))
-                    if plots:
-                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
-                continue
-
-            # Predictions
-            if single_cls:
-                pred[:, 5] = 0
-            predn = pred.clone()
-            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred
-
-            # Evaluate
-            if nl:
-                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
-                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
-                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
-                correct = process_batch(predn, labelsn, iouv)
-                if plots:
-                    confusion_matrix.process_batch(predn, labelsn)
-            stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct, conf, pcls, tcls)
-
-            # Save/log
-            if save_txt:
-                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
-            if save_json:
-                save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
-            callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
-
-        # Plot images
-        if plots and batch_i < 3:
-            plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)  # labels
-            plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred
-
-        callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds)
-
-    # Compute metrics
-    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
-    if len(stats) and stats[0].any():
-        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
-        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
-        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
-    nt = np.bincount(stats[3].astype(int), minlength=nc)  # number of targets per class
-
-    # Print results
-    pf = '%22s' + '%11i' * 2 + '%11.3g' * 4  # print format
-    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
-    if nt.sum() == 0:
-        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, cannot compute metrics without labels')
-
-    # Print results per class
-    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
-        for i, c in enumerate(ap_class):
-            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
-
-    # Print speeds
-    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
-    if not training:
-        shape = (batch_size, 3, imgsz, imgsz)
-        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
-
-    # Plots
-    if plots:
-        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
-        callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)
-
-    # Save JSON
-    if save_json and len(jdict):
-        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
-        anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json'))  # annotations
-        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions
-        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
-        with open(pred_json, 'w') as f:
-            json.dump(jdict, f)
-
-        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
-            check_requirements('pycocotools>=2.0.6')
-            from pycocotools.coco import COCO
-            from pycocotools.cocoeval import COCOeval
-
-            anno = COCO(anno_json)  # init annotations api
-            pred = anno.loadRes(pred_json)  # init predictions api
-            eval = COCOeval(anno, pred, 'bbox')
-            if is_coco:
-                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # image IDs to evaluate
-            eval.evaluate()
-            eval.accumulate()
-            eval.summarize()
-            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
-        except Exception as e:
-            LOGGER.info(f'pycocotools unable to run: {e}')
-
-    # Return results
-    model.float()  # for training
-    if not training:
-        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
-        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
-    maps = np.zeros(nc) + map
-    for i, c in enumerate(ap_class):
-        maps[c] = ap[i]
-    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
-
-
-def parse_opt():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
-    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)')
-    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
-    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
-    parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
-    parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
-    parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
-    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
-    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
-    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
-    parser.add_argument('--augment', action='store_true', help='augmented inference')
-    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
-    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
-    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
-    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
-    parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
-    parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
-    parser.add_argument('--name', default='exp', help='save to project/name')
-    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
-    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
-    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
-    opt = parser.parse_args()
-    opt.data = check_yaml(opt.data)  # check YAML
-    opt.save_json |= opt.data.endswith('coco.yaml')
-    opt.save_txt |= opt.save_hybrid
-    print_args(vars(opt))
-    return opt
-
-
-def main(opt):
-    check_requirements(exclude=('tensorboard', 'thop'))
-
-    if opt.task in ('train', 'val', 'test'):  # run normally
-        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
-            LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
-        if opt.save_hybrid:
-            LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone')
-        run(**vars(opt))
-
-    else:
-        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
-        opt.half = torch.cuda.is_available() and opt.device != 'cpu'  # FP16 for fastest results
-        if opt.task == 'speed':  # speed benchmarks
-            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
-            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
-            for opt.weights in weights:
-                run(**vars(opt), plots=False)
-
-        elif opt.task == 'study':  # speed vs mAP benchmarks
-            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
-            for opt.weights in weights:
-                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
-                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
-                for opt.imgsz in x:  # img-size
-                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
-                    r, _, t = run(**vars(opt), plots=False)
-                    y.append(r + t)  # results and times
-                np.savetxt(f, y, fmt='%10.4g')  # save
-            os.system('zip -r study.zip study_*.txt')
-            plot_val_study(x=x)  # plot
-        else:
-            raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")')
-
-
-if __name__ == "__main__":
-    opt = parse_opt()
-    main(opt)
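Finally, for callers of the deleted val.py: besides the CLI, it exposed a programmatic API. A minimal sketch of that usage on the pre-change tree, with argument names and return values taken from the run() definition above (the weights and data paths are illustrative):

# Illustrative sketch only -- programmatic use of the val.py module deleted above.
import val  # pre-change tree; this module is removed by the diff

# run() returned ((mp, mr, map50, map, *val_losses), per-class mAPs, timing tuple)
results, maps, times = val.run(data='data/coco128.yaml',  # dataset.yaml path
                               weights='yolov5s.pt',      # checkpoint to validate
                               batch_size=32,
                               imgsz=640,                 # inference size (pixels)
                               conf_thres=0.001,          # confidence threshold
                               iou_thres=0.6,             # NMS IoU threshold
                               half=False)                # FP32 inference
mp, mr, map50, map95 = results[:4]  # mean P, mean R, mAP@0.5, mAP@0.5:0.95
print(f'mAP@0.5 = {map50:.3f}  mAP@0.5:0.95 = {map95:.3f}')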