diff --git a/3dtools/.gitignore b/3dtools/.gitignore
deleted file mode 100644
index 9164704..0000000
--- a/3dtools/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-yolov5
\ No newline at end of file
diff --git a/3dtools/README.md b/3dtools/README.md
index 5d2e3f7..2bfbdc8 100644
--- a/3dtools/README.md
+++ b/3dtools/README.md
@@ -6,32 +6,30 @@ This is a user guide for 'OGrEE-Tools/3dtools', a powerful program that can auto
 | Name | method |
 | ------ | ------ |
-| idrac | template matching |
-| usb | rectangle detect |
-| d-sub female/vga | image's feature points |
-| d-sub male/rs232 | image's feature points |
-| slot normal | YOLOV5 object detection |
-| slot lp | YOLOV5 object detection |
-| disk lff | YOLOV5 object detection |
-| disk sff | YOLOV5 object detection |
-| power supply unit | YOLOV5 object detection |
+| BMC interface | YOLOv8 object detection |
+| USB port | YOLOv8 object detection |
+| VGA port | YOLOv8 object detection |
+| Serial port | YOLOv8 object detection |
+| Slot normal | YOLOv8 object detection |
+| Slot lp | YOLOv8 object detection |
+| Disk lff | YOLOv8 object detection |
+| Disk sff | YOLOv8 object detection |
+| PSU | YOLOv8 object detection |
 
 ## Requirements
 
-The minimum Python version required to use 3dtools is Python version 3.7.
-
-YOLOV5 needs to be cloned from [YOLOV5 official page](https://github.com/ultralytics/yolov5#tutorials) in the `3dtools` directory.
+The minimum Python version required to use 3dtools is Python 3.6.
 
 Package information is in [requirements.txt](requirements.txt)
 
 ## Setup
 
-A setup script for 3dtools is provided under `/.../OGrEE-Tools/3dtools/setup.sh`. To use it, run:
+To use `OGrEE-Tools/3dtools`, run the following commands to clone the repository and install the dependencies:
 
 ```sh
 git clone https://github.com/ditrit/OGrEE-Tools.git
 cd OGrEE-Tools/3dtools
-./setup.sh
+pip3 install -r ./requirements.txt
 ```
 
 ## Introduction
@@ -68,24 +66,21 @@ Run `python3 main.py --gui` to interact with the Graphic User Interface (GUI).
 You can also set additional parameters to control the algorithm's performance.
 >
-    # YOLOV5 hyparameters
-    --weights : string, model path or triton URL, if you don't want to change the YOLOV5 model, don't use it;
-    --conf-thres : float, default=0.5, confidence threshold, YOLOV5 will filter the results below it;
-    --iou-thres : float, default=0.45, 'NMS IoU threshold';
+    # YOLOv8 hyperparameters
+    --model : string, model path or triton URL; if you don't want to change the YOLOv8 model, don't use it;
+    --conf : float, default=0.5, confidence threshold; YOLOv8 will filter out results below it;
+    --iou : float, default=0.45, NMS IoU threshold;
     --device : default=cuda device, i.e. 0 or 0,1,2,3 or cpu;
-    --view-img : if provided, show results on screen;
+    --augment : if provided, augmented inference;
+    --show : if provided, show results on screen;
+    --save : if provided, save detection results;
     --save-txt : if provided, save results to *.txt;
     --save-conf : if provided, save confidences in --save-txt labels;
    --save-crop : if provided, save cropped prediction boxes;
-    --nosave : if provided, won't save images';
-    --augment : if provided, augmented inference;
-    --visualize : if provided, visualize features;
-    --project : default='ROOT/detect', save results to project/name;
-    --name : default='exp', save results to 'project/name';
-    --exist-ok : if provided, an existing project/name will be overwritten;
-    --line-thickness : int, default=1, bounding box thickness (pixels);
-    --hide-labels : default=False, if provided, hide labels;
-    --hide-conf : default=False, if provided, hide confidences.
+    --show-labels : if provided, show labels;
+    --show-conf : if provided, show confidences;
+    --show-boxes : if provided, show bounding boxes;
+    --line-width : bounding box line width (pixels).
 >
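These flags are forwarded almost one-to-one to the Ultralytics `predict` call (see the `gui.py` changes later in this diff). As a hedged sketch of the equivalent direct call, assuming the bundled `api/yolov8-best.pt` weights and an illustrative image path:

```python
# Minimal sketch, not the CLI itself; the image path and flag values are illustrative.
from ultralytics import YOLO

model = YOLO("api/yolov8-best.pt")     # fine-tuned weights added by this change
pred = model.predict(
    "image/serveur/example.rear.png",  # hypothetical 640x640 rescaled server image
    conf=0.5,                          # --conf : drop detections below this confidence
    iou=0.45,                          # --iou : NMS IoU threshold
    device="cpu",                      # --device : 0, "0,1,2,3" or "cpu"
    save=True,                         # --save : write annotated results to disk
    line_width=1,                      # --line-width : bounding box thickness in pixels
)
print(pred[0].boxes.xyxy)              # one Results object per input image
```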
 ### Running the CLI
@@ -102,32 +97,28 @@ python3 main.py --servername image/serveur/dell-poweredge-r720xd.rear.png --heig
 The user will be prompted with the following message:
 
 ```sh
-class list: {'d-sub female': '11', 'd-sub male': '12', 'idrac': '13', 'usb': '14', 'all': '15'}
-or enter the name 'slot', 'disk', 'source'(without '')
-Please input one by one. Enter 'finish' to output the json
-----Enter component name or code:
+Choose a component to detect.
+Available commands: 'All', 'BMC', 'Disk_lff', 'Disk_sff', 'Disks', 'PSU', 'Serial', 'Slot_lp', 'Slot_normal', 'Slots', 'USB', 'VGA'
+Enter 'finish' to output the JSON.
+
+Command = ...
 ```
 
-Enter the wished name or code to start the detection.
+Enter the desired component name to start the detection.
 
 Examples:
 
 ```sh
-> d-sub female
-> idrac
-> 14
-> 15
-> slot
+> BMC
+> VGA
+> Serial
+> Slots
 ```
 
 Results are printed, showing all detected components in the format `xxx in [x, y, angle, similarity]`.
 
 ```sh
-start detecting d-sub female
-0° searching progress: 100%: ▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋
-
-90° searching progress: 100%: ▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋▋
-
-vga in : [[640.0, 860.0, 0.0, 0.7698690075499673]]
+Detecting VGA...
+ - VGA in : [[640.0, 860.0, 0.0, 0.7698690075499673]]
 ```
 
 **Attention:** the input command is of type *string*. If the CLI is wrapped as an interface for another program, the commands passed to it should also be *strings*, not ints.
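Since the CLI reads its commands from standard input, another program can drive it by writing those command strings to the process's stdin, one per line. A minimal sketch, assuming the example image from above and placeholder height/width values:

```python
# Sketch of programmatic use; the server image and dimensions are placeholders.
import subprocess

proc = subprocess.run(
    ["python3", "main.py",
     "--servername", "image/serveur/dell-poweredge-r720xd.rear.png",
     "--height", "87.4", "--width", "443.99"],  # placeholder dimensions in mm
    input="BMC\nSlots\nfinish\n",               # commands are strings, one per line
    text=True,
    capture_output=True,
)
print(proc.stdout)  # detection results, then the final JSON
```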
@@ -210,44 +201,32 @@ Another idrac template was captured from model *cisco-c240-m6-lff.rear.png*, and
 ## Classifiers in *Classifier* class:
 
-### clidrac:
-
-Based on template matching, the program will use the standard image of an idrac component to compare each slice of image, and calculate the similarity. Then, a local_peak function is applied to find out the best position to pass the threshold, set as 0.45.
-
-### clvgars232:
-
-This classifier is designed to find vga or rs232 at the same time, since they have the same shape. The method applied is the 'CENSURE algorithm', that processes out desired image features, which are pin positions in our case.
-
-### clusb:
-
-This classifier is designed to find usb components in the image. We use the same template matching method to find out where the components are, giving out possible points in the image. Usb components have a rectangle edge; a straight line detection function to find suspected rectangles of the same dimension in the image. Lastly, an vector geometric calculation will decide whether the points are in this rectangle, i.e. if the points belong to the usb components we are searching for.
-
 ### dl_addComponents:
 
-This classifier is designed to find slots, disks and PSUs, and use the foreign YOLOV5 method for detection.
-
-The `api` file of YOLOV5 is under `/.../OGrEE-Tools/3dtools/api/yoloapi.py`. It is similar to the one under `/.../OGrEE-Tools/3dtools/yolov5/detect.py`, but a bit simpler and with different coordinates treatment. Some unnecessary parameters and pieces of code have been moved.
+This classifier is designed to find all components, using the external YOLOv8 model for detection.
 
-The user can detct all the slots, disks and PSUs with command `all` or with code `15`. These components can also be detected individually.
+The user can detect all components at once by typing the command `All`. These components can also be detected individually.
 
 | Command | Components detected |
 | ------ | ------ |
-| all | slot normal, slot lp, disk sff, disk lff, psu |
-| disk | disk lff, disk sff |
-| slot | slot normal, slot lp |
-| disk_sff | disk sff |
-| disk_lff | disk lff |
-| slot_normal | slot normal |
-| slot_lp | slot lp |
-| source | power supply unit |
-
-The detection results of YOLOV5 will be saved under `/.../OGrEE-Tools/3dtools/detect/exp x/`; the user can check the output there. Remember to clean the `detect` folder regularly, so it doesn't take up much memory.
+| All | BMC, Disk lff, Disk sff, PSU, Serial, Slot lp, Slot normal, USB, VGA |
+| BMC | BMC |
+| Disks | Disk lff, Disk sff |
+| Disk_sff | Disk sff |
+| Disk_lff | Disk lff |
+| Slots | Slot normal, Slot lp |
+| Slot_lp | Slot lp |
+| Slot_normal | Slot normal |
+| PSU | PSU |
+| Serial | Serial |
+| USB | USB |
+| VGA | VGA |
 
 #### Notes:
 
 - The unit dimensions of power supply units differ among manufacturers. A database has to be created to account for this information;
-- A spreadsheet with the shape of the server is stored under `/.../OGrEE-Tools/3dtools/image/name_list.xlsx`. Use with caution, as some data might be incorrect;
+- A spreadsheet with the shapes of servers is stored under `/.../OGrEE-Tools/3dtools/image/name_list.xlsx`. Use with caution, as some data might be incorrect;
 
 - A very common error is to swap the x and y axes, because the index conventions of different libraries are not the same; some use (vertical, horizontal), while others use (horizontal, vertical). When extending the code, check the axis order whenever the classifier finds a component at the wrong position but with high similarity, or when the component position falls outside the picture. The user can trust that the present version works properly.
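On that last point, the new `dl_addComponents` in `classifiers.py` (later in this diff) converts each YOLOv8 box from the 640x640 network input back to image pixels in (vertical, horizontal) order. A minimal sketch of that mapping, with `tools.RATIO` (pixels per mm) and the server dimensions stubbed with assumed values:

```python
# Sketch of the 640x640 -> image-pixel mapping used by dl_addComponents.
RATIO = 2.0               # assumed stand-in for tools.RATIO (pixels per mm)
shapemm = (87.4, 443.99)  # (height_mm, width_mm); placeholder server size

def to_position(x, y):
    """Map a YOLOv8 top-left corner (x, y) on the 640x640 input to
    (vertical, horizontal) pixel coordinates, as the classifier does."""
    return (int(y * RATIO * shapemm[0] / 640),  # vertical axis scales with height
            int(x * RATIO * shapemm[1] / 640))  # horizontal axis scales with width

print(to_position(320.0, 40.0))  # a box near the top middle of the face
```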
diff --git a/3dtools/YOLOVcfg/serveur122.yaml b/3dtools/YOLOVcfg/serveur122.yaml
deleted file mode 100644
index 6367691..0000000
--- a/3dtools/YOLOVcfg/serveur122.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
-# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
-# Example usage: python train.py --data coco128.yaml
-# parent
-# ├── yolov5
-# └── datasets
-#     └── coco128  ← downloads here (7 MB)
-
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: D:/Work/OGREE/image/YOLO_serveur/train/  # dataset root dir
-train: D:/Work/OGREE/image/YOLO_serveur/train/images
-val: D:/Work/OGREE/image/YOLO_serveur/train/images
-test:  # test images (optional)
-
-# Classes
-names:
-  0: slot_normal
-  1: slot_lp
-  2: disk_lff
-  3: disk_sff
-  4: PSU
-
-
-# Download script/URL (optional)
-download: https://ultralytics.com/assets/coco128.zip
diff --git a/3dtools/YOLOVcfg/serveur_yolov5s.yaml b/3dtools/YOLOVcfg/serveur_yolov5s.yaml
deleted file mode 100644
index 8c330b7..0000000
--- a/3dtools/YOLOVcfg/serveur_yolov5s.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
-
-# Parameters
-nc: 5  # number of classes
-depth_multiple: 0.33  # model depth multiple
-width_multiple: 0.50  # layer channel multiple
-anchors:
-  - [10,13, 16,30, 33,23]  # P3/8
-  - [30,61, 62,45, 59,119]  # P4/16
-  - [116,90, 156,198, 373,326]  # P5/32
-
-# YOLOv5 v6.0 backbone
-backbone:
-  # [from, number, module, args]
-  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
-   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
-   [-1, 3, C3, [128]],
-   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
-   [-1, 6, C3, [256]],
-   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
-   [-1, 9, C3, [512]],
-   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
-   [-1, 3, C3, [1024]],
-   [-1, 1, SPPF, [1024, 5]],  # 9
-  ]
-
-# YOLOv5 v6.0 head
-head:
-  [[-1, 1, Conv, [512, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
-   [-1, 3, C3, [512, False]],  # 13
-
-   [-1, 1, Conv, [256, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
-   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
-
-   [-1, 1, Conv, [256, 3, 2]],
-   [[-1, 14], 1, Concat, [1]],  # cat head P4
-   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
-
-   [-1, 1, Conv, [512, 3, 2]],
-   [[-1, 10], 1, Concat, [1]],  # cat head P5
-   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
-
-   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-  ]
diff --git a/3dtools/YOLOVcfg/yolov5_guid.txt b/3dtools/YOLOVcfg/yolov5_guid.txt
deleted file mode 100644
index 6543432..0000000
--- a/3dtools/YOLOVcfg/yolov5_guid.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-This file is used to config the YOLOV5 model.
-step 1: copy "serveur122.yaml", paste under /yolov5/data. This is the setting of our dataset.
-step 2: Copy "serveur_yolov5s.yaml", paste under /yolov5/models. This is the config of yolov5s model in the case of Ogree.
-Then we can use the YOLOV5 model in our code.
\ No newline at end of file
diff --git a/3dtools/api/best.pt b/3dtools/api/best.pt
deleted file mode 100644
index 31cc25e..0000000
Binary files a/3dtools/api/best.pt and /dev/null differ
diff --git a/3dtools/api/yoloapi.py b/3dtools/api/yoloapi.py
deleted file mode 100644
index e0bf003..0000000
--- a/3dtools/api/yoloapi.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
-"""
-Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
- -Usage - sources: - $ python detect.py --weights yolov5s.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - screen # screenshot - path/ # directory - list.txt # list of images - list.streams # list of streams - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream - -Usage - formats: - $ python detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s_openvino_model # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s_paddle_model # PaddlePaddle -""" - -import argparse -import os -import platform -import sys -from pathlib import Path -import torch - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] / "yolov5" # YOLOv5 root directory - -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from yolov5.models.common import DetectMultiBackend -from yolov5.utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams -from yolov5.utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) -from yolov5.utils.plots import Annotator, colors, save_one_box -from yolov5.utils.torch_utils import select_device, smart_inference_mode - - - - -@smart_inference_mode() -def run( - weights=ROOT / 'yolov5s.pt', # model path or triton URL - source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - imgsz=(640, 640), # inference size (height, width) - conf_thres=0.25, # confidence threshold - iou_thres=0.45, # NMS IOU threshold - max_det=1000, # maximum detections per image - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - view_img=False, # show results - save_txt=False, # save results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_crop=False, # save cropped prediction boxes - nosave=False, # do not save images/videos - classes=None, # filter by class: --class 0, or --class 0 2 3 - agnostic_nms=False, # class-agnostic NMS - augment=False, # augmented inference - visualize=False, # visualize features - update=False, # update all models - project=ROOT / 'detect', # save results to project/name - name='sl_dis_pw', # save results to project/name - exist_ok=True, # existing project/name ok, do not increment - line_thickness=2, # bounding box thickness (pixels) - hide_labels=True, # hide labels - hide_conf=False, # hide confidences - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - vid_stride=1, # video frame-rate stride -): - source = str(source) - save_img = not nosave and not source.endswith('.txt') # save inference images - is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) - is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) - screenshot = source.lower().startswith('screen') - if is_url and is_file: - source = check_file(source) # download - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Load model - device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) - stride, names, pt = model.stride, model.names, model.pt - imgsz = check_img_size(imgsz, s=stride) # check image size - - # Dataloader - bs = 1 # batch_size - if webcam: - view_img = check_imshow(warn=True) - dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) - elif screenshot: - dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) - else: - dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - vid_path, vid_writer = [None] * bs, [None] * bs - - # Run inference - model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) - for path, im, im0s, vid_cap, s in dataset: - with dt[0]: - im = torch.from_numpy(im).to(model.device) - im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - if len(im.shape) == 3: - im = im[None] # expand for batch dim - - # Inference - with dt[1]: - visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False - pred = model(im, augment=augment, visualize=visualize) - - # NMS - with dt[2]: - pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) - pred0 = pred.copy() - - # Second-stage classifier (optional) - # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) - - # Process predictions - for i, det in enumerate(pred): # per image - seen += 1 - p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) - p = Path(p) # to Path - save_path = str(save_dir / p.name) # im.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt - s += '%gx%g ' % im.shape[2:] # print string - gn = torch.tensor(im0.shape)[[1, 0, 
1, 0]] # normalization gain whwh - gn = gn.to(device) - imc = im0.copy() if save_crop else im0 # for save_crop - annotator = Annotator(im0, line_width=line_thickness, example=str(names)) - if len(det): - # Rescale boxes from img_size to im0 size - - det0 = torch.clone(det) - det0[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() / gn - pred0[i] = det0 - det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() - # Print results - for c in det[:, 5].unique(): - n = (det[:, 5] == c).sum() # detections per class - s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string - - # Write results - for *xyxy, conf, cls in reversed(det): - if save_txt: # Write to file - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4))).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(f'{txt_path}.txt', 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - if save_img or save_crop or view_img: # Add bbox to image - c = int(cls) # integer class - label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') - annotator.box_label(xyxy, label, color=colors(c, True)) - if save_crop: - save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) - - # Stream results - im0 = annotator.result() - if view_img: - if platform.system() == 'Linux' and p not in windows: - windows.append(p) - cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) - cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) - cv2.imshow(str(p), im0) - cv2.waitKey(1) # 1 millisecond - - # Save results (image with detections) - if save_img: - if dataset.mode == 'image': - cv2.imwrite(save_path, im0) - else: # 'video' or 'stream' - if vid_path[i] != save_path: # new video - vid_path[i] = save_path - if isinstance(vid_writer[i], cv2.VideoWriter): - vid_writer[i].release() # release previous video writer - if vid_cap: # video - fps = vid_cap.get(cv2.CAP_PROP_FPS) - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - else: # stream - fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos - vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - vid_writer[i].write(im0) - - # Print time (inference-only) - LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") - # Print results - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) - if save_txt or save_img: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") - if update: - strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) - return torch.flip(pred0[0], [1]) - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT/'runs/train\exp\weights/best.pt', help='model path or triton URL') - parser.add_argument('--source', type=str, default=FILE.parents[1]/'image/YOLO_serveur/test/images', help='file/dir/URL/glob/screen/0(webcam)') - parser.add_argument('--data', type=str, default=ROOT / 'yolov5/data/serveur122.yaml', help='(optional) dataset.yaml path') - parser.add_argument('--imgsz', '--img', 
'--img-size', nargs='+', type=int, default=[640], help='inference size h,w') - parser.add_argument('--conf-thres', type=float, default=0.5, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') - parser.add_argument('--max-det', type=int, default=200, help='maximum detections per image') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='if provided, show results') - parser.add_argument('--save-txt', action='store_true', help='if provided, save results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='if provided, save confidences in --save-txt labels') - parser.add_argument('--save-crop', action='store_true', help='if provided, save cropped prediction boxes') - parser.add_argument('--nosave', action='store_true', help='if provided, do not save images/videos') - parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') - parser.add_argument('--agnostic-nms', action='store_true', help='if provided, class-agnostic NMS') - parser.add_argument('--augment', action='store_true', help='if provided, augmented inference') - parser.add_argument('--visualize', action='store_true', help='if provided, visualize features') - parser.add_argument('--update', action='store_true', help='if provided, update all models') - parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save results to project/name') - parser.add_argument('--exist-ok', action='store_true', help='if provided, existing project/name ok, do not increment') - parser.add_argument('--line-thickness', default=1, type=int, help='bounding box thickness (pixels)') - parser.add_argument('--hide-labels', default=False, action='store_true', help='if provided, hide labels') - parser.add_argument('--hide-conf', default=False, action='store_true', help='if provided, hide confidences') - parser.add_argument('--half', action='store_true', help='if provided, use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='if provided, use OpenCV DNN for ONNX inference') - parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') - opt = parser.parse_args() - opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand - print_args(vars(opt)) - return opt - - -def main(opt): - check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) - run(**vars(opt)) - - -if __name__ == '__main__': - opt = parse_opt() - main(opt) diff --git a/3dtools/api/yolov8-best.pt b/3dtools/api/yolov8-best.pt new file mode 100644 index 0000000..adf507f Binary files /dev/null and b/3dtools/api/yolov8-best.pt differ diff --git a/3dtools/censure.py b/3dtools/censure.py deleted file mode 100644 index 7fa1a82..0000000 --- a/3dtools/censure.py +++ /dev/null @@ -1,98 +0,0 @@ -from skimage.feature import CENSURE,plot_matches,match_descriptors -from skimage.color.colorconv import rgba2rgb, rgb2hsv, rgb2gray, hsv2rgb -import matplotlib.pyplot as plt -import numpy as np -from skimage.io import imshow,imread,imsave -from skimage import filters -from skimage import exposure -from skimage.transform import resize -import tools -import matplotlib.pyplot as plt - -mask = tools.imageload('standard/mask.png','grey') -mask = mask>0.5 - -#FILEHANDEL = "rs232/d9" -FILEHANDEL = "vga/vga" 
-SAVEHANDEL = "image/result/" -QUANTITY = 13 -detector = CENSURE(min_scale=1, max_scale=7, mode='DoB', non_max_threshold=0.15, line_threshold=10) -#(min_scale=1, max_scale=7, mode='DoB', non_max_threshold=0.15, line_threshold=10) -std = tools.imageload("standard/standard-vga.png",'grey') -stdfeature = detector.keypoints -for i in range(1,QUANTITY+1): - path = FILEHANDEL + '{}'.format(str(i).zfill(3)) + '.png' - img_orig = tools.imageload(path,'grey') - img_orig = resize(img_orig, (105,290)) - #img_orig = exposure.equalize_hist(img_orig) - img_vag = filters.gaussian(img_orig * mask,sigma=1) - img_inv = -img_vag + 1 - - detector.detect(img_vag) - #detector.detect(img_inv) - - #dst = (dst * 255.0).astype('uint8') - #imsave(SAVEHANDEL + '{}'.format(str(i).zfill(3)) + '.png', img_orig) - pfeature = detector.keypoints - idx = [] - for j in range(pfeature.shape[0]): - if not mask[pfeature[j,0],pfeature[j,1]]: - idx.append(j) - pfeature = np.delete(pfeature, idx, 0) - print(i," picture similarity is :",tools.patchsimilarity(detector.keypoints,stdfeature)) - plt.figure(linewidth=4) - ax = plt.gca() - ax.imshow(img_vag, cmap=plt.cm.gray) - - ax.scatter(detector.keypoints[:, 1], detector.keypoints[:, 0], - 2 ** detector.scales, facecolors='none', edgecolors='r') - ax.axis('off') - path = SAVEHANDEL + '{}'.format(str(i).zfill(3)) + '.png' - plt.savefig(path) - -#ax.set_facecolor('lightgreen') # 设置视图背景颜⾊ - -# 2、图例 -#plt.legend(['Sin','Cos'],fontsize = 18,loc = 'center',ncol = 2,bbox_to_anchor =[0,1.05,1,0.2]) -# plt.tight_layout() # ⾃动调整布局空间,就不会出现图⽚保存不完整 -''' - - -mask = tools.imageload('standard/mask.png','grey') -mask = mask>0.5 -detector = CENSURE(min_scale=1, max_scale=7, mode='DoB', non_max_threshold=0.15, line_threshold=10) -path = 'standard/standard-vga.png' -img_std = tools.imageload(path, 'grey') -img_std = resize(img_std, (105, 290)) -# img_orig = exposure.equalize_hist(img_orig) -img_std = filters.gaussian(img_std * mask, sigma=1.25) -img_istd = -img_std + 1 - -detector.detect(img_std) -keypoints_std = detector.keypoints - -path = 'vga/vga001.png' -img = tools.imageload(path, 'grey') -img = resize(img, (105, 290)) -# img_orig = exposure.equalize_hist(img_orig) -img = filters.gaussian(img * mask, sigma=1.25) -img_i = -img + 1 - -detector.detect(img) -keypoints_img = detector.keypoints - -matches = match_descriptors(keypoints_std, keypoints_img, metric=None, p=2, cross_check=False) -#detector.detect(img_inv) - -# Visualize the results. - -fig, ax = plt.subplots(nrows=1, ncols=1) - -plt.gray() - -plot_matches(ax, img, img_std, keypoints_img, keypoints_std,matches,only_matches=False,alignment='vertical') -ax.axis("off") -ax.set_title("Inlier correspondences") -plt.show() -''' - diff --git a/3dtools/classifiers.py b/3dtools/classifiers.py index c172031..d8f8f3a 100644 --- a/3dtools/classifiers.py +++ b/3dtools/classifiers.py @@ -12,167 +12,31 @@ class Classifiers: - def clvga_rs232(self, type): + def dl_addComponents(self, pred): """ - Find the vga and rs232 in the image. - - Args: - type: 'female' or 'male'. if 'female', detect vga, else detect rs232 - - Constance: - ACCURACY: step of moving in pixels. Minimum is 1 - - Returns: - Coordinates of vga or rs232 in the picture. 
- """ - ACCURACY = 16 - detector = CENSURE(min_scale=1, max_scale=7, mode='DoB', non_max_threshold=0.15, line_threshold=10) - if type == 'female': - vga_list = [] - std_fvga, _ = tools.imfeature(self._vga, self._mask, detector) - fvga_des = tools.Pins(std_fvga, 'vga').destribution() - im_dark, _ = tools.imfeature(self._image, np.ones_like(self._image), detector) - imdark_des = tools.Pins(im_dark, 'vga', np.array(self._image.shape)).destribution() - tmpvga = tools.paramatch(ACCURACY, imdark_des, 0, fvga_des*self._mask) - vga_list += tmpvga - tmpvga = tools.paramatch(ACCURACY, imdark_des, 90, np.rot90(fvga_des*self._mask, 1)) - vga_list += tmpvga - vga_list = tools.composantfilter(vga_list, 180 + 95) - # tools.drawcomponents(self._image, vga_list, 180, 95) - for vga in vga_list: - print(" - VGA port in : " + str(vga)) - for x, y, ang, sim in vga_list: - self.components[(x, y)] = ("vga", "vga", ang, sim) - if vga_list == []: - print(" - Nothing found") - elif type == 'male': - rs232_list = [] - _, std_frs232 = tools.imfeature(self._rs232, self._mask, detector) - frs232_des = tools.Pins(std_frs232, 'rs232').destribution() - _, im_bright = tools.imfeature(self._image, np.ones_like(self._image), detector) - imbright_des = tools.Pins(im_bright, 'rs232', np.array(self._image.shape)).destribution() - tmprs232 = tools.paramatch(ACCURACY, imbright_des, 0, frs232_des) - rs232_list += tmprs232 - tmprs232 = tools.paramatch(ACCURACY, imbright_des, 90, frs232_des) - rs232_list += tmprs232 - rs232_list = tools.composantfilter(rs232_list, 180 + 95) - # tools.drawcomponents(self._image, rs232_list, 180, 95) - for rs232 in rs232_list: - print(" - Serial port in : " + str(rs232)) - for x, y, ang, sim in rs232_list: - self.components[(x, y)] = ("rs232", "rs232", ang, sim) - if rs232_list == []: - print(" - Nothing found") - - def clidrac(self): + Add components detected by YOLOv8 model. """ - Find the Network cable interface in the image. - Constance: - min_distance: parameter of peaklocalmax - threshold: parameter of peaklocalmax - - Returns: - Coordinates of idrac in the picture. - """ - min_distance = 45 - threshold = 0.45 - template_e = tools.imedge(self._idrac) - image_e = tools.imedge(self._image) - position_sim1 = tools.template_match(image_e, template_e, threshold, min_distance) - template_e = tools.imedge(self._idrac_cisco) - position_sim2 = tools.template_match(image_e, template_e, threshold, min_distance) - position_sim = position_sim1 if np.mean(np.array(position_sim1)[:, 3]) > np.mean(np.array(position_sim2)[:, 3]) else position_sim2 - # position_sim = tools.composantfilter(position_sim, 116+89) - for bmc in position_sim: - print(" - BMC in : " + str(bmc)) - for x, y, ang, sim in position_sim: - self.components[(x, y)] = ("idrac", "idrac", ang, sim) - if position_sim == []: - print(" - Nothing found") - - def clusb(self): - """ - Find the usb in the image. + class_dic = {0: 'BMC', 1: 'Disk_lff', 2: 'Disk_sff', 3: 'PSU', 4: 'Serial', 5: 'Slot_lp', 6: 'Slot_normal', 7: 'USB', 8: 'VGA'} - Args: - - Constance: - thickness: parameter of tools.find_rectangle. The pixels maximum for the line. If pass, - program will consider it as two line - - Returns: - Coordinates of usb in the picture. 
- """ - def produce_rect(args): - x, y = args[1], args[0] - if angle == 0: - sim = tools.modsimilarity(tools.imedge(self._image[x-1:x + 48-1, y-2:y + 122-2]), stdedge) - else: - sim = tools.modsimilarity(self._image[x-2:x + 122-2, y-1:y + 48-1], stdedge.T) - return [x, y, angle, sim] - - found = False - thickness = 2 - stdedge = tools.imedge(self._usb) - rectangles = tools.find_rectangle1(self._image, 122, 48, thickness) # usb: width =13.15mm, height = 5.70 - if rectangles: - angle = 90 - usbs = list(map(produce_rect, rectangles)) - usbs = tools.composantfilter(usbs, 48+122) - for c in usbs: - print(" - USB port in : " + str(c)) - self.components[(c[0], c[1])] = ("usb", "usb", angle, c[3]) - # tools.drawcomponents(self._image, usbs, 120, 50) - if usbs != []: - found = True - rectangles = tools.find_rectangle_(self._image, 122, 48, thickness) # usb: width =13.15mm, height = 5.70 - if rectangles: + boxes = pred[0].boxes.numpy() + + for component in boxes: + compotype = class_dic[component.cls[0]] + x, y, _, _ = component.xyxy[0] + _, _, w, h = component.xywh[0] + position = (int(y * tools.RATIO * self._shapemm[0] / 640), int(x * tools.RATIO * self._shapemm[1] / 640)) angle = 0 - usbs = list(map(produce_rect, rectangles)) - usbs = tools.composantfilter(usbs, 48+122) - for c in usbs: - print(" - USB port in : " + str(c)) - self.components[(c[0], c[1])] = ("usb", "usb", angle, c[3]) - # tools.drawcomponents(self._image, usbs, 120, 50) - if usbs != []: - found = True - if not found: + if h * tools.RATIO * self._shapemm[0] / 640 > w * tools.RATIO * self._shapemm[1] / 640: + angle = 90 + sim = component.conf[0] + + self.components[position] = (compotype, compotype, angle, sim) + print(f" - {compotype} in : " + str([position, angle, sim])) + + if len(boxes) == 0: print(" - Nothing found") - def dl_addComponents(self, pred, compotype=0, angle=0): - """ - Add components by deep leaning other method, such as yolov. It can also be a coordinate marked by hand. - - Args: - pred: a tensor or numpy.ndarray of components like [class, sim,x,y,x,y] - compotype: If add components by other method not by yolov, should give the compotype string. - This function is not completed. So keep compotype=0 - angle: If add components by other method not by yolov, should give the angle. - This function is not completed. So keep compotype=0 - """ - class_dic = {0: 'slot_normal', 1: 'slot_lp', 2: 'disk_lff', 3: 'disk_sff', 4: 'PSU'} - if compotype == 0: - for i in pred: - angle = 0 - compotype = class_dic[int(i[0])] - if (i[2]-i[4])*self._image.shape[0] > (i[3]-i[5])*self._image.shape[1]: - angle = 90 - # modify the component's size by the box size in yolov - ''' - self.sizetable[compotype][2] = round(float(i[3]-i[5])*self._image.shape[1]/tools.RATIO, 1) - self.sizetable[compotype][0] = round(float(i[2]-i[4])*self._image.shape[0]/tools.RATIO, 1) - else: - self.sizetable[compotype][2] = round(float(i[2]-i[4])*self._image.shape[0]/tools.RATIO, 1) - self.sizetable[compotype][0] = round(float(i[3]-i[5])*self._image.shape[1]/tools.RATIO, 1) - ''' - position = tuple(map(int, np.floor(i[4:6].numpy() * self._image.shape))) - self.components[position] = (compotype, compotype, angle, float(i[1])) - print(f" - {compotype} in : " + str([position, angle, float(i[1])])) - - if pred.numel() == 0: - print(" - Nothing found") - def cutears(self): """ Determine if there is a pair of ears in the image. 
@@ -215,7 +79,7 @@ def writejson(self): jsonraw.append({"location": name+str(num), "type": compotype, "elemOrient": 'horizontal' if angle == 0 else 'vertical', 'elemPos': [round(float(k[1]), 1), 0, round(float(k[0] - composhape[2]), 1)], "elemSize": composhape, "labelPos": 'rear', - "color": "", "attributes": {"factor": "", 'similarity': similarity}}) + "color": "", "attributes": {"factor": "", 'similarity': str(similarity)}}) num += 1 else: for k in self.componentsmm: @@ -224,7 +88,7 @@ def writejson(self): jsonraw.append({"location": str(num) + name, "type": compotype, "elemOrient": 'horizontal' if angle == 0 else 'vertical', 'elemPos': [round(float(k[1]) - composhape[0], 1), round(756.67 - composhape[1], 1), round(float(k[0] - composhape[2]), 1)], "elemSize": composhape, "labelPos": 'front', - "color": "", "attributes": {"factor": "", 'similarity': similarity}}) + "color": "", "attributes": {"factor": "", 'similarity': str(similarity)}}) num += 1 self.jsonraw = json.dumps(jsonraw, indent=4) return self.jsonraw @@ -250,4 +114,4 @@ def __init__(self, servername, x, y, face): self.sizetable = tools.SIZETABLE self.components = dict() self.componentsmm = dict() - tools.rgbview(self._image) + # tools.rgbview(self._image) diff --git a/3dtools/gui.py b/3dtools/gui.py index 2d66c44..85bede6 100644 --- a/3dtools/gui.py +++ b/3dtools/gui.py @@ -1,8 +1,9 @@ import sys + +import numpy as np import tools from classifiers import Classifiers from pathlib import Path -import api.yoloapi from skimage.io import imsave from os import remove @@ -10,6 +11,7 @@ from tkinter.filedialog import askopenfilename from tkinter import messagebox, scrolledtext, ttk from PIL import Image, ImageTk +from ultralytics import YOLO class Stdout_to_window(object): @@ -29,83 +31,212 @@ def flush(self): self.widget.update_idletasks() -class Gui(): - # Resizes the image so it fits the space on screen - def resize_image(self, image): - if image.height > image.width: - image = image.rotate(90) +class Open_images_window(Toplevel): + def __init__(self, master): + # Open a new window + super().__init__(master) + self.title("OGrEE-Tools/3dtools - open images") + self.geometry("640x420") + self.resizable(False, False) - if image.width > 760: - w = 760 - self.image_ratio = w / image.width - h = int(self.image_ratio * image.height) - return image.resize((w, h)) + # Black header with white text + self.header = Frame(self, width=640, height=40, bg="black") + self.header.grid(columnspan=4, rowspan=3, row=0, column=0) - else: - self.image_ratio = 1 - return image + self.header_text = Label(self, text="OGrEE-Tools/3dtools - open images", bg="black", fg="white", font=("Helvetica, 20"), justify="center") + self.header_text.grid(columnspan=4, row=1) + # Main content + self.main_content = Frame(self, width=640, height=380, bg="white") + self.main_content.grid(columnspan=4, rowspan=4, row=3, column=0) - # Opens a single image (front OR back face) - def open_one_image(self): - try: - file = askopenfilename(initialdir="", filetypes=[("PNG image", "*.png"), ("JPEG image", "*.jpg"), ("All files", "*")]) - if file: - print("\n" + 94 * "=" + "\n") + # Buttons on leftmost column + self.select_rear_image_button = Button(self, text="Select REAR\nface image", command=self.select_rear_image, bg="white", fg="black", height=4, width=10) + self.select_rear_image_button.grid(columnspan=1, column=0, row=3) - self.options["servername"] = file - self.options["path"] = 'api/tmp' + self.options["servername"].split('/')[-1] - imsave(self.options["path"], 
tools.scaleim(self.options["servername"], self.options["height"], 2.0)) + self.select_front_image_button = Button(self, text="Select FRONT\nface image", command=self.select_front_image, bg="white", fg="black", height=4, width=10) + self.select_front_image_button.grid(columnspan=1, column=0, row=4) - image = Image.open(self.options["path"]) - image = self.resize_image(image) - photo = ImageTk.PhotoImage(image) - self.image_labels[0].config(image=photo) - self.image_labels[0].image = photo - self.image_labels[0].grid(columnspan=3, rowspan=2, column=2, row=4, padx=(20, 20), pady=(20, 20)) - self.image_labels[1].grid_remove() + # Text on central columns + self.rear_image_var = StringVar(self) + self.rear_image_var.set("No rear face image selected") + self.rear_image_file = "" + self.rear_image_label = Label(self, textvariable=self.rear_image_var, bg="white", fg="black", height=4) + self.rear_image_label.grid(columnspan=2, column=1, row=3) - is_rear = messagebox.askyesno("Server face", "Is this image of the REAR face of the device?", icon="question") - self.options["face"] = "rear" if is_rear else "front" + self.front_image_var = StringVar(self) + self.front_image_var.set("No front face image selected") + self.front_image_file = "" + self.front_image_label = Label(self, textvariable=self.front_image_var, bg="white", fg="black", height=4) + self.front_image_label.grid(columnspan=2, column=1, row=4) - self.classifiers = [Classifiers(self.options["servername"], self.options["height"], self.options["width"], self.options["face"])] + # Buttons on rightmost column + self.unselect_rear_image_button = Button(self, text="X", font=("Helvetica, 20"), command=self.unselect_rear_image, fg="red", height=1, width=1) + self.unselect_rear_image_button.grid(column=3, row=3, padx=(76, 0)) - print(f"\nNew image opened: {file.split('/')[-1]}.") + self.unselect_front_image_button = Button(self, text="X", font=("Helvetica, 20"), command=self.unselect_front_image, fg="red", height=1, width=1) + self.unselect_front_image_button.grid(column=3, row=4, padx=(76, 0)) - except FileNotFoundError: - messagebox.showerror("File not found", f"The file '{file}' could not be found.") + # Entries on second-to-bottom row + self.server_height_label = Label(self, text="Server height (mm):", bg="white", fg="black", height=4) + self.server_height_label.grid(column=0, row=5) + self.server_height_var = StringVar(self) + self.server_height_entry = Entry(self, textvariable=self.server_height_var, bg="white", fg="black", width=14) + self.server_height_entry.grid(column=1, row=5) - # Opens two images (front AND back face) - def open_two_images(self): - self.open_one_image() + self.server_width_label = Label(self, text="Server width (mm):", bg="white", fg="black", height=4) + self.server_width_label.grid(column=2, row=5) - try: - file = askopenfilename(initialdir="", filetypes=[("PNG image", "*.png"), ("JPEG image", "*.jpg"), ("All files", "*")]) - if file: - print("\n" + 94 * "=" + "\n") + self.server_width_var = StringVar(self) + self.server_width_entry = Entry(self, textvariable=self.server_width_var, bg="white", fg="black", width=14) + self.server_width_entry.grid(column=3, row=5) + + # Buttons on bottom row + self.cancel_button = Button(self, text="Cancel", command=self.destroy, bg="white", fg="red", height=4, width=10) + self.cancel_button.grid(column=1, row=6) + self.bind("", lambda e: self.destroy()) + + self.done_button = Button(self, text="Done", command=self.done, bg="white", fg="blue", height=4, width=10) + 
self.done_button.grid(column=2, row=6) + self.bind("", lambda e: self.done()) + + + def select_rear_image(self): + file = askopenfilename(initialdir="", filetypes=[("PNG image", "*.png"), ("JPEG image", "*.jpg"), ("All files", "*")]) + if file: + self.rear_image_file = file + self.rear_image_var.set(file.split('/')[-1]) - self.options["height_2"], self.options["width_2"] = self.options["height"], self.options["width"] - self.options["servername_2"] = file - self.options["path_2"] = 'api/tmp' + self.options["servername_2"].split('/')[-1] - imsave(self.options["path_2"], tools.scaleim(self.options["servername_2"], self.options["height_2"], 2.0)) + def select_front_image(self): + file = askopenfilename(initialdir="", filetypes=[("PNG image", "*.png"), ("JPEG image", "*.jpg"), ("All files", "*")]) + if file: + self.front_image_file = file + self.front_image_var.set(file.split('/')[-1]) - image = Image.open(self.options["path_2"]) + + def unselect_rear_image(self): + self.rear_image_file = "" + self.rear_image_var.set("No rear face image selected") + + + def unselect_front_image(self): + self.front_image_file = "" + self.front_image_var.set("No front face image selected") + + + def done(self): + if self.rear_image_file == "" and self.front_image_file == "": + messagebox.showwarning("No images selected", "Please, select at least one image before continuing.") + elif self.server_height_entry.get() == "" or not self.server_height_entry.get().replace(".", "").isnumeric(): + messagebox.showwarning("Invalid server height", "Server height should be a float.\n\nPlease, enter a valid server height before continuing.") + elif self.server_width_entry.get() == "" or not self.server_width_entry.get().replace(".", "").isnumeric(): + messagebox.showwarning("Invalid server width", "Server width should be a float.\n\nPlease, enter a valid server width before continuing.") + else: + if self.front_image_file == "": + ok = self.master.open_images({"rear": self.rear_image_file}, self.server_height_entry.get(), self.server_width_entry.get()) + elif self.rear_image_file == "": + ok = self.master.open_images({"front": self.front_image_file}, self.server_height_entry.get(), self.server_width_entry.get()) + else: + ok = self.master.open_images({"rear": self.rear_image_file, "front": self.front_image_file}, self.server_height_entry.get(), self.server_width_entry.get()) + + if ok: + self.destroy() + + +class Gui(Tk): + # Resizes the image so it fits the space on screen + def resize_image(self, image): + if image.height > image.width: + image = image.rotate(90, expand=True) + + w = 760 + self.image_ratio = w / image.width + h = int(self.image_ratio * image.height) + + if h > 152: + h = 152 + self.image_ratio = h / image.height + w = int(self.image_ratio * image.width) + + return image.resize((w, h)) + + + # Open one or two images (rear and/or front) + def open_images(self, images, height, width): + if len(images) == 1: + try: + remove(self.options["path"]) + remove(self.options["path640"]) + except FileNotFoundError: + pass + if len(self.classifiers) == 2: + try: + remove(self.options["path_2"]) + remove(self.options["path640_2"]) + except FileNotFoundError: + pass + try: + print("\n" + 94 * "=" + "\n") + + face, file = images.popitem() + self.options["servername"] = file + self.options["height"] = float(height) + self.options["width"] = float(width) + self.options["face"] = face + self.options["path"] = "api/tmp" + file.split('/')[-1] + imsave(self.options["path"], np.asarray(Image.open(file).resize((int(tools.RATIO * 
self.options["width"]), int(tools.RATIO * self.options["height"]))))) + self.options["path640"] = "api/s-tmp" + file.split('/')[-1] + imsave(self.options["path640"], np.asarray(Image.open(file).resize((640, 640)))) + + image = Image.open(self.options["path"]) image = self.resize_image(image) photo = ImageTk.PhotoImage(image) - self.image_labels[1].config(image=photo) - self.image_labels[1].image = photo - self.image_labels[1].grid(columnspan=3, rowspan=2, column=2, row=5, padx=(20, 20), pady=(20, 20)) - self.image_labels[0].grid(columnspan=3, rowspan=2, column=2, row=3, padx=(20, 20), pady=(20, 20)) + self.image_labels[0].config(image=photo) + self.image_labels[0].image = photo + self.image_labels[0].grid(columnspan=3, rowspan=2, column=2, row=4, padx=(20, 20), pady=(20, 20)) + self.image_labels[1].grid_remove() - self.options["face_2"] = "front" if self.options["face"] == "rear" else "rear" - self.classifiers.append(Classifiers(self.options["servername_2"], self.options["height_2"], self.options["width_2"], self.options["face_2"])) + self.classifiers = [Classifiers(file, float(height), float(width), face)] print(f"\nNew image opened: {file.split('/')[-1]}.") - - except FileNotFoundError: - messagebox.showerror("File not found", f"The file '{file}' could not be found.") + return True + except FileNotFoundError: + messagebox.showwarning("File not found", f"The file '{file}' could not be found.") + return False + else: + ok = self.open_images({"rear": images["rear"]}, float(height), float(width)) + if ok: + try: + print("\n" + 94 * "=" + "\n") + + file = images["front"] + self.options["servername_2"] = file + self.options["height_2"] = float(height) + self.options["width_2"] = float(width) + self.options["face_2"] = "front" + self.options["path_2"] = "api/tmp" + file.split('/')[-1] + imsave(self.options["path_2"], np.asarray(Image.open(file).resize((int(tools.RATIO * self.options["width"]), int(tools.RATIO * self.options["height"]))))) + self.options["path640_2"] = "api/s-tmp" + file.split('/')[-1] + imsave(self.options["path640_2"], np.asarray(Image.open(file).resize((640, 640)))) + + image = Image.open(self.options["path_2"]) + image = self.resize_image(image) + photo = ImageTk.PhotoImage(image) + self.image_labels[1].config(image=photo) + self.image_labels[1].image = photo + self.image_labels[1].grid(columnspan=3, rowspan=2, column=2, row=5, padx=(20, 20), pady=(20, 20)) + self.image_labels[0].grid(columnspan=3, rowspan=2, column=2, row=3, padx=(20, 20), pady=(20, 20)) + + self.classifiers.append(Classifiers(file, float(height), float(width), "front")) + + print(f"\nNew image opened: {file.split('/')[-1]}.") + except FileNotFoundError: + messagebox.showwarning("File not found", f"The file '{file}' could not be found.") + ok = False + + return ok # Reloads the images so new updates are shown @@ -142,26 +273,26 @@ def calculate_hitboxes(self): # Detects slots, disks and PSUs def detect_all(self): - def detect(classifier, face, servername, path): + def detect(classifier, face, path640, path, servername): print(f"\n{face.title()} face:") - if any([compotype == "slot_normal" or compotype == "slot_lp" or compotype == "disk_lff" or compotype == "disk_sff" or compotype == "PSU" for (_, compotype, *_) in classifier.components.values()]): + if classifier.components != {}: print(f" - Some components have already been detected. 
Try detecting each component individually.") else: self.options["classes"] = None - pred = api.yoloapi.run(self.options["weights"], path, self.options["data"], self.options["imgsz"], self.options["conf_thres"], self.options["iou_thres"], self.options["max_det"], self.options["device"], self.options["view_img"], - self.options["save_txt"], self.options["save_conf"], self.options["save_crop"], self.options["nosave"], self.options["classes"], self.options["agnostic_nms"], self.options["augment"], self.options["visualize"], self.options["update"], - self.options["project"], self.options["name"], self.options["exist_ok"], self.options["line_thickness"], self.options["hide_labels"], self.options["hide_conf"], self.options["half"], self.options["dnn"], self.options["vid_stride"]) - # pred: a list of tensor, each tensor represent a picture - classifier.dl_addComponents(pred.cpu()) + pred = self.model.predict(path640, conf=self.options["conf"], iou=self.options["iou"], imgsz=self.options["imgsz"], half=self.options["half"], device=self.options["device"], max_det=self.options["max_det"], visualize=self.options["visualize"], + augment=self.options["augment"], agnostic_nms=self.options["agnostic_nms"], classes=self.options["classes"], show=self.options["show"], save=self.options["save"], save_txt=self.options["save_txt"], save_conf=self.options["save_conf"], + save_crop=self.options["save_crop"], show_labels=self.options["show_labels"], show_conf=self.options["show_conf"], show_boxes=self.options["show_boxes"], line_width=self.options["line_width"]) + # pred: a list of tensor, each tensor represents a picture + classifier.dl_addComponents(pred) tools.drawcomponents_gui(servername, path, classifier.components) print("\n" + 94 * "=" + "\n") - print("Detecting slots, disks and power...") + print("Detecting all components...") sys.stdout.flush() - detect(self.classifiers[0], self.options["face"], self.options["servername"], self.options["path"]) + detect(self.classifiers[0], self.options["face"], self.options["path640"], self.options["path"], self.options["servername"]) if len(self.classifiers) == 2: - detect(self.classifiers[1], self.options["face_2"], self.options["servername_2"], self.options["path_2"]) + detect(self.classifiers[1], self.options["face_2"], self.options["path640_2"], self.options["path_2"], self.options["servername_2"]) self.update_images() self.calculate_hitboxes() @@ -169,26 +300,26 @@ def detect(classifier, face, servername, path): # Detects slots def detect_slot(self): - def detect(classifier, face, servername, path): + def detect(classifier, face, path640, path, servername): print(f"\n{face.title()} face:") - self.options["classes"] = [0, 1] - if any([compotype == "slot_normal" or compotype == "slot_lp" for (_, compotype, *_) in classifier.components.values()]): + self.options["classes"] = [5, 6] + if any([compotype == "Slot_normal" or compotype == "Slot_lp" for (_, compotype, *_) in classifier.components.values()]): print(f" - Slots have already been detected.") else: - pred = api.yoloapi.run(self.options["weights"], path, self.options["data"], self.options["imgsz"], self.options["conf_thres"], self.options["iou_thres"], self.options["max_det"], self.options["device"], self.options["view_img"], - self.options["save_txt"], self.options["save_conf"], self.options["save_crop"], self.options["nosave"], self.options["classes"], self.options["agnostic_nms"], self.options["augment"], self.options["visualize"], self.options["update"], - self.options["project"], self.options["name"], 
self.options["exist_ok"], self.options["line_thickness"], self.options["hide_labels"], self.options["hide_conf"], self.options["half"], self.options["dnn"], self.options["vid_stride"]) - # pred: a list of tensor, each tensor represent a picture - classifier.dl_addComponents(pred.cpu()) + pred = self.model.predict(path640, conf=self.options["conf"], iou=self.options["iou"], imgsz=self.options["imgsz"], half=self.options["half"], device=self.options["device"], max_det=self.options["max_det"], visualize=self.options["visualize"], + augment=self.options["augment"], agnostic_nms=self.options["agnostic_nms"], classes=self.options["classes"], show=self.options["show"], save=self.options["save"], save_txt=self.options["save_txt"], save_conf=self.options["save_conf"], + save_crop=self.options["save_crop"], show_labels=self.options["show_labels"], show_conf=self.options["show_conf"], show_boxes=self.options["show_boxes"], line_width=self.options["line_width"]) + # pred: a list of tensor, each tensor represents a picture + classifier.dl_addComponents(pred) tools.drawcomponents_gui(servername, path, classifier.components) print("\n" + 94 * "=" + "\n") print("Detecting slots...") sys.stdout.flush() - detect(self.classifiers[0], self.options["face"], self.options["servername"], self.options["path"]) + detect(self.classifiers[0], self.options["face"], self.options["path640"], self.options["path"], self.options["servername"]) if len(self.classifiers) == 2: - detect(self.classifiers[1], self.options["face_2"], self.options["servername_2"], self.options["path_2"]) + detect(self.classifiers[1], self.options["face_2"], self.options["path640_2"], self.options["path_2"], self.options["servername_2"]) self.update_images() self.calculate_hitboxes() @@ -196,26 +327,26 @@ def detect(classifier, face, servername, path): # Detects disks def detect_disk(self): - def detect(classifier, face, servername, path): + def detect(classifier, face, path640, path, servername): print(f"\n{face.title()} face:") - if any([compotype == "disk_lff" or compotype == "disk_sff" for (_, compotype, *_) in classifier.components.values()]): + if any([compotype == "Disk_lff" or compotype == "Disk_sff" for (_, compotype, *_) in classifier.components.values()]): print(f" - Disks have already been detected.") else: - self.options["classes"] = [2, 3] - pred = api.yoloapi.run(self.options["weights"], path, self.options["data"], self.options["imgsz"], self.options["conf_thres"], self.options["iou_thres"], self.options["max_det"], self.options["device"], self.options["view_img"], - self.options["save_txt"], self.options["save_conf"], self.options["save_crop"], self.options["nosave"], self.options["classes"], self.options["agnostic_nms"], self.options["augment"], self.options["visualize"], self.options["update"], - self.options["project"], self.options["name"], self.options["exist_ok"], self.options["line_thickness"], self.options["hide_labels"], self.options["hide_conf"], self.options["half"], self.options["dnn"], self.options["vid_stride"]) - # pred: a list of tensor, each tensor represent a picture - classifier.dl_addComponents(pred.cpu()) + self.options["classes"] = [1, 2] + pred = self.model.predict(path640, conf=self.options["conf"], iou=self.options["iou"], imgsz=self.options["imgsz"], half=self.options["half"], device=self.options["device"], max_det=self.options["max_det"], visualize=self.options["visualize"], + augment=self.options["augment"], agnostic_nms=self.options["agnostic_nms"], classes=self.options["classes"], 
show=self.options["show"], save=self.options["save"], save_txt=self.options["save_txt"], save_conf=self.options["save_conf"], + save_crop=self.options["save_crop"], show_labels=self.options["show_labels"], show_conf=self.options["show_conf"], show_boxes=self.options["show_boxes"], line_width=self.options["line_width"]) + # pred: a list of tensor, each tensor represents a picture + classifier.dl_addComponents(pred) tools.drawcomponents_gui(servername, path, classifier.components) print("\n" + 94 * "=" + "\n") print("Detecting disks...") sys.stdout.flush() - detect(self.classifiers[0], self.options["face"], self.options["servername"], self.options["path"]) + detect(self.classifiers[0], self.options["face"], self.options["path640"], self.options["path"], self.options["servername"]) if len(self.classifiers) == 2: - detect(self.classifiers[1], self.options["face_2"], self.options["servername_2"], self.options["path_2"]) + detect(self.classifiers[1], self.options["face_2"], self.options["path640_2"], self.options["path_2"], self.options["servername_2"]) self.update_images() self.calculate_hitboxes() @@ -223,26 +354,26 @@ def detect(classifier, face, servername, path): # Detects PSUs def detect_psu(self): - def detect(classifier, face, servername, path): + def detect(classifier, face, path640, path, servername): print(f"\n{face.title()} face:") if any([compotype == "PSU" for (_, compotype, *_) in classifier.components.values()]): print(f" - PSUs have already been detected.") else: - self.options["classes"] = 4 - pred = api.yoloapi.run(self.options["weights"], path, self.options["data"], self.options["imgsz"], self.options["conf_thres"], self.options["iou_thres"], self.options["max_det"], self.options["device"], self.options["view_img"], - self.options["save_txt"], self.options["save_conf"], self.options["save_crop"], self.options["nosave"], self.options["classes"], self.options["agnostic_nms"], self.options["augment"], self.options["visualize"], self.options["update"], - self.options["project"], self.options["name"], self.options["exist_ok"], self.options["line_thickness"], self.options["hide_labels"], self.options["hide_conf"], self.options["half"], self.options["dnn"], self.options["vid_stride"]) - # pred: a list of tensor, each tensor represent a picture - classifier.dl_addComponents(pred.cpu()) + self.options["classes"] = 3 + pred = self.model.predict(path640, conf=self.options["conf"], iou=self.options["iou"], imgsz=self.options["imgsz"], half=self.options["half"], device=self.options["device"], max_det=self.options["max_det"], visualize=self.options["visualize"], + augment=self.options["augment"], agnostic_nms=self.options["agnostic_nms"], classes=self.options["classes"], show=self.options["show"], save=self.options["save"], save_txt=self.options["save_txt"], save_conf=self.options["save_conf"], + save_crop=self.options["save_crop"], show_labels=self.options["show_labels"], show_conf=self.options["show_conf"], show_boxes=self.options["show_boxes"], line_width=self.options["line_width"]) + # pred: a list of tensor, each tensor represents a picture + classifier.dl_addComponents(pred) tools.drawcomponents_gui(servername, path, classifier.components) print("\n" + 94 * "=" + "\n") print("Detecting PSU...") sys.stdout.flush() - detect(self.classifiers[0], self.options["face"], self.options["servername"], self.options["path"]) + detect(self.classifiers[0], self.options["face"], self.options["path640"], self.options["path"], self.options["servername"]) if len(self.classifiers) == 2: - 
detect(self.classifiers[1], self.options["face_2"], self.options["servername_2"], self.options["path_2"]) + detect(self.classifiers[1], self.options["face_2"], self.options["path640_2"], self.options["path_2"], self.options["servername_2"]) self.update_images() self.calculate_hitboxes() @@ -250,21 +381,26 @@ def detect(classifier, face, servername, path): # Detects serial ports def detect_serial(self): - def detect(classifier, face, servername, path): + def detect(classifier, face, path640, path, servername): print(f"\n{face.title()} face:") - if any([compotype == "rs232" for (_, compotype, *_) in classifier.components.values()]): + if any([compotype == "Serial" for (_, compotype, *_) in classifier.components.values()]): print(f" - Serial ports have already been detected.") else: - classifier.clvga_rs232('male') + self.options["classes"] = 4 + pred = self.model.predict(path640, conf=self.options["conf"], iou=self.options["iou"], imgsz=self.options["imgsz"], half=self.options["half"], device=self.options["device"], max_det=self.options["max_det"], visualize=self.options["visualize"], + augment=self.options["augment"], agnostic_nms=self.options["agnostic_nms"], classes=self.options["classes"], show=self.options["show"], save=self.options["save"], save_txt=self.options["save_txt"], save_conf=self.options["save_conf"], + save_crop=self.options["save_crop"], show_labels=self.options["show_labels"], show_conf=self.options["show_conf"], show_boxes=self.options["show_boxes"], line_width=self.options["line_width"]) + # pred: a list of tensor, each tensor represents a picture + classifier.dl_addComponents(pred) tools.drawcomponents_gui(servername, path, classifier.components) print("\n" + 94 * "=" + "\n") print("Detecting serial ports...") sys.stdout.flush() - detect(self.classifiers[0], self.options["face"], self.options["servername"], self.options["path"]) + detect(self.classifiers[0], self.options["face"], self.options["path640"], self.options["path"], self.options["servername"]) if len(self.classifiers) == 2: - detect(self.classifiers[1], self.options["face_2"], self.options["servername_2"], self.options["path_2"]) + detect(self.classifiers[1], self.options["face_2"], self.options["path640_2"], self.options["path_2"], self.options["servername_2"]) self.update_images() self.calculate_hitboxes() @@ -272,21 +408,26 @@ def detect(classifier, face, servername, path): # Detects VGA ports def detect_vga(self): - def detect(classifier, face, servername, path): + def detect(classifier, face, path640, path, servername): print(f"\n{face.title()} face:") - if any([compotype == "vga" for (_, compotype, *_) in classifier.components.values()]): + if any([compotype == "VGA" for (_, compotype, *_) in classifier.components.values()]): print(f" - VGA ports have already been detected.") else: - classifier.clvga_rs232('female') + self.options["classes"] = 8 + pred = self.model.predict(path640, conf=self.options["conf"], iou=self.options["iou"], imgsz=self.options["imgsz"], half=self.options["half"], device=self.options["device"], max_det=self.options["max_det"], visualize=self.options["visualize"], + augment=self.options["augment"], agnostic_nms=self.options["agnostic_nms"], classes=self.options["classes"], show=self.options["show"], save=self.options["save"], save_txt=self.options["save_txt"], save_conf=self.options["save_conf"], + save_crop=self.options["save_crop"], show_labels=self.options["show_labels"], show_conf=self.options["show_conf"], show_boxes=self.options["show_boxes"], 
line_width=self.options["line_width"]) + # pred: a list of tensor, each tensor represents a picture + classifier.dl_addComponents(pred) tools.drawcomponents_gui(servername, path, classifier.components) print("\n" + 94 * "=" + "\n") print("Detecting VGA ports...") sys.stdout.flush() - detect(self.classifiers[0], self.options["face"], self.options["servername"], self.options["path"]) + detect(self.classifiers[0], self.options["face"], self.options["path640"], self.options["path"], self.options["servername"]) if len(self.classifiers) == 2: - detect(self.classifiers[1], self.options["face_2"], self.options["servername_2"], self.options["path_2"]) + detect(self.classifiers[1], self.options["face_2"], self.options["path640_2"], self.options["path_2"], self.options["servername_2"]) self.update_images() self.calculate_hitboxes() @@ -294,21 +435,26 @@ def detect(classifier, face, servername, path): # Detects BMC ports def detect_bmc(self): - def detect(classifier, face, servername, path): + def detect(classifier, face, path640, path, servername): print(f"\n{face.title()} face:") - if any([compotype == "idrac" for (_, compotype, *_) in classifier.components.values()]): + if any([compotype == "BMC" for (_, compotype, *_) in classifier.components.values()]): print(" - BMC interfaces have already been detected.") else: - classifier.clidrac() + self.options["classes"] = 0 + pred = self.model.predict(path640, conf=self.options["conf"], iou=self.options["iou"], imgsz=self.options["imgsz"], half=self.options["half"], device=self.options["device"], max_det=self.options["max_det"], visualize=self.options["visualize"], + augment=self.options["augment"], agnostic_nms=self.options["agnostic_nms"], classes=self.options["classes"], show=self.options["show"], save=self.options["save"], save_txt=self.options["save_txt"], save_conf=self.options["save_conf"], + save_crop=self.options["save_crop"], show_labels=self.options["show_labels"], show_conf=self.options["show_conf"], show_boxes=self.options["show_boxes"], line_width=self.options["line_width"]) + # pred: a list of tensor, each tensor represents a picture + classifier.dl_addComponents(pred) tools.drawcomponents_gui(servername, path, classifier.components) print("\n" + 94 * "=" + "\n") print("Detecting BMC interfaces...") sys.stdout.flush() - detect(self.classifiers[0], self.options["face"], self.options["servername"], self.options["path"]) + detect(self.classifiers[0], self.options["face"], self.options["path640"], self.options["path"], self.options["servername"]) if len(self.classifiers) == 2: - detect(self.classifiers[1], self.options["face_2"], self.options["servername_2"], self.options["path_2"]) + detect(self.classifiers[1], self.options["face_2"], self.options["path640_2"], self.options["path_2"], self.options["servername_2"]) self.update_images() self.calculate_hitboxes() @@ -316,21 +462,26 @@ def detect(classifier, face, servername, path): # Detects USB ports def detect_usb(self): - def detect(classifier, face, servername, path): + def detect(classifier, face, path640, path, servername): print(f"\n{face.title()} face:") - if any([compotype == "usb" for (_, compotype, *_) in classifier.components.values()]): + if any([compotype == "USB" for (_, compotype, *_) in classifier.components.values()]): print(" - USB ports have already been detected.") else: - classifier.clusb() + self.options["classes"] = 7 + pred = self.model.predict(path640, conf=self.options["conf"], iou=self.options["iou"], imgsz=self.options["imgsz"], half=self.options["half"], 
device=self.options["device"], max_det=self.options["max_det"], visualize=self.options["visualize"], + augment=self.options["augment"], agnostic_nms=self.options["agnostic_nms"], classes=self.options["classes"], show=self.options["show"], save=self.options["save"], save_txt=self.options["save_txt"], save_conf=self.options["save_conf"], + save_crop=self.options["save_crop"], show_labels=self.options["show_labels"], show_conf=self.options["show_conf"], show_boxes=self.options["show_boxes"], line_width=self.options["line_width"]) + # pred: a list of tensor, each tensor represents a picture + classifier.dl_addComponents(pred) tools.drawcomponents_gui(servername, path, classifier.components) print("\n" + 94 * "=" + "\n") print("Detecting USB ports...") sys.stdout.flush() - detect(self.classifiers[0], self.options["face"], self.options["servername"], self.options["path"]) + detect(self.classifiers[0], self.options["face"], self.options["path640"], self.options["path"], self.options["servername"]) if len(self.classifiers) == 2: - detect(self.classifiers[1], self.options["face_2"], self.options["servername_2"], self.options["path_2"]) + detect(self.classifiers[1], self.options["face_2"], self.options["path640_2"], self.options["path_2"], self.options["servername_2"]) self.update_images() self.calculate_hitboxes() @@ -374,7 +525,7 @@ def save_component(self): elif self.component_type_var.get() == "": print("\n" + 94 * "=" + "\n") print("ERROR: invalid component type.") - elif self.component_depth_var.get() == "": + elif self.component_depth_var.get() == "" or not self.component_depth_var.get().replace(".", "").isnumeric(): print("\n" + 94 * "=" + "\n") print("ERROR: invalid component depth.") elif self.image_label_selected == "top": @@ -528,9 +679,11 @@ def delete_component(self, event=None): def component_selected(self, event=None): if self.component_type_var.get() in tools.SIZETABLE: composhape = tools.SIZETABLE[self.component_type_var.get()] + if self.component_name_var.get() == "": + self.component_name_var.set(self.component_type_var.get()) self.component_depth_var.set(composhape[1]) self.component_wh_var.set(f"W = {composhape[0]}, H = {composhape[2]}") - self.root.update_idletasks() + self.update_idletasks() # Draws blue rectangle following the cursor @@ -670,8 +823,8 @@ def return_to_editing(self): label.bind("", lambda event: self.release(event, str(event.widget).split(".")[-1])) label.bind("", self.delete_component) label.bind("", self.delete_component) - label.bind("", self.reset) + self.bind("", self.reset) self.reset() self.calculate_hitboxes() @@ -701,13 +854,13 @@ def return_to_detection(self): label.unbind("") label.unbind("") label.unbind("") - label.unbind("") + self.unbind("") self.reset() self.calculate_hitboxes() print("\n" + 94 * "=" + "\n") - print("Click 'Open one image' or 'Open two images' to choose new images.") + print("Click 'Open images' to choose new images (rear AND/OR front).") print("Click one of the 'Detect ...' 
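The `detect_*` hunks above all inline the same `model.predict` call; only the `classes` filter, the log strings, and the duplicate check differ. A natural follow-up would be to hoist that call into one helper. The sketch below is not part of this patch, only an illustration of the pattern; it reuses `self.model` and `self.options` from the diff, and `_predict_classes` is a hypothetical name:

```python
# Hypothetical helper (not in this patch): one predict call shared by all detect_* methods.
def _predict_classes(self, path640, classes):
    """Run the YOLOv8 model on the 640x640 copy, keeping only the given class ids."""
    self.options["classes"] = classes
    return self.model.predict(
        path640,
        conf=self.options["conf"],        # confidence threshold
        iou=self.options["iou"],          # NMS IoU threshold
        imgsz=self.options["imgsz"],      # 640, set in __init__
        device=self.options["device"],
        max_det=self.options["max_det"],
        classes=classes,                  # e.g. [1, 2] for disks, 3 for PSUs
    )
```

Each inner `detect` closure would then reduce to `classifier.dl_addComponents(self._predict_classes(path640, ...))`, with the display options passed through as before.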
@@ -374,7 +525,7 @@ def save_component(self):
         elif self.component_type_var.get() == "":
             print("\n" + 94 * "=" + "\n")
             print("ERROR: invalid component type.")
-        elif self.component_depth_var.get() == "":
+        elif self.component_depth_var.get() == "" or not self.component_depth_var.get().replace(".", "").isnumeric():
             print("\n" + 94 * "=" + "\n")
             print("ERROR: invalid component depth.")
         elif self.image_label_selected == "top":
@@ -528,9 +679,11 @@ def delete_component(self, event=None):
     def component_selected(self, event=None):
         if self.component_type_var.get() in tools.SIZETABLE:
             composhape = tools.SIZETABLE[self.component_type_var.get()]
+            if self.component_name_var.get() == "":
+                self.component_name_var.set(self.component_type_var.get())
             self.component_depth_var.set(composhape[1])
             self.component_wh_var.set(f"W = {composhape[0]}, H = {composhape[2]}")
-        self.root.update_idletasks()
+        self.update_idletasks()

     # Draws blue rectangle following the cursor
@@ -670,8 +823,8 @@ def return_to_editing(self):
             label.bind("<ButtonRelease-1>", lambda event: self.release(event, str(event.widget).split(".")[-1]))
             label.bind("<BackSpace>", self.delete_component)
             label.bind("<Delete>", self.delete_component)
-            label.bind("<Escape>", self.reset)
+        self.bind("<Escape>", self.reset)

         self.reset()
         self.calculate_hitboxes()
@@ -701,13 +854,13 @@ def return_to_detection(self):
             label.unbind("<ButtonRelease-1>")
             label.unbind("<BackSpace>")
             label.unbind("<Delete>")
-            label.unbind("<Escape>")
+        self.unbind("<Escape>")

         self.reset()
         self.calculate_hitboxes()

         print("\n" + 94 * "=" + "\n")
-        print("Click 'Open one image' or 'Open two images' to choose new images.")
+        print("Click 'Open images' to choose new images (rear AND/OR front).")
         print("Click one of the 'Detect ...' buttons to start detecting components.")
         print("When you're done, click 'Finish detection' to proceed.\n")
@@ -716,9 +869,11 @@ def return_to_detection(self):
     def close_window(self):
         if messagebox.askokcancel("Quit", "Are you sure you want to quit?\nAll progress will be lost."):
             remove(self.options["path"])
+            remove(self.options["path640"])
             if len(self.classifiers) == 2:
                 remove(self.options["path_2"])
-            self.root.destroy()
+                remove(self.options["path640_2"])
+            self.destroy()
             sys.stdout = self.prev_stdout
@@ -732,14 +887,16 @@ def save_and_exit(self):
                 self.classifiers[0].savejson()
                 self.classifiers[1].savejson()
                 remove(self.options["path"])
+                remove(self.options["path640"])
                 remove(self.options["path_2"])
-                self.root.destroy()
+                remove(self.options["path640_2"])
+                self.destroy()
                 sys.stdout = self.prev_stdout
         else:
             if messagebox.askokcancel("File saved", f"{file_1}.json file saved in '/api/'\n\nThank you for using OGrEE-Tools/3dtools!"):
                 self.classifiers[0].savejson()
                 remove(self.options["path"])
-                self.root.destroy()
+                self.destroy()
                 sys.stdout = self.prev_stdout
@@ -754,17 +911,18 @@ def create_json_window(self):
             label.unbind("<ButtonRelease-1>")
             label.unbind("<BackSpace>")
             label.unbind("<Delete>")
-            label.unbind("<Escape>")
+
+        self.unbind("<Escape>")

         # JSON text on top leftmost columns
-        self.json_text = scrolledtext.ScrolledText(self.root, bg="black", fg="white", font=("Courier", 14), width=47, height=31)
+        self.json_text = scrolledtext.ScrolledText(self, bg="black", fg="white", font=("Courier", 14), width=47, height=31)
         self.json_text.grid(columnspan=2, rowspan=5, column=0, row=3, pady=(20, 20))

         # Buttons on bottom leftmost columns
-        self.return_to_editing_button = Button(self.root, text="Return to editing", command=self.return_to_editing, fg="black", height=4, width=40)
+        self.return_to_editing_button = Button(self, text="Return to editing", command=self.return_to_editing, fg="black", height=4, width=40)
         self.return_to_editing_button.grid(columnspan=2, column=0, row=8)

-        self.save_and_exit_button = Button(self.root, text="Save and exit", command=self.save_and_exit, fg="black", height=4, width=40)
+        self.save_and_exit_button = Button(self, text="Save and exit", command=self.save_and_exit, fg="black", height=4, width=40)
         self.save_and_exit_button.grid(columnspan=2, column=0, row=9)

         self.json_widgets = [self.return_to_editing_button, self.save_and_exit_button, self.json_text]
@@ -797,43 +955,43 @@ def create_editing_window(self):
             widget.grid_remove()

         # Component variables on top leftmost columns
-        self.component_name_label = Label(self.root, text="Component name:", bg="white", fg="black", height=4)
+        self.component_name_label = Label(self, text="Component name:", bg="white", fg="black", height=4)
         self.component_name_label.grid(column=0, row=3)

-        self.component_name_entry = Entry(self.root, textvariable=self.component_name_var, bg="white", fg="black", width=14)
+        self.component_name_entry = Entry(self, textvariable=self.component_name_var, bg="white", fg="black", width=14)
         self.component_name_entry.grid(column=1, row=3)

-        self.component_type_label = Label(self.root, text="Component type:", bg="white", fg="black", height=4)
+        self.component_type_label = Label(self, text="Component type:", bg="white", fg="black", height=4)
         self.component_type_label.grid(column=0, row=4)

         self.values = list(tools.SIZETABLE.keys())
-        self.component_type_entry = ttk.Combobox(self.root, textvariable=self.component_type_var, values=self.values, background="white", foreground="black", width=13)
+        self.component_type_entry = ttk.Combobox(self, textvariable=self.component_type_var, values=self.values, background="white", foreground="black", width=13)
         self.component_type_entry.grid(column=1, row=4)
         self.component_type_entry.bind("<<ComboboxSelected>>", self.component_selected)

-        self.component_depth_label = Label(self.root, text="Component depth (mm):", bg="white", fg="black", height=4)
+        self.component_depth_label = Label(self, text="Component depth (mm):", bg="white", fg="black", height=4)
         self.component_depth_label.grid(column=0, row=5)

-        self.component_depth_entry = Entry(self.root, textvariable=self.component_depth_var, bg="white", fg="black", width=14)
+        self.component_depth_entry = Entry(self, textvariable=self.component_depth_var, bg="white", fg="black", width=14)
         self.component_depth_entry.grid(column=1, row=5)

-        self.component_wh_label_l = Label(self.root, text="Component width, height (mm):", bg="white", fg="black", height=4)
+        self.component_wh_label_l = Label(self, text="Component width, height (mm):", bg="white", fg="black", height=4)
         self.component_wh_label_l.grid(column=0, row=6)

-        self.component_wh_label_r = Label(self.root, textvariable=self.component_wh_var, anchor="w", bg="white", fg="black", height=4, width=14)
+        self.component_wh_label_r = Label(self, textvariable=self.component_wh_var, anchor="w", bg="white", fg="black", height=4, width=14)
         self.component_wh_label_r.grid(column=1, row=6)

         # Buttons on bottom leftmost columns
-        self.save_component_button = Button(self.root, text="Save component", command=self.save_component, fg="black", height=4, width=14)
+        self.save_component_button = Button(self, text="Save component", command=self.save_component, fg="black", height=4, width=14)
         self.save_component_button.grid(column=0, row=7)

-        self.delete_component_button = Button(self.root, text="Delete component", command=self.delete_component, fg="black", height=4, width=14)
+        self.delete_component_button = Button(self, text="Delete component", command=self.delete_component, fg="black", height=4, width=14)
         self.delete_component_button.grid(column=1, row=7)

-        self.return_to_detection_button = Button(self.root, text="Return to detection", command=self.return_to_detection, fg="black", height=4, width=40)
+        self.return_to_detection_button = Button(self, text="Return to detection", command=self.return_to_detection, fg="black", height=4, width=40)
         self.return_to_detection_button.grid(columnspan=2, column=0, row=8)

-        self.finish_editing = Button(self.root, text="Finish editing", command=self.create_json_window, fg="black", height=4, width=40)
+        self.finish_editing = Button(self, text="Finish editing", command=self.create_json_window, fg="black", height=4, width=40)
         self.finish_editing.grid(columnspan=2, column=0, row=9)

         self.editing_widgets = [self.component_name_label, self.component_name_entry, self.component_type_label, self.component_type_entry, self.component_depth_label, self.component_depth_entry, self.component_wh_label_l, self.component_wh_label_r, self.save_component_button, self.delete_component_button, self.return_to_detection_button, self.finish_editing]
@@ -843,8 +1001,8 @@ def create_editing_window(self):
             label.bind("<ButtonRelease-1>", lambda event: self.release(event, str(event.widget).split(".")[-1]))
             label.bind("<BackSpace>", self.delete_component)
             label.bind("<Delete>", self.delete_component)
-            label.bind("<Escape>", self.reset)
+        self.bind("<Escape>", self.reset)

         self.reset()
         self.image_label_selected = "top"
@@ -862,63 +1020,61 @@
     # Creates the detection window
     def create_detection_window(self):
         # Window
-        self.root = Tk()
-        self.root.title("OGrEE-Tools/3dtools")
-        self.root.geometry("1280x720")
-        self.root.resizable(False, False)
-        self.root.protocol("WM_DELETE_WINDOW", self.close_window)
-        self.root.createcommand("::tk::mac::Quit", self.close_window)
-        self.root.bind('', self.close_window)
-
-        # black header with white text
-        self.header = Frame(self.root, width=1280, height=40, bg="black")
+        self.title("OGrEE-Tools/3dtools")
+        self.geometry("1280x720")
+        self.resizable(False, False)
+        self.protocol("WM_DELETE_WINDOW", self.close_window)
+        self.createcommand("::tk::mac::Quit", self.close_window)
+
+        # Black header with white text
+        self.header = Frame(self, width=1280, height=40, bg="black")
         self.header.grid(columnspan=5, rowspan=3, row=0, column=0)

-        self.header_text = Label(self.root, text="OGrEE-Tools/3dtools", bg="black", fg="white", font=("Helvetica, 20"), justify="center")
+        self.header_text = Label(self, text="OGrEE-Tools/3dtools", bg="black", fg="white", font=("Helvetica, 20"), justify="center")
         self.header_text.grid(columnspan=5, row=1)

         # Main content
-        self.main_content = Frame(self.root, width=1280, height=680, bg="white")
+        self.main_content = Frame(self, width=1280, height=680, bg="white")
         self.main_content.grid(columnspan=5, rowspan=7, row=3, column=0)

         # Buttons on two leftmost columns
-        self.open_one_image_button = Button(self.root, text="Open one image\n(rear OR front)", command=self.open_one_image, fg="black", height=4, width=40)
-        self.open_one_image_button.grid(columnspan=2, column=0, row=3)
+        self.open_images_label = Label(self, text="Welcome to OGrEE-Tools/3dtools!", bg="white", fg="black", font=("Helvetica, 16"), justify="center", height=4)
+        self.open_images_label.grid(columnspan=2, column=0, row=3)

-        self.open_two_images_button = Button(self.root, text="Open two images\n(rear AND front)", command=self.open_two_images, fg="black", height=4, width=40)
-        self.open_two_images_button.grid(columnspan=2, column=0, row=4)
+        self.open_images_button = Button(self, text="Open images\n(rear AND/OR front)", command=lambda: Open_images_window(self), fg="black", height=4, width=40)
+        self.open_images_button.grid(columnspan=2, column=0, row=4)

-        self.detect_all_button = Button(self.root, text="Detect all\n(slots + disks + PSU)", command=self.detect_all, fg="black", height=4, width=14)
+        self.detect_all_button = Button(self, text="Detect all components", command=self.detect_all, fg="black", height=4, width=14)
         self.detect_all_button.grid(column=0, row=5)

-        self.detect_slot_button = Button(self.root, text="Detect slots", command=self.detect_slot, fg="black", height=4, width=14)
+        self.detect_slot_button = Button(self, text="Detect slots", command=self.detect_slot, fg="black", height=4, width=14)
         self.detect_slot_button.grid(column=1, row=5)

-        self.detect_disk_button = Button(self.root, text="Detect disks", command=self.detect_disk, fg="black", height=4, width=14)
+        self.detect_disk_button = Button(self, text="Detect disks", command=self.detect_disk, fg="black", height=4, width=14)
         self.detect_disk_button.grid(column=0, row=6)

-        self.detect_psu_button = Button(self.root, text="Detect PSU", command=self.detect_psu, fg="black", height=4, width=14)
+        self.detect_psu_button = Button(self, text="Detect PSU", command=self.detect_psu, fg="black", height=4, width=14)
         self.detect_psu_button.grid(column=1, row=6)

-        self.detect_serial_button = Button(self.root, text="Detect serial ports\n(DB9 connector)", command=self.detect_serial, fg="black", height=4, width=14)
+        self.detect_serial_button = Button(self, text="Detect serial ports\n(DB9 connector)", command=self.detect_serial, fg="black", height=4, width=14)
         self.detect_serial_button.grid(column=0, row=7)

-        self.detect_vga_button = Button(self.root, text="Detect VGA ports\n(DB15 connector)", command=self.detect_vga, fg="black", height=4, width=14)
+        self.detect_vga_button = Button(self, text="Detect VGA ports\n(DB15 connector)", command=self.detect_vga, fg="black", height=4, width=14)
         self.detect_vga_button.grid(column=1, row=7)

-        self.detect_bmc_button = Button(self.root, text="Detect BMC", command=self.detect_bmc, fg="black", height=4, width=14)
+        self.detect_bmc_button = Button(self, text="Detect BMC", command=self.detect_bmc, fg="black", height=4, width=14)
         self.detect_bmc_button.grid(column=0, row=8)

-        self.detect_usb_button = Button(self.root, text="Detect USB ports", command=self.detect_usb, fg="black", height=4, width=14)
+        self.detect_usb_button = Button(self, text="Detect USB ports", command=self.detect_usb, fg="black", height=4, width=14)
         self.detect_usb_button.grid(column=1, row=8)

-        self.create_editing_window_button = Button(self.root, text="Finish detection", command=self.create_editing_window, fg="black", height=4, width=40)
+        self.create_editing_window_button = Button(self, text="Finish detection", command=self.create_editing_window, fg="black", height=4, width=40)
         self.create_editing_window_button.grid(columnspan=2, column=0, row=9)

-        self.detection_widgets = [self.open_one_image_button, self.open_two_images_button, self.detect_all_button, self.detect_slot_button, self.detect_disk_button, self.detect_psu_button, self.detect_serial_button, self.detect_vga_button, self.detect_bmc_button, self.detect_usb_button, self.create_editing_window_button]
+        self.detection_widgets = [self.open_images_label, self.open_images_button, self.detect_all_button, self.detect_slot_button, self.detect_disk_button, self.detect_psu_button, self.detect_serial_button, self.detect_vga_button, self.detect_bmc_button, self.detect_usb_button, self.create_editing_window_button]

         # Output text on bottom rightmost columns
-        self.output = scrolledtext.ScrolledText(self.root, bg="black", fg="white", font=("Courier", 14), width=94, height=19)
+        self.output = scrolledtext.ScrolledText(self, bg="black", fg="white", font=("Courier", 14), width=94, height=19)
         self.output.grid(columnspan=3, rowspan=3, column=2, row=7)

         self.prev_stdout = sys.stdout
@@ -926,59 +1082,55 @@ def create_detection_window(self):
         print("\n" + 94 * "=" + "\n")
         print("Welcome to OGrEE-Tools/3dtools!")
-        print("Click 'Open one image' or 'Open two images' to choose new images.")
+        print("Click 'Open images' to choose new images (rear AND/OR front).")
         print("Click one of the 'Detect ...' buttons to start detecting components.")
         print("When you're done, click 'Finish detection' to proceed.\n")
         print(94 * "=" + "\n")

         # Images on top rightmost columns
-        self.top_image_label = Label(self.root, name="top")
+        self.top_image_label = Label(self, name="top")
         self.top_image_label.grid(columnspan=3, rowspan=2, column=2, row=4, padx=(20, 20), pady=(20, 20))

-        self.bot_image_label = Label(self.root, name="bot")
+        self.bot_image_label = Label(self, name="bot")
         # bot_image_label.grid(columnspan=3, rowspan=2, column=2, row=5, padx=(20, 20), pady=(20, 20))

         self.image_labels = [self.top_image_label, self.bot_image_label]
         self.image_label_selected = "top"

-        imsave(self.options["path"], tools.scaleim(self.options["servername"], self.options["height"], 2.0))
+        imsave(self.options["path"], np.asarray(Image.open(self.options["servername"]).resize((int(tools.RATIO * self.options["width"]), int(tools.RATIO * self.options["height"])))))
+        self.options["path640"] = "api/s-tmp" + self.options["servername"].split('/')[-1]
+        imsave(self.options["path640"], np.asarray(Image.open(self.options["servername"]).resize((640, 640))))
+
         self.update_images()

-        self.component_name_var = StringVar(self.root)
-        self.component_type_var = StringVar(self.root)
-        self.component_depth_var = StringVar(self.root)
-        self.component_wh_var = StringVar(self.root)
+        self.component_name_var = StringVar(self)
+        self.component_type_var = StringVar(self)
+        self.component_depth_var = StringVar(self)
+        self.component_wh_var = StringVar(self)

         self.click_pt1 = None
         self.selected_component = None
         self.hitboxes = [{}, {}]

-        self.root.after(100, lambda: print(f"\nNew image opened: {self.options['servername'].split('/')[-1]}."))
+        self.after(100, lambda: print(f"\nNew image opened: {self.options['servername'].split('/')[-1]}."))

     # Starts the GUI
     def __init__(self, options):
+        super().__init__()
         self.FILE = Path(__file__).resolve()
         self.ROOT = self.FILE.parents[0]
-        self.yROOT = self.ROOT / 'yolov5'  # YOLOv5 root directory
-        # '''
-        # if str(self.ROOT) not in sys.path:
-        #     sys.path.append(str(self.ROOT))  # add ROOT to PATH
-        #     self.ROOT = Path(os.path.relpath(self.ROOT, Path.cwd()))  # relative
-        # '''
        self.options = options
         self.options["path"] = 'api/tmp' + self.options["servername"].split('/')[-1]
-        self.options["data"] = self.ROOT / 'yolov5/data/serveur122.yaml'
-        self.options["imgsz"] = (192, 768)
+        self.options["imgsz"] = 640
         self.options["max_det"] = 400
+        self.options["visualize"] = False
         self.options["agnostic_nms"] = False
-        self.options["update"] = False
         self.options["half"] = False
-        self.options["dnn"] = False
-        self.options["vid_stride"] = 1
         self.create_detection_window()
         self.classifiers = [Classifiers(self.options["servername"], self.options["height"], self.options["width"], self.options["face"])]
+        self.model = YOLO(self.options["model"], task="detect")

 # Creates and runs the GUI
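Since the GUI now calls ultralytics directly, the `pred` handed to `dl_addComponents` is no longer a tensor: `YOLO(...).predict` returns a list of `Results` objects, one per image, with detections exposed through `.boxes`. A minimal, standalone sketch of reading them (the model path matches the patch; the image path is a placeholder):

```python
from ultralytics import YOLO

# Load the fine-tuned detector shipped with the repo (any YOLOv8 .pt file works here).
model = YOLO("api/yolov8-best.pt", task="detect")

# Predict on a 640x640 copy of a server image, as gui.py and main.py do.
results = model.predict("api/s-tmp-example.png", conf=0.25, iou=0.70, imgsz=640)

for r in results:          # one Results object per input image
    for box in r.boxes:    # one box per detected component
        x, y, w, h = box.xywh[0].tolist()  # box centre and size, in 640x640 pixels
        cls_id = int(box.cls)              # class id: 0 = BMC, ..., 7 = USB, 8 = VGA
        score = float(box.conf)            # detection confidence
        print(cls_id, score, (x, y, w, h))
```

Note that these coordinates live in the 640x640 copy (`path640`), while `drawcomponents_gui` draws on the `RATIO`-scaled image (`path`); the rescaling presumably happens inside `dl_addComponents`, which is defined in `classifiers.py`, outside this diff.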
diff --git a/3dtools/image/serveur/gui.png b/3dtools/image/serveur/gui.png
index 0bdabcb..2f43ca8 100644
Binary files a/3dtools/image/serveur/gui.png and b/3dtools/image/serveur/gui.png differ
diff --git a/3dtools/main.py b/3dtools/main.py
index fc83bbc..ab6c099 100644
--- a/3dtools/main.py
+++ b/3dtools/main.py
@@ -1,15 +1,16 @@
-import tools
 from classifiers import Classifiers
 import argparse
 from pathlib import Path
-import api.yoloapi
 from skimage.io import imsave
 from os import remove
+from ultralytics import YOLO
+import numpy as np
+from PIL import Image
 from gui import run_gui
+
 FILE = Path(__file__).resolve()
 ROOT = FILE.parents[0]
-yROOT = ROOT / 'yolov5'  # YOLOv5 root directory
 '''
 if str(ROOT) not in sys.path:
     sys.path.append(str(ROOT))  # add ROOT to PATH
@@ -19,68 +20,50 @@
 def run(servername="image/serveur/dell-poweredge-r720xd.rear.png",
         height=87.38,
         width=443.99,
         face='face',
-        weights=ROOT/'api/best.pt',  # model path or triton URL
-        data=ROOT / 'yolov5/data/serveur122.yaml',  # dataset.yaml path
-        imgsz=(192, 768),  # inference size (height, width)
-        conf_thres=0.60,  # confidence threshold
-        iou_thres=0.45,  # NMS IOU threshold
-        max_det=400,  # maximum detections per image
-        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
-        view_img=False,  # show results
-        save_txt=False,  # save results to *.txt
-        save_conf=False,  # save confidences in --save-txt labels
-        save_crop=False,  # save cropped prediction boxes
-        nosave=False,  # do not save images/videos
-        agnostic_nms=False,  # class-agnostic NMS
-        augment=False,  # augmented inference
-        visualize=False,  # visualize features
-        update=False,  # update all models
-        project=ROOT / 'result/detect',  # save results to project/name
-        name='sl_dis_pw',  # save results to project/name
-        exist_ok=True,  # existing project/name ok, do not increment
-        line_thickness=2,  # bounding box thickness (pixels)
-        hide_labels=True,  # hide labels
-        hide_conf=False,  # hide confidences
-        half=False,  # use FP16 half-precision inference
-        dnn=False,  # use OpenCV DNN for ONNX inference
-        vid_stride=1,  # video frame-rate stride
+        model=ROOT / 'api/yolov8-best.pt',
+        conf=0.25,
+        iou=0.70,
+        device='',
+        augment=False,
+        show=False,
+        save=True,
+        save_txt=False,
+        save_conf=False,
+        save_crop=False,
+        show_labels=True,
+        show_conf=True,
+        show_boxes=True,
+        line_width=None,
+        **kwargs
         ):
     #image = tools.impreprocess(image)
     ogreeTools = Classifiers(servername, height, width, face)
+    model = YOLO(model)
     path = 'api/tmp' + servername.split('/')[-1]
-    imsave(path, tools.scaleim(servername, height, 2.0))
+    imsave(path, np.asarray(Image.open(servername).resize((640, 640))))
     # filter by class: --class 0, or --class 0 2 3
-    class_dic = {'all': None, '15': None, 'slot_normal': 0, 'slot_lp': 1, 'slot': [0, 1],
-                 'disk_sff': 2, 'disk_lff': 3, 'disk': [2, 3], 'source': 4}
+    class_dic = {'All': None, 'BMC': 0, 'Disk_lff': 1, 'Disk_sff': 2, 'Disks': [1, 2], 'PSU': 3, 'Serial': 4, 'Slot_lp': 5, 'Slot_normal': 6, 'Slots': [5, 6], 'USB': 7, 'VGA': 8}
+
     while True:
-        print('\nclass list: ', {'d-sub female': '11', 'd-sub male': '12', 'idrac': '13', 'usb': '14', 'all': '15'},
-              "\nor enter the name 'slot', 'disk', 'source'(without '')",
-              "\nPlease input one by one. Enter 'finish' to output the json")
-        print("----Enter component name or code:")
-        command = input()
+        print("\nChoose a component to detect.")
+        print("Available commands: 'All', 'BMC', 'Disk_lff', 'Disk_sff', 'Disks', 'PSU', 'Serial', 'Slot_lp', 'Slot_normal', 'Slots', 'USB', 'VGA'.")
+        print("Enter 'finish' to output the JSON.")
+        print()
+        command = input("Command: ")
         if command == "finish":
             break
-        elif command == "d-sub female" or command == '11':
-            print("start detecting d-sub female")
-            ogreeTools.clvga_rs232('female')
-        elif command == "d-sub male" or command == '12':
-            print("start detecting d-sub male")
-            ogreeTools.clvga_rs232('male')
-        elif command == "idrac" or command == '13':
-            print("start detecting idrac")
-            ogreeTools.clidrac()
-        elif command == "usb" or command == '14':
-            print("start detecting usb")
-            ogreeTools.clusb()
-        elif command in class_dic.keys() or command == '15':
-            print("start detecting slot&disk")
-            # command = 'all'
-            pred = api.yoloapi.run(weights, path, data, imgsz, conf_thres, iou_thres, max_det, device, view_img,
-                                   save_txt, save_conf, save_crop, nosave, class_dic[command], agnostic_nms, augment, visualize, update,
-                                   project, name, exist_ok, line_thickness, hide_labels, hide_conf, half, dnn, vid_stride,)
-            # pred: a list of tensor, each tensor represent a picture
-            ogreeTools.dl_addComponents(pred.cpu())
+        elif command in class_dic:
+            classes = class_dic[command]
+            print(f"Detecting {command}...")
+            pred = model.predict(path, conf=conf, iou=iou, imgsz=640, half=False, device=device, max_det=400, visualize=False,
+                                 augment=augment, agnostic_nms=False, classes=classes, show=show, save=save, save_txt=save_txt, save_conf=save_conf,
+                                 save_crop=save_crop, show_labels=show_labels, show_conf=show_conf, show_boxes=show_boxes, line_width=line_width)
+            # pred: a list of ultralytics Results objects, one per input image
+            ogreeTools.dl_addComponents(pred)
+        else:
+            print("Invalid command. Try again.")
     ogreeTools.cutears()
     ogreeTools.writejson()
     ogreeTools.savejson()
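In the rewritten loop above, `class_dic` is the single source of truth for the command set: a single-class command maps to an `int`, a group command to a list, and `'All'` maps to `None`, which tells `predict` not to filter by class at all. That is also why the dispatch tests `command in class_dic` rather than the looked-up value, since `class_dic['All']` is falsy. For example:

```python
# How class_dic drives the YOLO classes filter (values copied from the patch above).
class_dic = {'All': None, 'BMC': 0, 'Disk_lff': 1, 'Disk_sff': 2, 'Disks': [1, 2], 'PSU': 3,
             'Serial': 4, 'Slot_lp': 5, 'Slot_normal': 6, 'Slots': [5, 6], 'USB': 7, 'VGA': 8}

print(class_dic['PSU'])    # 3       -> detect PSUs only
print(class_dic['Slots'])  # [5, 6]  -> detect both slot classes
print(class_dic['All'])    # None    -> no class filter, detect everything
```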
@@ -95,24 +78,21 @@ def parse_opt():
     parser.add_argument('--height', type=float, default=87.38, help="Server's height/vertical size")
     parser.add_argument('--width', type=float, default=443.99, help="Server's width/horizon size")
     parser.add_argument('--face', default='rear', choices=['front', 'rear'], help='the picture is front bord or rear bord')
-    #yolov5 hyparameter
-    parser.add_argument('--weights', nargs='+', type=str, default=ROOT/'api/best.pt', help='model path or triton URL')
-    parser.add_argument('--conf-thres', type=float, default=0.60, help='confidence threshold')
-    parser.add_argument('--iou-thres', type=float, default=0.50, help='NMS IoU threshold')
+    # YOLO hyperparameters
+    parser.add_argument('--model', nargs='+', type=str, default=ROOT/'api/yolov8-best.pt', help='model path or triton URL')
+    parser.add_argument('--conf', type=float, default=0.25, help='confidence threshold')
+    parser.add_argument('--iou', type=float, default=0.70, help='NMS IoU threshold')
     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-    parser.add_argument('--view-img', action='store_true', help='if provided, show results')
+    parser.add_argument('--augment', default=False, action='store_true', help='if provided, augmented inference')
+    parser.add_argument('--show', default=False, action='store_true', help='if provided, show results on screen')
+    parser.add_argument('--save', default=False, action='store_true', help='if provided, save detection results')
     parser.add_argument('--save-txt', action='store_true', help='if provided, save results to *.txt')
     parser.add_argument('--save-conf', action='store_true', help='if provided, save confidences in --save-txt labels')
     parser.add_argument('--save-crop', action='store_true', help='if provided, save cropped prediction boxes')
-    parser.add_argument('--nosave', action='store_true', help='if provided, do not save images/videos')
-    parser.add_argument('--augment', action='store_true', help='if provided, augmented inference')
-    parser.add_argument('--visualize', action='store_true', help='if provided, visualize features')
-    parser.add_argument('--project', default=ROOT / 'detect', help='save results to project/name')
-    parser.add_argument('--name', default='exp', help='save results to project/name')
-    parser.add_argument('--exist-ok', action='store_true', help='if provided, existing project/name ok, do not increment')
-    parser.add_argument('--line-thickness', default=1, type=int, help='bounding box thickness (pixels)')
-    parser.add_argument('--hide-labels', default=False, action='store_true', help='if provided, hide labels')
-    parser.add_argument('--hide-conf', default=False, action='store_true', help='if provided, hide confidences')
+    parser.add_argument('--show-labels', default=True, action='store_true', help='if provided, show labels')
+    parser.add_argument('--show-conf', default=True, action='store_true', help='if provided, show confidences')
+    parser.add_argument('--show-boxes', default=True, action='store_true', help='if provided, show bounding boxes')
+    parser.add_argument('--line-width', default=None, type=int, help='bounding box line width (pixels)')
     opt = parser.parse_args()
     return opt
diff --git a/3dtools/orgnised.py b/3dtools/orgnised.py
deleted file mode 100644
index 9651dbb..0000000
--- a/3dtools/orgnised.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from skimage.color.colorconv import rgba2rgb
-import matplotlib.pyplot as plt
-from PIL import Image
-import numpy as np
-import numpy as np
-from skimage.io import imshow, imread
-from skimage.color import rgb2gray,rgba2rgb
-from mpl_toolkits.mplot3d import Axes3D
-from skimage.feature import match_template
-from skimage.feature import peak_local_max
-from skimage import data, io, filters, transform
-
-def templateMatch(image,template):
-    fig, ax = plt.subplots(2,1,figsize=(10,8))
-    ax[0].imshow(image_g,cmap='gray')
-    for _ in range(4):
-        template = transform.rotate(template,90,resize=True)
-        sample_mt = match_template(image, template)
-        template_width, template_height = template.shape
-        for x, y in peak_local_max(np.squeeze(sample_mt), threshold_abs=0.7):
-            rect = plt.Rectangle((y, x), template_height, template_width, color='r',
-                                 fc='none')
-            ax[0].add_patch(rect)
-    ax[1].imshow(sample_mt,cmap='magma')
-    ax[0].set_title('Grayscale',fontsize=15)
-    ax[1].set_title('Template Matching',fontsize=15)
-    plt.show()
-
-
-image = rgba2rgb(imread('image/dell-poweredge-r740xd.rear.png'))  # (H x W x C), [0, 255], RGB
-image_g = rgb2gray(image)
-image_e = filters.sobel(image_g)
-image_hsv = rgb2hsv
-'''
-fig, ax = plt.subplots(3,1,figsize=(10,8))
-ax[0].imshow(image)
-ax[1].imshow(image_g,cmap='gray')
-ax[2].imshow(image_e,cmap='gray')
-ax[0].set_title('Colored Image',fontsize=15)
-ax[1].set_title('Grayscale Image',fontsize=15)
-ax[2].set_title('Sobel edge',fontsize=15)
-plt.show()
-plt.close(fig)
-'''
-template = rgba2rgb(imread('image/rj45-90.png'))
-template_g = rgb2gray(template)
-template_e = filters.sobel(template_g)
-
-templateMatch(image_e,template_e)
\ No newline at end of file
diff --git a/3dtools/requirements.txt b/3dtools/requirements.txt
index 539be7d..f45eac2 100644
--- a/3dtools/requirements.txt
+++ b/3dtools/requirements.txt
@@ -4,7 +4,7 @@ flask>=2.2.5
 pyyaml>=6.0
 pillow>=9.4.0
 torchvision>=0.14.1
-numpy>=1.21.6
+numpy>=1.22.2
 opencv-python>=4.7.0.72
 matplotlib>=3.5.3
 pandas>=1.3.5
@@ -15,4 +15,5 @@ requests>=2.31.0
 tqdm>=4.65.0
 psutil>=5.9.5
 thop>=0.1.1-2209072238
-scikit-image>=0.19.3
\ No newline at end of file
+scikit-image>=0.19.3
+ultralytics>=8.1.0
\ No newline at end of file
diff --git a/3dtools/sacle_picture.py b/3dtools/sacle_picture.py
deleted file mode 100644
index 324a256..0000000
--- a/3dtools/sacle_picture.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import numpy as np
-import pandas as pd
-import os
-import tools
-from skimage.io import imsave
-from skimage.transform import rescale
-
-FILEHANDEL = "YOLO_serveur/raw/"
-SAVEHANDEL = "D:/Work/OGREE/image/YOLO_serveur/pixel2.5/"
-RATIO = 2.0
-
-size = pd.read_excel("image/name_list.xlsx").set_index('File')
-files = os.listdir("image/" + FILEHANDEL)
-for i in files:
-    image = tools.imageload(FILEHANDEL + i, flag="color")
-    image_s = rescale(image, size.loc[i, 'Height'] * RATIO / image.shape[0], channel_axis=2)
-    image_s = (image_s * 255.0).astype('uint8')
-    path = SAVEHANDEL + i
-    imsave(path, image_s)
-
diff --git a/3dtools/setup.sh b/3dtools/setup.sh
deleted file mode 100755
index d81a730..0000000
--- a/3dtools/setup.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-
-# Check Python version
-if ! python3 -c 'import sys; assert sys.version_info >= (3,7)' > /dev/null; then
-    echo "Error: Python version 3.7 or higher is required."
-    exit 1
-else
-    echo "Python version is compatible. Proceeding with installations."
-fi
-
-# OGrEE-Tools/3dtools installation
-pip3 install -r ./requirements.txt
-
-# YOLOV5 installation
-git clone https://github.com/ultralytics/yolov5
-cd yolov5
-pip3 install -r ./requirements.txt
diff --git a/3dtools/test.py b/3dtools/test.py
deleted file mode 100644
index 2e4dbcb..0000000
--- a/3dtools/test.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import torch
-from torch.multiprocessing import Pool, freeze_support
-
-def calculate_value(args):
-    matrix, row_idx, col_idx = args
-    # Calculate the value at the specified row and column index
-    value = matrix[row_idx, col_idx]
-    return value
-
-def parallel_calculation(matrix, batch_size):
-    num_rows, num_cols = matrix.size()
-
-    # Create a shared memory tensor to store the results
-    result_matrix = torch.zeros_like(matrix)
-
-    # Generate indices for parallel processing
-    indices = []
-    for i in range(num_rows):
-        for j in range(num_cols):
-            indices.append((i, j))
-
-    # Create a pool of processes to perform the calculations in parallel
-    with Pool() as pool:
-        # Split the indices into batches
-        batches = [indices[i:i+batch_size] for i in range(0, len(indices), batch_size)]
-
-        # Process each batch in parallel using map
-        results = pool.map(calculate_value, [(matrix, row_idx, col_idx) for batch in batches for row_idx, col_idx in batch])
-
-        # Update the result_matrix using the calculated values
-        for idx, (row_idx, col_idx) in enumerate([(row_idx, col_idx) for batch in batches for row_idx, col_idx in batch]):
-            result_matrix[row_idx, col_idx] = results[idx]
-
-    return result_matrix
-
-# Example usage
-if __name__ == '__main__':
-    freeze_support()
-
-    # Create a matrix
-    matrix = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
-
-    # Set the batch size for parallel processing
-    batch_size = 4
-
-    # Perform parallel calculation
-    result = parallel_calculation(matrix, batch_size)
-
-    print("Result Matrix:")
-    print(result)
\ No newline at end of file
diff --git a/3dtools/tools.py b/3dtools/tools.py
index 8b7d3b0..991918c 100644
--- a/3dtools/tools.py
+++ b/3dtools/tools.py
@@ -22,9 +22,9 @@
 # special constant for all the function
 RATIO = 781/85.4  # pixel/mm = 9.145
-SIZETABLE = {'idrac': [14.0, 11.0, 11.0], 'usb': [13.0, 14.0, 5.5], 'vga': [16.0, 11.0, 8.0],
-             'rs232': [16.0, 11.0, 8.0], 'slot_normal': [107.0, 312.0, 18.0], 'slot_lp': [65.0, 175.0, 18.0],
-             'disk_lff': [101.0, 146.0, 26.0], 'disk_sff': [70.0, 101.0, 10.0], 'PSU': [90.0, 100.0, 40.0]}
+SIZETABLE = {'BMC': [20, 11.0, 12.5], 'USB': [15.0, 14.0, 5.5], 'VGA': [35.0, 11.0, 17.5],
+             'Serial': [35.0, 11.0, 17.5], 'Slot_normal': [100.0, 312.0, 15], 'Slot_lp': [65.0, 175.0, 15],
+             'Disk_lff': [107.5, 146.0, 25], 'Disk_sff': [80, 101.0, 15], 'PSU': [90.0, 100.0, 40.0]}

 def imageload(fn, flag="color"):
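`SIZETABLE` entries are `[width, depth, height]` in millimetres (the GUI's `component_selected` above reads `composhape[1]` as depth and displays `composhape[0]`/`composhape[2]` as W/H), and `RATIO` converts millimetres to pixels on the scaled image. A small illustration, using the PSU row from the new table:

```python
# Projecting a SIZETABLE entry (mm) onto the RATIO-scaled image (pixels).
RATIO = 781 / 85.4  # pixel/mm, as defined in tools.py

psu_w_mm, psu_depth_mm, psu_h_mm = [90.0, 100.0, 40.0]  # SIZETABLE['PSU']

w_px = psu_w_mm * RATIO   # ~823 px wide on the scaled image
h_px = psu_h_mm * RATIO   # ~366 px tall
print(f"PSU footprint: {w_px:.0f} x {h_px:.0f} px")
```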