diff --git a/README.md b/README.md
index 0838a2d..dcb277f 100644
--- a/README.md
+++ b/README.md
@@ -4,17 +4,29 @@
 
 ## Training
 
-Loss
+
+
+
+| Heads-only epochs | All-layers epochs | loss | val_loss |
+|:-----------------:|:-----------------:|:----:|:--------:|
+| 10 | 40 | ![loss](./assets/loss_40.png) | ![val_loss](./assets/val_loss_40.png) |
+| 40 | 100 | ![loss](./assets/loss2.png) | ![val_loss](./assets/val_loss2.png) |
+
+
 ## Reference
 
 https://github.com/matterport/Mask_RCNN
diff --git a/inference.py b/inference.py
index 21b666a..b20a545 100644
--- a/inference.py
+++ b/inference.py
@@ -35,12 +35,12 @@ class ShapesConfig(Config):
-    to the toy shapes dataset.
+    to the Lyft perception challenge dataset.
     """
     # Give the configuration a recognizable name
-    NAME = "shapes"
+    NAME = "lyft_perception_challenge"
 
-    # Train on 1 GPU and 8 images per GPU. We can put multiple images on each
-    # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
+    # Train on 1 GPU and 4 images per GPU. We can put multiple images on each
+    # GPU because the images are small. Batch size is 4 (GPUs * images/GPU).
     GPU_COUNT = 1
-    IMAGES_PER_GPU = 1
+    IMAGES_PER_GPU = 4
 
     # Number of classes (including background)
-    NUM_CLASSES = 1 + 2  # background + 3 shapes
+    NUM_CLASSES = 1 + 2  # background + 2 classes (road, vehicle)
@@ -80,11 +80,7 @@ class InferenceConfig(ShapesConfig):
                           config=inference_config,
                           model_dir=MODEL_DIR)
 
-# Get path to saved weights
-# Either set a specific path or find last trained weights
-# model_path = os.path.join('./logs/shapes20180529T0826', "mask_rcnn_shapes_0040.h5")
 model_path = os.path.join('./', "mask_rcnn_lyft.h5")
-# model_path = model.find_last()[1]
 
 # Load trained weights (fill in path to trained weights here)
 assert model_path != "", "Provide path to trained weights"
@@ -92,7 +88,6 @@ class InferenceConfig(ShapesConfig):
 model.load_weights(model_path, by_name=True)
 
-#
 # Test on a random image
 RED = (255,0,0)
 GREEN = (0,255,0)
@@ -116,9 +111,7 @@ def segment_images(original_image):
             color_id=0
         else:
             color_id=1
-            # print('id:',_id)
         mask_1 = f_mask[:,:,ch]
-        # print(mask_1)
         mask1 = np.dstack([mask_1*colors[color_id][0],
                            mask_1*colors[color_id][1],
                            mask_1*colors[color_id][2]])
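
For context on how the patched pieces fit together at run time, here is a minimal sketch of the inference wiring, not the script itself. Assumptions not in the diff: the matterport sources are importable as `mrcnn`, `InferenceConfig` resets `IMAGES_PER_GPU` to 1 as in the upstream shapes notebook (matterport's `detect()` expects exactly `config.BATCH_SIZE` images per call), and `test_image.png` is a hypothetical input file.

```python
# Minimal inference sketch for the patched inference.py -- assumes it runs in
# the same module, so the ShapesConfig / InferenceConfig classes from the diff
# are in scope, and that the matterport Mask_RCNN package is installed.
import os
import skimage.io
import mrcnn.model as modellib

MODEL_DIR = './logs'

inference_config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)

# by_name=True matches weights to layers by layer name, so backbone weights
# can be reused even though the head differs from the original shapes model.
model.load_weights(os.path.join('./', "mask_rcnn_lyft.h5"), by_name=True)

image = skimage.io.imread('test_image.png')   # hypothetical input file
results = model.detect([image], verbose=0)    # one result dict per input image
r = results[0]                                # 'rois', 'class_ids', 'scores', 'masks'
```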
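The last hunk only trims debug prints from the mask-colouring loop. As a readable reference for what that loop computes, here is a self-contained sketch; the `colorize_masks` helper name is hypothetical, and it assumes `colors = [RED, GREEN]` with class id 1 mapped to the first colour, mirroring `segment_images()`.

```python
# Sketch of the per-channel mask colouring touched by the last hunk.
# Assumption: `r` is one result dict from model.detect(), whose 'masks' entry
# is an (H, W, N) boolean array with one channel per detected instance.
import numpy as np

RED = (255, 0, 0)
GREEN = (0, 255, 0)
colors = [RED, GREEN]

def colorize_masks(r):
    """Combine all instance masks into a single RGB overlay image."""
    f_mask = r['masks']
    overlay = np.zeros(f_mask.shape[:2] + (3,), dtype=np.uint8)
    for ch in range(f_mask.shape[2]):
        color_id = 0 if r['class_ids'][ch] == 1 else 1
        mask_1 = f_mask[:, :, ch].astype(np.uint8)
        # Broadcast the binary mask into three colour planes, as in the diff
        mask1 = np.dstack([mask_1 * colors[color_id][0],
                           mask_1 * colors[color_id][1],
                           mask_1 * colors[color_id][2]])
        overlay = np.maximum(overlay, mask1.astype(np.uint8))
    return overlay
```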