From cfb907f29a3b38a3a4e9cb02ddb6f2f4c5a88116 Mon Sep 17 00:00:00 2001
From: Kristina Ulicna
Date: Tue, 11 Jul 2023 14:53:45 +0100
Subject: [PATCH] Navigate example config

---
 grace/README.md   | 18 ++++++++++++++++--
 grace/config.json |  2 +-
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/grace/README.md b/grace/README.md
index fe21900..e643861 100644
--- a/grace/README.md
+++ b/grace/README.md
@@ -29,8 +29,9 @@ The `config_file` should be structured as follows:
     "log_dir": "--same--as--above--",
 
     # Feature extractor:
-    "extractor_fn": "/absolute/path/to/your/feature/extractor/resnet152.pt",
-    "feature_dim": "2048",  -> if using ResNet 50 / 101 / 152, "512" if using ResNet 18 / 34; = input channels into the classifier
+    "extractor_fn": "/path/to/your/feature/extractor/resnet152.pt",
+    "feature_dim": "2048",  -> if using ResNet 50 / 101 / 152,
+                   "512" if using ResNet 18 / 34; = input channels into the classifier
 
     # Patch data specs:
     "patch_size": "(224, 224)",  -> size of the patch to crop & feed to feature extractor
@@ -56,6 +57,19 @@ The `config_file` should be structured as follows:
 
 _Note:_ Write the parameters into a single line, the file will be parsed accordingly.
 
+Downloading the feature extractor:
+
+If you decide to use a pre-trained image classifier, such as ResNet-152, you can use this snippet to import the model, download the default pre-trained weights & save the model to disk:
+
+```python
+import torch
+from grace.models.feature_extractor import resnet
+
+resnet_model = resnet(resnet_type="resnet152")
+extractor_fn = "/path/to/your/feature/extractor/resnet152.pt"
+torch.save(resnet_model, extractor_fn)
+```
+
 
 ## Full list of graph / patch augmentations:
 
diff --git a/grace/config.json b/grace/config.json
index 3a645dd..1a0b39a 100644
--- a/grace/config.json
+++ b/grace/config.json
@@ -1 +1 @@
-{"image_dir": "/Users/kulicna/Desktop/classifier/data_fake/train/padded", "grace_dir": "/Users/kulicna/Desktop/classifier/data_fake/train/padded", "log_dir": "/Users/kulicna/Desktop/classifier/runs", "run_dir": "/Users/kulicna/Desktop/classifier/runs", "extractor_fn": "/Users/kulicna/Desktop/classifier/extractor/resnet152.pt", "feature_dim": "2048", "patch_size": "(112, 112)", "ignore_fraction": "1.0", "img_graph_augs": "[]", "img_graph_aug_params": "[]", "patch_augs": "[]", "patch_aug_params": "[]", "hidden_channels": "[512, 128, 32]", "num_node_classes": "2", "num_edge_classes": "2", "epochs": "200", "metrics": "['accuracy', 'confusion_matrix']"}
+{"image_dir": "/path/to/your/images", "grace_dir": "/path/to/your/annotations", "log_dir": "/path/to/your/classifier/runs", "run_dir": "/path/to/your/classifier/runs", "extractor_fn": "/path/to/your/extractor/resnet152.pt", "feature_dim": "2048", "patch_size": "(224, 224)", "keep_patch_fraction": "1.0", "img_graph_augs": "[]", "img_graph_aug_params": "[]", "patch_augs": "[]", "patch_aug_params": "[]", "hidden_channels": "[512, 128, 32]", "num_node_classes": "2", "num_edge_classes": "2", "epochs": "10", "metrics": "['accuracy', 'confusion_matrix']"}
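
For reference beyond the patch itself: the README note above states that all parameters are written into a single line and "the file will be parsed accordingly". Below is a minimal sketch of what reading the example `grace/config.json` back into typed Python values could look like, assuming a flat JSON dict whose values are string-encoded literals; the `load_config` helper name and the use of `ast.literal_eval` are illustrative assumptions, not the actual `grace` parser.

```python
import ast
import json


def load_config(config_file: str) -> dict:
    """Illustrative loader (hypothetical, not the grace API):
    reads the single-line JSON config and converts string-encoded
    values such as "2048", "(224, 224)" or "[512, 128, 32]" back
    into Python ints, tuples and lists."""
    with open(config_file, "r") as fh:
        raw = json.load(fh)

    parsed = {}
    for key, value in raw.items():
        try:
            # "2048" -> 2048, "(224, 224)" -> (224, 224), "[]" -> []
            parsed[key] = ast.literal_eval(value)
        except (ValueError, SyntaxError):
            # paths and other plain strings stay as-is
            parsed[key] = value
    return parsed


if __name__ == "__main__":
    config = load_config("grace/config.json")
    print(config["patch_size"], config["hidden_channels"])
```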