-
Notifications
You must be signed in to change notification settings - Fork 0
/
size_it.py
3244 lines (2738 loc) · 139 KB
/
size_it.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python3
"""
A tkinter GUI, size_it.py, for OpenCV processing of an image to obtain
sizes, means, and ranges of objects in a sample population. The distance
transform, watershed, and random walker algorithms are used interactively
by setting their parameter values with slide bars and pull-down menus.
Related image preprocessing factors like contrast, brightness, noise
reduction, and filtering are also adjusted interactively, with live
updating of the resulting images.
A report is provided of parameter settings, object count, individual
object sizes, and sample size mean and range, along with an annotated
image file of labeled objects.
USAGE
For command line execution, from within the count-and-size-main folder:
python3 -m size_it --about
python3 -m size_it --help
python3 -m size_it
python3 -m size_it --terminal
Windows systems may need to substitute 'python3' with 'py' or 'python'.
Displayed images can be scaled with key commands to help arrange windows
on the screen.
Image preprocessing functions do live updates as most settings are
changed. For some slider settings however, when prompted, click the
"Run..." button to initiate the final processing step.
Save settings report and the annotated image with the "Save" button.
Identified objects can be saved to individual image files.
Quit program with Esc key, Ctrl-Q key, the close window icon of the
report window, or from command line with Ctrl-C.
Requires Python 3.7 or later and the packages opencv-python, numpy,
scikit-image, scipy, and psutil.
See this distribution's requirements.txt file for details.
Developed in Python 3.8 and 3.9, tested up to 3.11.
"""
# Copyright (C) 2024 C.S. Echt, under GNU General Public License
# Standard library imports.
from datetime import datetime
from json import loads
from pathlib import Path
from statistics import mean, median
from sys import exit as sys_exit
from time import time
from typing import Union, List
# Third party imports.
# tkinter(Tk/Tcl) is included with most Python3 distributions,
# but may sometimes need to be regarded as third-party.
# There is a bug(?) in PyCharm that does not recognize cv2 memberships,
# so pylint and inspections flag every use of cv2.*.
# Be aware that this disables all checks of (E1101): *%s %r has no %r member%s*
# pylint: disable=no-member
# Guard the third-party imports so a missing or outdated package produces
# an actionable installation message instead of a bare traceback.
# ModuleNotFoundError is a subclass of ImportError, so catching
# ImportError alone covers both cases.
try:
    import cv2
    import numpy as np
    import tkinter as tk
    from tkinter import ttk, messagebox, filedialog
    from skimage.segmentation import watershed, random_walker
    from skimage.feature import peak_local_max
    from scipy import ndimage
except ImportError as import_err:
    sys_exit(
        '*** One or more required Python packages were not found'
        ' or need an update:\nOpenCV-Python, NumPy, scikit-image, SciPy, tkinter (Tk/Tcl).\n\n'
        'To install: from the current folder, run this command'
        ' for the Python package installer (PIP):\n'
        '   python3 -m pip install -r requirements.txt\n\n'
        'Alternative command formats (system dependent):\n'
        '   py -m pip install -r requirements.txt (Windows)\n'
        '   pip install -r requirements.txt\n\n'
        'You may also install directly using, for example, this command,'
        ' for the Python package installer (PIP):\n'
        '   python3 -m pip install opencv-python\n\n'
        'A package may already be installed, but needs an update;\n'
        '   this may be the case when the error message (below) is a bit cryptic\n'
        '   Example update command:\n'
        '   python3 -m pip install -U numpy\n\n'
        'On Linux, if tkinter is the problem, then you may need:\n'
        '   sudo apt-get install python3-tk\n\n'
        'See also: https://numpy.org/install/\n'
        '  https://tkdocs.com/tutorial/install.html\n'
        '  https://docs.opencv.org/4.6.0/d5/de5/tutorial_py_setup_in_windows.html\n\n'
        'Consider running this app and installing missing packages in a virtual environment.\n'
        f'Error message:\n{import_err}')
# Local application imports.
# pylint: disable=import-error
# Need to place local imports after try...except to ensure exit messaging.
from utility_modules import (vcheck,
utils,
manage,
constants as const,
to_precision as to_p)
# The app's display name, as determined by utils.program_name().
PROGRAM_NAME = utils.program_name()
class ProcessImage(tk.Tk):
    """
    A suite of OpenCV methods to apply various image processing
    functions involved in segmenting objects from an image file.

    Class methods:
        update_image
        adjust_contrast
        reduce_noise
        filter_image
        th_and_dist_trans
        make_labeled_array
        watershed_segmentation
        draw_ws_segments
        randomwalk_segmentation
        draw_rw_segments
    """

    def __init__(self):
        super().__init__()

        # Note: The matching selector widgets for the following
        #  control variables are in ContourViewer __init__.

        # Tk control variables for the preprocessing sliders;
        #  names match keys used by config_sliders().
        self.slider_val = {
            # For variables in config_sliders()...
            'alpha': tk.DoubleVar(),
            'beta': tk.IntVar(),
            'noise_k': tk.IntVar(),
            'noise_iter': tk.IntVar(),
            'filter_k': tk.IntVar(),
            'plm_mindist': tk.IntVar(),
            'plm_footprint': tk.IntVar(),
            'circle_r_min': tk.IntVar(),
            'circle_r_max': tk.IntVar(),
        }

        # Display scaling factor applied in update_image().
        self.scale_factor = tk.DoubleVar()

        # Tk control variables for the Combobox selectors;
        #  names match keys used by config_comboboxes().
        self.cbox_val = {
            # For textvariables in config_comboboxes()...
            'morph_op': tk.StringVar(),
            'morph_shape': tk.StringVar(),
            'filter_type': tk.StringVar(),
            'threshold_type': tk.StringVar(),
            'dt_type': tk.StringVar(),
            'dt_mask_size': tk.StringVar(),
            'ws_connectivity': tk.StringVar(),
            'size_std': tk.StringVar(),
            # For color_cbox textvariable in setup_start_window()...
            'annotation_color': tk.StringVar(),
        }

        # Arrays of images to be processed. When used within a method,
        #  the purpose of self.tkimg[*] as an instance attribute is to
        #  retain the attribute reference and thus prevent garbage collection.
        # Dict values will be defined for panels of PIL ImageTk.PhotoImage
        #  with Label images displayed in their respective tkimg_window Toplevel.
        # The cvimg images are numpy arrays.
        # The Watershed and Random Walker items are capitalized b/c they
        #  are also used for reporting the segmentation algorithm employed.
        self.tkimg: dict = {}
        self.cvimg: dict = {}
        image_names = ('input',
                       'gray',
                       'contrasted',
                       'reduced_noise',
                       'filtered',
                       'thresholded',
                       'transformed',
                       'Watershed',
                       'Random Walker',
                       'segmented_objects',
                       'sized')

        # Seed both dicts so every pipeline stage has a placeholder
        #  before any processing runs.
        for _name in image_names:
            self.tkimg[_name] = tk.PhotoImage()
            self.cvimg[_name] = const.STUB_ARRAY

        # img_label dictionary is set up in ViewImage.setup_image_windows(),
        #  but is used in all Class methods here.
        self.img_label: dict = {}

        # metrics dict is populated in ViewImage.open_input().
        self.metrics: dict = {}

        # Segmentation state shared across methods and with ViewImage.
        self.num_dt_segments: int = 0
        self.ws_basins: list = []
        self.rw_contours: list = []
        self.sorted_size_list: list = []
        self.unit_per_px = tk.DoubleVar()
        self.num_sigfig: int = 0
        self.time_start: float = 0
        self.elapsed: float = 0

    def update_image(self,
                     tkimg_name: str,
                     cvimg_array: np.ndarray) -> None:
        """
        Process a cv2 image array to use as a tk PhotoImage and update
        (configure) its window label for immediate display, at scale.
        Calls module manage.tk_image(). Called from all methods that
        display an image.

        Args:
            tkimg_name: The key name used in the tkimg and img_label
                dictionaries.
            cvimg_array: The new cv2 processed numpy image array.

        Returns:
            None
        """
        # Storing on self.tkimg keeps a live reference so the PhotoImage
        #  is not garbage-collected while displayed.
        self.tkimg[tkimg_name] = manage.tk_image(
            image=cvimg_array,
            scale_factor=self.scale_factor.get()
        )
        self.img_label[tkimg_name].configure(image=self.tkimg[tkimg_name])

    def adjust_contrast(self) -> None:
        """
        Adjust contrast of the input self.cvimg['gray'] image.
        Updates contrast and brightness via alpha and beta sliders.
        Displays contrasted and redux noise images.
        Called by process(). Calls update_image().

        Returns:
            None
        """
        # Source concepts:
        # https://docs.opencv.org/3.4/d3/dc1/tutorial_basic_linear_transform.html
        # https://stackoverflow.com/questions/39308030/
        #   how-do-i-increase-the-contrast-of-an-image-in-python-opencv
        self.cvimg['contrasted'] = (
            cv2.convertScaleAbs(
                src=self.cvimg['gray'],
                alpha=self.slider_val['alpha'].get(),
                beta=self.slider_val['beta'].get(),
            )
        )

        self.update_image(tkimg_name='contrasted',
                          cvimg_array=self.cvimg['contrasted'])

    def reduce_noise(self) -> None:
        """
        Reduce noise in the contrast adjust image erode and dilate actions
        of cv2.morphologyEx operations.
        Called by preprocess(). Calls update_image().

        Returns:
            None
        """
        # Need (sort of) kernel to be odd, to avoid an annoying shift of
        #  the displayed image.
        _k = self.slider_val['noise_k'].get()
        noise_k = _k + 1 if _k % 2 == 0 else _k
        iteration = self.slider_val['noise_iter'].get()

        # If redux iteration slider is set to 0, then proceed without,
        #  noise reduction and use the contrast image from adjust_contrast().
        if iteration == 0:
            self.update_image(tkimg_name='reduced_noise',
                              cvimg_array=self.cvimg['contrasted'])
            return

        # Need integers for the cv function parameters.
        morph_shape = const.CV['morph_shape'][self.cbox_val['morph_shape'].get()]
        morph_op = const.CV['morph_op'][self.cbox_val['morph_op'].get()]

        # See: https://docs.opencv.org/3.0-beta/modules/imgproc/doc/filtering.html
        #  on page, see: cv2.getStructuringElement(shape, ksize[, anchor])
        # see: https://docs.opencv.org/4.x/d9/d61/tutorial_py_morphological_ops.html
        element = cv2.getStructuringElement(
            shape=morph_shape,
            ksize=(noise_k, noise_k))

        # Use morphologyEx as a shortcut for erosion followed by dilation.
        # Read https://docs.opencv.org/3.4/db/df6/tutorial_erosion_dilatation.html
        #  https://theailearner.com/tag/cv-morphologyex/
        # The op argument from const.CV['morph_op'] options:
        #  MORPH_OPEN is useful to remove noise and small features.
        #  MORPH_CLOSE is better for certain images, but generally is worse.
        #  MORPH_HITMISS helps to separate close objects by shrinking them.
        self.cvimg['reduced_noise'] = cv2.morphologyEx(
            src=self.cvimg['contrasted'],
            op=morph_op,
            kernel=element,
            iterations=iteration,
            borderType=cv2.BORDER_DEFAULT,
        )

        self.update_image(tkimg_name='reduced_noise',
                          cvimg_array=self.cvimg['reduced_noise'])

    def filter_image(self) -> None:
        """
        Applies a filter selection to blur the reduced noise image
        to prepare for threshold segmentation. Can also serve as a
        specialized noise reduction step.
        Called by preprocess().
        Calls update_image().

        Returns:
            None
        """
        filter_selected = self.cbox_val['filter_type'].get()
        border_type = cv2.BORDER_ISOLATED  # cv2.BORDER_REPLICATE #cv2.BORDER_DEFAULT
        noise_iter = self.slider_val['noise_iter'].get()
        _k = self.slider_val['filter_k'].get()

        # If filter kernel slider and noise iteration are both set to 0,
        #  then proceed without filtering and use the contrasted image.
        if _k == 0 and noise_iter == 0:
            self.update_image(tkimg_name='filtered',
                              cvimg_array=self.cvimg['contrasted'])
            return

        # If filter kernel slider is set to 0, then proceed without
        #  filtering and use the reduced noise image.
        if _k == 0:
            self.update_image(tkimg_name='filtered',
                              cvimg_array=self.cvimg['reduced_noise'])
            return

        # Need to filter the contrasted image when noise reduction is
        #  not applied.
        if noise_iter == 0:
            image2filter = self.cvimg['contrasted']
        else:
            image2filter = self.cvimg['reduced_noise']

        # cv2.GaussianBlur and cv2.medianBlur need to have odd kernels,
        #  but cv2.blur and cv2.bilateralFilter will shift image between
        #  even and odd kernels, so just make it odd for everything.
        # NOTE: don't allow a filter kernel value of 0 to be passed to
        #  cv2.bilateralFilter b/c it is too CPU intensive; a _k of zero
        #  results in a method return (above).
        filter_k = _k + 1 if _k % 2 == 0 else _k

        # Apply a filter to blur edges or image interior.
        # NOTE: filtered image dtype is uint8
        # Bilateral parameters:
        #  https://docs.opencv.org/3.4/d4/d86/group__imgproc__filter.html#ga9d7064d478c95d60003cf839430737ed
        #  from doc: Sigma values: For simplicity, you can set the 2 sigma
        #  values to be the same. If they are small (< 10), the filter
        #  will not have much effect, whereas if they are large (> 150),
        #  they will have a very strong effect, making the image look "cartoonish".
        # NOTE: The larger the sigma the greater the effect of kernel size d.
        # NOTE: d=-1 or 0, is very CPU intensive.
        # Gaussian parameters:
        #  see: https://theailearner.com/2019/05/06/gaussian-blurring/
        #  see: https://docs.opencv.org/4.x/d4/d13/tutorial_py_filtering.html
        #  If only sigmaX is specified, sigmaY is taken as the same as sigmaX.
        #  If both are given as zeros, they are calculated from the kernel size.
        #  Gaussian blurring is highly effective in removing Gaussian noise
        #  from an image.
        if filter_selected == 'cv2.blur':
            self.cvimg['filtered'] = cv2.blur(
                src=image2filter,
                ksize=(filter_k, filter_k),
                borderType=border_type)
        elif filter_selected == 'cv2.bilateralFilter':
            self.cvimg['filtered'] = cv2.bilateralFilter(
                src=image2filter,
                d=filter_k,
                sigmaColor=100,
                sigmaSpace=100,
                borderType=border_type)
        elif filter_selected == 'cv2.GaussianBlur':
            self.cvimg['filtered'] = cv2.GaussianBlur(
                src=image2filter,
                ksize=(filter_k, filter_k),
                sigmaX=0,
                sigmaY=0,
                borderType=border_type)
        elif filter_selected == 'cv2.medianBlur':
            self.cvimg['filtered'] = cv2.medianBlur(
                src=image2filter,
                ksize=filter_k)

        self.update_image(tkimg_name='filtered',
                          cvimg_array=self.cvimg['filtered'])

    def th_and_dist_trans(self) -> None:
        """
        Produces a threshold image from the filtered image. This image
        is used for masking in watershed_segmentation(). It is separate
        here so that its display can be updated independently of running
        watershed_segmentation(). Called by preprocess().

        Returns:
            None
        """
        th_type: int = const.CV['threshold_type'][self.cbox_val['threshold_type'].get()]
        filter_k = self.slider_val['filter_k'].get()
        noise_iter = self.slider_val['noise_iter'].get()
        dt_type: int = const.CV['distance_trans_type'][self.cbox_val['dt_type'].get()]
        mask_size = int(self.cbox_val['dt_mask_size'].get())

        # Note from doc: Currently, the Otsu's and Triangle methods
        #  are implemented only for 8-bit single-channel images.
        # For other cv2.THRESH_*, thresh needs to be manually provided.
        # The thresh parameter is determined automatically (0 is placeholder).
        # Convert values above thresh to a maxval of 255, white.
        # Need to use type *_INVERSE for black-on-white images.

        # Choose the most-processed image available, mirroring the
        #  skip-logic in filter_image() and reduce_noise().
        if filter_k == 0 and noise_iter == 0:
            image2threshold = self.cvimg['contrasted']
        elif filter_k == 0:
            image2threshold = self.cvimg['reduced_noise']
        else:
            image2threshold = self.cvimg['filtered']

        _, self.cvimg['thresholded'] = cv2.threshold(
            src=image2threshold,
            thresh=0,
            maxval=255,
            type=th_type)

        # Calculate the distance transform of the objects' thresholds,
        #  by replacing each foreground (non-zero) element, with its
        #  shortest distance to the background (any zero-valued element).
        #  Returns a float64 ndarray.
        # Note that maskSize=0 calculates the precise mask size only for
        #  cv2.DIST_L2. cv2.DIST_L1 and cv2.DIST_C always use maskSize=3.
        self.cvimg['transformed']: np.ndarray = cv2.distanceTransform(
            src=self.cvimg['thresholded'],
            distanceType=dt_type,
            maskSize=mask_size)

        self.update_image(tkimg_name='thresholded',
                          cvimg_array=self.cvimg['thresholded'])
        # self.update_image(tkimg_name='transformed',
        #                   cvimg_array=np.uint8(self.cvimg['transformed']))

    def make_labeled_array(self) -> np.ndarray:
        """
        Finds peak local maximum as defined in skimage.feature. The
        array is used as the 'markers' or 'labels' arguments in
        segmentation methods.
        Called as process() argument for watershed_segmentation() or
        randomwalk_segmentation().

        Returns: A labeled ndarray to use in one of the segmentation
                algorithms.
        """
        min_dist: int = self.slider_val['plm_mindist'].get()

        # Square structuring element for the local-maximum search.
        p_kernel: tuple = (self.slider_val['plm_footprint'].get(),
                           self.slider_val['plm_footprint'].get())
        plm_kernel = np.ones(shape=p_kernel, dtype=np.uint8)

        # Generate the markers as local maxima of the distance to the background.
        # see: https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html
        #  https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_watershed.html
        # Don't use exclude_border; objects touching image border will be excluded
        #  in ViewImage.select_and_size_objects().
        local_max: ndimage = peak_local_max(image=self.cvimg['transformed'],
                                            min_distance=min_dist,
                                            exclude_border=False,  # True is min_dist
                                            num_peaks=np.inf,
                                            footprint=plm_kernel,
                                            labels=self.cvimg['thresholded'],
                                            num_peaks_per_label=np.inf,
                                            p_norm=np.inf)  # Chebyshev distance
                                            # p_norm=2,  # Euclidean distance

        mask = np.zeros(shape=self.cvimg['transformed'].shape, dtype=bool)
        # Set background to True (not zero: True or 1)
        mask[tuple(local_max.T)] = True

        # Note that markers are single px, colored in grayscale by their label index.
        labeled_array, self.num_dt_segments = ndimage.label(input=mask)

        # Source: http://scipy-lectures.org/packages/scikit-image/index.html
        # From the doc: labels: array of ints, of same shape as data without channels dimension.
        #  Array of seed markers labeled with different positive integers for
        #  different phases. Zero-labeled pixels are unlabeled pixels.
        #  Negative labels correspond to inactive pixels that are not taken into
        #  account (they are removed from the graph).
        # Replace thresh_img background with -1 to ignore those pixels.
        # NOTE(review): this sets -1 where the label value equals the
        #  threshold pixel value; background matches as 0 == 0, but a
        #  foreground label of 255 would also match a white (255) threshold
        #  pixel — confirm this elementwise comparison is the intent.
        labeled_array[labeled_array == self.cvimg['thresholded']] = -1

        return labeled_array

    def watershed_segmentation(self, labeled_array: np.ndarray) -> None:
        """
        Segment objects with skimage.segmentation.watershed().
        Argument *array* calls the make_labeled_array() method that
        returns a labeled array.
        Called from process().

        Args:
            labeled_array: A skimage.features.peak_local_max array,
                e.g., from make_labeled_array().

        Returns: None
        """
        ws_connectivity = int(self.cbox_val['ws_connectivity'].get())  # 1, 4 or 8.

        # Note that the minus symbol with the image argument self.cvimg['transformed']
        #  converts the distance transform into a threshold. Watershed can work
        #  without that conversion, but does a better job identifying segments with it.
        # https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_compact_watershed.html
        # https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_watershed.html
        # Need watershed_line to show boundaries on displayed watershed contour_pointset.
        # compactness=1.0 based on: DOI:10.1109/ICPR.2014.181
        self.cvimg['segmented_objects']: np.ndarray = watershed(
            image=-self.cvimg['transformed'],
            markers=labeled_array,
            connectivity=ws_connectivity,
            mask=self.cvimg['thresholded'],
            compactness=1.0,
            watershed_line=True)

    def draw_ws_segments(self) -> None:
        """
        Find and draw contours for watershed basin segments.
        Called from process() with a watershed_segmentation() arg.
        Calls update_image().

        Returns: None
        """
        # self.ws_basins is used in select_and_size_objects() to draw enclosing circles.
        # Convert image array from int32 to uint8 data type to find contour_pointset.
        #  Conversion with cv2.convertScaleAbs(watershed_img) also works.
        # NOTE: Use method=cv2.CHAIN_APPROX_NONE when masking individual segments
        #  in select_and_export_objects(). CHAIN_APPROX_SIMPLE can work, but NONE is best?
        self.ws_basins, _ = cv2.findContours(image=np.uint8(self.cvimg['segmented_objects']),
                                             mode=cv2.RETR_EXTERNAL,
                                             method=cv2.CHAIN_APPROX_NONE)

        # Convert watershed array data from int32 to allow colored contours.
        self.cvimg['Watershed'] = cv2.cvtColor(src=np.uint8(self.cvimg['segmented_objects']),
                                               code=cv2.COLOR_GRAY2BGR)

        # Need to prevent a thickness value of 0, yet have it be a function
        #  of image size so that it looks good in scaled display. Because the
        #  watershed_img has a black background, the contour lines are
        #  easier to see and look better if they are thinner than in the
        #  annotated 'sized' image where metrics['line_thickness'] is used.
        # When user changes line thickness with + & - keys, only the 'sized'
        #  image updates; the watershed image displays the original thickness.
        if self.metrics['line_thickness'] == 1:
            line_thickness = 1
        else:
            line_thickness = self.metrics['line_thickness'] // 2

        # Need to prevent black contours because they won't show on the
        #  black background of the watershed_img image.
        if self.cbox_val['annotation_color'].get() == 'black':
            line_color = const.COLORS_CV['blue']
        else:
            line_color = const.COLORS_CV[self.cbox_val['annotation_color'].get()]

        cv2.drawContours(image=self.cvimg['Watershed'],
                         contours=self.ws_basins,
                         contourIdx=-1,  # do all contours
                         color=line_color,
                         thickness=line_thickness,
                         lineType=cv2.LINE_AA)

        self.update_image(tkimg_name='segmented_objects',
                          cvimg_array=self.cvimg['Watershed'])

        # Now need to draw enclosing circles around watershed segments and
        #  annotate with object sizes in ViewImage.select_and_size_objects().

    def randomwalk_segmentation(self, labeled_array: np.ndarray) -> None:
        """
        Segment objects with skimage.segmentation.random_walker().
        Argument *array* calls the make_labeled_array() method that
        returns a labeled array.
        Called from process().

        Args:
            labeled_array: A skimage.features.peak_local_max array,
                e.g., from make_labeled_array().

        Returns: None
        """
        # Note that cvimg['segmented_objects'] is used for both watershed
        #  and random_walker images because both share the Label() grid for
        #  img_label['segmented_objects'] window.
        # NOTE: beta and tol values were empirically determined during development
        #  for best performance using sample images run on an Intel i9600k @ 4.8 GHz.
        #  Default beta & tol arguments take ~8x longer to process for similar results.
        # Need pyamg installed with mode='cg_mg'. 'cg_j' works poorly with these args.
        self.cvimg['segmented_objects']: np.ndarray = random_walker(
            data=self.cvimg['thresholded'],
            labels=labeled_array,
            beta=5,  # default: 130,
            mode='cg_mg',  # default: 'cg_j'
            tol=0.1,  # default: 1.e-3
            copy=True,
            return_full_prob=False,
            spacing=None,
            prob_tol=0.1,  # default: 1.e-3
            channel_axis=None)

        # self.rw_contours is used in select_and_size_objects() to draw
        #  enclosing circles and calculate sizes of segmented objects.
        # Note: This for loop is much more stable, and in most cases faster,
        #  than using parallelization modules (parallel.py and pool-worker.py
        #  in utility_modules).
        self.rw_contours.clear()
        for label in np.unique(ar=self.cvimg['segmented_objects']):

            # If the label is zero, we are examining the 'background',
            #  so simply ignore it.
            if label == 0:
                continue

            # ...otherwise, allocate memory for the label region and draw
            #  it on the mask.
            mask = np.zeros(shape=self.cvimg['segmented_objects'].shape, dtype="uint8")
            mask[self.cvimg['segmented_objects'] == label] = 255

            # Detect contours in the mask, grab the largest, and add it
            #  to the list used to draw, size, export, etc. the RW ROIs.
            contours, _ = cv2.findContours(image=mask.copy(),
                                           mode=cv2.RETR_EXTERNAL,
                                           method=cv2.CHAIN_APPROX_SIMPLE)
            self.rw_contours.append(max(contours, key=cv2.contourArea))

    def draw_rw_segments(self) -> None:
        """
        Draw and display the segments from randomwalk_segmentation().
        Called from process().
        Calls update_image().

        Returns: None
        """
        # Convert array data from int32 to allow colored contours.
        self.cvimg['Random Walker'] = cv2.cvtColor(src=np.uint8(self.cvimg['segmented_objects']),
                                                   code=cv2.COLOR_GRAY2BGR)

        # Need to prevent white or black contours because they
        #  won't show on the white background with black segments.
        # NOTE(review): this is a substring test against 'white, black',
        #  not a membership test in a list; any color name that happens to
        #  be a substring (e.g. 'white') matches — confirm the selectable
        #  color names make this safe.
        if self.cbox_val['annotation_color'].get() in 'white, black':
            line_color: tuple = const.COLORS_CV['blue']
        else:
            line_color = const.COLORS_CV[self.cbox_val['annotation_color'].get()]

        # Note: this does not update until process() is called.
        #  It shares a window with the distance transform image, which
        #  updates with slider or combobox preprocessing changes.
        cv2.drawContours(image=self.cvimg['Random Walker'],
                         contours=self.rw_contours,
                         contourIdx=-1,  # do all contours
                         color=line_color,
                         thickness=self.metrics['line_thickness'],
                         lineType=cv2.LINE_AA)

        self.update_image(tkimg_name='segmented_objects',
                          cvimg_array=self.cvimg['Random Walker'])

        # Now need to draw enclosing circles around RW segments and
        #  annotate with object sizes in ViewImage.select_and_size_objects().
class ViewImage(ProcessImage):
"""
A suite of methods to display cv segments based on selected settings
and parameters that are in ProcessImage() methods.
Methods:
open_input
check_for_saved_settings
set_auto_scale_factor
import_settings
delay_size_std_info_msg
show_info_message
configure_circle_r_sliders
widget_control
validate_px_size_entry
validate_custom_size_entry
set_size_standard
is_selected_contour
measure_object
annotate_object
select_and_size_objects
mask_for_export
define_roi
select_and_export_objects
report_results
preprocess
process
process_sizes
"""
    def __init__(self):
        super().__init__()

        # True until the first full processing pass completes; used to
        #  gate first-run-only behavior (e.g., error handling in open_input()).
        self.first_run: bool = True

        # Frames that hold the report text and the selector widgets.
        self.report_frame = tk.Frame()
        self.selectors_frame = tk.Frame()
        # self.configure(bg='green')  # for development.

        # The control variables with matching names for these Scale() and
        #  Combobox() widgets are instance attributes in ProcessImage.
        self.slider = {
            'alpha': tk.Scale(master=self.selectors_frame),
            'alpha_lbl': tk.Label(master=self.selectors_frame),

            'beta': tk.Scale(master=self.selectors_frame),
            'beta_lbl': tk.Label(master=self.selectors_frame),

            'noise_k': tk.Scale(master=self.selectors_frame),
            'noise_k_lbl': tk.Label(master=self.selectors_frame),

            'noise_iter': tk.Scale(master=self.selectors_frame),
            'noise_iter_lbl': tk.Label(master=self.selectors_frame),

            'filter_k': tk.Scale(master=self.selectors_frame),
            'filter_k_lbl': tk.Label(master=self.selectors_frame),

            'plm_mindist': tk.Scale(master=self.selectors_frame),
            'plm_mindist_lbl': tk.Label(master=self.selectors_frame),

            'plm_footprint': tk.Scale(master=self.selectors_frame),
            'plm_footprint_lbl': tk.Label(master=self.selectors_frame),

            'circle_r_min': tk.Scale(master=self.selectors_frame),
            'circle_r_min_lbl': tk.Label(master=self.selectors_frame),

            'circle_r_max': tk.Scale(master=self.selectors_frame),
            'circle_r_max_lbl': tk.Label(master=self.selectors_frame),
        }

        self.cbox = {
            'morph_op': ttk.Combobox(master=self.selectors_frame),
            'morph_op_lbl': tk.Label(master=self.selectors_frame),

            'morph_shape': ttk.Combobox(master=self.selectors_frame),
            'morph_shape_lbl': tk.Label(master=self.selectors_frame),

            'filter_type': ttk.Combobox(master=self.selectors_frame),
            'filter_lbl': tk.Label(master=self.selectors_frame),

            'threshold_type': ttk.Combobox(master=self.selectors_frame),
            'th_type_lbl': tk.Label(master=self.selectors_frame),

            'dt_type': ttk.Combobox(master=self.selectors_frame),
            'dt_type_lbl': tk.Label(master=self.selectors_frame),

            'dt_mask_size': ttk.Combobox(master=self.selectors_frame),
            'dt_mask_size_lbl': tk.Label(master=self.selectors_frame),

            'ws_connectivity': ttk.Combobox(master=self.selectors_frame),
            'ws_connectivity_lbl': tk.Label(master=self.selectors_frame),

            'size_std_lbl': tk.Label(master=self.selectors_frame),
            'size_std': ttk.Combobox(master=self.selectors_frame),
        }

        # Entry widgets and their StringVars for the size standard values.
        self.size_std = {
            'px_entry': tk.Entry(master=self.selectors_frame),
            'px_val': tk.StringVar(master=self.selectors_frame),
            'px_lbl': tk.Label(master=self.selectors_frame),
            'custom_entry': tk.Entry(master=self.selectors_frame),
            'custom_val': tk.StringVar(master=self.selectors_frame),
            'custom_lbl': tk.Label(master=self.selectors_frame),
        }

        # Action buttons; commands are assigned in configure_buttons().
        self.button = {
            'process_ws': ttk.Button(master=self),
            'process_rw': ttk.Button(master=self),
            'save_results': ttk.Button(master=self),
            'new_input': ttk.Button(master=self),
            'export_objects': ttk.Button(master=self),
            'export_settings': ttk.Button(master=self),
            'reset': ttk.Button(master=self),
        }

        # Screen pixel width is defined in set_auto_scale_factor().
        self.screen_width: int = 0

        # Defined in setup_start_window() Radiobuttons.
        self.do_inverse_th = tk.BooleanVar()

        # Info label is gridded in configure_main_window().
        self.info_txt = tk.StringVar()
        self.info_label = tk.Label(master=self, textvariable=self.info_txt)

        # Flag user's choice of segment export types. Defined in
        #  configure_buttons() _export_objects() Button cmd.
        self.export_segment: bool = True
        self.export_hull: bool = False

        # Defined in widget_control() to reset values that user may have
        #  tried to change during prolonged processing times.
        self.slider_values: list = []

        # Input file/folder bookkeeping, populated in open_input().
        self.input_file_path: str = ''
        self.input_file_name: str = ''
        self.input_folder_name: str = ''
        self.input_folder_path: str = ''
        self.input_ht: int = 0
        self.input_w: int = 0

        # Saved-settings state, used by check_for_saved_settings()
        #  and import_settings().
        self.settings_file_path = Path('')
        self.use_saved_settings: bool = False
        self.imported_settings: dict = {}

        # Reporting and sizing results state.
        self.seg_algorithm: str = ''
        self.report_txt: str = ''
        self.selected_sizes: List[float] = []
        self.object_labels: List[list] = []
def open_input(self, parent: Union[tk.Toplevel, 'SetupApp']) -> bool:
    """
    Provides an open file dialog to select an initial or new input
    image file, then loads it, derives its grayscale image and path
    metadata, and sets display metrics. Also sets a scale slider
    value for the displayed img.
    Called from setup_start_window() or "New input" button.

    Args:
        parent: The window or mainloop Class over which to place the
            file dialog, e.g., start_win or self.

    Returns:
        True or False depending on whether input was selected.
    """
    self.input_file_path = filedialog.askopenfilename(
        parent=parent,
        title='Select input image',
        filetypes=[('JPG', '*.jpg'),
                   ('JPG', '*.jpeg'),
                   ('JPG', '*.JPG'),  # used for iPhone images
                   ('PNG', '*.png'),
                   ('TIFF', '*.tiff'),
                   ('TIFF', '*.tif'),
                   ('All', '*.*')],
    )

    # When user selects an input, check whether it can be used by OpenCV.
    # If so, open it, and proceed. If user selects "Cancel" instead of
    # selecting a file, then quit if at the start window, otherwise
    # simply close the filedialog (default action) because this was
    # called from the "New input" button in the mainloop (self) window.
    # Need to call quit_gui() without confirmation b/c a confirmation
    # dialog answer of "No" throws an error during file input.
    try:
        if self.input_file_path:
            self.cvimg['input'] = cv2.imread(self.input_file_path)
            # cv2.imread() returns channels in BGR order, so BGR2GRAY
            # (not RGBA2GRAY) applies the luminance weights to the
            # correct channels.
            self.cvimg['gray'] = cv2.cvtColor(src=self.cvimg['input'],
                                              code=cv2.COLOR_BGR2GRAY)
            self.input_ht, self.input_w = self.cvimg['gray'].shape
            self.input_file_name = Path(self.input_file_path).name
            self.input_folder_path = str(Path(self.input_file_path).parent)
            self.input_folder_name = str(Path(self.input_folder_path).name)
            self.settings_file_path = Path(self.input_folder_path, const.SETTINGS_FILE_NAME)
        elif parent != self:
            utils.quit_gui(mainloop=self, confirm=False)
        else:  # no input and parent is self (app).
            return False
    except cv2.error as cverr:
        msg = f'File: {self.input_file_name} cannot be used.'
        if self.first_run:
            print(f'{msg} Exiting with error:\n{cverr}')
            messagebox.showerror(
                title="Bad input file",
                message=msg + '\nRestart and try a different file.\nQuitting...')
            utils.quit_gui(mainloop=self, confirm=False)
        else:
            messagebox.showerror(
                title="Bad input file",
                message=msg + '\nUse "New input" to try another file.')
            return False

    # Auto-set images' scale factor based on input image size.
    # Can be later reset with keybindings in bind_scale_adjustment().
    # circle_r_slider ranges are a function of input image size.
    self.metrics = manage.input_metrics(img=self.cvimg['input'])
    self.set_auto_scale_factor()
    self.configure_circle_r_sliders()
    return True
def check_for_saved_settings(self) -> None:
    """
    Following image file import, need to check whether user wants to
    use saved settings. The JSON settings file is expected to be in
    the input image's folder. Calls import_settings() when the user
    answers yes.
    """
    if not self.settings_file_path.exists():
        return

    # On the first run there are no "current" settings to keep, so
    # the "No" option falls back to program defaults instead.
    alternative = 'default' if self.first_run else 'current'
    choice = ('Yes, from JSON file in\n'
              f' folder: {self.input_folder_name}.\n'
              f'No, use {alternative} settings.')

    # Note: a plain string (not an f-string) is used for the title;
    # it has no interpolated fields.
    self.use_saved_settings = messagebox.askyesno(
        title="Use saved settings?",
        detail=choice)

    if self.use_saved_settings:
        self.import_settings()
def set_auto_scale_factor(self) -> None:
    """
    As a convenience for the user, set a default scale factor that
    lets displayed images fit easily on the screen: about 1/3 of
    screen pixel width for landscape inputs, or 2/3 of screen pixel
    height for portrait inputs.

    Returns: None
    """
    # Note that the scale factor is not included in saved_settings.json.
    is_landscape = self.input_w >= self.input_ht
    if is_landscape:
        fit_scale = (self.screen_width * 0.33) / self.input_w
    else:
        fit_scale = (self.winfo_screenheight() * 0.66) / self.input_ht
    self.scale_factor.set(round(fit_scale, 2))
def import_settings(self) -> None:
    """
    The dictionary of saved settings, imported via json.loads(),
    that are to be applied to a new image. Includes all settings
    except the scale_factor for window image size.
    Returns early, leaving current settings unchanged, when the
    settings file cannot be read or parsed.
    """
    try:
        with open(self.settings_file_path, mode='rt', encoding='utf-8') as _fp:
            settings_json = _fp.read()
            self.imported_settings: dict = loads(settings_json)
    except FileNotFoundError as fnf:
        print('The settings JSON file could not be found.\n'
              f'{fnf}')
        # Must return: falling through would index an empty/stale
        # imported_settings dict and raise KeyError.
        return
    except OSError as oserr:
        print('There was a problem reading the settings JSON file.\n'
              f'{oserr}')
        return
    except ValueError as verr:  # includes json.JSONDecodeError
        print('The settings JSON file could not be parsed.\n'
              f'{verr}')
        return

    # Set/Reset Scale widgets.
    for _name in self.slider_val:
        self.slider_val[_name].set(self.imported_settings[_name])

    # Set/Reset Combobox widgets.
    for _name in self.cbox_val:
        self.cbox_val[_name].set(self.imported_settings[_name])

    # Set the threshold selection Radiobutton in setup_start_window().
    self.do_inverse_th.set(self.imported_settings['do_inverse_th'])

    self.metrics['font_scale'] = self.imported_settings['font_scale']
    self.metrics['line_thickness'] = self.imported_settings['line_thickness']

    self.size_std['px_val'].set(self.imported_settings['px_val'])
    self.size_std['custom_val'].set(self.imported_settings['custom_val'])

    self.seg_algorithm = self.imported_settings['seg_algorithm']
def delay_size_std_info_msg(self) -> None:
    """
    When no size standard values are entered, display, after a few
    seconds, the size standard instructions in the mainloop (app)
    window. An internal closure calls show_info_message().
    Called from process(), process_sizes(), and
    configure_buttons._new_input().

    Returns: None
    """

    def _show_msg() -> None:
        _info = ('\nWhen the entered pixel size is 1 AND selected size standard\n'
                 'is "None", then the size units displayed size are pixels.\n'
                 'Size units are millimeters for any pre-set size standard.\n'
                 f'(Processing time elapsed: {self.elapsed})\n')

        self.show_info_message(info=_info, color='black')

    # Only nag when both values are still at their "no standard" state.
    px_is_default = self.size_std['px_val'].get() == '1'
    std_is_none = self.cbox_val['size_std'].get() == 'None'
    if px_is_default and std_is_none:
        self.after(ms=6000, func=_show_msg)
def show_info_message(self, info: str, color: str) -> None:
"""
Configure for display and update the informational message in
the report and settings window.
Args:
info: The text string of the message to display.
color: The font color string, either as a key in the
const.COLORS_TK dictionary or as a Tk compatible fg
color string, i.e. hex code or X11 named color.