########################################################################
# This is a TEMPLATE, please don't change it!                          #
# Variables prefixed with ${ML_INSTALL_ are replaced by install.py     #
########################################################################
substitutions:
# - Substitutions can be used like BASH variables
# - THEY MUST BE WRAPPED IN ${}: ${THIS_WILL_WORK} works, $THIS_WONT does not
# - Order matters!
DATA_DIR: ${ML_INSTALL_DATA_DIR}
TMP_DIR: ${ML_INSTALL_TMP_DIR}
CFG_DIR: ${ML_INSTALL_CFG_DIR}
LOGGING_DIR: ${ML_INSTALL_LOGGING_DIR}
MODEL_DIR: ${ML_INSTALL_MODEL_DIR}
SERVER_ADDRESS: ${ML_INSTALL_SERVER_ADDRESS}
SERVER_PORT: ${ML_INSTALL_SERVER_PORT}
# - The file below contains substitutions that will be imported into this file (useful for secrets)
# ** NOTE: The 'IncludeFile' directive must be contained in the 'substitutions' section
IncludeFile: ${CFG_DIR}/secrets.yml
# --------- Example
EXAMPLE: "World!"
THIS IS AN EXAMPLE:
of substitution variables: "Hello, ${EXAMPLE}"
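# - A minimal sketch of what the included secrets.yml could define; both
# - names below are referenced later in this file (values are placeholders):
#
# JWT_SIGN_PHRASE: "a long random signing phrase"
# PLATEREC_API_KEY: "your plate recognizer api key"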
# - Set some uvicorn config options
uvicorn:
# - Allow proxy headers to be used for determining the client's IP address
# - This is useful when you are running behind a proxy
proxy_headers: no
# - If you allow proxy headers, define which IPs are allowed to set them
# - This is in YAML list format, so if you have multiple IPs, add them as such
# - ** uvicorn does not support CIDR notation, so you must add each IP individually **
forwarded_allow_ips:
- "127.0.0.1"
debug: no
system:
# - Path where the system will store configuration files
config_path: ${CFG_DIR}
# - Path where the system will store variable data (tokens, serialized data, etc)
variable_data_path: ${DATA_DIR}
# - Path where temp files will be stored
tmp_path: ${TMP_DIR}
# - Path where various images will be stored
image_dir: ${DATA_DIR}/images
# - Path where the ML model folder structure will be stored
model_dir: ${MODEL_DIR}
# - Maximum threaded processes, adjust to your core count and load.
thread_workers: 4
server:
# - Set interface IP to listen on, 0.0.0.0 will listen on all IPv4 interfaces
address: ${SERVER_ADDRESS}
port: ${SERVER_PORT}
auth:
# - Enable/Disable authentication
# - If disabled, anyone can access the API, but they must still log in and receive a token.
# - Any username/password combination will be accepted while authentication is disabled.
enabled: no
# - REQUIRED: Where to store the user database
db_file: ${DATA_DIR}/udata.db
# - The secret key that will be used to sign JWT tokens.
# - If you change the key after signing tokens, those existing tokens will be invalid
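# - A strong random key can be generated with, for example:
# - `python3 -c 'import secrets; print(secrets.token_hex(32))'`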
sign_key: ${JWT_SIGN_PHRASE}
# - The algorithm to use for signing JWT tokens. The default is HS256.
# ** BE AWARE: if you change this after signing tokens, tokens signed with the old algorithm will be invalid
algorithm: HS256
# - How long an access token is valid for in minutes
expire_after: 60
locks:
# - Locks have been changed to use asyncio.BoundedSemaphore. No more file locking.
gpu:
# - Max number of parallel inference requests running on the GPU (Default: 4)
max: 6
cpu:
# - Default: 4
max: 12
tpu:
# - For TPU, unexpected results may occur when max > 1, YMMV.
# - Default: 1
max: 1
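# - Conceptually, each lock above behaves like a bounded semaphore
# - (a sketch, not the server's actual code):
#     sem = asyncio.BoundedSemaphore(6)  # gpu max: 6
#     async with sem:                    # a 7th concurrent request waits here
#         ...                            # run inference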
logging:
# - Logging levels are: debug, info, warning, error, critical
# - Root logging level
level: ${ML_INSTALL_LOGGING_LEVEL}
# - Try to sanitize tokens, keys, passwords, usernames, hostnames, and IP addresses from logs
sanitize:
enabled: yes
replacement_str: <sanitized>
# - Log to console aka stdout
console:
enabled: ${ML_INSTALL_LOGGING_CONSOLE_ENABLED}
# - Per module logging level
#level: debug
syslog:
enabled: ${ML_INSTALL_LOGGING_SYSLOG_ENABLED}
#level: debug
address: ${ML_INSTALL_LOGGING_SYSLOG_ADDRESS}
# - Log to file
file:
enabled: ${ML_INSTALL_LOGGING_FILE_ENABLED}
#level: debug
# - Directory where log files will be stored
path: ${LOGGING_DIR}
# - File name for the log file
file_name: zomi_server.log
# - Override log file owner and group
# user:
# group:
# - Rudimentary color detection
color:
# - Enable or disable color detection of a detected object.
# - The bounding box is cropped and analyzed to extract up to the top_n colors
enabled: no
# - Number of colors to return, default: 5
top_n: 5
# - color naming specification to use (html4, css2, css21, css3), default: html4
spec: html4
# - List of labels to run color detection on. If none are listed, all labels use color detection.
labels:
- car
- truck
- bus
- motorcycle
- bicycle
# - Define what models the server will offer, name is case-insensitive and preserves spaces.
models:
# OpenCV DarkNet Model Example
- name: YOLOv4 # Name is lower-cased with spaces preserved ('yolov4'); used in HTTP requests to identify the model
enabled: no
description: "YOLOv4 DarkNet pretrained model"
#type_of: object # Default: object. Options: object, face, alpr
# - Framework to use: opencv, trt, ort, torch, coral, http, face_recognition, alpr
# - trt = TensorRT, ort = ONNX Runtime
framework: opencv
# - sub_framework selects which backend the framework will use
# - opencv can use darknet or onnx (although the ort framework is faster)
sub_framework: darknet
# - cpu/gpu
# NOTE: gpu requires OpenCV to be compiled with CUDA/cuDNN support!!!
processor: cpu
# - Which GPU to use, default: 0
#gpu_idx: 0
# - Use experimental half precision floating point (FP16) for inference with CUDA
#cuda_fp_16: no
# - Type of model output (helps to process output into scores, bboxes, class ids)
#output_type: yolov4 # WIP/NOT IMPLEMENTED
# - The model file (input) is required for models loaded from disk.
# - A config file is optional; only .weights input requires one.
input: "${MODEL_DIR}/yolo/yolov4_new.weights" # Optional. This is the model file itself.
config: "${MODEL_DIR}/yolo/yolov4_new.cfg" # Optional (.weights requires a .cfg; .onnx and .tflite do not).
#classes: "${MODEL_DIR}/coco.names" # Optional. Default is COCO 2017 (80) classes.
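# - A classes file is plain text with one label per line, e.g.:
#     person
#     bicycle
#     car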
# - Image will be resized to these dimensions before being passed to the model.
# - Match the input size the model was trained on (416x416? 512x512? 1024x1024?)
height: 512 # Optional. Defaults to 416.
width: 512 # Optional. Defaults to 416.
# - Square the image by zero-padding the shorter side to match the longer side before resize
# - e.g. 1920x1080 -> 1920x1920 with a black background where the new pixels are (letterboxing)
square: no # Optional. Defaults to False.
# - Override global color detection
detect_color: no # Optional. Defaults to False.
detection_options:
# - Confidence threshold for detection (recommended 0.2-0.5)
confidence: 0.2
# - Non-Max Suppression (NMS) threshold; lower values filter out more overlapping bounding boxes.
nms: 0.4
# PyTorch Example (very rudimentary, basics plumbed in)
- name: TORch tESt # lower-cased with spaces preserved -> 'torch test'
description: testing pretrained torch model
enabled: no
framework: torch
# - Use a pretrained model with weights, or configure 'input' to point at your own model and classes
# - NOTE: if pretrained is enabled, input, classes and num_classes are ignored
# - pretrained 'name' options: accurate|fast|default|high_performance|low_performance
pretrained:
enabled: yes
name: default
#input: /path/to/model.pt
#classes: /path/to/classes/file.txt
# - Number of classes including background (only for torch framework)
#num_classes: 80
type_of: object
processor: gpu
# - If using multiple GPUs, set the index of the GPU to use. Ignored if `processor` is not `gpu`.
# - To get the index and name of each device:
# - `python3 -c 'import torch; print(f"Available GPUs: {torch.cuda.device_count()}"); [print(f"index: {i} - {torch.cuda.get_device_name(i)}") for i in range(torch.cuda.device_count())]'`
# ** NOTE: The index is zero based, so the first GPU is 0, second is 1, etc.
gpu_idx: 0
detection_options:
confidence: 0.2
nms:
enabled: yes
threshold: 0.65
# - TPU Model Example [pre-built google pycoral only supports Python 3.7-3.9]
# - There is a 3.10 prebuilt available from a user on GitHub, but it is not official.
# - You can also build it yourself for python 3.11+.
# - See https://github.com/google-coral/pycoral/issues/85 for 3.10 prebuilt repo and build instructions (including required hacks)
- name: tpu
description: "SSD MobileNet V2 TensorFlow2 trained"
enabled: no
framework: coral
type_of: object
# - Will always be tpu
processor: tpu
input: "${MODEL_DIR}/coral_tpu/tf2_ssd_mobilenet_v2_coco17_ptq_edgetpu.tflite"
# - All of the included TPU object detection models require the 90-label COCO dataset
# - See https://coral.ai/models/object-detection/
classes: "${MODEL_DIR}/coral_tpu/coco-labels-paper.txt"
detection_options:
# - Non-Max Suppression (NMS) threshold; lower values filter out more overlapping bounding boxes.
# - NMS can be enabled/disabled for TPU models (ort and trt models support this as well)
nms:
enabled: yes
threshold: 0.35
confidence: 0.2
# TensorRT Model Example (User must install TensorRT and compile their engine model)
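# - A common way to build an engine is from an ONNX export with trtexec
# - (a sketch; paths and flags depend on your TensorRT version):
# - `trtexec --onnx=yolo_nas_s.onnx --saveEngine=yolo_nas_s.trt --fp16`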
- name: yolo-nas-s trt
enabled: no
description: "TensorRT optimized YOLO-NAS-S pretrained model"
input: "/shared/models/yolo/yolo_nas_s.trt"
#gpu_idx: 0
framework: trt
type_of: object
# - Only ort and trt support output_type
# - Tells the server how to process the output from the model into confidence, bounding box, and class id
output_type: yolonas
# - Make sure to set the proper model input size or it will throw errors!
height: 640
width: 640
detection_options:
confidence: 0.31
# ort and trt support nms enabled/disabled (YOLO v10 does not need NMS)
nms:
enabled: yes
threshold: 0.44
# ONNX Runtime Example
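# - A YOLOv8 .pt checkpoint can be exported to ONNX with the ultralytics CLI
# - (a sketch; verify against the ultralytics docs for your version):
# - `yolo export model=yolov8s.pt format=onnx`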
- name: yolov8s onnx
description: "Ultralytics YOLO v8s pretrained ONNX model on onnxruntime"
enabled: no
# - Use onnxruntime backend: ort
framework: ort
type_of: object
processor: gpu
# gpu_idx: 0
# - Possible ROCm support *WIP*
# gpu_framework: cuda
input: "/shared/models/yolo/yolov8s.onnx"
#classes: path/to/classes.file
# - Only ort and trt support output_type
# - Tells the server how to process the output from the model into confidence, bounding box, and class id
# - [yolonas, yolov8, yolov10]
output_type: yolov8
# ** NOTE: MAKE SURE to set the proper model input size or it will throw errors!
height: 640
width: 640
detection_options:
confidence: 0.33
# ort and trt support nms enabled/disabled (YOLO v10 does not need NMS)
nms:
enabled: yes
threshold: 0.44
# AWS Rekognition Example
- name: aws
description: "AWS Rekognition remote HTTP detection (PAID per request!)"
enabled: no
# - AWS credentials
aws_access_key_id:
aws_secret_access_key:
region_name:
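# - Hypothetical example values:
#aws_access_key_id: AKIAXXXXXXXXXXXXXXXX
#aws_secret_access_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#region_name: us-east-1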
framework: http
sub_framework: rekognition
type_of: object
processor: none
detection_options:
confidence: 0.4455
# face-recognition Example
- name: dlib face
enabled: no
description: "dlib face detection/recognition model"
type_of: face
framework: face_recognition
# - These options only apply when the model is
# - used for training faces to be recognized
training_options:
# - 'cnn' is more accurate but slower on CPUs. 'hog' is faster but less accurate
# ** NOTE: if you use cnn here you MUST use cnn for detection
model: cnn
# - How many times to upsample the image looking for faces.
# - Higher numbers find smaller faces but take longer.
upsample_times: 1
# - How many times to re-sample the face when calculating encoding.
# - Higher is more accurate, but slower (e.g. 100 is 100x slower)
num_jitters: 1
# - Max width of image to feed the model (scaling applied)
max_size: 600
# - Source dir where known faces are stored.
dir: "${DATA_DIR}/face_data/known"
# - An unknown face is a detected face that does not match any known faces
# - The cropped unknown face can potentially be used to train a new face, depending on its quality
unknown_faces:
enabled: yes
# - The label to give an unknown face
label_as: "Unknown"
# - When cropping an unknown face from the frame, how many pixels to add to each side
leeway_pixels: 10
# - Where to save unknown faces
dir: "${DATA_DIR}/face_data/unknown"
detection_options:
# - 'cnn' is more accurate but slower on CPUs. 'hog' is faster but less accurate
model: cnn
# - Confidence threshold for face DETECTION
confidence: 0.5
# - Confidence threshold for face RECOGNITION (comparing the detected face to known faces)
recognition_threshold: 0.6
# - How many times to upsample the image looking for faces.
# - Higher numbers find smaller faces but take longer.
upsample_times: 1
# - How many times to re-sample the face when calculating encoding.
# - Higher is more accurate, but slower (e.g. 100 is 100x slower)
num_jitters: 1
# - Max width of image to feed the model (scaling applied)
max_size: 600
# - OpenALPR local binary Example
- name: "openalpr cpu"
# ** NOTE:
# - You need to understand how to skew and warp images to make a plate readable by OCR; see the openalpr docs.
# - You can also customize openalpr config files and only run them on certain cameras.
description: "openalpr local SDK (binary) model with a config file for CPU"
enabled: no
type_of: alpr
framework: alpr
processor: none
# - ALPR sub-framework to use: openalpr, platerecognizer
sub_framework: openalpr
detection_options:
binary_path: alpr
# - The default config file uses CPU, no need to make a custom config file
binary_params:
confidence: 0.5
max_size: 600
# - OpenALPR Model Example (Shows how to use a custom openalpr config file)
- name: "openalpr gpu"
description: "openalpr local SDK (binary) with a config file to use CUDA GPU"
enabled: no
type_of: alpr
framework: alpr
sub_framework: openalpr
# - The openalpr config file controls the processor; you can put none, cpu, gpu, or tpu here.
processor: none
detection_options:
# - Path to alpr binary (default: alpr)
binary_path: alpr
# - Make a config file that uses the gpu instead of cpu
binary_params: "--config /etc/alpr/openalpr-gpu.conf"
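# - A sketch of what /etc/alpr/openalpr-gpu.conf might contain (key names
# - from the openalpr config; verify for your build):
#     hardware_acceleration = 1
#     gpu_id = 0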
confidence: 0.5
max_size: 600
# - Plate Recognizer Example
- name: 'Platerec'
enabled: no
type_of: alpr
# - Even though it is ALPR, it uses HTTP for detection
framework: http
sub_framework: plate_recognizer
api_type: cloud
#api_url: "https://api.platerecognizer.com/v1/plate-reader/"
api_key: ${PLATEREC_API_KEY}
detection_options:
# - Only look in certain countries or regions within a country.
# - See platerecognizer docs for more info
#regions: ['ca', 'us']
stats: no
min_dscore: 0.5
min_score: 0.5
max_size: 1600
# - For advanced users, you can pass in any of the options from the API docs
#payload:
#regions: ['us']
#camera_id: 12
#config:
#region: 'strict'
#mode: 'fast'