-
Notifications
You must be signed in to change notification settings - Fork 1
/
inference.py
72 lines (54 loc) · 2.05 KB
/
inference.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import os
from openvino.inference_engine import IENetwork, IECore
class Network:
    """
    Load and store information for working with the Inference Engine,
    and any loaded models.
    """

    def __init__(self):
        # All attributes are populated by load_model(); None until then.
        self.plugin = None          # IECore instance
        self.network = None         # IENetwork (IR graph + weights)
        self.input_blob = None      # name of the single input layer
        self.output_blob = None     # name of the first output layer
        self.exec_network = None    # ExecutableNetwork loaded on the device
        self.infer_request = None   # NOTE(review): never assigned elsewhere in
                                    # this file; kept for interface parity

    def load_model(self, model, device="CPU", cpu_extension=None):
        """
        Load the model given in form of the OpenVINO IR files.

        model: path to the model's .xml file; the matching .bin weights
               file is assumed to sit next to it with the same stem.
        device: target device name (default "CPU").
        cpu_extension: optional path to a CPU extension library.

        Raises RuntimeError if the model has more than one input layer
        (only YOLOv3-style single-input topologies are supported).
        """
        # Initialize the plugin
        self.plugin = IECore()

        # BUGFIX: cpu_extension used to be accepted but silently ignored.
        # Register it so layers that need the extension can load on CPU.
        if cpu_extension and "CPU" in device:
            self.plugin.add_extension(cpu_extension, "CPU")

        # Load the Intermediate Representation files; the .bin path is
        # derived from the .xml path.
        model_xml = model
        model_bin = os.path.splitext(model_xml)[0] + ".bin"
        self.network = IENetwork(model=model_xml, weights=model_bin)
        self.network.batch_size = 1

        # Raise instead of assert so the check survives `python -O`.
        if len(self.network.inputs) != 1:
            raise RuntimeError(
                "Only single-input topologies are supported, "
                "got {} inputs".format(len(self.network.inputs))
            )

        # Load the IENetwork into the plugin
        self.exec_network = self.plugin.load_network(self.network, device)

        # Cache the input and output layer names for later requests.
        self.input_blob = next(iter(self.network.inputs))
        self.output_blob = next(iter(self.network.outputs))

    def get_input_shape(self):
        """Return the shape of the network's input layer."""
        return self.network.inputs[self.input_blob].shape

    def async_inference(self, frame):
        """Start an asynchronous inference request (id 0) on `frame`."""
        self.exec_network.start_async(request_id=0,
                                      inputs={self.input_blob: frame})

    def wait(self):
        """Block until request 0 completes and return its status code."""
        return self.exec_network.requests[0].wait(-1)

    def extract_output(self):
        """Return the output blobs of completed request 0."""
        return self.exec_network.requests[0].outputs