bitmap_helpers_impl.h
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CONTRIB_LITE_EXAMPLES_LABEL_IMAGE_BITMAP_HELPERS_IMPL_H_
#define TENSORFLOW_CONTRIB_LITE_EXAMPLES_LABEL_IMAGE_BITMAP_HELPERS_IMPL_H_

#include "tensorflow/contrib/lite/examples/label_image/label_image.h"

#include "tensorflow/contrib/lite/builtin_op_data.h"
#include "tensorflow/contrib/lite/interpreter.h"
#include "tensorflow/contrib/lite/kernels/register.h"
#include "tensorflow/contrib/lite/string_util.h"
#include "tensorflow/contrib/lite/version.h"

namespace tflite {
namespace label_image {

template <class T>
void resize(T* out, uint8_t* in, int image_height, int image_width,
            int image_channels, int wanted_height, int wanted_width,
            int wanted_channels, Settings* s) {
  int number_of_pixels = image_height * image_width * image_channels;
  std::unique_ptr<Interpreter> interpreter(new Interpreter);

  int base_index = 0;

  // two inputs: input and new_sizes
  interpreter->AddTensors(2, &base_index);
  // one output
  interpreter->AddTensors(1, &base_index);

  // set input and output tensors
  interpreter->SetInputs({0, 1});
  interpreter->SetOutputs({2});

  // set parameters of tensors
  TfLiteQuantizationParams quant;
  interpreter->SetTensorParametersReadWrite(
      0, kTfLiteFloat32, "input",
      {1, image_height, image_width, image_channels}, quant);
  interpreter->SetTensorParametersReadWrite(1, kTfLiteInt32, "new_size", {2},
                                            quant);
  interpreter->SetTensorParametersReadWrite(
      2, kTfLiteFloat32, "output",
      {1, wanted_height, wanted_width, wanted_channels}, quant);

  ops::builtin::BuiltinOpResolver resolver;
  const TfLiteRegistration* resize_op =
      resolver.FindOp(BuiltinOperator_RESIZE_BILINEAR, 1);
  auto* params = reinterpret_cast<TfLiteResizeBilinearParams*>(
      malloc(sizeof(TfLiteResizeBilinearParams)));
  params->align_corners = false;
  interpreter->AddNodeWithParameters({0, 1}, {2}, nullptr, 0, params, resize_op,
                                     nullptr);

  interpreter->AllocateTensors();

  // fill input image
  // in[] are integers, cannot do memcpy() directly
  auto input = interpreter->typed_tensor<float>(0);
  for (int i = 0; i < number_of_pixels; i++) {
    input[i] = in[i];
    input[i] = input[i] / 255.;
  }

  // fill new_sizes
  interpreter->typed_tensor<int>(1)[0] = wanted_height;
  interpreter->typed_tensor<int>(1)[1] = wanted_width;

  interpreter->Invoke();

  auto output = interpreter->typed_tensor<float>(2);
  auto output_number_of_pixels = wanted_height * wanted_width * wanted_channels;

  for (int i = 0; i < output_number_of_pixels; i++) {
    if (s->input_floating)
      // out[i] = (output[i] - s->input_mean) / s->input_std;
      out[i] = output[i];
    else
      out[i] = (uint8_t)output[i];
  }
}

}  // namespace label_image
}  // namespace tflite

#endif  // TENSORFLOW_CONTRIB_LITE_EXAMPLES_LABEL_IMAGE_BITMAP_HELPERS_IMPL_H_
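
For reference, a caller might use this template as in the minimal sketch below. It is illustrative only: the function name resize_example, the 224x224x3 target shape, and the assumption that the image has already been decoded into a uint8_t RGB buffer are hypothetical; only resize() and Settings come from this example's headers.

// Hypothetical usage sketch (not part of this header).
// Resizes a decoded RGB image into a float buffer sized for a 224x224x3
// model input; Settings::input_floating controls the output conversion
// inside resize().
#include <cstdint>
#include <vector>

#include "tensorflow/contrib/lite/examples/label_image/bitmap_helpers_impl.h"

void resize_example(tflite::label_image::Settings* s,
                    std::vector<uint8_t>& rgb,  // decoded HxWx3 image data
                    int image_height, int image_width) {
  const int image_channels = 3;
  const int wanted_height = 224;   // illustrative target shape
  const int wanted_width = 224;
  const int wanted_channels = 3;

  std::vector<float> out(wanted_height * wanted_width * wanted_channels);
  tflite::label_image::resize<float>(
      out.data(), rgb.data(), image_height, image_width, image_channels,
      wanted_height, wanted_width, wanted_channels, s);
  // out now holds the bilinearly resized image, ready to copy into a
  // model's input tensor.
}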