/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_
#define TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_

// Useful functions for writing tests.

#include <cstdint>
#include <limits>

#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {
namespace testing {

constexpr int kOfflinePlannerHeaderSize = 3;

struct NodeConnection {
  std::initializer_list<int32_t> input;
  std::initializer_list<int32_t> output;
};

// A simple operator that returns the median of the input along with the
// number of times the kernel has been invoked. The implementation below is
// deliberately complicated, just to demonstrate how kernel memory planning
// works.
class SimpleStatefulOp {
  static constexpr int kBufferNotAllocated = 0;
  // Inputs:
  static constexpr int kInputTensor = 0;
  // Outputs:
  static constexpr int kMedianTensor = 0;
  static constexpr int kInvokeCount = 1;
  struct OpData {
    int* invoke_count = nullptr;
    int sorting_buffer = kBufferNotAllocated;
  };

 public:
  static const TfLiteRegistration* getRegistration();
  static TfLiteRegistration* GetMutableRegistration();
  static void* Init(TfLiteContext* context, const char* buffer, size_t length);
  static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
  static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);
};

class MockCustom {
 public:
  static const TfLiteRegistration* getRegistration();
  static TfLiteRegistration* GetMutableRegistration();
  static void* Init(TfLiteContext* context, const char* buffer, size_t length);
  static void Free(TfLiteContext* context, void* buffer);
  static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
  static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);

  static bool freed_;
};

// A simple operator for testing multiple inputs. It returns the sum of its
// inputs.
class MultipleInputs {
 public:
  static const TfLiteRegistration* getRegistration();
  static TfLiteRegistration* GetMutableRegistration();
  static void* Init(TfLiteContext* context, const char* buffer, size_t length);
  static void Free(TfLiteContext* context, void* buffer);
  static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
  static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);

  static bool freed_;
};

// Returns an Op Resolver that can be used in the testing code.
AllOpsResolver GetOpResolver();

// Returns a simple example flatbuffer TensorFlow Lite model. Contains 1 input,
// 1 layer of weights, 1 output Tensor, and 1 operator.
const Model* GetSimpleMockModel();

// Returns a flatbuffer TensorFlow Lite model with more inputs, variable
// tensors, and operators.
const Model* GetComplexMockModel();

// Returns a simple flatbuffer model with two branches.
const Model* GetSimpleModelWithBranch();

// Returns a simple example flatbuffer TensorFlow Lite model. Contains 3 inputs,
// 1 output Tensor, and 1 operator.
const Model* GetSimpleMultipleInputsModel();

// Returns a simple flatbuffer model with offline-planned tensors.
// @param[in]       num_tensors           Number of tensors in the model.
// @param[in]       metadata_buffer       Metadata for the offline planner.
// @param[in]       node_conn             List of connections, i.e. operators,
//                                        in the model.
// @param[in]       num_conns             Number of connections.
// @param[in]       num_subgraph_inputs   How many of the input tensors are in
//                                        the subgraph inputs. The default value
//                                        of 0 means all of the input tensors
//                                        are in the subgraph input list. There
//                                        must be at least 1 input tensor in the
//                                        subgraph input list.
const Model* GetModelWithOfflinePlanning(int num_tensors,
                                         const int32_t* metadata_buffer,
                                         NodeConnection* node_conn,
                                         int num_conns,
                                         int num_subgraph_inputs = 0);
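
// Example usage (an illustrative sketch; the concrete values are
// hypothetical). The metadata buffer begins with kOfflinePlannerHeaderSize
// entries -- assumed here to be a version, a subgraph index, and an offset
// count -- followed by one arena byte offset per tensor:
//
//   int32_t metadata[] = {1, 0, 3,     // header
//                         0, 48, 96};  // byte offsets for tensors 0..2
//   NodeConnection node_list[] = {
//       {{0}, {1}},  // op 0 reads tensor 0, writes tensor 1
//       {{1}, {2}},  // op 1 reads tensor 1, writes tensor 2
//   };
//   const Model* model =
//       GetModelWithOfflinePlanning(3, metadata, node_list, 2);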

// Returns a flatbuffer model with a `simple_stateful_op`.
const Model* GetSimpleStatefulModel();

// Builds a one-dimensional flatbuffer tensor of the given size.
const Tensor* Create1dFlatbufferTensor(int size, bool is_variable = false);

// Builds a one-dimensional flatbuffer tensor of the given size with
// quantization metadata.
const Tensor* CreateQuantizedFlatbufferTensor(int size);

// Creates a one-dimensional tensor with no quantization metadata.
const Tensor* CreateMissingQuantizationFlatbufferTensor(int size);

// Creates a vector of flatbuffer buffers.
const flatbuffers::Vector<flatbuffers::Offset<Buffer>>*
CreateFlatbufferBuffers();

// Performs a simple string comparison without requiring the standard C
// library.
int TestStrcmp(const char* a, const char* b);

// Wrapper to forward kernel errors to the interpreter's error reporter.
void ReportOpError(struct TfLiteContext* context, const char* format, ...);

// Populates the given TfLiteContext with the supplied array of tensors so
// that kernels under test can look them up.
void PopulateContext(TfLiteTensor* tensors, int tensors_size,
                     TfLiteContext* context);

// Creates a TfLiteIntArray from an array of ints. The first element in the
// supplied array must be the size of the array expressed as an int.
TfLiteIntArray* IntArrayFromInts(const int* int_array);
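//
// Example usage of IntArrayFromInts (an illustrative sketch): the leading
// element is the count of the elements that follow, so {2, 1, 3} describes
// the two dims {1, 3}.
//   int dims_data[] = {2, 1, 3};
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);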

// Creates a TfLiteFloatArray from an array of floats. The first element in the
// supplied array must be the size of the array expressed as a float.
TfLiteFloatArray* FloatArrayFromFloats(const float* floats);
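//
// Example usage of FloatArrayFromFloats (an illustrative sketch):
// {2.0f, 0.5f, 4.0f} describes the two floats {0.5f, 4.0f}.
//   float scales_data[] = {2.0f, 0.5f, 4.0f};
//   TfLiteFloatArray* scales = FloatArrayFromFloats(scales_data);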

template <typename T>
TfLiteTensor CreateTensor(const T* data, TfLiteIntArray* dims,
                          const bool is_variable = false) {
  TfLiteTensor result;
  result.dims = dims;
  result.params = {};
  result.quantization = {kTfLiteNoQuantization, nullptr};
  result.is_variable = is_variable;
  result.allocation_type = kTfLiteMemNone;
  result.type = typeToTfLiteType<T>();
  // Const cast is used to allow passing in const and non-const arrays within a
  // single CreateTensor method. A const array should be used for immutable
  // input tensors and a non-const array should be used for mutable and output
  // tensors.
  result.data.data = const_cast<T*>(data);
  result.bytes = ElementCount(*dims) * sizeof(T);
  return result;
}
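
// Example usage of CreateTensor (an illustrative sketch):
//   int dims_data[] = {2, 2, 3};  // rank 2, shape {2, 3}
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);
//   float data[6] = {1, 2, 3, 4, 5, 6};
//   TfLiteTensor tensor = CreateTensor(data, dims);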

template <typename T>
TfLiteTensor CreateQuantizedTensor(const T* data, TfLiteIntArray* dims,
                                   const float scale, const int zero_point = 0,
                                   const bool is_variable = false) {
  TfLiteTensor result = CreateTensor(data, dims, is_variable);
  result.params = {scale, zero_point};
  result.quantization = {kTfLiteAffineQuantization, nullptr};
  return result;
}

template <typename T>
TfLiteTensor CreateQuantizedTensor(const float* input, T* quantized,
                                   TfLiteIntArray* dims, float scale,
                                   int zero_point, bool is_variable = false) {
  int input_size = ElementCount(*dims);
  tflite::Quantize(input, quantized, input_size, scale, zero_point);
  return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable);
}
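
// Example usage (an illustrative sketch): quantizes four floats into an int8
// scratch buffer with scale 0.5 and zero point 0.
//   int dims_data[] = {1, 4};
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);
//   const float values[4] = {-1.0f, 0.0f, 0.5f, 1.0f};
//   int8_t quantized[4];
//   TfLiteTensor tensor = CreateQuantizedTensor(values, quantized, dims,
//                                               /*scale=*/0.5f,
//                                               /*zero_point=*/0);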

// Quantizes an int32_t bias tensor, where the quantization scale is
// input_scale * weights_scale, per the TFLite quantization convention.
TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale,
                                       bool is_variable = false);

// Quantizes an int32_t bias tensor per channel, where each channel's scale is
// input_scale multiplied by the corresponding weight scale.
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
    const float* input, int32_t* quantized, TfLiteIntArray* dims,
    float input_scale, float* weight_scales, float* scales, int* zero_points,
    TfLiteAffineQuantization* affine_quant, int quantized_dimension,
    bool is_variable = false);

// Creates a symmetrically quantized per-channel int8 tensor from the float
// input, using the caller-provided `scales`, `zero_points`, and
// `affine_quant` storage for the quantization metadata.
TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
    const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
    int* zero_points, TfLiteAffineQuantization* affine_quant,
    int quantized_dimension, bool is_variable = false);

// Returns the number of tensors in the default subgraph for a tflite::Model.
size_t GetModelTensorCount(const Model* model);

// Derives the quantization scaling factor from a min and max range.
template <typename T>
inline float ScaleFromMinMax(const float min, const float max) {
  return (max - min) /
         static_cast<float>((std::numeric_limits<T>::max() * 1.0) -
                            std::numeric_limits<T>::min());
}
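
// For example, with T = int8_t and the range [-1.0f, 1.0f], the denominator
// is 127 - (-128) = 255, so:
//   ScaleFromMinMax<int8_t>(-1.0f, 1.0f) == 2.0f / 255.0f  // ~0.00784f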

// Derives the quantization zero point from a min and max range.
template <typename T>
inline int ZeroPointFromMinMax(const float min, const float max) {
  return static_cast<int>(std::numeric_limits<T>::min()) +
         static_cast<int>(-min / ScaleFromMinMax<T>(min, max) + 0.5f);
}
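
// For example, with T = int8_t and the range [-1.0f, 1.0f]:
//   ZeroPointFromMinMax<int8_t>(-1.0f, 1.0f)
//       == -128 + static_cast<int>(1.0f / (2.0f / 255.0f) + 0.5f)
//       == -128 + 128 == 0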

}  // namespace testing
}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_