1 /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 #ifndef TENSORFLOW_LITE_C_C_API_H_
16 #define TENSORFLOW_LITE_C_C_API_H_
17 
18 #include <stdarg.h>
19 #include <stdint.h>
20 #include <stdlib.h>
21 
22 #include "tensorflow/lite/c/c_api_types.h"  // IWYU pragma: export
23 
24 // --------------------------------------------------------------------------
25 /// C API for TensorFlow Lite.
26 ///
27 /// The API leans towards simplicity and uniformity instead of convenience, as
28 /// most usage will be by language-specific wrappers. It provides largely the
29 /// same set of functionality as that of the C++ TensorFlow Lite `Interpreter`
30 /// API, but is useful for shared libraries where having a stable ABI boundary
31 /// is important.
32 ///
33 /// Conventions:
34 /// * We use the prefix TfLite for everything in the API.
35 /// * size_t is used to represent byte sizes of objects that are
36 ///   materialized in the address space of the calling process.
37 /// * int is used as an index into arrays.
38 ///
39 /// Usage:
40 /// <pre><code>
41 /// // Create the model and interpreter options.
42 /// TfLiteModel* model = TfLiteModelCreateFromFile("/path/to/model.tflite");
43 /// TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
44 /// TfLiteInterpreterOptionsSetNumThreads(options, 2);
45 ///
46 /// // Create the interpreter.
47 /// TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
48 ///
49 /// // Allocate tensors and populate the input tensor data.
50 /// TfLiteInterpreterAllocateTensors(interpreter);
51 /// TfLiteTensor* input_tensor =
52 ///     TfLiteInterpreterGetInputTensor(interpreter, 0);
53 /// TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
54 ///                            input.size() * sizeof(float));
55 ///
56 /// // Execute inference.
57 /// TfLiteInterpreterInvoke(interpreter);
58 ///
59 /// // Extract the output tensor data.
60 /// const TfLiteTensor* output_tensor =
///     TfLiteInterpreterGetOutputTensor(interpreter, 0);
62 /// TfLiteTensorCopyToBuffer(output_tensor, output.data(),
63 ///                          output.size() * sizeof(float));
64 ///
65 /// // Dispose of the model and interpreter objects.
66 /// TfLiteInterpreterDelete(interpreter);
67 /// TfLiteInterpreterOptionsDelete(options);
/// TfLiteModelDelete(model);
/// </code></pre>
69 
70 #ifdef __cplusplus
71 extern "C" {
72 #endif  // __cplusplus
73 
74 // --------------------------------------------------------------------------
75 // Opaque types used by the C API.
76 
77 // TfLiteModel wraps a loaded TensorFlow Lite model.
78 typedef struct TfLiteModel TfLiteModel;
79 
80 // TfLiteInterpreterOptions allows customized interpreter configuration.
81 typedef struct TfLiteInterpreterOptions TfLiteInterpreterOptions;
82 
83 // Allows delegation of nodes to alternative backends.
84 typedef struct TfLiteDelegate TfLiteDelegate;
85 
86 // TfLiteInterpreter provides inference from a provided model.
87 typedef struct TfLiteInterpreter TfLiteInterpreter;
88 
89 // A tensor in the interpreter system which is a wrapper around a buffer of
90 // data including a dimensionality (or NULL if not currently defined).
91 typedef struct TfLiteTensor TfLiteTensor;
92 
93 // --------------------------------------------------------------------------
94 // TfLiteVersion returns a string describing version information of the
95 // TensorFlow Lite library. TensorFlow Lite uses semantic versioning.
96 TFL_CAPI_EXPORT extern const char* TfLiteVersion(void);
97 
98 // Returns a model from the provided buffer, or null on failure.
99 TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreate(const void* model_data,
100                                                       size_t model_size);
101 
102 // Returns a model from the provided file, or null on failure.
103 TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateFromFile(
104     const char* model_path);
105 
106 // Destroys the model instance.
107 TFL_CAPI_EXPORT extern void TfLiteModelDelete(TfLiteModel* model);
108 
109 // Returns a new interpreter options instances.
110 TFL_CAPI_EXPORT extern TfLiteInterpreterOptions*
111 TfLiteInterpreterOptionsCreate();
112 
113 // Destroys the interpreter options instance.
114 TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsDelete(
115     TfLiteInterpreterOptions* options);
116 
117 // Sets the number of CPU threads to use for the interpreter.
118 TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetNumThreads(
119     TfLiteInterpreterOptions* options, int32_t num_threads);
120 
121 // Adds a delegate to be applied during `TfLiteInterpreter` creation.
122 //
123 // If delegate application fails, interpreter creation will also fail with an
124 // associated error logged.
125 //
126 // NOTE: The caller retains ownership of the delegate and should ensure that it
127 // remains valid for the duration of any created interpreter's lifetime.
128 TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddDelegate(
129     TfLiteInterpreterOptions* options, TfLiteDelegate* delegate);
130 
131 // Sets a custom error reporter for interpreter execution.
132 //
133 // * `reporter` takes the provided `user_data` object, as well as a C-style
134 //   format string and arg list (see also vprintf).
135 // * `user_data` is optional. If provided, it is owned by the client and must
136 //   remain valid for the duration of the interpreter lifetime.
137 TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetErrorReporter(
138     TfLiteInterpreterOptions* options,
139     void (*reporter)(void* user_data, const char* format, va_list args),
140     void* user_data);
141 
142 // Returns a new interpreter using the provided model and options, or null on
143 // failure.
144 //
145 // * `model` must be a valid model instance. The caller retains ownership of the
146 //   object, and can destroy it immediately after creating the interpreter; the
147 //   interpreter will maintain its own reference to the underlying model data.
148 // * `optional_options` may be null. The caller retains ownership of the object,
149 //   and can safely destroy it immediately after creating the interpreter.
150 //
151 // NOTE: The client *must* explicitly allocate tensors before attempting to
152 // access input tensor data or invoke the interpreter.
153 TFL_CAPI_EXPORT extern TfLiteInterpreter* TfLiteInterpreterCreate(
154     const TfLiteModel* model, const TfLiteInterpreterOptions* optional_options);
155 
156 // Destroys the interpreter.
157 TFL_CAPI_EXPORT extern void TfLiteInterpreterDelete(
158     TfLiteInterpreter* interpreter);
159 
160 // Returns the number of input tensors associated with the model.
161 TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetInputTensorCount(
162     const TfLiteInterpreter* interpreter);
163 
164 // Returns the tensor associated with the input index.
// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(interpreter)
166 TFL_CAPI_EXPORT extern TfLiteTensor* TfLiteInterpreterGetInputTensor(
167     const TfLiteInterpreter* interpreter, int32_t input_index);
168 
169 // Resizes the specified input tensor.
170 //
171 // NOTE: After a resize, the client *must* explicitly allocate tensors before
172 // attempting to access the resized tensor data or invoke the interpreter.
// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(interpreter)
174 TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterResizeInputTensor(
175     TfLiteInterpreter* interpreter, int32_t input_index, const int* input_dims,
176     int32_t input_dims_size);
177 
178 // Updates allocations for all tensors, resizing dependent tensors using the
179 // specified input tensor dimensionality.
180 //
181 // This is a relatively expensive operation, and need only be called after
182 // creating the graph and/or resizing any inputs.
183 TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterAllocateTensors(
184     TfLiteInterpreter* interpreter);
185 
186 // Runs inference for the loaded graph.
187 //
188 // Before calling this function, the caller should first invoke
189 // TfLiteInterpreterAllocateTensors() and should also set the values for the
190 // input tensors.  After successfully calling this function, the values for the
191 // output tensors will be set.
192 //
193 // NOTE: It is possible that the interpreter is not in a ready state to
194 // evaluate (e.g., if AllocateTensors() hasn't been called, or if a
195 // ResizeInputTensor() has been performed without a subsequent call to
196 // AllocateTensors()).
197 //
198 //   If the (experimental!) delegate fallback option was enabled in the
199 //   interpreter options, then the interpreter will automatically fall back to
200 //   not using any delegates if execution with delegates fails. For details, see
201 //   TfLiteInterpreterOptionsSetEnableDelegateFallback in c_api_experimental.h.
202 //
203 // Returns one of the following status codes:
204 //  - kTfLiteOk: Success. Output is valid.
205 //  - kTfLiteDelegateError: Execution with delegates failed, due to a problem
206 //    with the delegate(s). If fallback was not enabled, output is invalid.
207 //    If fallback was enabled, this return value indicates that fallback
208 //    succeeded, the output is valid, and all delegates previously applied to
209 //    the interpreter have been undone.
210 //  - kTfLiteApplicationError: Same as for kTfLiteDelegateError, except that
211 //    the problem was not with the delegate itself, but rather was
212 //    due to an incompatibility between the delegate(s) and the
213 //    interpreter or model.
//  - kTfLiteError: Unexpected/runtime failure. Output is invalid.
216 TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterInvoke(
217     TfLiteInterpreter* interpreter);
218 
219 // Returns the number of output tensors associated with the model.
220 TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetOutputTensorCount(
221     const TfLiteInterpreter* interpreter);
222 
223 // Returns the tensor associated with the output index.
// REQUIRES: 0 <= output_index < TfLiteInterpreterGetOutputTensorCount(interpreter)
225 //
// NOTE: The shape and underlying data buffer for output tensors may not
// be available until after the output tensor has been both sized and allocated.
228 // In general, best practice is to interact with the output tensor *after*
229 // calling TfLiteInterpreterInvoke().
230 TFL_CAPI_EXPORT extern const TfLiteTensor* TfLiteInterpreterGetOutputTensor(
231     const TfLiteInterpreter* interpreter, int32_t output_index);
232 
233 // --------------------------------------------------------------------------
234 // TfLiteTensor wraps data associated with a graph tensor.
235 //
236 // Note that, while the TfLiteTensor struct is not currently opaque, and its
237 // fields can be accessed directly, these methods are still convenient for
238 // language bindings. In the future the tensor struct will likely be made opaque
239 // in the public API.
240 
241 // Returns the type of a tensor element.
242 TFL_CAPI_EXPORT extern TfLiteType TfLiteTensorType(const TfLiteTensor* tensor);
243 
244 // Returns the number of dimensions that the tensor has.
245 TFL_CAPI_EXPORT extern int32_t TfLiteTensorNumDims(const TfLiteTensor* tensor);
246 
247 // Returns the length of the tensor in the "dim_index" dimension.
// REQUIRES: 0 <= dim_index < TfLiteTensorNumDims(tensor)
249 TFL_CAPI_EXPORT extern int32_t TfLiteTensorDim(const TfLiteTensor* tensor,
250                                                int32_t dim_index);
251 
252 // Returns the size of the underlying data in bytes.
253 TFL_CAPI_EXPORT extern size_t TfLiteTensorByteSize(const TfLiteTensor* tensor);
254 
255 // Returns a pointer to the underlying data buffer.
256 //
// NOTE: The result may be null if tensors have not yet been allocated, e.g.,
// if the Tensor has just been created or resized and
// `TfLiteInterpreterAllocateTensors()` has yet to be called, or if the output
// tensor is dynamically sized and the interpreter hasn't been invoked.
261 TFL_CAPI_EXPORT extern void* TfLiteTensorData(const TfLiteTensor* tensor);
262 
263 // Returns the (null-terminated) name of the tensor.
264 TFL_CAPI_EXPORT extern const char* TfLiteTensorName(const TfLiteTensor* tensor);
265 
266 // Returns the parameters for asymmetric quantization. The quantization
267 // parameters are only valid when the tensor type is `kTfLiteUInt8` and the
268 // `scale != 0`. Quantized values can be converted back to float using:
269 //    real_value = scale * (quantized_value - zero_point);
270 TFL_CAPI_EXPORT extern TfLiteQuantizationParams TfLiteTensorQuantizationParams(
271     const TfLiteTensor* tensor);
272 
273 // Copies from the provided input buffer into the tensor's buffer.
274 // REQUIRES: input_data_size == TfLiteTensorByteSize(tensor)
275 TFL_CAPI_EXPORT extern TfLiteStatus TfLiteTensorCopyFromBuffer(
276     TfLiteTensor* tensor, const void* input_data, size_t input_data_size);
277 
278 // Copies to the provided output buffer from the tensor's buffer.
279 // REQUIRES: output_data_size == TfLiteTensorByteSize(tensor)
280 TFL_CAPI_EXPORT extern TfLiteStatus TfLiteTensorCopyToBuffer(
281     const TfLiteTensor* output_tensor, void* output_data,
282     size_t output_data_size);
283 
284 #ifdef __cplusplus
285 }  // extern "C"
286 #endif  // __cplusplus
287 
288 #endif  // TENSORFLOW_LITE_C_C_API_H_
289