/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/c/c_api_types.h"

#ifndef TF_LITE_STATIC_MEMORY
#include <stdlib.h>
#include <string.h>
#endif  // TF_LITE_STATIC_MEMORY

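// Returns the number of bytes needed to store a TfLiteIntArray holding `size`
// elements, i.e. the struct header plus `size` int entries.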
int TfLiteIntArrayGetSizeInBytes(int size) {
  static TfLiteIntArray dummy;
  return sizeof(dummy) + sizeof(dummy.data[0]) * size;
}

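// Returns 1 if the two arrays are element-wise equal (or are the same
// pointer), 0 otherwise. A NULL array only compares equal to NULL.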
int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) {
  if (a == b) return 1;
  if (a == NULL || b == NULL) return 0;
  return TfLiteIntArrayEqualsArray(a, b->size, b->data);
}

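// Returns 1 if `a` holds exactly the `b_size` values in `b_data`. A NULL
// array is treated as empty.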
int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
                              const int b_data[]) {
  if (a == NULL) return (b_size == 0);
  if (a->size != b_size) return 0;
  int i = 0;
  for (; i < a->size; i++)
    if (a->data[i] != b_data[i]) return 0;
  return 1;
}

#ifndef TF_LITE_STATIC_MEMORY

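// Allocates a TfLiteIntArray of `size` elements on the heap. Returns NULL if
// the requested size is invalid or the allocation fails.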
TfLiteIntArray* TfLiteIntArrayCreate(int size) {
  int alloc_size = TfLiteIntArrayGetSizeInBytes(size);
  if (alloc_size <= 0) return NULL;
  TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size);
  if (!ret) return ret;
  ret->size = size;
  return ret;
}

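// Returns a newly allocated copy of `src`, or NULL if `src` is NULL or the
// allocation fails.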
TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) {
  if (!src) return NULL;
  TfLiteIntArray* ret = TfLiteIntArrayCreate(src->size);
  if (ret) {
    memcpy(ret->data, src->data, src->size * sizeof(int));
  }
  return ret;
}

void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); }

#endif  // TF_LITE_STATIC_MEMORY

int TfLiteFloatArrayGetSizeInBytes(int size) {
  static TfLiteFloatArray dummy;
  return sizeof(dummy) + sizeof(dummy.data[0]) * size;
}

#ifndef TF_LITE_STATIC_MEMORY

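// Allocates a TfLiteFloatArray of `size` elements on the heap. Returns NULL
// if the allocation fails.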
TfLiteFloatArray* TfLiteFloatArrayCreate(int size) {
  TfLiteFloatArray* ret =
      (TfLiteFloatArray*)malloc(TfLiteFloatArrayGetSizeInBytes(size));
  if (!ret) return NULL;
  ret->size = size;
  return ret;
}

void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); }

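// Frees the tensor's data buffer, but only when the runtime owns it
// (kTfLiteDynamic or kTfLitePersistentRo allocations). The pointer is always
// reset to NULL.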
void TfLiteTensorDataFree(TfLiteTensor* t) {
  if (t->allocation_type == kTfLiteDynamic ||
      t->allocation_type == kTfLitePersistentRo) {
    free(t->data.raw);
  }
  t->data.raw = NULL;
}

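// Releases affine quantization parameters (the scale and zero_point arrays
// plus the parameter struct itself) and resets the field to no quantization.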
void TfLiteQuantizationFree(TfLiteQuantization* quantization) {
  if (quantization->type == kTfLiteAffineQuantization) {
    TfLiteAffineQuantization* q_params =
        (TfLiteAffineQuantization*)(quantization->params);
    if (q_params->scale) {
      TfLiteFloatArrayFree(q_params->scale);
      q_params->scale = NULL;
    }
    if (q_params->zero_point) {
      TfLiteIntArrayFree(q_params->zero_point);
      q_params->zero_point = NULL;
    }
    free(q_params);
  }
  quantization->params = NULL;
  quantization->type = kTfLiteNoQuantization;
}

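// Releases all arrays owned by a sparsity parameter struct: the traversal
// order, the block map, per-dimension CSR metadata, and finally the struct
// itself. Safe to call with NULL.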
void TfLiteSparsityFree(TfLiteSparsity* sparsity) {
  if (sparsity == NULL) {
    return;
  }

  if (sparsity->traversal_order) {
    TfLiteIntArrayFree(sparsity->traversal_order);
    sparsity->traversal_order = NULL;
  }

  if (sparsity->block_map) {
    TfLiteIntArrayFree(sparsity->block_map);
    sparsity->block_map = NULL;
  }

  if (sparsity->dim_metadata) {
    int i = 0;
    for (; i < sparsity->dim_metadata_size; i++) {
      TfLiteDimensionMetadata metadata = sparsity->dim_metadata[i];
      if (metadata.format == kTfLiteDimSparseCSR) {
        TfLiteIntArrayFree(metadata.array_segments);
        metadata.array_segments = NULL;
        TfLiteIntArrayFree(metadata.array_indices);
        metadata.array_indices = NULL;
      }
    }
    free(sparsity->dim_metadata);
    sparsity->dim_metadata = NULL;
  }

  free(sparsity);
}

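// Releases everything a tensor owns: its data buffer, shape arrays,
// quantization parameters, and sparsity parameters.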
void TfLiteTensorFree(TfLiteTensor* t) {
  TfLiteTensorDataFree(t);
  if (t->dims) TfLiteIntArrayFree(t->dims);
  t->dims = NULL;

  if (t->dims_signature) {
    TfLiteIntArrayFree((TfLiteIntArray*)t->dims_signature);
  }
  t->dims_signature = NULL;

  TfLiteQuantizationFree(&t->quantization);
  TfLiteSparsityFree(t->sparsity);
  t->sparsity = NULL;
}

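// Frees any existing contents of `tensor` and repopulates it with the given
// fields. The detailed `quantization` field is reset to kTfLiteNoQuantization;
// only the legacy `params` struct is carried over.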
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                       TfLiteQuantizationParams quantization, char* buffer,
                       size_t size, TfLiteAllocationType allocation_type,
                       const void* allocation, bool is_variable,
                       TfLiteTensor* tensor) {
  TfLiteTensorFree(tensor);
  tensor->type = type;
  tensor->name = name;
  tensor->dims = dims;
  tensor->params = quantization;
  tensor->data.raw = buffer;
  tensor->bytes = size;
  tensor->allocation_type = allocation_type;
  tensor->allocation = allocation;
  tensor->is_variable = is_variable;

  tensor->quantization.type = kTfLiteNoQuantization;
  tensor->quantization.params = NULL;
}

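// Grows (or lazily allocates) the data buffer of a dynamically allocated
// tensor to `num_bytes`. Tensors with other allocation types are left
// untouched.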
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
  if (tensor->allocation_type != kTfLiteDynamic &&
      tensor->allocation_type != kTfLitePersistentRo) {
    return;
  }
  // TODO(b/145340303): Tensor data should be aligned.
  if (!tensor->data.raw) {
    tensor->data.raw = malloc(num_bytes);
  } else if (num_bytes > tensor->bytes) {
    tensor->data.raw = realloc(tensor->data.raw, num_bytes);
  }
  tensor->bytes = num_bytes;
}
#endif  // TF_LITE_STATIC_MEMORY

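// Maps a TfLiteType enum value to its human-readable name.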
const char* TfLiteTypeGetName(TfLiteType type) {
  switch (type) {
    case kTfLiteNoType:
      return "NOTYPE";
    case kTfLiteFloat32:
      return "FLOAT32";
    case kTfLiteInt16:
      return "INT16";
    case kTfLiteInt32:
      return "INT32";
    case kTfLiteUInt32:
      return "UINT32";
    case kTfLiteUInt8:
      return "UINT8";
    case kTfLiteInt8:
      return "INT8";
    case kTfLiteInt64:
      return "INT64";
    case kTfLiteUInt64:
      return "UINT64";
    case kTfLiteBool:
      return "BOOL";
    case kTfLiteComplex64:
      return "COMPLEX64";
    case kTfLiteComplex128:
      return "COMPLEX128";
    case kTfLiteString:
      return "STRING";
    case kTfLiteFloat16:
      return "FLOAT16";
    case kTfLiteFloat64:
      return "FLOAT64";
    case kTfLiteResource:
      return "RESOURCE";
    case kTfLiteVariant:
      return "VARIANT";
  }
  return "Unknown type";
}

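// Returns a TfLiteDelegate with every callback unset and default flags, so
// callers only need to fill in the fields they use.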
TfLiteDelegate TfLiteDelegateCreate() {
  TfLiteDelegate d = {
      .data_ = NULL,
      .Prepare = NULL,
      .CopyFromBufferHandle = NULL,
      .CopyToBufferHandle = NULL,
      .FreeBufferHandle = NULL,
      .flags = kTfLiteDelegateFlagsNone,
  };
  return d;
}