/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/micro_speech/micro_features/model.h"
#include "tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h"
#include "tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/schema/schema_generated.h"

TF_LITE_MICRO_TESTS_BEGIN

TF_LITE_MICRO_TEST(TestInvoke) {
  // Set up logging.
  tflite::MicroErrorReporter micro_error_reporter;

  // Map the model into a usable data structure. This doesn't involve any
  // copying or parsing, it's a very lightweight operation.
  const tflite::Model* model = ::tflite::GetModel(g_model);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    TF_LITE_REPORT_ERROR(&micro_error_reporter,
                         "Model provided is schema version %d not equal "
                         "to supported version %d.\n",
                         model->version(), TFLITE_SCHEMA_VERSION);
  }

  // Pull in only the operation implementations we need.
  // This relies on a complete list of all the ops needed by this graph.
  // An easier approach is to just use the AllOpsResolver, but this will
  // incur some penalty in code space for op implementations that are not
  // needed by this graph.
  //
  // tflite::AllOpsResolver resolver;
  tflite::MicroMutableOpResolver<4> micro_op_resolver;
  micro_op_resolver.AddDepthwiseConv2D();
  micro_op_resolver.AddFullyConnected();
  micro_op_resolver.AddReshape();
  micro_op_resolver.AddSoftmax();

  // Create an area of memory to use for input, output, and intermediate
  // arrays.
  const int tensor_arena_size = 10 * 1024;
  uint8_t tensor_arena[tensor_arena_size];

  // Build an interpreter to run the model with.
  tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena,
                                       tensor_arena_size,
                                       &micro_error_reporter);

  // Allocate memory from the tensor_arena for the model's tensors, and
  // check that allocation succeeded; an undersized arena fails here rather
  // than at Invoke() time.
  TfLiteStatus allocate_status = interpreter.AllocateTensors();
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, allocate_status);

  // Get information about the memory area to use for the model's input.
  TfLiteTensor* input = interpreter.input(0);

  // Make sure the input has the properties we expect.
  TF_LITE_MICRO_EXPECT_NE(nullptr, input);
  TF_LITE_MICRO_EXPECT_EQ(2, input->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(1960, input->dims->data[1]);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input->type);

  // Copy a spectrogram created from a .wav audio file of someone saying
  // "Yes" into the memory area used for the input.
  const int8_t* yes_features_data = g_yes_micro_f2e59fea_nohash_1_data;
  for (size_t i = 0; i < input->bytes; ++i) {
    input->data.int8[i] = yes_features_data[i];
  }
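
  // Sanity check: the input is expected to be a quantized spectrogram of 49
  // time slices x 40 frequency channels (49 * 40 = 1960 int8 values), so
  // the byte count should match the element count copied above.
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1960), input->bytes);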

  // Run the model on this input and make sure it succeeds.
  TfLiteStatus invoke_status = interpreter.Invoke();
  if (invoke_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(&micro_error_reporter, "Invoke failed\n");
  }
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);

  // Get the output from the model, and make sure it's the expected size and
  // type.
  TfLiteTensor* output = interpreter.output(0);
  TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(4, output->dims->data[1]);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type);

  // There are four possible classes in the output, each with a score.
  const int kSilenceIndex = 0;
  const int kUnknownIndex = 1;
  const int kYesIndex = 2;
  const int kNoIndex = 3;

  // Make sure that the expected "Yes" score is higher than the other
  // classes. Adding 128 shifts the int8 scores from [-128, 127] into the
  // uint8 range [0, 255] so they can be compared as unsigned values.
  uint8_t silence_score = output->data.int8[kSilenceIndex] + 128;
  uint8_t unknown_score = output->data.int8[kUnknownIndex] + 128;
  uint8_t yes_score = output->data.int8[kYesIndex] + 128;
  uint8_t no_score = output->data.int8[kNoIndex] + 128;
  TF_LITE_MICRO_EXPECT_GT(yes_score, silence_score);
  TF_LITE_MICRO_EXPECT_GT(yes_score, unknown_score);
  TF_LITE_MICRO_EXPECT_GT(yes_score, no_score);

  // Now test with a different input, from a recording of "No".
  const int8_t* no_features_data = g_no_micro_f9643d42_nohash_4_data;
  for (size_t i = 0; i < input->bytes; ++i) {
    input->data.int8[i] = no_features_data[i];
  }

  // Run the model on this "No" input.
  invoke_status = interpreter.Invoke();
  if (invoke_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(&micro_error_reporter, "Invoke failed\n");
  }
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);

  // Get the output from the model, and make sure it's the expected size and
  // type.
  output = interpreter.output(0);
  TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(4, output->dims->data[1]);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type);

  // Make sure that the expected "No" score is higher than the other classes.
  silence_score = output->data.int8[kSilenceIndex] + 128;
  unknown_score = output->data.int8[kUnknownIndex] + 128;
  yes_score = output->data.int8[kYesIndex] + 128;
  no_score = output->data.int8[kNoIndex] + 128;
  TF_LITE_MICRO_EXPECT_GT(no_score, silence_score);
  TF_LITE_MICRO_EXPECT_GT(no_score, unknown_score);
  TF_LITE_MICRO_EXPECT_GT(no_score, yes_score);

  TF_LITE_REPORT_ERROR(&micro_error_reporter, "Ran successfully\n");
}

TF_LITE_MICRO_TESTS_END