/*
 * Copyright 2019-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

#include <array>
#include <queue>
#include <stdlib.h>
#include <string>
#include <vector>
#include <cstdint>

namespace InferenceProcess
{
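/* Non-owning pointer and size describing a contiguous data buffer
 * (network model, input, output or reference data). */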
struct DataPtr {
	void *data;
	size_t size;

	DataPtr(void *data = nullptr, size_t size = 0);

	void invalidate();
	void clean();
};

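/* A single inference request: the network model, its input and output
 * buffers, and optional expected output data used as a reference. */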
struct InferenceJob {
	std::string name;
	DataPtr networkModel;
	std::vector<DataPtr> input;
	std::vector<DataPtr> output;
	std::vector<DataPtr> expectedOutput;

	InferenceJob();
	InferenceJob(const std::string &name, const DataPtr &networkModel,
		     const std::vector<DataPtr> &input, const std::vector<DataPtr> &output,
		     const std::vector<DataPtr> &expectedOutput);

	void invalidate();
	void clean();
};

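/* Executes inference jobs using a caller-provided tensor arena as
 * scratch/working memory for the inference runtime. */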
class InferenceProcess {
    public:
	InferenceProcess(uint8_t *_tensorArena, size_t _tensorArenaSize)
		: tensorArena(_tensorArena), tensorArenaSize(_tensorArenaSize)
	{
	}

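	/* Run a single inference job using the tensor arena given at construction. */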
	bool runJob(InferenceJob &job);

    private:
	uint8_t *tensorArena;
	const size_t tensorArenaSize;
};
} /* namespace InferenceProcess */