// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2018-2021, 2024 NXP
 *
 * Brief   Memory management utilities.
 *         Primitives to allocate and free memory.
 */
#include <arm.h>
#include <assert.h>
#include <caam_common.h>
#include <caam_trace.h>
#include <caam_utils_mem.h>
#include <io.h>
#include <kernel/cache_helpers.h>
#include <mm/core_memprot.h>
#include <stdlib.h>
#include <string.h>
#include <tee/cache.h>

#define MEM_TYPE_NORMAL 0      /* Normal allocation */
#define MEM_TYPE_ZEROED	BIT(0) /* Buffer filled with 0's */
#define MEM_TYPE_ALIGN	BIT(1) /* Address and size aligned on a cache line */

/*
 * Read the first byte at the given @addr to ensure that the
 * virtual page is mapped before getting its physical address.
 *
 * @addr: address to read
 */
static inline void touch_page(vaddr_t addr)
{
	io_read8(addr);
}

/*
 * Allocate an area of the given size in bytes, as a normal, zeroed and/or
 * cache line aligned buffer depending on @type.
 *
 * @size   Size in bytes to allocate
 * @type   Type of area to allocate (refer to MEM_TYPE_*)
 */
static void *mem_alloc(size_t size, uint8_t type)
{
	void *ptr = NULL;
	size_t alloc_size = size;

	MEM_TRACE("alloc %zu bytes of type %" PRIu8, size, type);

	if (type & MEM_TYPE_ALIGN) {
		size_t cacheline_size = dcache_get_line_size();

		if (ROUNDUP_OVERFLOW(alloc_size, CFG_CAAM_SIZE_ALIGN,
				     &alloc_size))
			return NULL;

		if (ROUNDUP2_OVERFLOW(alloc_size, cacheline_size, &alloc_size))
			return NULL;

		ptr = memalign(cacheline_size, alloc_size);
	} else {
		ptr = malloc(alloc_size);
	}

	if (!ptr) {
		MEM_TRACE("alloc Error - NULL");
		return NULL;
	}

	if (type & MEM_TYPE_ZEROED)
		memset(ptr, 0, alloc_size);

	MEM_TRACE("alloc returned %p", ptr);
	return ptr;
}

/*
 * Free an allocated area
 *
 * @ptr  area to free
 */
static void mem_free(void *ptr)
{
	if (ptr) {
		MEM_TRACE("free %p", ptr);
		free(ptr);
	}
}

/*
 * Allocate an internal driver buffer of the given @type and get its
 * physical address.
 *
 * @buf   [out] buffer allocated
 * @size  size in bytes of the memory to allocate
 * @type  Type of area to allocate (refer to MEM_TYPE_*)
 */
static enum caam_status mem_alloc_buf(struct caambuf *buf, size_t size,
				      uint8_t type)
{
	buf->data = mem_alloc(size, type);

	if (!buf->data)
		return CAAM_OUT_MEMORY;

	buf->paddr = virt_to_phys(buf->data);
	if (!buf->paddr) {
		caam_free_buf(buf);
		return CAAM_OUT_MEMORY;
	}

	buf->length = size;
	buf->nocache = 0;
	return CAAM_NO_ERROR;
}

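/*
 * Allocate a normal memory area of @size bytes.
 *
 * @size  Size in bytes to allocate
 */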
void *caam_alloc(size_t size)
{
	return mem_alloc(size, MEM_TYPE_NORMAL);
}

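/*
 * Allocate a memory area of @size bytes filled with 0's.
 *
 * @size  Size in bytes to allocate
 */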
void *caam_calloc(size_t size)
{
	return mem_alloc(size, MEM_TYPE_ZEROED);
}

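/*
 * Allocate a memory area of @size bytes filled with 0's and aligned on a
 * cache line.
 *
 * @size  Size in bytes to allocate
 */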
void *caam_calloc_align(size_t size)
{
	return mem_alloc(size, MEM_TYPE_ZEROED | MEM_TYPE_ALIGN);
}

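/*
 * Free an area allocated with one of the caam_alloc*() functions.
 *
 * @ptr  area to free
 */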
void caam_free(void *ptr)
{
	mem_free(ptr);
}

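/*
 * Allocate a zeroed, cache line aligned CAAM descriptor buffer of
 * @nbentries entries.
 *
 * @nbentries  Number of descriptor entries
 */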
uint32_t *caam_calloc_desc(uint8_t nbentries)
{
	return mem_alloc(DESC_SZBYTES(nbentries),
			 MEM_TYPE_ZEROED | MEM_TYPE_ALIGN);
}

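/*
 * Free a CAAM descriptor buffer and clear the reference to it.
 *
 * @ptr  [in/out] descriptor buffer to free
 */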
void caam_free_desc(uint32_t **ptr)
{
	mem_free(*ptr);
	*ptr = NULL;
}

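/*
 * Allocate a driver buffer of @size bytes.
 *
 * @buf   [out] buffer allocated
 * @size  Size in bytes to allocate
 */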
enum caam_status caam_alloc_buf(struct caambuf *buf, size_t size)
{
	return mem_alloc_buf(buf, size, MEM_TYPE_NORMAL);
}

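/*
 * Allocate a driver buffer of @size bytes filled with 0's.
 *
 * @buf   [out] buffer allocated
 * @size  Size in bytes to allocate
 */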
enum caam_status caam_calloc_buf(struct caambuf *buf, size_t size)
{
	return mem_alloc_buf(buf, size, MEM_TYPE_ZEROED);
}

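/*
 * Allocate a driver buffer of @size bytes filled with 0's and aligned on a
 * cache line.
 *
 * @buf   [out] buffer allocated
 * @size  Size in bytes to allocate
 */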
enum caam_status caam_calloc_align_buf(struct caambuf *buf, size_t size)
{
	return mem_alloc_buf(buf, size, MEM_TYPE_ZEROED | MEM_TYPE_ALIGN);
}

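/*
 * Allocate a driver buffer of @size bytes aligned on a cache line.
 *
 * @buf   [out] buffer allocated
 * @size  Size in bytes to allocate
 */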
enum caam_status caam_alloc_align_buf(struct caambuf *buf, size_t size)
{
	return mem_alloc_buf(buf, size, MEM_TYPE_ALIGN);
}

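/*
 * Free a driver buffer allocated with one of the caam_*alloc*_buf()
 * functions and reset its fields.
 *
 * @buf  buffer to free
 */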
void caam_free_buf(struct caambuf *buf)
{
	if (buf) {
		if (buf->data) {
			caam_free(buf->data);
			buf->data = NULL;
		}

		buf->length = 0;
		buf->paddr = 0;
		buf->nocache = 0;
	}
}

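/*
 * Return true if the buffer @buf of @size bytes is mapped in a cached
 * memory area, false otherwise.
 *
 * @buf   buffer to check
 * @size  size in bytes of the buffer
 */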
bool caam_mem_is_cached_buf(void *buf, size_t size)
{
	enum teecore_memtypes mtype = MEM_AREA_MAXTYPE;
	bool is_cached = false;

	/*
	 * First check if the buffer is a known memory area mapped
	 * with a type listed in the teecore_memtypes enum.
	 * If it is not mapped, this is a user area, so assume
	 * it is cacheable.
	 */
	mtype = core_mmu_get_type_by_pa(virt_to_phys(buf));
	if (mtype == MEM_AREA_MAXTYPE)
		is_cached = true;
	else
		is_cached = core_vbuf_is(CORE_MEM_CACHED, buf, size);

	return is_cached;
}

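/*
 * Copy the source buffer @src, starting at @offset, into the block
 * buffer @block. Allocate the block buffer if it is not already
 * allocated. At most block->max bytes are accumulated.
 *
 * @block   [in/out] block buffer to fill
 * @src     source data to copy
 * @offset  offset in bytes to start the copy from in @src
 */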
enum caam_status caam_cpy_block_src(struct caamblock *block,
				    struct caambuf *src, size_t offset)
{
	enum caam_status ret = CAAM_FAILURE;
	size_t cpy_size = 0;

	if (!src->data)
		return CAAM_FAILURE;

	/* Check if the temporary buffer is allocated, else allocate it */
	if (!block->buf.data) {
		ret = caam_alloc_align_buf(&block->buf, block->max);
		if (ret != CAAM_NO_ERROR) {
			MEM_TRACE("Allocation Block buffer error");
			goto end_cpy;
		}
	}

	/* Calculate the number of bytes to copy in the block buffer */
	MEM_TRACE("Current buffer is %zu (%zu) bytes", block->filled,
		  block->max);

	cpy_size = block->max - block->filled;
	cpy_size = MIN(cpy_size, src->length - offset);

	memcpy(&block->buf.data[block->filled], &src->data[offset], cpy_size);

	block->filled += cpy_size;

	ret = CAAM_NO_ERROR;

end_cpy:
	return ret;
}

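/*
 * Copy @src_length bytes from @src_data into the destination buffer @dst.
 * Allocate @dst as a cache line aligned buffer if it is not already
 * allocated, then clean the data cache so the copied data reaches
 * physical memory.
 *
 * @dst         [in/out] destination buffer
 * @src_data    source data to copy
 * @src_length  size in bytes of the source data
 */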
enum caam_status caam_cpy_buf(struct caambuf *dst, uint8_t *src_data,
			      size_t src_length)
{
	enum caam_status ret = CAAM_FAILURE;

	if (!src_data || !dst)
		return CAAM_FAILURE;

	if (!src_length)
		return CAAM_NO_ERROR;

	if (!dst->data) {
		/* Allocate the destination buffer */
		ret = caam_alloc_align_buf(dst, src_length);
		if (ret != CAAM_NO_ERROR) {
			MEM_TRACE("Allocation buffer error");
			return ret;
		}
	}

	assert(dst->length == src_length);

	/* Do the copy */
	memcpy(dst->data, src_data, dst->length);

	/* Push data to physical memory */
	cache_operation(TEE_CACHECLEAN, dst->data, dst->length);

	return CAAM_NO_ERROR;
}

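/*
 * Walk the virtual buffer @buf and count the number of contiguous
 * physical areas it maps to. If @out_pabufs is not NULL, allocate and
 * return the array of physical areas found.
 *
 * Returns the number of physical areas or -1 in case of error.
 *
 * @buf         buffer to analyze
 * @out_pabufs  [out] if not NULL, array of the contiguous physical areas
 */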
int caam_mem_get_pa_area(struct caambuf *buf, struct caambuf **out_pabufs)
{
	int nb_pa_area = 0;
	size_t len = 0;
	size_t len_tohandle = 0;
	vaddr_t va = 0;
	vaddr_t next_va = 0;
	paddr_t pa = 0;
	paddr_t next_pa = 0;
	struct caambuf *pabufs = NULL;

	MEM_TRACE("Get PA Areas of %p-%zu (out %p)", buf->data, buf->length,
		  out_pabufs);

	if (out_pabufs) {
		/*
		 * Caller asked for the extracted contiguous
		 * physical areas.
		 * Allocate the maximum possible number of small pages.
		 */
		if (buf->length > SMALL_PAGE_SIZE) {
			nb_pa_area = buf->length / SMALL_PAGE_SIZE + 1;
			if (buf->length % SMALL_PAGE_SIZE)
				nb_pa_area++;
		} else {
			nb_pa_area = 2;
		}

		pabufs = caam_calloc(nb_pa_area * sizeof(*pabufs));
		if (!pabufs)
			return -1;

		MEM_TRACE("Allocate max %d Physical Areas", nb_pa_area);
	}

	/*
	 * Go through the whole VA space to extract the contiguous
	 * physical areas.
	 */
	va = (vaddr_t)buf->data;
	pa = virt_to_phys((void *)va);
	if (!pa)
		goto err;

	nb_pa_area = 0;
	if (pabufs) {
		pabufs[nb_pa_area].data = (uint8_t *)va;
		pabufs[nb_pa_area].paddr = pa;
		pabufs[nb_pa_area].length = 0;
		pabufs[nb_pa_area].nocache = buf->nocache;
		MEM_TRACE("Add %d PA 0x%" PRIxPA " VA 0x%" PRIxVA, nb_pa_area,
			  pa, va);
	}

	for (len = buf->length; len; len -= len_tohandle) {
		len_tohandle =
			MIN(SMALL_PAGE_SIZE - (va & SMALL_PAGE_MASK), len);
		next_va = va + len_tohandle;
		if (pabufs)
			pabufs[nb_pa_area].length += len_tohandle;

		/*
		 * End of the buffer reached: exit here because the next
		 * virtual address is out of scope.
		 */
		if (len == len_tohandle)
			break;

		touch_page(next_va);
		next_pa = virt_to_phys((void *)next_va);
		if (!next_pa)
			goto err;

		if (next_pa != (pa + len_tohandle)) {
			nb_pa_area++;
			if (pabufs) {
				pabufs[nb_pa_area].data = (uint8_t *)next_va;
				pabufs[nb_pa_area].paddr = next_pa;
				pabufs[nb_pa_area].length = 0;
				pabufs[nb_pa_area].nocache = buf->nocache;
			}
			MEM_TRACE("Add %d PA 0x%" PRIxPA " VA 0x%" PRIxVA,
				  nb_pa_area, next_pa, next_va);
		}

		va = next_va;
		pa = next_pa;
	}

	if (out_pabufs)
		*out_pabufs = pabufs;

	MEM_TRACE("Nb Physical Area %d", nb_pa_area + 1);
	return nb_pa_area + 1;

err:
	free(pabufs);
	return -1;
}

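/*
 * Copy @src into @dst, skipping the leading zero bytes of @src. If @src
 * contains only zeros, the last byte is kept. Update dst->length with the
 * number of bytes copied.
 *
 * @dst  [in/out] destination buffer
 * @src  source buffer
 */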
void caam_mem_cpy_ltrim_buf(struct caambuf *dst, struct caambuf *src)
{
	size_t offset = 0;
	size_t cpy_size = 0;

	/* Calculate the offset to start the copy from */
	while (offset < src->length && !src->data[offset])
		offset++;

	if (offset >= src->length)
		offset = src->length - 1;

	cpy_size = MIN(dst->length, (src->length - offset));
	MEM_TRACE("Copy %zu of src %zu bytes (offset = %zu)", cpy_size,
		  src->length, offset);
	memcpy(dst->data, &src->data[offset], cpy_size);

	dst->length = cpy_size;
}