// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2018-2021 NXP
 *
 * Brief   Memory management utilities.
 *         Primitives to allocate and free memory.
 */
#include <arm.h>
#include <caam_common.h>
#include <caam_trace.h>
#include <caam_utils_mem.h>
#include <io.h>
#include <kernel/cache_helpers.h>
#include <mm/core_memprot.h>
#include <string.h>

#define MEM_TYPE_NORMAL 0      /* Normal allocation */
#define MEM_TYPE_ZEROED	BIT(0) /* Buffer filled with 0's */
#define MEM_TYPE_ALIGN	BIT(1) /* Address and size aligned on a cache line */

/*
 * Read the first byte at the given @addr to ensure that the
 * virtual page is mapped before getting its physical address.
 *
 * @addr: address to read
 */
static inline void touch_page(vaddr_t addr)
{
	io_read8(addr);
}

/*
 * Allocate an area of the given size in bytes.
 *
 * @size   Size in bytes to allocate
 * @type   Type of area to allocate (refer to MEM_TYPE_*)
 */
static void *mem_alloc(size_t size, uint8_t type)
{
	void *ptr = NULL;
	size_t alloc_size = size;

	MEM_TRACE("alloc %zu bytes of type %" PRIu8, size, type);

	if (type & MEM_TYPE_ALIGN) {
		size_t cacheline_size = dcache_get_line_size();

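		/*
		 * Round the size up to CFG_CAAM_SIZE_ALIGN and then to a
		 * whole number of cache lines so the buffer does not share
		 * a cache line with other allocations.
		 */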
		if (ROUNDUP_OVERFLOW(alloc_size, CFG_CAAM_SIZE_ALIGN,
				     &alloc_size))
			return NULL;

		if (ROUNDUP_OVERFLOW(alloc_size, cacheline_size, &alloc_size))
			return NULL;

		ptr = memalign(cacheline_size, alloc_size);
	} else {
		ptr = malloc(alloc_size);
	}

	if (!ptr) {
		MEM_TRACE("alloc Error - NULL");
		return NULL;
	}

	if (type & MEM_TYPE_ZEROED)
		memset(ptr, 0, alloc_size);

	MEM_TRACE("alloc returned %p", ptr);
	return ptr;
}

/*
 * Free allocated area
 *
 * @ptr  area to free
 */
static void mem_free(void *ptr)
{
	if (ptr) {
		MEM_TRACE("free %p", ptr);
		free(ptr);
	}
}

/*
 * Allocate an internal driver buffer of the requested @type.
 *
 * @buf   [out] buffer allocated
 * @size  size in bytes of the memory to allocate
 * @type  Type of area to allocate (refer to MEM_TYPE_*)
 */
static enum caam_status mem_alloc_buf(struct caambuf *buf, size_t size,
				      uint8_t type)
{
	buf->data = mem_alloc(size, type);

	if (!buf->data)
		return CAAM_OUT_MEMORY;

	buf->paddr = virt_to_phys(buf->data);
	if (!buf->paddr) {
		caam_free_buf(buf);
		return CAAM_OUT_MEMORY;
	}

	buf->length = size;
	buf->nocache = 0;
	return CAAM_NO_ERROR;
}

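/* Allocate normal memory of @size bytes */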
void *caam_alloc(size_t size)
{
	return mem_alloc(size, MEM_TYPE_NORMAL);
}

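/* Allocate @size bytes of memory initialized to zero */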
void *caam_calloc(size_t size)
{
	return mem_alloc(size, MEM_TYPE_ZEROED);
}

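/* Allocate @size bytes of zeroed memory aligned on a cache line */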
void *caam_calloc_align(size_t size)
{
	return mem_alloc(size, MEM_TYPE_ZEROED | MEM_TYPE_ALIGN);
}

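/* Free memory returned by one of the caam_alloc* functions */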
void caam_free(void *ptr)
{
	mem_free(ptr);
}

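/* Allocate a zeroed, cache line aligned descriptor of @nbentries entries */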
uint32_t *caam_calloc_desc(uint8_t nbentries)
{
	return mem_alloc(DESC_SZBYTES(nbentries),
			 MEM_TYPE_ZEROED | MEM_TYPE_ALIGN);
}

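/* Free a descriptor allocated with caam_calloc_desc() and clear the pointer */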
void caam_free_desc(uint32_t **ptr)
{
	mem_free(*ptr);
	*ptr = NULL;
}

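/* Allocate a normal buffer of @size bytes and initialize @buf */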
enum caam_status caam_alloc_buf(struct caambuf *buf, size_t size)
{
	return mem_alloc_buf(buf, size, MEM_TYPE_NORMAL);
}

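/* Allocate a zeroed buffer of @size bytes and initialize @buf */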
enum caam_status caam_calloc_buf(struct caambuf *buf, size_t size)
{
	return mem_alloc_buf(buf, size, MEM_TYPE_ZEROED);
}

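/* Allocate a zeroed, cache line aligned buffer of @size bytes and initialize @buf */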
enum caam_status caam_calloc_align_buf(struct caambuf *buf, size_t size)
{
	return mem_alloc_buf(buf, size, MEM_TYPE_ZEROED | MEM_TYPE_ALIGN);
}

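/* Allocate a cache line aligned buffer of @size bytes and initialize @buf */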
enum caam_status caam_alloc_align_buf(struct caambuf *buf, size_t size)
{
	return mem_alloc_buf(buf, size, MEM_TYPE_ALIGN);
}

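/* Free the data of @buf and reset its fields */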
void caam_free_buf(struct caambuf *buf)
{
	if (buf) {
		if (buf->data) {
			caam_free(buf->data);
			buf->data = NULL;
		}

		buf->length = 0;
		buf->paddr = 0;
		buf->nocache = 0;
	}
}

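/* Return true if the buffer @buf of @size bytes is mapped in cached memory */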
bool caam_mem_is_cached_buf(void *buf, size_t size)
{
	enum teecore_memtypes mtype = MEM_AREA_MAXTYPE;
	bool is_cached = false;

	/*
	 * First check if the buffer is a known memory area mapped
	 * with a type listed in the teecore_memtypes enum.
	 * If it is not mapped, this is a User Area, so assume
	 * it is cacheable.
	 */
	mtype = core_mmu_get_type_by_pa(virt_to_phys(buf));
	if (mtype == MEM_AREA_MAXTYPE)
		is_cached = true;
	else
		is_cached = core_vbuf_is(CORE_MEM_CACHED, buf, size);

	return is_cached;
}

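/*
 * Copy @src data starting at @offset into the block buffer, allocating
 * the block buffer on first use. Copies at most the remaining free space
 * of the block.
 */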
enum caam_status caam_cpy_block_src(struct caamblock *block,
				    struct caambuf *src, size_t offset)
{
	enum caam_status ret = CAAM_FAILURE;
	size_t cpy_size = 0;

	if (!src->data)
		return CAAM_FAILURE;

	/* Check if the temporary buffer is allocated, else allocate it */
	if (!block->buf.data) {
		ret = caam_alloc_align_buf(&block->buf, block->max);
		if (ret != CAAM_NO_ERROR) {
			MEM_TRACE("Allocation Block buffer error");
			goto end_cpy;
		}
	}

	/* Calculate the number of bytes to copy in the block buffer */
	MEM_TRACE("Current buffer is %zu (%zu) bytes", block->filled,
		  block->max);

	cpy_size = block->max - block->filled;
	cpy_size = MIN(cpy_size, src->length - offset);

	memcpy(&block->buf.data[block->filled], &src->data[offset], cpy_size);

	block->filled += cpy_size;

	ret = CAAM_NO_ERROR;

end_cpy:
	return ret;
}

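/*
 * Walk the buffer @buf page by page and count the contiguous physical
 * areas it covers. If @out_pabufs is not NULL, also allocate and return
 * an array describing each area. Return the number of areas, or -1 on
 * error.
 */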
int caam_mem_get_pa_area(struct caambuf *buf, struct caambuf **out_pabufs)
{
	int nb_pa_area = 0;
	size_t len = 0;
	size_t len_tohandle = 0;
	vaddr_t va = 0;
	vaddr_t next_va = 0;
	paddr_t pa = 0;
	paddr_t next_pa = 0;
	struct caambuf *pabufs = NULL;

	MEM_TRACE("Get PA Areas of %p-%zu (out %p)", buf->data, buf->length,
		  out_pabufs);

	if (out_pabufs) {
		/*
		 * Caller asked for the extracted contiguous
		 * physical areas.
		 * Allocate the maximum possible number of small pages.
		 */
		if (buf->length > SMALL_PAGE_SIZE) {
			nb_pa_area = buf->length / SMALL_PAGE_SIZE + 1;
			if (buf->length % SMALL_PAGE_SIZE)
				nb_pa_area++;
		} else {
			nb_pa_area = 2;
		}

		pabufs = caam_calloc(nb_pa_area * sizeof(*pabufs));
		if (!pabufs)
			return -1;

		MEM_TRACE("Allocate max %d Physical Areas", nb_pa_area);
	}

	/*
	 * Go through the whole VA space to extract the contiguous
	 * physical areas.
	 */
	va = (vaddr_t)buf->data;
	pa = virt_to_phys((void *)va);
	if (!pa)
		goto err;

	nb_pa_area = 0;
	if (pabufs) {
		pabufs[nb_pa_area].data = (uint8_t *)va;
		pabufs[nb_pa_area].paddr = pa;
		pabufs[nb_pa_area].length = 0;
		pabufs[nb_pa_area].nocache = buf->nocache;
		MEM_TRACE("Add %d PA 0x%" PRIxPA " VA 0x%" PRIxVA, nb_pa_area,
			  pa, va);
	}

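	/*
	 * Walk one small page at a time: extend the current area while the
	 * next page is physically contiguous, otherwise start a new area.
	 */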
	for (len = buf->length; len; len -= len_tohandle) {
		len_tohandle =
			MIN(SMALL_PAGE_SIZE - (va & SMALL_PAGE_MASK), len);
		next_va = va + len_tohandle;
		if (pabufs)
			pabufs[nb_pa_area].length += len_tohandle;

		/*
		 * We reached the end of the buffer; exit here because
		 * the next virtual address is out of scope.
		 */
		if (len == len_tohandle)
			break;

		touch_page(next_va);
		next_pa = virt_to_phys((void *)next_va);
		if (!next_pa)
			goto err;

		if (next_pa != (pa + len_tohandle)) {
			nb_pa_area++;
			if (pabufs) {
				pabufs[nb_pa_area].data = (uint8_t *)next_va;
				pabufs[nb_pa_area].paddr = next_pa;
				pabufs[nb_pa_area].length = 0;
				pabufs[nb_pa_area].nocache = buf->nocache;
			}
			MEM_TRACE("Add %d PA 0x%" PRIxPA " VA 0x%" PRIxVA,
				  nb_pa_area, next_pa, next_va);
		}

		va = next_va;
		pa = next_pa;
	}

	if (out_pabufs)
		*out_pabufs = pabufs;

	MEM_TRACE("Nb Physical Area %d", nb_pa_area + 1);
	return nb_pa_area + 1;

err:
	free(pabufs);
	return -1;
}

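/*
 * Copy @src into @dst with the leading zero bytes stripped, and set
 * @dst->length to the number of bytes actually copied.
 */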
void caam_mem_cpy_ltrim_buf(struct caambuf *dst, struct caambuf *src)
{
	size_t offset = 0;
	size_t cpy_size = 0;

	/* Calculate the offset to start the copy */
	while (offset < src->length && !src->data[offset])
		offset++;

	if (offset >= src->length)
		offset = src->length - 1;

	cpy_size = MIN(dst->length, (src->length - offset));
	MEM_TRACE("Copy %zu of src %zu bytes (offset = %zu)", cpy_size,
		  src->length, offset);
	memcpy(dst->data, &src->data[offset], cpy_size);

	dst->length = cpy_size;
}