// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2024, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <kernel/boot.h>
#include <kernel/panic.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <stdalign.h>
#include <string.h>
#include <trace.h>
#include <util.h>

/*
 * struct boot_mem_reloc - Pointers relocated in memory during boot
 * @ptrs: Array of pointers to relocate
 * @count: Number of cells used in @ptrs
 * @next: Next relocation array when @ptrs is fully used
 */
struct boot_mem_reloc {
	void **ptrs[64];
	size_t count;
	struct boot_mem_reloc *next;
};

/*
 * struct boot_mem_desc - Stack-like boot memory allocation pool
 * @orig_mem_start: Boot memory stack base address
 * @orig_mem_end: Boot memory stack end address
 * @mem_start: Boot memory free space start address
 * @mem_end: Boot memory free space end address
 * @reloc: Boot memory pointers requiring relocation
 */
struct boot_mem_desc {
	vaddr_t orig_mem_start;
	vaddr_t orig_mem_end;
	vaddr_t mem_start;
	vaddr_t mem_end;
	struct boot_mem_reloc *reloc;
};

static struct boot_mem_desc *boot_mem_desc;
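
/*
 * The pool is consumed from both ends:
 *
 * orig_mem_start    mem_start             mem_end      orig_mem_end
 * |<- permanent ->|<-------- free ------->|<- temporary ->|
 *
 * mem_alloc() takes memory from the bottom and the result is kept for
 * the lifetime of the system, while mem_alloc_tmp() takes memory from
 * the top which is released again once boot has completed.
 */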

/* Allocate @len bytes from the top of the pool (temporary region) */
static void *mem_alloc_tmp(struct boot_mem_desc *desc, size_t len, size_t align)
{
	vaddr_t va = 0;

	assert(desc && desc->mem_start && desc->mem_end);
	assert(IS_POWER_OF_TWO(align) && !(len % align));
	if (SUB_OVERFLOW(desc->mem_end, len, &va))
		panic();
	va = ROUNDDOWN2(va, align);
	if (va < desc->mem_start)
		panic();
	desc->mem_end = va;
	return (void *)va;
}

/* Allocate @len bytes from the bottom of the pool (permanent region) */
static void *mem_alloc(struct boot_mem_desc *desc, size_t len, size_t align)
{
	vaddr_t va = 0;
	vaddr_t ve = 0;

	runtime_assert(!IS_ENABLED(CFG_WITH_PAGER));
	assert(desc && desc->mem_start && desc->mem_end);
	assert(IS_POWER_OF_TWO(align) && !(len % align));
	va = ROUNDUP2(desc->mem_start, align);
	if (ADD_OVERFLOW(va, len, &ve))
		panic();
	if (ve > desc->mem_end)
		panic();
	desc->mem_start = ve;
	return (void *)va;
}
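
/*
 * boot_mem_init() - initialize the boot memory pool. @start..@end is
 * the initially free range and @orig_end records the original top of
 * the pool; anything between @end and @orig_end is treated as already
 * used temporary memory. The descriptor and the first relocation array
 * are bootstrapped from the pool itself.
 */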
void boot_mem_init(vaddr_t start, vaddr_t end, vaddr_t orig_end)
{
	struct boot_mem_desc desc = {
		.orig_mem_start = start,
		.orig_mem_end = orig_end,
		.mem_start = start,
		.mem_end = end,
	};

	/* Bootstrap: the descriptor itself is stored in the pool it describes */
	boot_mem_desc = mem_alloc_tmp(&desc, sizeof(desc), alignof(desc));
	*boot_mem_desc = desc;
	boot_mem_desc->reloc = mem_alloc_tmp(boot_mem_desc,
					     sizeof(*boot_mem_desc->reloc),
					     alignof(*boot_mem_desc->reloc));
	memset(boot_mem_desc->reloc, 0, sizeof(*boot_mem_desc->reloc));
}
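
/*
 * A minimal usage sketch of the API below (hypothetical values, the
 * real call sites live in the boot flow):
 *
 *	boot_mem_init(start, end, orig_end);
 *	p = boot_mem_alloc(len, align);
 *	boot_mem_add_reloc(&p);
 *	boot_mem_relocate(offs);	// once the pool has moved by @offs
 *	boot_mem_release_unused();
 *	boot_mem_release_tmp_alloc();
 */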

void boot_mem_add_reloc(void *ptr)
{
	struct boot_mem_reloc *reloc = NULL;

	assert(boot_mem_desc && boot_mem_desc->reloc);
	reloc = boot_mem_desc->reloc;

	/* If the reloc struct is full, allocate a new one and link it first */
	if (reloc->count == ARRAY_SIZE(reloc->ptrs)) {
		reloc = boot_mem_alloc_tmp(sizeof(*reloc), alignof(*reloc));
		/* Zero the new struct, the pool memory isn't cleared for us */
		memset(reloc, 0, sizeof(*reloc));
		reloc->next = boot_mem_desc->reloc;
		boot_mem_desc->reloc = reloc;
	}

	reloc->ptrs[reloc->count] = ptr;
	reloc->count++;
}

static void *add_offs(void *p, size_t offs)
{
	assert(p);
	return (uint8_t *)p + offs;
}

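/*
 * boot_mem_relocate() - add @offs to the descriptor and to every
 * registered pointer: both the cell recorded in @ptrs and the address
 * stored in that cell are moved. Used when the boot memory pool has
 * been remapped @offs bytes away from its old virtual address.
 */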
void boot_mem_relocate(size_t offs)
{
	struct boot_mem_reloc *reloc = NULL;
	size_t n = 0;

	boot_mem_desc = add_offs(boot_mem_desc, offs);

	boot_mem_desc->orig_mem_start += offs;
	boot_mem_desc->orig_mem_end += offs;
	boot_mem_desc->mem_start += offs;
	boot_mem_desc->mem_end += offs;
	boot_mem_desc->reloc = add_offs(boot_mem_desc->reloc, offs);

	for (reloc = boot_mem_desc->reloc;; reloc = reloc->next) {
		for (n = 0; n < reloc->count; n++) {
			reloc->ptrs[n] = add_offs(reloc->ptrs[n], offs);
			*reloc->ptrs[n] = add_offs(*reloc->ptrs[n], offs);
		}
		if (!reloc->next)
			break;
		reloc->next = add_offs(reloc->next, offs);
	}
}

void *boot_mem_alloc(size_t len, size_t align)
{
	return mem_alloc(boot_mem_desc, len, align);
}

void *boot_mem_alloc_tmp(size_t len, size_t align)
{
	return mem_alloc_tmp(boot_mem_desc, len, align);
}
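
/*
 * Pool layout when boot_mem_release_unused() runs (addresses grow to
 * the right):
 *
 * orig_mem_start   mem_start              mem_end      orig_mem_end
 * |== permanent ==|........ unused .......|== temporary ==|
 *
 * The permanent and temporary ranges are re-registered as physical
 * memory allocations and the unused pages in between are unmapped.
 */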
vaddr_t boot_mem_release_unused(void)
{
	tee_mm_entry_t *mm = NULL;
	paddr_t pa = 0;
	vaddr_t va = 0;
	size_t n = 0;
	vaddr_t tmp_va = 0;
	paddr_t tmp_pa = 0;
	size_t tmp_n = 0;

	assert(boot_mem_desc);

	n = boot_mem_desc->mem_start - boot_mem_desc->orig_mem_start;
	DMSG("Allocated %zu bytes at va %#"PRIxVA" pa %#"PRIxPA,
	     n, boot_mem_desc->orig_mem_start,
	     vaddr_to_phys(boot_mem_desc->orig_mem_start));

	DMSG("Tempalloc %zu bytes at va %#"PRIxVA,
	     (size_t)(boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end),
	     boot_mem_desc->mem_end);

	if (IS_ENABLED(CFG_WITH_PAGER))
		goto out;

	pa = vaddr_to_phys(ROUNDUP(boot_mem_desc->orig_mem_start,
				   SMALL_PAGE_SIZE));
	mm = nex_phys_mem_mm_find(pa);
	if (!mm)
		panic();

	va = ROUNDUP(boot_mem_desc->mem_start, SMALL_PAGE_SIZE);

	tmp_va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
	tmp_n = boot_mem_desc->orig_mem_end - tmp_va;
	tmp_pa = vaddr_to_phys(tmp_va);

	pa = tee_mm_get_smem(mm);
	n = vaddr_to_phys(boot_mem_desc->mem_start) - pa;
	tee_mm_free(mm);

	/* Carve out the permanent allocations at the bottom of the pool */
	DMSG("Carving out %#"PRIxPA"..%#"PRIxPA, pa, pa + n - 1);
	mm = nex_phys_mem_alloc2(pa, n);
	if (!mm)
		panic();
	/* Carve out the temporary allocations at the top of the pool */
	mm = nex_phys_mem_alloc2(tmp_pa, tmp_n);
	if (!mm)
		panic();

	n = tmp_va - boot_mem_desc->mem_start;
	DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);

	/* Unmap the now unused pages */
	core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);

out:
	/* Stop further allocations. */
	boot_mem_desc->mem_start = boot_mem_desc->mem_end;
	return va;
}
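
/*
 * boot_mem_release_tmp_alloc() - release the pages that held the
 * temporary allocations. With CFG_WITH_PAGER the range is only logged
 * and the descriptor dropped, otherwise the pages are also unmapped.
 */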
void boot_mem_release_tmp_alloc(void)
{
	tee_mm_entry_t *mm = NULL;
	vaddr_t va = 0;
	paddr_t pa = 0;
	size_t n = 0;

	assert(boot_mem_desc &&
	       boot_mem_desc->mem_start == boot_mem_desc->mem_end);

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		n = boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end;
		va = boot_mem_desc->mem_end;
		boot_mem_desc = NULL;
		DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);
		return;
	}

	va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
	pa = vaddr_to_phys(va);

	mm = nex_phys_mem_mm_find(pa);
	if (!mm)
		panic();
	assert(pa == tee_mm_get_smem(mm));
	n = tee_mm_get_bytes(mm);

	/* Boot memory allocation is now done */
	boot_mem_desc = NULL;

	DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);

	/* Unmap the now unused pages */
	core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);
}