// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2024, Linaro Limited
 */

#include <assert.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <malloc.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <string.h>
#include <types_ext.h>
#include <util.h>

static tee_mm_pool_t *nex_core_pool __nex_bss;
static tee_mm_pool_t *nex_ta_pool __nex_bss;

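/*
 * Allocate and initialize a pool covering [b, b + sz). A pool where both
 * base and size are zero is treated as absent. A zero or misaligned base
 * or size is a fatal configuration error.
 */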
static tee_mm_pool_t *init_pool(paddr_t b, paddr_size_t sz, uint32_t flags)
{
	tee_mm_pool_t *pool = NULL;

	if (!b && !sz)
		return NULL;

	if (!b || (b & CORE_MMU_USER_CODE_MASK) ||
	    !sz || (sz & CORE_MMU_USER_CODE_MASK))
		panic("invalid phys mem");

	if (flags & TEE_MM_POOL_NEX_MALLOC)
		pool = nex_malloc(sizeof(*pool));
	else
		pool = malloc(sizeof(*pool));
	if (!pool)
		panic();

	if (!tee_mm_init(pool, b, sz, CORE_MMU_USER_CODE_SHIFT, flags))
		panic();

	return pool;
}

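/*
 * Initialize the nexus-wide pools. The assert guards against double
 * initialization; the pools are allocated with nex_malloc() so that with
 * CFG_NS_VIRTUALIZATION they live in nexus memory shared by all
 * partitions.
 */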
void nex_phys_mem_init(paddr_t core_base, paddr_size_t core_size,
		       paddr_t ta_base, paddr_size_t ta_size)
{
	uint32_t flags = TEE_MM_POOL_NEX_MALLOC;

	assert(!nex_core_pool && !nex_ta_pool);

	nex_core_pool = init_pool(core_base, core_size, flags);
	nex_ta_pool = init_pool(ta_base, ta_size, flags);
}

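/*
 * Without a dedicated TA pool, TA memory is carved out of the core pool,
 * minus the TEE_RAM_VA_SIZE bytes used by the core itself.
 */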
paddr_size_t nex_phys_mem_get_ta_size(void)
{
	if (nex_ta_pool)
		return nex_ta_pool->size;
	assert(nex_core_pool);
	return nex_core_pool->size - TEE_RAM_VA_SIZE;
}

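/* Fall back to the base of the core pool when there's no TA pool. */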
paddr_t nex_phys_mem_get_ta_base(void)
{
	if (nex_ta_pool)
		return nex_ta_pool->lo;
	assert(nex_core_pool);
	return nex_core_pool->lo;
}

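/*
 * Pool pair helpers: a lookup is directed to whichever of the two pools
 * covers the address, if any.
 */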
static bool is_in_pool_range(tee_mm_pool_t *pool, paddr_t addr)
{
	return pool && core_is_buffer_inside(addr, 1, pool->lo, pool->size);
}

static tee_mm_entry_t *mm_find(tee_mm_pool_t *p0, tee_mm_pool_t *p1,
			       paddr_t addr)
{
	if (is_in_pool_range(p0, addr))
		return tee_mm_find(p0, addr);
	if (is_in_pool_range(p1, addr))
		return tee_mm_find(p1, addr);
	return NULL;
}

tee_mm_entry_t *nex_phys_mem_mm_find(paddr_t addr)
{
	return mm_find(nex_core_pool, nex_ta_pool, addr);
}

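/*
 * Allocate from the preferred pool p0 first, falling back to p1 if p0 is
 * absent or exhausted.
 */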
static tee_mm_entry_t *mm_alloc(tee_mm_pool_t *p0, tee_mm_pool_t *p1,
				size_t size)
{
	tee_mm_entry_t *mm = NULL;

	if (p0)
		mm = tee_mm_alloc(p0, size);
	if (!mm && p1)
		mm = tee_mm_alloc(p1, size);

	return mm;
}

tee_mm_entry_t *nex_phys_mem_core_alloc(size_t size)
{
	return mm_alloc(nex_core_pool, NULL, size);
}

tee_mm_entry_t *nex_phys_mem_ta_alloc(size_t size)
{
	return mm_alloc(nex_ta_pool, nex_core_pool, size);
}

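/*
 * Allocate at a fixed physical address by routing the request to the
 * pool covering the requested base address, if any.
 */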
static tee_mm_entry_t *mm_alloc2(tee_mm_pool_t *p0, tee_mm_pool_t *p1,
				 paddr_t base, size_t size)
{
	if (is_in_pool_range(p0, base))
		return tee_mm_alloc2(p0, base, size);
	if (is_in_pool_range(p1, base))
		return tee_mm_alloc2(p1, base, size);
	return NULL;
}

tee_mm_entry_t *nex_phys_mem_alloc2(paddr_t base, size_t size)
{
	return mm_alloc2(nex_core_pool, nex_ta_pool, base, size);
}

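/*
 * Reserve the part of [base, base + size) that intersects the pool so
 * that it can't be handed out by later allocations. The range may extend
 * beyond the pool; only the overlapping part is carved out.
 */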
static void partial_carve_out(tee_mm_pool_t *pool, paddr_t base, size_t size)
{
	if (pool &&
	    core_is_buffer_intersect(base, size, pool->lo, pool->size)) {
		tee_mm_entry_t *mm __maybe_unused = NULL;
		paddr_t end_pa = 0;
		paddr_t pa = 0;
		size_t sz = 0;

		pa = MAX(base, pool->lo);
		end_pa = MIN(base + size - 1, pool->lo + pool->size - 1);
		sz = end_pa - pa + 1;

		mm = tee_mm_alloc2(pool, pa, sz);
		assert(mm);
	}
}

void nex_phys_mem_partial_carve_out(paddr_t base, size_t size)
{
	partial_carve_out(nex_core_pool, base, size);
	partial_carve_out(nex_ta_pool, base, size);
}

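/*
 * Illustrative use only, with made-up addresses: a platform that must
 * keep a firmware region at 0x3f000000..0x3f0fffff out of both nexus
 * pools could call:
 *
 *	nex_phys_mem_partial_carve_out(0x3f000000, 0x100000);
 */
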
#ifdef CFG_WITH_STATS
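/*
 * Accumulate the statistics of one pool into *stats: size and allocated
 * bytes are summed while max_allocated keeps the largest value seen in
 * any single pool.
 */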
static void add_pool_stats(tee_mm_pool_t *pool, struct pta_stats_alloc *stats,
			   bool reset)
{
	if (pool) {
		struct pta_stats_alloc s = { };

		tee_mm_get_pool_stats(pool, &s, reset);
		stats->size += s.size;
		if (s.max_allocated > stats->max_allocated)
			stats->max_allocated = s.max_allocated;
		stats->allocated += s.allocated;
	}
}

void nex_phys_mem_stats(struct pta_stats_alloc *stats, bool reset)
{
	memset(stats, 0, sizeof(*stats));

	add_pool_stats(nex_core_pool, stats, reset);
	add_pool_stats(nex_ta_pool, stats, reset);
}
#endif /*CFG_WITH_STATS*/

#if defined(CFG_NS_VIRTUALIZATION)

static tee_mm_pool_t *core_pool;
static tee_mm_pool_t *ta_pool;

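/*
 * Initialize the per-partition pools. Unlike the nexus pools above,
 * these are allocated with plain malloc() and thus belong to the current
 * partition.
 */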
void phys_mem_init(paddr_t core_base, paddr_size_t core_size,
		   paddr_t ta_base, paddr_size_t ta_size)
{
	uint32_t flags = TEE_MM_POOL_NO_FLAGS;

	assert(!core_pool && !ta_pool);

	core_pool = init_pool(core_base, core_size, flags);
	ta_pool = init_pool(ta_base, ta_size, flags);
}

tee_mm_entry_t *phys_mem_mm_find(paddr_t addr)
{
	return mm_find(core_pool, ta_pool, addr);
}

tee_mm_entry_t *phys_mem_core_alloc(size_t size)
{
	/*
	 * With CFG_NS_VIRTUALIZATION all memory is equally secure, so one
	 * pool should normally be enough. If two pools are configured,
	 * make sure both are used even for core allocations.
	 */
	return mm_alloc(core_pool, ta_pool, size);
}

tee_mm_entry_t *phys_mem_ta_alloc(size_t size)
{
	return mm_alloc(ta_pool, core_pool, size);
}

tee_mm_entry_t *phys_mem_alloc2(paddr_t base, size_t size)
{
	return mm_alloc2(core_pool, ta_pool, base, size);
}

#ifdef CFG_WITH_STATS
void phys_mem_stats(struct pta_stats_alloc *stats, bool reset)
{
	memset(stats, 0, sizeof(*stats));

	add_pool_stats(core_pool, stats, reset);
	add_pool_stats(ta_pool, stats, reset);
}
#endif /*CFG_WITH_STATS*/
#endif /*CFG_NS_VIRTUALIZATION*/