/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-09-07     zmshahaha    the first version
 */

#include "mm_memblock.h"
#include "mm_page.h"
#include "mm_aspace.h"
#include <mmu.h>

#define DBG_TAG "mm.memblock"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#define PHYS_ADDR_MAX (~((rt_size_t)0))

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

#ifdef ARCH_CPU_64BIT
#define MIN_BIT 16
#else
#define MIN_BIT 8
#endif

#ifndef RT_INIT_MEMORY_REGIONS
#define RT_INIT_MEMORY_REGIONS 128
#endif

static struct rt_mmblk_reg _regions[RT_INIT_MEMORY_REGIONS];
static int _hint_idx;

static struct rt_memblock mmblk_memory;
static struct rt_memblock mmblk_reserved;

struct rt_memblock *rt_memblock_get_memory(void)
{
    return &mmblk_memory;
}

struct rt_memblock *rt_memblock_get_reserved(void)
{
    return &mmblk_reserved;
}

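/* return the region following @prev in its list, or RT_NULL if @prev is the tail */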
rt_inline struct rt_mmblk_reg *_next_region(struct rt_mmblk_reg *prev)
{
    if (prev && prev->node.next)
    {
        return rt_slist_entry(prev->node.next, struct rt_mmblk_reg, node);
    }
    else
    {
        return RT_NULL;
    }
}

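/*
 * Take a free slot from the static _regions[] pool and link it into the list
 * right after @prev. _hint_idx records where the last allocation happened, so
 * the scan usually skips slots that are already in use before wrapping around.
 */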
static struct rt_mmblk_reg *_alloc_memreg(struct rt_mmblk_reg *prev)
{
    for (int i = _hint_idx; i < RT_INIT_MEMORY_REGIONS; i++)
    {
        if (_regions[i].alloc == RT_FALSE)
        {
            rt_slist_insert(&(prev->node), &(_regions[i].node));
            _regions[i].alloc = RT_TRUE;
            _hint_idx = i + 1;
            return &_regions[i];
        }
    }

    for (int i = 0; i < _hint_idx; i++)
    {
        if (_regions[i].alloc == RT_FALSE)
        {
            rt_slist_insert(&(prev->node), &(_regions[i].node));
            _regions[i].alloc = RT_TRUE;
            _hint_idx = i + 1;
            return &_regions[i];
        }
    }

    return RT_NULL;
}

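/* unlink the region following @prev and return its slot to the static pool */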
static void _free_memreg(struct rt_mmblk_reg *prev)
{
    struct rt_mmblk_reg *next = _next_region(prev);

    next->alloc = RT_FALSE;
    rt_slist_remove(&(prev->node), prev->node.next);
}

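/*
 * Allocate a region slot after @prev and fill it with @reg and @flags.
 * Returns -RT_ENOMEM when the static pool is exhausted.
 */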
static rt_err_t _reg_insert_after(struct rt_mmblk_reg *prev, rt_region_t *reg,
                                  mmblk_flag_t flags)
{
    struct rt_mmblk_reg *new_reg = _alloc_memreg(prev);

    if (!new_reg)
    {
        LOG_E("Not enough region slots");
        return -RT_ENOMEM;
    }

    rt_memcpy(&(new_reg->memreg), reg, sizeof(*reg));
    new_reg->flags = flags;
    return RT_EOK;
}

rt_inline void _reg_remove_after(struct rt_mmblk_reg *prev)
{
    _free_memreg(prev);
}

/* adding overlapping regions is not allowed */
static rt_err_t _memblock_add_range(struct rt_memblock *memblock,
                                    const char *name, rt_size_t start, rt_size_t end, mm_flag_t flag)
{
    struct rt_mmblk_reg *reg = RT_NULL, *reg_next = RT_NULL;
    rt_slist_t sentinel;
    rt_region_t new_region;

    if (start >= end)
        return -RT_EINVAL;

    sentinel.next = &(memblock->reg_list);

    /* find a suitable place */
    rt_slist_for_each_entry(reg, &sentinel, node)
    {
        reg_next = _next_region(reg);

        if (reg_next == RT_NULL)
            break;

        rt_size_t rstart = reg_next->memreg.start;
        rt_size_t rend = reg_next->memreg.end;

        /* no overlap */
        if (rstart >= end)
            break;
        if (rend <= start)
            continue;

        /* overlap */
        LOG_E("region to add %s: [%p-%p) overlaps with existing region %s: [%p-%p)",
              name, start, end, reg_next->memreg.name, rstart, rend);
        return -RT_EINVAL;
    }

    /* insert the region */
    new_region.name = name;
    new_region.start = start;
    new_region.end = end;
    return _reg_insert_after(reg, &new_region, flag);
}

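/*
 * Register a range of physical memory [start, end) with the memblock
 * allocator. This is typically called from early platform/board bring-up
 * code, before the page allocator is set up. Illustrative usage only; the
 * names and addresses below are hypothetical and normally come from the
 * board description or bootloader:
 *
 *   rt_memblock_add_memory("sdram", 0x40000000, 0x48000000, MEMBLOCK_NONE);
 *   rt_memblock_reserve_memory("firmware", 0x40000000, 0x40100000, MEMBLOCK_NONE);
 */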
rt_err_t rt_memblock_add_memory(const char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
{
    LOG_D("add physical address range %s [0x%.*lx-0x%.*lx) with flag 0x%x"
          " to overall memory regions\n", name, MIN_BIT, start, MIN_BIT, end, flags);

    return _memblock_add_range(&mmblk_memory, name, start, end, flags);
}

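/*
 * Mark the physical range [start, end) as reserved. Reserved ranges are
 * skipped when the remaining free memory is installed into the page
 * allocator by rt_memblock_setup_memory_environment().
 */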
rt_err_t rt_memblock_reserve_memory(const char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
{
    LOG_D("add physical address range %s [0x%.*lx-0x%.*lx) to reserved memory regions\n",
          name, MIN_BIT, start, MIN_BIT, end);

    return _memblock_add_range(&mmblk_reserved, name, start, end, flags);
}

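/*
 * Split the regions of @memblock so that region boundaries fall exactly on
 * @start and @end. A region straddling either boundary is cut in two; both
 * halves keep the original name and flags.
 */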
/* [*start_reg, *end_reg) is the isolated range */
static rt_err_t _memblock_separate_range(struct rt_memblock *memblock,
                                         rt_size_t start, rt_size_t end,
                                         struct rt_mmblk_reg **start_reg, struct rt_mmblk_reg **end_reg)
{
    struct rt_mmblk_reg *reg = RT_NULL;
    rt_region_t new_region;
    rt_err_t err = RT_EOK;

    *start_reg = *end_reg = RT_NULL;

    rt_slist_for_each_entry(reg, &(memblock->reg_list), node)
    {
        rt_size_t rstart = reg->memreg.start;
        rt_size_t rend = reg->memreg.end;

        if (rstart >= end)
            break;
        if (rend <= start)
            continue;

        /* the beginning of the range separates its respective region */
        if (rstart < start)
        {
            new_region.start = start;
            new_region.end = rend;
            new_region.name = reg->memreg.name;
            err = _reg_insert_after(reg, &new_region, reg->flags);

            if (err != RT_EOK)
                return err;

            reg->memreg.end = start;

            *start_reg = _next_region(reg);
            *end_reg = _next_region(*start_reg);
        }
        /* the endpoint of the range separates its respective region */
        else if (rend > end)
        {
            new_region.start = end;
            new_region.end = rend;
            new_region.name = reg->memreg.name;
            err = _reg_insert_after(reg, &new_region, reg->flags);

            if (err != RT_EOK)
                return err;

            reg->memreg.end = end;

            *end_reg = _next_region(reg);
            break;
        }
        /* reg is fully contained in the range */
        else
        {
            if (!*end_reg)
                *start_reg = reg;
            *end_reg = _next_region(reg);
        }
    }

    return err;
}

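/* apply @flags to every region in [start_reg, end_reg) */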
static void _memblock_set_flag(struct rt_mmblk_reg *start_reg, struct rt_mmblk_reg *end_reg,
                               mmblk_flag_t flags)
{
    if (start_reg == RT_NULL)
        return;

    for (struct rt_mmblk_reg *iter = start_reg; iter != end_reg; iter = _next_region(iter))
    {
        iter->flags |= flags;
    }
}

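/*
 * Advance (*m, *r) to the next chunk of memory that carries @flags and is not
 * covered by any reserved region, returning its bounds in [*out_start, *out_end).
 * *m walks the memory regions while *r walks the gaps between reserved regions;
 * once every region has been visited, *m is left pointing at the list head so
 * the caller can detect completion.
 */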
static void _next_free_region(struct rt_mmblk_reg **m, struct rt_mmblk_reg **r, mmblk_flag_t flags,
                              rt_size_t *out_start, rt_size_t *out_end)
{
    /* memory related data */
    rt_size_t m_start = 0;
    rt_size_t m_end = 0;

    /* reserved related data */
    rt_size_t r_start = 0;
    rt_size_t r_end = 0;
    struct rt_mmblk_reg *r_sentinel = rt_slist_entry(&(mmblk_reserved.reg_list), struct rt_mmblk_reg, node);

    for (; *m != RT_NULL; *m = _next_region(*m))
    {
        if ((*m)->flags != flags)
            continue;

        m_start = (*m)->memreg.start;
        m_end = (*m)->memreg.end;

        for (; *r != RT_NULL; *r = _next_region(*r))
        {
            /*
             * *r starts at the reserved-list sentinel.
             * Walk the complement of the reserved memblock. For example,
             * if the reserved regions are:
             *
             *  0:[8-16), 1:[32-48), 2:[128-130)
             *
             * the gaps visited are:
             *
             *  0:[0-8), 1:[16-32), 2:[48-128), 3:[130-MAX)
             *
             * so free memory is found by intersecting with these gaps
             * rather than by excluding the reserved ranges.
             */
            r_start = (*r == r_sentinel) ? 0 : (*r)->memreg.end;
            r_end = (_next_region(*r)) ? _next_region(*r)->memreg.start : PHYS_ADDR_MAX;

            /* two reserved regions are adjacent */
            if (r_start == r_end)
                continue;

            if (r_start >= m_end)
                break;

            if (m_start < r_end)
            {
                *out_start = MAX(m_start, r_start);
                *out_end = MIN(m_end, r_end);

                if (m_end <= r_end)
                    *m = _next_region(*m);
                else
                    *r = _next_region(*r);
                return;
            }
        }
    }

    /* all regions found */
    *m = rt_slist_entry(&(mmblk_memory.reg_list), struct rt_mmblk_reg, node);
}


/* for each region in memory with flags and not reserved */
#define for_each_free_region(m, r, flags, p_start, p_end)                              \
    m = rt_slist_entry(&(mmblk_memory.reg_list.next), struct rt_mmblk_reg, node);      \
    r = rt_slist_entry(&(mmblk_reserved.reg_list), struct rt_mmblk_reg, node);         \
    for (_next_free_region(&m, &r, flags, p_start, p_end);                             \
         m != rt_slist_entry(&(mmblk_memory.reg_list), struct rt_mmblk_reg, node);     \
         _next_free_region(&m, &r, flags, p_start, p_end))

/* merge normal memory regions */
static void _memblock_merge_memory(void)
{
    struct rt_mmblk_reg *reg = RT_NULL;

    rt_slist_for_each_entry(reg, &(mmblk_memory.reg_list), node)
    {
        while (_next_region(reg) &&
               reg->flags == _next_region(reg)->flags &&
               reg->memreg.end == _next_region(reg)->memreg.start)
        {
            reg->memreg.end = _next_region(reg)->memreg.end;
            _reg_remove_after(reg);
        }
    }
}

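/*
 * Merge the registered memory regions, propagate the flags of reserved
 * ranges onto the overlapping memory regions, then map every remaining
 * free, page-aligned range into the kernel address space and hand it to
 * the page allocator.
 */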
void rt_memblock_setup_memory_environment(void)
{
    struct rt_mmblk_reg *iter = RT_NULL, *start_reg = RT_NULL, *end_reg = RT_NULL;
    rt_region_t reg = {0};
    rt_size_t mem = 0;
    struct rt_mmblk_reg *m, *r;
    void *err;

    _memblock_merge_memory();

    LOG_I("System memory:");

    rt_slist_for_each_entry(iter, &(mmblk_memory.reg_list), node)
    {
        LOG_I("  %-*s [0x%.*lx, 0x%.*lx]", RT_NAME_MAX, iter->memreg.name, MIN_BIT, iter->memreg.start, MIN_BIT, iter->memreg.end);
    }

    LOG_I("Reserved memory:");

    rt_slist_for_each_entry(iter, &(mmblk_reserved.reg_list), node)
    {
        LOG_I("  %-*s [0x%.*lx, 0x%.*lx]", RT_NAME_MAX, iter->memreg.name, MIN_BIT, iter->memreg.start, MIN_BIT, iter->memreg.end);

        if (iter->flags != MEMBLOCK_NONE)
        {
            _memblock_separate_range(&mmblk_memory, iter->memreg.start, iter->memreg.end, &start_reg, &end_reg);
            _memblock_set_flag(start_reg, end_reg, iter->flags);
        }
    }

    /* install usable memory to system page */
    for_each_free_region(m, r, MEMBLOCK_NONE, &reg.start, &reg.end)
    {
        reg.start = RT_ALIGN(reg.start, ARCH_PAGE_SIZE);
        reg.end = RT_ALIGN_DOWN(reg.end, ARCH_PAGE_SIZE);

        if (reg.start >= reg.end)
            continue;

        LOG_I("physical memory region [%p-%p] installed to system page", reg.start, reg.end);

        reg.start -= PV_OFFSET;
        reg.end -= PV_OFFSET;

        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_start = rt_kernel_space.start,
                                     .limit_range_size = rt_kernel_space.size,
                                     .map_size = reg.end - reg.start,
                                     .prefer = (void *)reg.start};

        rt_aspace_map_phy(&rt_kernel_space, &hint, MMU_MAP_K_RWCB, (reg.start + PV_OFFSET) >> MM_PAGE_SHIFT, &err);
        rt_page_install(reg);
        mem += reg.end - reg.start;
    }

    LOG_I("%ld MB memory installed to system page", mem / 1000000);
}

#ifdef UTEST_MM_API_TC
/* functions below are only used for utest */
void rt_memblock_merge(void)
{
    _memblock_merge_memory();
}

static struct rt_mmblk_reg *mem;
static struct rt_mmblk_reg *res;

void rt_memblock_next_free_region_init(void)
{
    mem = rt_slist_entry(&(mmblk_memory.reg_list.next), struct rt_mmblk_reg, node);
    res = rt_slist_entry(&(mmblk_reserved.reg_list), struct rt_mmblk_reg, node);
}

void rt_memblock_next_free_region(mmblk_flag_t flags, rt_size_t *out_start, rt_size_t *out_end)
{
    _next_free_region(&mem, &res, flags, out_start, out_end);
}

rt_bool_t rt_memblock_is_last_free(void)
{
    return mem == rt_slist_entry(&(mmblk_memory.reg_list), struct rt_mmblk_reg, node);
}

#endif /* UTEST_MM_API_TC */