1 /*
2 * Copyright (c) 2006-2022, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2019-11-01 Jesven The first version
9 * 2022-12-13 WangXiaoyao Hot-pluggable, extensible
10 * page management algorithm
11 * 2023-02-20 WangXiaoyao Multi-list page-management
12 * 2023-11-28 Shell Bugs fix for page_install on shadow region
13 * 2024-06-18 Shell Added affinity page management for page coloring.
14 */
15 #include <rtthread.h>
16
17 #include <stddef.h>
18 #include <stdint.h>
19 #include <string.h>
20
21 #include "mm_fault.h"
22 #include "mm_private.h"
23 #include "mm_aspace.h"
24 #include "mm_flag.h"
25 #include "mm_page.h"
26 #include <mmu.h>
27
28 #define DBG_TAG "mm.page"
29 #define DBG_LVL DBG_WARNING
30 #include <rtdbg.h>
31
32 RT_STATIC_ASSERT(order_huge_pg, RT_PAGE_MAX_ORDER > ARCH_PAGE_SHIFT - 2);
33 RT_STATIC_ASSERT(size_width, sizeof(rt_size_t) == sizeof(void *));
34
35 #ifdef RT_USING_SMART
36 #include "lwp_arch_comm.h"
37 #endif /* RT_USING_SMART */
38
39 static rt_size_t init_mpr_align_start;
40 static rt_size_t init_mpr_align_end;
41 static void *init_mpr_cont_start;
42
43 static struct rt_varea mpr_varea;
44
45 typedef union
46 {
47 struct rt_page *page_list;
48 rt_ubase_t aff_page_map;
49 } pgls_agr_t;
50
51 #define PGLS_IS_AFF_MAP(pgls) (!!((pgls).aff_page_map & 0x1))
52 #define PGLS_FROM_AFF_MAP(pgls, aff_map) \
53 ((pgls).aff_page_map = (-(rt_ubase_t)(aff_map)) | 0x1)
54 #define PGLS_GET_AFF_MAP(pgls) \
55 ((struct rt_page **)-((pgls).aff_page_map & ~0x1))
56 #define PGLS_GET(pgls) \
57 (PGLS_IS_AFF_MAP(pgls) ? PGLS_GET_AFF_MAP(pgls) : (pgls).page_list)
58 #define PAGE_TO_AFFID(page) (RT_PAGE_PICK_AFFID(page_to_paddr(page)))
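/*
 * pgls_agr_t is a tagged union of free-list heads: for orders below
 * AFFID_BLK_BITS it holds a pointer to an array of per-affinity-id lists
 * (aff_page_map); for larger orders it holds a single page_list head. The
 * affinity-map pointer is stored negated with bit 0 set, so PGLS_IS_AFF_MAP()
 * can distinguish it from a plain list head, which is an aligned pointer
 * (or NULL) with bit 0 clear.
 */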
59
60 /* affinity id */
61 #define AFFID_BLK_BITS \
62 ((sizeof(int) * 8 - 1) - __builtin_clz(RT_PAGE_AFFINITY_BLOCK_SIZE) - ARCH_PAGE_SHIFT)
63 #define AFFID_NUMOF_ID_IN_SET(order) \
64 ((RT_PAGE_AFFINITY_BLOCK_SIZE / ARCH_PAGE_SIZE) / (1ul << (order)))
65 #define AFFID_BITS_MASK(order) \
66 (((1 << AFFID_BLK_BITS) - 1) - ((1 << (order)) - 1))
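/*
 * Worked example (illustrative, assuming RT_PAGE_AFFINITY_BLOCK_SIZE == 64 KiB
 * and 4 KiB pages): AFFID_BLK_BITS == 4, so one affinity block spans 16 pages,
 * giving AFFID_NUMOF_ID_IN_SET(0) == 16 affinity ids for order-0 pages, 8 for
 * order-1, and so on. That is also why the aff_pglist_* arrays below hold
 * 16 * 2 - 2 == 30 list heads: 16 + 8 + 4 + 2 for orders 0..3.
 */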
67
68 static pgls_agr_t page_list_low[RT_PAGE_MAX_ORDER];
69 static rt_page_t
70 aff_pglist_low[AFFID_NUMOF_ID_IN_SET(0) * 2 - 2];
71 static pgls_agr_t page_list_high[RT_PAGE_MAX_ORDER];
72 static rt_page_t
73 aff_pglist_high[AFFID_NUMOF_ID_IN_SET(0) * 2 - 2];
74
75 /* protect buddy list and page records */
76 static RT_DEFINE_SPINLOCK(_pgmgr_lock);
77
78 #define page_start ((rt_page_t)rt_mpr_start)
79
80 static rt_size_t _page_nr, _page_nr_hi;
81 static rt_size_t _freed_nr, _freed_nr_hi;
82 static rt_size_t early_offset;
83
static const char *get_name(rt_varea_t varea)
85 {
86 return "master-page-record";
87 }
88
static void hint_free(rt_mm_va_hint_t hint)
90 {
91 hint->flags = MMF_MAP_FIXED;
92 hint->limit_start = rt_kernel_space.start;
93 hint->limit_range_size = rt_kernel_space.size;
94 hint->prefer = rt_mpr_start;
95 }
96
static void on_page_fault(struct rt_varea *varea,
                          struct rt_aspace_fault_msg *msg)
99 {
100 char *init_start = (void *)init_mpr_align_start;
101 char *init_end = (void *)init_mpr_align_end;
102 if ((char *)msg->fault_vaddr < init_end &&
103 (char *)msg->fault_vaddr >= init_start)
104 {
105 rt_size_t offset = (char *)msg->fault_vaddr - init_start;
106 msg->response.status = MM_FAULT_STATUS_OK;
107 msg->response.vaddr = (char *)init_mpr_cont_start + offset;
108 msg->response.size = ARCH_PAGE_SIZE;
109 }
110 else
111 {
112 rt_mm_dummy_mapper.on_page_fault(varea, msg);
113 }
114 }
115
116 static struct rt_mem_obj mm_page_mapper = {
117 .get_name = get_name,
118 .on_page_fault = on_page_fault,
119 .hint_free = hint_free,
120 };
121
122 #ifdef RT_DEBUGGING_PAGE_LEAK
123 static volatile int enable;
124 static rt_page_t _trace_head;
125 #define TRACE_ALLOC(pg, size) _trace_alloc(pg, __builtin_return_address(0), size)
126 #define TRACE_FREE(pgaddr, size) _trace_free(pgaddr, __builtin_return_address(0), size)
127
128 static long _alloc_cnt;
129
void rt_page_leak_trace_start()
131 {
132 // TODO multicore safety
133 _trace_head = NULL;
134 _alloc_cnt = 0;
135 enable = 1;
136 }
137 MSH_CMD_EXPORT(rt_page_leak_trace_start, start page leak tracer);
138
static void _collect()
140 {
141 rt_page_t page = _trace_head;
142 if (!page)
143 {
144 rt_kprintf("ok! ALLOC CNT %ld\n", _alloc_cnt);
145 }
146 else
147 {
148 while (page)
149 {
150 rt_page_t next = page->tl_next;
151 void *pg_va = rt_page_page2addr(page);
152 LOG_W("LEAK: %p, allocator: %p, size bits: %lx", pg_va, page->caller, page->trace_size);
153 rt_pages_free(pg_va, page->trace_size);
154 page = next;
155 }
156 }
157 }
158
void rt_page_leak_trace_stop()
160 {
161 // TODO multicore safety
162 enable = 0;
163 _collect();
164 }
165 MSH_CMD_EXPORT(rt_page_leak_trace_stop, stop page leak tracer);
166
static void _trace_alloc(rt_page_t page, void *caller, size_t size_bits)
168 {
169 if (enable)
170 {
171 page->caller = caller;
172 page->trace_size = size_bits;
173 page->tl_prev = NULL;
174 page->tl_next = NULL;
175
176 _alloc_cnt++;
177 if (_trace_head == NULL)
178 {
179 _trace_head = page;
180 }
181 else
182 {
183 _trace_head->tl_prev = page;
184 page->tl_next = _trace_head;
185 _trace_head = page;
186 }
187 }
188 }
189
void _report(rt_page_t page, size_t size_bits, char *msg)
191 {
192 void *pg_va = rt_page_page2addr(page);
193 LOG_W("%s: %p, allocator: %p, size bits: %lx", msg, pg_va, page->caller, page->trace_size);
194 rt_kprintf("backtrace\n");
195 rt_backtrace();
196 }
197
static void _trace_free(rt_page_t page, void *caller, size_t size_bits)
199 {
200 if (enable)
201 {
202 /* free after free */
203 if (page->trace_size == 0xabadcafe)
204 {
205 _report(page, size_bits, "free after free");
206 return ;
207 }
208 else if (page->trace_size != size_bits)
209 {
210 rt_kprintf("free with size bits %lx\n", size_bits);
211 _report(page, size_bits, "incompatible size bits parameter");
212 return ;
213 }
214
215 if (page->ref_cnt == 0)
216 {
217 _alloc_cnt--;
218 if (page->tl_prev)
219 page->tl_prev->tl_next = page->tl_next;
220 if (page->tl_next)
221 page->tl_next->tl_prev = page->tl_prev;
222
223 if (page == _trace_head)
224 _trace_head = page->tl_next;
225
226 page->tl_prev = NULL;
227 page->tl_next = NULL;
228 page->trace_size = 0xabadcafe;
229 }
230 }
231 }
232 #else
233 #define TRACE_ALLOC(x, y)
234 #define TRACE_FREE(x, y)
235 #endif
236
237 /* page management */
238 #ifdef RT_DEBUGGING_PAGE_POISON
239 #include <bitmap.h>
240 RT_BITMAP_DECLARE(_init_region_usage_trace, (1 << (1 + ARCH_SECTION_SHIFT - ARCH_PAGE_SHIFT)));
241 #else
242 typedef char rt_bitmap_t[0];
243 #define RT_BITMAP_LEN(__name) (__name)
244 #endif /* RT_DEBUGGING_PAGE_POISON */
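/*
 * Without page poisoning, rt_bitmap_t is a zero-sized type and RT_BITMAP_LEN()
 * is an identity macro, so the usage-trace storage appended to
 * struct installed_page_reg in rt_page_install() costs nothing.
 */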
245
246 static struct installed_page_reg
247 {
248 rt_region_t region_area;
249 struct installed_page_reg *next;
250 struct rt_spinlock lock;
251 #ifdef RT_DEBUGGING_PAGE_POISON
252 rt_bitmap_t *usage_trace;
253 #endif /* RT_DEBUGGING_PAGE_POISON */
254 } _init_region;
255
256 static RT_DEFINE_SPINLOCK(_inst_page_reg_lock);
257 static struct installed_page_reg *_inst_page_reg_head;
258
static void _print_region_list(void)
260 {
261 struct installed_page_reg *iter;
262 int counts = 0;
263
264 rt_spin_lock(&_inst_page_reg_lock);
265 iter = _inst_page_reg_head;
266 while (iter != RT_NULL)
267 {
268 rt_kprintf(" %d: [%p, %p]\n", counts++, iter->region_area.start + PV_OFFSET,
269 iter->region_area.end + PV_OFFSET);
270 iter = iter->next;
271 }
272 rt_spin_unlock(&_inst_page_reg_lock);
273 }
274
static struct installed_page_reg *_find_page_region(rt_ubase_t page_va)
276 {
277 struct installed_page_reg *iter;
278 struct installed_page_reg *rc = RT_NULL;
279 rt_bool_t found = RT_FALSE;
280
281 rt_spin_lock(&_inst_page_reg_lock);
282 for (iter = _inst_page_reg_head; iter; iter = iter->next)
283 {
284 if (page_va >= iter->region_area.start &&
285 page_va < iter->region_area.end)
286 {
287 found = RT_TRUE;
288 break;
289 }
290 }
291 rt_spin_unlock(&_inst_page_reg_lock);
292
293 if (found)
294 {
295 rc = iter;
296 }
297 return rc;
298 }
299
rt_bool_t rt_page_is_member(rt_base_t page_pa)
301 {
302 return _find_page_region(page_pa - PV_OFFSET) != RT_NULL;
303 }
304
static rt_bool_t _pages_are_member(rt_ubase_t page_va, size_t size_bits)
306 {
307 rt_bool_t rc = RT_TRUE;
308 rt_ubase_t iter_frame = page_va;
    size_t frame_end = page_va + (1ul << (size_bits + ARCH_PAGE_SHIFT));
310
311 while (iter_frame < frame_end)
312 {
313 size_t overlap_size;
314 struct installed_page_reg *page_reg = _find_page_region(iter_frame);
315
316 if (!page_reg)
317 {
318 rc = RT_FALSE;
319 LOG_E("Allocated invalid page %p", iter_frame);
320 break;
321 }
322
323 overlap_size = page_reg->region_area.end - iter_frame;
324 iter_frame += overlap_size;
325 }
326
327 return rc;
328 }
329
330 #ifdef RT_DEBUGGING_PAGE_POISON
static rt_err_t _unpoisoned_pages(char *head, rt_uint32_t size_bits)
332 {
333 rt_err_t error = RT_EOK;
334 struct installed_page_reg *page_reg = _find_page_region((rt_ubase_t)head);
335
336 if (page_reg)
337 {
338 int pages_count = 1 << size_bits;
339 long bit_number = ((rt_ubase_t)head - page_reg->region_area.start) / ARCH_PAGE_SIZE;
340
341 /* mark the pages as allocated */
342 for (size_t i = 0; i < pages_count; i++, bit_number++)
343 {
344 rt_spin_lock(&_inst_page_reg_lock);
345 if (rt_bitmap_test_bit(page_reg->usage_trace, bit_number))
346 {
347 error = RT_ERROR;
348 rt_kprintf("%s: Pages[%p, %d] is already in used by others!\n", __func__, head, size_bits);
349 }
350 rt_bitmap_set_bit(page_reg->usage_trace, bit_number);
351 rt_spin_unlock(&_inst_page_reg_lock);
352 }
353 }
354 else
355 {
356 error = RT_EINVAL;
357 }
358
359 return -error;
360 }
361
static rt_err_t _poisoned_pages(char *head, rt_uint32_t size_bits)
363 {
364 rt_err_t error = RT_EOK;
365 struct installed_page_reg *page_reg = _find_page_region((rt_ubase_t)head);
366
367 if (page_reg)
368 {
369 int pages_count = 1 << size_bits;
370 long bit_number = ((rt_ubase_t)head - page_reg->region_area.start) / ARCH_PAGE_SIZE;
371
372 /* mark the pages as free */
373 for (size_t i = 0; i < pages_count; i++, bit_number++)
374 {
375 rt_spin_lock(&_inst_page_reg_lock);
376 if (!rt_bitmap_test_bit(page_reg->usage_trace, bit_number))
377 {
378 error = RT_ERROR;
379 rt_kprintf("%s: Pages[%p, %d] is freed before!\n", __func__, head, size_bits);
380 }
381 rt_bitmap_clear_bit(page_reg->usage_trace, bit_number);
382 rt_spin_unlock(&_inst_page_reg_lock);
383 }
384 }
385 else
386 {
387 error = RT_EINVAL;
388 }
389
390 return -error;
391 }
392
393 #endif /* RT_DEBUGGING_PAGE_POISON */
394
static inline void *page_to_addr(rt_page_t page)
396 {
397 return (void *)(((page - page_start) << ARCH_PAGE_SHIFT) - PV_OFFSET);
398 }
399
static inline rt_ubase_t page_to_paddr(rt_page_t page)
401 {
402 return (rt_ubase_t)((page - page_start) << ARCH_PAGE_SHIFT);
403 }
404
static inline rt_page_t addr_to_page(rt_page_t pg_start, void *addr)
406 {
407 addr = (char *)addr + PV_OFFSET;
408 return &pg_start[((rt_ubase_t)addr >> ARCH_PAGE_SHIFT)];
409 }
410
411 #define CEIL(val, align) (((rt_size_t)(val) + (align)-1) & ~((align)-1))
412
/**
 * The shadow is the region accessible to the buddy system but not usable by
 * the page manager. The shadow mask is used to calculate the region head from
 * an address.
 */
417 const rt_size_t shadow_mask =
418 ((1ul << (RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1)) - 1);
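/*
 * Example (illustrative): with RT_PAGE_MAX_ORDER == 11 and 4 KiB pages, the
 * largest buddy block is 2^10 pages == 4 MiB, so shadow_mask == 0x3fffff and
 * every installed region is accounted for in 4 MiB-aligned windows.
 */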
419
420 const rt_size_t rt_mpr_size = CEIL(
421 ((1ul << (ARCH_VADDR_WIDTH - ARCH_PAGE_SHIFT))) * sizeof(struct rt_page),
422 ARCH_PAGE_SIZE);
423
424 void *rt_mpr_start;
425
rt_weak int rt_hw_clz(unsigned long n)
427 {
428 return __builtin_clzl(n);
429 }
430
rt_weak int rt_hw_ctz(unsigned long n)
432 {
433 return __builtin_ctzl(n);
434 }
435
rt_size_t rt_page_bits(rt_size_t size)
437 {
438 int bit = sizeof(rt_size_t) * 8 - rt_hw_clz(size) - 1;
439
440 if ((size ^ (1UL << bit)) != 0)
441 {
442 bit++;
443 }
444 bit -= ARCH_PAGE_SHIFT;
445 if (bit < 0)
446 {
447 bit = 0;
448 }
449 return bit;
450 }
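/*
 * Example (assuming 4 KiB pages): rt_page_bits(5000) returns 1, i.e. a
 * 5000-byte request needs an order-1 (two-page, 8 KiB) allocation, while
 * rt_page_bits(4096) returns 0.
 */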
451
struct rt_page *rt_page_addr2page(void *addr)
453 {
454 return addr_to_page(page_start, addr);
455 }
456
void *rt_page_page2addr(struct rt_page *p)
458 {
459 return page_to_addr(p);
460 }
461
static inline struct rt_page *_buddy_get(struct rt_page *p,
                                         rt_uint32_t size_bits)
464 {
465 rt_size_t addr;
466
467 RT_ASSERT(size_bits < RT_PAGE_MAX_ORDER - 1);
468
469 addr = (rt_size_t)rt_page_page2addr(p);
470 addr ^= (1UL << (size_bits + ARCH_PAGE_SHIFT));
471 return rt_page_addr2page((void *)addr);
472 }
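/*
 * The buddy of a block is found by toggling the address bit at position
 * (size_bits + ARCH_PAGE_SHIFT): e.g. the order-0 buddy of the page at region
 * offset 0x1000 is the page at offset 0x0000, and vice versa.
 */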
473
static rt_page_t *_get_pgls_head_by_page(pgls_agr_t *agr_pgls, rt_page_t page,
                                         rt_uint32_t size_bits)
476 {
477 rt_page_t *pgls_head;
478 int index;
479
480 if (size_bits < AFFID_BLK_BITS)
481 {
482 index = PAGE_TO_AFFID(page) >> size_bits;
483 RT_ASSERT(index < AFFID_NUMOF_ID_IN_SET(size_bits));
484
485 RT_ASSERT(PGLS_IS_AFF_MAP(agr_pgls[size_bits]));
486 pgls_head = &PGLS_GET_AFF_MAP(agr_pgls[size_bits])[index];
487 }
488 else
489 {
490 RT_ASSERT(!PGLS_IS_AFF_MAP(agr_pgls[size_bits]));
491 pgls_head = &agr_pgls[size_bits].page_list;
492 }
493
494 return pgls_head;
495 }
496
static rt_page_t *_get_pgls_head(pgls_agr_t *agr_pgls, int affid,
                                 rt_uint32_t size_bits)
499 {
500 rt_page_t *pgls_head;
501 int index;
502
503 if (size_bits < AFFID_BLK_BITS)
504 {
505 index = affid >> size_bits;
506 RT_ASSERT(index < AFFID_NUMOF_ID_IN_SET(size_bits));
507
508 RT_ASSERT(PGLS_IS_AFF_MAP(agr_pgls[size_bits]));
509 pgls_head = &PGLS_GET_AFF_MAP(agr_pgls[size_bits])[index];
510 }
511 else
512 {
513 RT_ASSERT(!PGLS_IS_AFF_MAP(agr_pgls[size_bits]));
514 pgls_head = &agr_pgls[size_bits].page_list;
515 }
516
517 return pgls_head;
518 }
519
static void _page_alloc(struct rt_page *p)
521 {
522 p->size_bits = ARCH_ADDRESS_WIDTH_BITS;
523 p->ref_cnt = 1;
524 }
525
static void _page_remove(rt_page_t *page_head, struct rt_page *p,
                         rt_uint32_t size_bits)
528 {
529 if (p->pre)
530 {
531 p->pre->next = p->next;
532 }
533 else
534 {
535 *page_head = p->next;
536 }
537
538 if (p->next)
539 {
540 p->next->pre = p->pre;
541 }
542
543 RT_ASSERT(p->size_bits == size_bits);
544 _page_alloc(p);
545 }
546
static void _page_insert(rt_page_t *page_head, struct rt_page *p,
                         rt_uint32_t size_bits)
549 {
550 p->next = *page_head;
551 if (p->next)
552 {
553 p->next->pre = p;
554 }
555 p->pre = 0;
556 *page_head = p;
557 p->size_bits = size_bits;
558 }
559
static void _pages_ref_inc(struct rt_page *p, rt_uint32_t size_bits)
561 {
562 struct rt_page *page_head;
563 int idx;
564
565 /* find page group head */
566 idx = p - page_start;
567 idx = idx & ~((1UL << size_bits) - 1);
568
569 page_head = page_start + idx;
570 page_head = (void *)((char *)page_head + early_offset);
571 page_head->ref_cnt++;
572 }
573
static int _pages_ref_get(struct rt_page *p, rt_uint32_t size_bits)
575 {
576 struct rt_page *page_head;
577 int idx;
578
579 /* find page group head */
580 idx = p - page_start;
581 idx = idx & ~((1UL << size_bits) - 1);
582
583 page_head = page_start + idx;
584 return page_head->ref_cnt;
585 }
586
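/*
 * Drop one reference on the 2^size_bits page block at frame_va. When the
 * count reaches zero, the block is repeatedly merged with any free buddy of
 * the same order (up to the maximum order) and the result is inserted back
 * into the matching free list. Returns 1 if the block was actually freed,
 * 0 if references remain.
 */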
static int _pages_free(pgls_agr_t page_list[], struct rt_page *p,
                       char *frame_va, rt_uint32_t size_bits)
589 {
590 rt_uint32_t level = size_bits;
591 struct rt_page *buddy;
592
593 RT_ASSERT(p >= page_start);
594 RT_ASSERT((char *)p < (char *)rt_mpr_start + rt_mpr_size);
595 RT_ASSERT(rt_kmem_v2p(p));
596 RT_ASSERT(p->ref_cnt > 0);
597 RT_ASSERT(p->size_bits == ARCH_ADDRESS_WIDTH_BITS);
598 RT_ASSERT(size_bits < RT_PAGE_MAX_ORDER);
599 RT_UNUSED(_pages_are_member);
600 RT_ASSERT(_pages_are_member((rt_ubase_t)frame_va, size_bits));
601
602 p->ref_cnt--;
603 if (p->ref_cnt != 0)
604 {
605 return 0;
606 }
607
608 #ifdef RT_DEBUGGING_PAGE_POISON
609 _poisoned_pages(frame_va, size_bits);
610 #endif /* RT_DEBUGGING_PAGE_POISON */
611
612 while (level < RT_PAGE_MAX_ORDER - 1)
613 {
614 buddy = _buddy_get(p, level);
615 if (buddy && buddy->size_bits == level)
616 {
617 _page_remove(_get_pgls_head_by_page(page_list, buddy, level),
618 buddy, level);
619 p = (p < buddy) ? p : buddy;
620 level++;
621 }
622 else
623 {
624 break;
625 }
626 }
627
628 _page_insert(_get_pgls_head_by_page(page_list, p, level),
629 p, level);
630 return 1;
631 }
632
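/*
 * Core buddy allocation. First try the free list of the exact order and
 * affinity id; if it is empty, climb to the smallest larger order with a free
 * block, take it, pick the sub-block that carries the requested affinity tag,
 * and hand the unused halves back to the lower-order lists. The page_remove/
 * page_insert/page_alloc hooks let the same logic run on the offset-mapped
 * page records used during early boot.
 */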
static struct rt_page *__pages_alloc(
    pgls_agr_t agr_pgls[], rt_uint32_t size_bits, int affid,
    void (*page_remove)(rt_page_t *page_head, struct rt_page *p,
                        rt_uint32_t size_bits),
    void (*page_insert)(rt_page_t *page_head, struct rt_page *p,
                        rt_uint32_t size_bits),
    void (*page_alloc)(rt_page_t page))
640 {
641 rt_page_t *pgls_head = _get_pgls_head(agr_pgls, affid, size_bits);
642 rt_page_t p = *pgls_head;
643
644 if (p)
645 {
646 page_remove(pgls_head, p, size_bits);
647 }
648 else
649 {
650 rt_uint32_t level;
651 rt_page_t head;
652
653 /* fallback for allocation */
654 for (level = size_bits + 1; level < RT_PAGE_MAX_ORDER; level++)
655 {
656 pgls_head = _get_pgls_head(agr_pgls, affid, level);
657 p = *pgls_head;
658 if (p)
659 {
660 break;
661 }
662 }
663 if (level == RT_PAGE_MAX_ORDER)
664 {
665 return 0;
666 }
667
668 page_remove(pgls_head, p, level);
669
        /* pick the page that satisfies the affinity tag */
671 head = p;
672 p = head + (affid - (affid & AFFID_BITS_MASK(level)));
673 page_alloc(p);
674
        /* release the pages the caller doesn't need */
676 while (level > size_bits)
677 {
678 long lower_bits = level - 1;
679 rt_page_t middle = _buddy_get(head, lower_bits);
680 if (p >= middle)
681 {
682 page_insert(
683 _get_pgls_head_by_page(agr_pgls, head, lower_bits),
684 head, lower_bits);
685 head = middle;
686 }
687 else
688 {
689 page_insert(
690 _get_pgls_head_by_page(agr_pgls, middle, lower_bits),
691 middle, lower_bits);
692 }
693 level = lower_bits;
694 }
695 }
696
697 return p;
698 }
699
static struct rt_page *_pages_alloc(pgls_agr_t page_list[],
                                    rt_uint32_t size_bits, int affid)
702 {
703 return __pages_alloc(page_list, size_bits, affid, _page_remove,
704 _page_insert, _page_alloc);
705 }
706
static void _early_page_remove(rt_page_t *pgls_head, rt_page_t page,
                               rt_uint32_t size_bits)
709 {
710 rt_page_t page_cont = (rt_page_t)((char *)page + early_offset);
711 if (page_cont->pre)
712 {
713 rt_page_t pre_cont = (rt_page_t)((char *)page_cont->pre + early_offset);
714 pre_cont->next = page_cont->next;
715 }
716 else
717 {
718 *pgls_head = page_cont->next;
719 }
720
721 if (page_cont->next)
722 {
723 rt_page_t next_cont = (rt_page_t)((char *)page_cont->next + early_offset);
724 next_cont->pre = page_cont->pre;
725 }
726
727 RT_ASSERT(page_cont->size_bits == size_bits);
728 page_cont->size_bits = ARCH_ADDRESS_WIDTH_BITS;
729 page_cont->ref_cnt = 1;
730 }
731
static void _early_page_alloc(rt_page_t page)
733 {
734 rt_page_t page_cont = (rt_page_t)((char *)page + early_offset);
735 page_cont->size_bits = ARCH_ADDRESS_WIDTH_BITS;
736 page_cont->ref_cnt = 1;
737 }
738
static void _early_page_insert(rt_page_t *pgls_head, rt_page_t page,
                               rt_uint32_t size_bits)
741 {
742 RT_ASSERT((void *)page >= rt_mpr_start &&
743 ((char *)page - (char *)rt_mpr_start) < rt_mpr_size);
744 rt_page_t page_cont = (rt_page_t)((char *)page + early_offset);
745
746 page_cont->next = *pgls_head;
747 if (page_cont->next)
748 {
749 rt_page_t next_cont = (rt_page_t)((char *)page_cont->next + early_offset);
750 next_cont->pre = page;
751 }
752 page_cont->pre = 0;
753 *pgls_head = page;
754 page_cont->size_bits = size_bits;
755 }
756
static struct rt_page *_early_pages_alloc(pgls_agr_t page_list[],
                                          rt_uint32_t size_bits, int affid)
759 {
760 return __pages_alloc(page_list, size_bits, affid, _early_page_remove,
761 _early_page_insert, _early_page_alloc);
762 }
763
static pgls_agr_t *_get_page_list(void *vaddr)
765 {
766 rt_ubase_t pa_int = (rt_ubase_t)vaddr + PV_OFFSET;
767 pgls_agr_t *list;
768 if (pa_int > UINT32_MAX)
769 {
770 list = page_list_high;
771 }
772 else
773 {
774 list = page_list_low;
775 }
776 return list;
777 }
778
int rt_page_ref_get(void *addr, rt_uint32_t size_bits)
780 {
781 struct rt_page *p;
782 rt_base_t level;
783 int ref;
784
785 p = rt_page_addr2page(addr);
786 level = rt_spin_lock_irqsave(&_pgmgr_lock);
787 ref = _pages_ref_get(p, size_bits);
788 rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
789 return ref;
790 }
791
void rt_page_ref_inc(void *addr, rt_uint32_t size_bits)
793 {
794 struct rt_page *p;
795 rt_base_t level;
796
797 p = rt_page_addr2page(addr);
798 level = rt_spin_lock_irqsave(&_pgmgr_lock);
799 _pages_ref_inc(p, size_bits);
800 rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
801 }
802
803 static rt_page_t (*pages_alloc_handler)(pgls_agr_t page_list[],
804 rt_uint32_t size_bits, int affid);
805
/* if no high page is configured, skip searching page_list_high */
807 static size_t _high_page_configured = 0;
808
static pgls_agr_t *_flag_to_page_list(size_t flags)
810 {
811 pgls_agr_t *page_list;
812 if (_high_page_configured && (flags & PAGE_ANY_AVAILABLE))
813 {
814 page_list = page_list_high;
815 }
816 else
817 {
818 page_list = page_list_low;
819 }
820 return page_list;
821 }
822
static volatile rt_ubase_t _last_alloc;
824
rt_inline void *_do_pages_alloc(rt_uint32_t size_bits, size_t flags, int affid)
826 {
827 void *alloc_buf = RT_NULL;
828 struct rt_page *p;
829 rt_base_t level;
830 pgls_agr_t *page_list = _flag_to_page_list(flags);
831
832 level = rt_spin_lock_irqsave(&_pgmgr_lock);
833 p = pages_alloc_handler(page_list, size_bits, affid);
834 if (p)
835 {
836 _freed_nr -= 1 << size_bits;
837 }
838 rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
839
840 if (!p && page_list != page_list_low)
841 {
842 /* fall back */
843 page_list = page_list_low;
844
845 level = rt_spin_lock_irqsave(&_pgmgr_lock);
846 p = pages_alloc_handler(page_list, size_bits, affid);
847 if (p)
848 {
849 _freed_nr -= 1 << size_bits;
850 _freed_nr_hi -= 1 << size_bits;
851 }
852 rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
853 }
854
855 if (p)
856 {
857 alloc_buf = page_to_addr(p);
858 _last_alloc = (rt_ubase_t)alloc_buf;
859
860 #ifdef RT_DEBUGGING_PAGE_LEAK
        level = rt_spin_lock_irqsave(&_pgmgr_lock);
        TRACE_ALLOC(p, size_bits);
        rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
864 #endif
865
866 #ifdef RT_DEBUGGING_PAGE_POISON
867 _unpoisoned_pages(alloc_buf, size_bits);
868 #endif /* RT_DEBUGGING_PAGE_POISON */
869
870 }
871
872 return alloc_buf;
873 }
874
rt_inline int _get_balanced_id(rt_uint32_t size_bits)
876 {
877 rt_ubase_t last_alloc = (_last_alloc / RT_PAGE_AFFINITY_BLOCK_SIZE);
878 return (last_alloc + (1u << size_bits)) & AFFID_BITS_MASK(size_bits);
879 }
880
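/*
 * Allocation without an affinity constraint: start from a hint derived from
 * the last allocation and walk the affinity ids round-robin, so untagged
 * allocations spread across the affinity (cache color) sets instead of
 * exhausting a single one.
 */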
static void *_do_pages_alloc_noaff(rt_uint32_t size_bits, size_t flags)
882 {
883 void *rc = RT_NULL;
884
885 if (size_bits < AFFID_BLK_BITS)
886 {
887 int try_affid = _get_balanced_id(size_bits);
888 size_t numof_id = AFFID_NUMOF_ID_IN_SET(size_bits);
889 size_t valid_affid_mask = numof_id - 1;
890
891 for (size_t i = 0; i < numof_id; i++, try_affid += 1 << size_bits)
892 {
893 rc = _do_pages_alloc(size_bits, flags, try_affid & valid_affid_mask);
894 if (rc)
895 {
896 break;
897 }
898 }
899 }
900 else
901 {
902 rc = _do_pages_alloc(size_bits, flags, 0);
903 }
904
905 if (!rc)
906 {
907 RT_ASSERT(0);
908 }
909 return rc;
910 }
911
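/*
 * Public allocator entry points. Usage sketch (illustrative):
 *
 *   void *buf = rt_pages_alloc_ext(2, PAGE_ANY_AVAILABLE); // order-2, 4 pages
 *   if (buf)
 *   {
 *       ...
 *       rt_pages_free(buf, 2);
 *   }
 */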
void *rt_pages_alloc(rt_uint32_t size_bits)
913 {
914 return _do_pages_alloc_noaff(size_bits, 0);
915 }
916
void *rt_pages_alloc_ext(rt_uint32_t size_bits, size_t flags)
918 {
919 return _do_pages_alloc_noaff(size_bits, flags);
920 }
921
void *rt_pages_alloc_tagged(rt_uint32_t size_bits, long affid, size_t flags)
923 {
924 rt_page_t current;
925
926 current = _do_pages_alloc(size_bits, flags, affid);
927 if (current && RT_PAGE_PICK_AFFID(current) != affid)
928 {
929 RT_ASSERT(0);
930 }
931
932 return current;
933 }
934
int rt_pages_free(void *addr, rt_uint32_t size_bits)
936 {
937 struct rt_page *p;
938 pgls_agr_t *page_list = _get_page_list(addr);
939 int real_free = 0;
940
941 p = rt_page_addr2page(addr);
942 if (p)
943 {
944 rt_base_t level;
945 level = rt_spin_lock_irqsave(&_pgmgr_lock);
946 real_free = _pages_free(page_list, p, addr, size_bits);
947 if (real_free)
948 {
949 _freed_nr += 1 << size_bits;
950 if (page_list == page_list_high)
951 {
952 _freed_nr_hi += 1 << size_bits;
953 }
954 TRACE_FREE(p, size_bits);
955 }
956 rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
957 }
958
959 return real_free;
960 }
961
962 /* debug command */
963 int rt_page_list(void) __attribute__((alias("list_page")));
964
965 #define PGNR2SIZE(nr) ((nr)*ARCH_PAGE_SIZE / 1024)
966
static void _dump_page_list(int order, rt_page_t lp, rt_page_t hp,
                            rt_size_t *pfree)
969 {
970 rt_size_t free = 0;
971
972 rt_kprintf("level %d ", order);
973
974 while (lp)
975 {
976 free += (1UL << order);
977 rt_kprintf("[L:0x%08p]", rt_page_page2addr(lp));
978 lp = lp->next;
979 }
980 while (hp)
981 {
982 free += (1UL << order);
983 rt_kprintf("[H:0x%08p]", rt_page_page2addr(hp));
984 hp = hp->next;
985 }
986
987 rt_kprintf("\n");
988
989 *pfree += free;
990 }
991
int list_page(void)
993 {
994 int i;
995 rt_size_t free = 0;
996 rt_size_t installed = _page_nr;
997 rt_base_t level;
998
999 level = rt_spin_lock_irqsave(&_pgmgr_lock);
1000
1001 /* dump affinity map area */
1002 for (i = 0; i < AFFID_BLK_BITS; i++)
1003 {
1004 rt_page_t *iter_lo = PGLS_GET_AFF_MAP(page_list_low[i]);
1005 rt_page_t *iter_hi = PGLS_GET_AFF_MAP(page_list_high[i]);
1006 rt_size_t list_len = AFFID_NUMOF_ID_IN_SET(i);
1007 for (size_t j = 0; j < list_len; j++)
1008 {
1009 _dump_page_list(i, iter_lo[j], iter_hi[j], &free);
1010 }
1011 }
1012
1013 /* dump normal page list */
1014 for (; i < RT_PAGE_MAX_ORDER; i++)
1015 {
1016 rt_page_t lp = page_list_low[i].page_list;
1017 rt_page_t hp = page_list_high[i].page_list;
1018
1019 _dump_page_list(i, lp, hp, &free);
1020 }
1021
1022 rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
1023 rt_kprintf("-------------------------------\n");
1024 rt_kprintf("Page Summary:\n => free/installed:\n 0x%lx/0x%lx (%ld/%ld KB)\n",
1025 free, installed, PGNR2SIZE(free), PGNR2SIZE(installed));
1026
1027 rt_kprintf(" => Installed Pages Region:\n");
1028 _print_region_list();
1029 rt_kprintf("-------------------------------\n");
1030
1031 return 0;
1032 }
1033 MSH_CMD_EXPORT(list_page, show page info);
1034
void rt_page_get_info(rt_size_t *total_nr, rt_size_t *free_nr)
1036 {
1037 *total_nr = _page_nr;
1038 *free_nr = _freed_nr;
1039 }
1040
void rt_page_high_get_info(rt_size_t *total_nr, rt_size_t *free_nr)
1042 {
1043 *total_nr = _page_nr_hi;
1044 *free_nr = _freed_nr_hi;
1045 }
1046
static void _invalid_uninstalled_shadow(rt_page_t start, rt_page_t end)
1048 {
1049 for (rt_page_t iter = start; iter < end; iter++)
1050 {
1051 rt_base_t frame = (rt_base_t)rt_page_page2addr(iter);
1052 struct installed_page_reg *page_reg = _find_page_region(frame);
1053 if (page_reg)
1054 {
1055 continue;
1056 }
1057 iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
1058 }
1059 }
1060
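/*
 * Feed a newly installed region into the buddy system: update the page
 * counters, mark shadow page records belonging to no installed region as
 * illegal, then split the region into maximal naturally aligned chunks and
 * insert them into the matching (low/high) free lists via the insert hook.
 */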
static void _install_page(rt_page_t mpr_head, rt_region_t region,
                          void (*insert)(rt_page_t *ppg, rt_page_t page, rt_uint32_t size_bits))
1063 {
1064 pgls_agr_t *page_list;
1065 rt_page_t *page_head;
1066 rt_region_t shadow;
1067 const rt_base_t pvoffset = PV_OFFSET;
1068
1069 _page_nr += ((region.end - region.start) >> ARCH_PAGE_SHIFT);
1070 _freed_nr += ((region.end - region.start) >> ARCH_PAGE_SHIFT);
1071
1072 shadow.start = region.start & ~shadow_mask;
1073 shadow.end = CEIL(region.end, shadow_mask + 1);
1074
1075 if (shadow.end + pvoffset > UINT32_MAX)
1076 _high_page_configured = 1;
1077
1078 rt_page_t shad_head = addr_to_page(mpr_head, (void *)shadow.start);
1079 rt_page_t shad_tail = addr_to_page(mpr_head, (void *)shadow.end);
1080 rt_page_t head = addr_to_page(mpr_head, (void *)region.start);
1081 rt_page_t tail = addr_to_page(mpr_head, (void *)region.end);
1082
    /* mark shadow page records that do not belong to any other region as illegal */
1084 _invalid_uninstalled_shadow(shad_head, head);
1085 _invalid_uninstalled_shadow(tail, shad_tail);
1086
1087 /* insert reserved pages to list */
1088 const int max_order = RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1;
1089 while (region.start != region.end)
1090 {
1091 struct rt_page *p;
1092 int align_bits;
1093 int size_bits;
1094 int page_order;
1095
1096 size_bits =
1097 ARCH_ADDRESS_WIDTH_BITS - 1 - rt_hw_clz(region.end - region.start);
1098 align_bits = rt_hw_ctz(region.start);
1099 if (align_bits < size_bits)
1100 {
1101 size_bits = align_bits;
1102 }
1103 if (size_bits > max_order)
1104 {
1105 size_bits = max_order;
1106 }
1107
1108 p = addr_to_page(mpr_head, (void *)region.start);
1109 p->size_bits = ARCH_ADDRESS_WIDTH_BITS;
1110 p->ref_cnt = 0;
1111
1112 /* insert to list */
1113 page_list = _get_page_list((void *)region.start);
1114 if (page_list == page_list_high)
1115 {
1116 _page_nr_hi += 1 << (size_bits - ARCH_PAGE_SHIFT);
1117 _freed_nr_hi += 1 << (size_bits - ARCH_PAGE_SHIFT);
1118 }
1119
1120 page_order = size_bits - ARCH_PAGE_SHIFT;
1121 page_head = _get_pgls_head_by_page(page_list, p, page_order);
1122 insert(page_head, (rt_page_t)((char *)p - early_offset), page_order);
1123 region.start += (1UL << size_bits);
1124 }
1125 }
1126
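/*
 * Pick the first physical address at or above head_page_pa whose offset
 * within an affinity block matches that of the mapped virtual address, so
 * that affinity ids computed from physical addresses stay consistent with
 * the virtual mapping.
 */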
static void *_aligned_to_affinity(rt_ubase_t head_page_pa, void *mapped_to)
1128 {
1129 #define AFFBLK_MASK (RT_PAGE_AFFINITY_BLOCK_SIZE - 1)
1130 rt_ubase_t head_page_pg_aligned;
1131 rt_ubase_t aligned_affblk_tag = (long)mapped_to & AFFBLK_MASK;
1132
1133 head_page_pg_aligned =
1134 ((long)head_page_pa & ~AFFBLK_MASK) | aligned_affblk_tag;
1135 if (head_page_pg_aligned < head_page_pa)
1136 {
1137 /* find the page forward */
1138 head_page_pg_aligned += RT_PAGE_AFFINITY_BLOCK_SIZE;
1139 }
1140
1141 return (void *)head_page_pg_aligned;
1142 }
1143
void rt_page_init(rt_region_t reg)
1145 {
1146 int i;
1147 rt_region_t shadow;
1148
1149 /* setup install page status */
1150 rt_spin_lock_init(&_init_region.lock);
1151 _init_region.region_area = reg;
1152 _init_region.next = RT_NULL;
1153 #ifdef RT_DEBUGGING_PAGE_POISON
1154 _init_region.usage_trace = _init_region_usage_trace;
1155 #endif /* RT_DEBUGGING_PAGE_POISON */
1156 _inst_page_reg_head = &_init_region;
1157
1158 /* adjust install region. inclusive start, exclusive end */
1159 reg.start += ARCH_PAGE_MASK;
1160 reg.start &= ~ARCH_PAGE_MASK;
1161 reg.end &= ~ARCH_PAGE_MASK;
1162 if (reg.end <= reg.start)
1163 {
1164 LOG_E("region end(%p) must greater than start(%p)", reg.start, reg.end);
1165 RT_ASSERT(0);
1166 }
1167
1168 shadow.start = reg.start & ~shadow_mask;
1169 shadow.end = CEIL(reg.end, shadow_mask + 1);
1170 LOG_D("[Init page] start: 0x%lx, end: 0x%lx, total: 0x%lx", reg.start,
1171 reg.end, page_nr);
1172
1173 int err;
1174
1175 /* init free list */
1176 rt_page_t *aff_pgls_iter_lo = aff_pglist_low;
1177 rt_page_t *aff_pgls_iter_hi = aff_pglist_high;
1178 for (i = 0; i < AFFID_BLK_BITS; i++)
1179 {
1180 long stride = AFFID_NUMOF_ID_IN_SET(i);
1181 PGLS_FROM_AFF_MAP(page_list_low[i], aff_pgls_iter_lo);
1182 PGLS_FROM_AFF_MAP(page_list_high[i], aff_pgls_iter_hi);
1183 aff_pgls_iter_lo += stride;
1184 aff_pgls_iter_hi += stride;
1185 }
1186
1187 for (; i < RT_PAGE_MAX_ORDER; i++)
1188 {
1189 page_list_low[i].page_list = 0;
1190 page_list_high[i].page_list = 0;
1191 }
1192
1193 /* map MPR area */
1194 err = rt_aspace_map_static(&rt_kernel_space, &mpr_varea, &rt_mpr_start,
1195 rt_mpr_size, MMU_MAP_K_RWCB, MMF_MAP_FIXED,
1196 &mm_page_mapper, 0);
1197
1198 if (err != RT_EOK)
1199 {
1200 LOG_E("MPR map failed with size %lx at %p", rt_mpr_size, rt_mpr_start);
1201 RT_ASSERT(0);
1202 }
1203
1204 /* calculate footprint */
1205 init_mpr_align_start =
1206 (rt_size_t)addr_to_page(page_start, (void *)shadow.start) &
1207 ~ARCH_PAGE_MASK;
1208 init_mpr_align_end =
1209 CEIL(addr_to_page(page_start, (void *)shadow.end), ARCH_PAGE_SIZE);
1210 rt_size_t init_mpr_size = init_mpr_align_end - init_mpr_align_start;
1211 rt_size_t init_mpr_npage = init_mpr_size >> ARCH_PAGE_SHIFT;
1212
1213 /* find available aligned page */
1214 init_mpr_cont_start = _aligned_to_affinity(reg.start,
1215 (void *)init_mpr_align_start);
1216
1217 rt_size_t init_mpr_cont_end = (rt_size_t)init_mpr_cont_start + init_mpr_size;
1218 early_offset = (rt_size_t)init_mpr_cont_start - init_mpr_align_start;
1219 rt_page_t mpr_cont = (void *)((char *)rt_mpr_start + early_offset);
1220
1221 /* mark init mpr pages as illegal */
1222 rt_page_t head_cont = addr_to_page(mpr_cont, (void *)reg.start);
1223 rt_page_t tail_cont = addr_to_page(mpr_cont, (void *)reg.end);
1224 for (rt_page_t iter = head_cont; iter < tail_cont; iter++)
1225 {
1226 iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
1227 }
1228
1229 reg.start = init_mpr_cont_end;
1230 _install_page(mpr_cont, reg, _early_page_insert);
1231
1232 pages_alloc_handler = _early_pages_alloc;
    /* do the page table business */
1234 if (rt_aspace_load_page(&rt_kernel_space, (void *)init_mpr_align_start, init_mpr_npage))
1235 {
1236 LOG_E("%s: failed to load pages", __func__);
1237 RT_ASSERT(0);
1238 }
1239
1240 if (rt_hw_mmu_tbl_get() == rt_kernel_space.page_table)
1241 rt_page_cleanup();
1242 }
1243
static int _load_mpr_area(void *head, void *tail)
1245 {
1246 int err = 0;
1247 char *iter = (char *)((rt_ubase_t)head & ~ARCH_PAGE_MASK);
1248 tail = (void *)CEIL(tail, ARCH_PAGE_SIZE);
1249
1250 while (iter != tail)
1251 {
1252 void *paddr = rt_kmem_v2p(iter);
1253 if (paddr == ARCH_MAP_FAILED)
1254 {
1255 err = rt_aspace_load_page(&rt_kernel_space, iter, 1);
1256 if (err != RT_EOK)
1257 {
1258 LOG_E("%s: failed to load page", __func__);
1259 break;
1260 }
1261 }
1262 iter += ARCH_PAGE_SIZE;
1263 }
1264 return err;
1265 }
1266
static int _get_mpr_ready_n_install(rt_ubase_t inst_head, rt_ubase_t inst_end)
1268 {
1269 int err;
1270 rt_region_t shadow;
1271 rt_region_t region =
1272 {
1273 .start = inst_head,
1274 .end = inst_end,
1275 };
1276 void *head, *tail;
1277
1278 shadow.start = region.start & ~shadow_mask;
1279 shadow.end = CEIL(region.end, shadow_mask + 1);
1280 head = addr_to_page(page_start, (void *)shadow.start);
1281 tail = addr_to_page(page_start, (void *)shadow.end);
1282
1283 err = _load_mpr_area(head, tail);
1284
1285 if (err == RT_EOK)
1286 {
1287 rt_ubase_t level = rt_spin_lock_irqsave(&_pgmgr_lock);
1288 _install_page(rt_mpr_start, region, _page_insert);
1289 rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
1290 }
1291
1292 return err;
1293 }
1294
static void _update_region_list(struct installed_page_reg *member,
                                rt_ubase_t inst_head, rt_ubase_t inst_end,
                                rt_bitmap_t *ut_bitmap)
1298 {
1299 rt_spin_lock_init(&member->lock);
1300
1301 rt_spin_lock(&_inst_page_reg_lock);
1302
1303 member->region_area.start = inst_head;
1304 member->region_area.end = inst_end;
1305
1306 #ifdef RT_DEBUGGING_PAGE_POISON
1307 member->usage_trace = ut_bitmap;
1308 #else
1309 RT_UNUSED(ut_bitmap);
1310 #endif /* RT_DEBUGGING_PAGE_POISON */
1311
1312 member->next = _inst_page_reg_head;
1313 _inst_page_reg_head = member;
1314
1315 rt_spin_unlock(&_inst_page_reg_lock);
1316 }
1317
1318 #define _PAGE_STRIPE (1 << (RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1))
int rt_page_install(rt_region_t region)
1320 {
1321 int err = -RT_EINVAL;
1322
1323 if (region.end != region.start && !(region.start & ARCH_PAGE_MASK) &&
1324 !(region.end & ARCH_PAGE_MASK))
1325 {
1326 rt_ubase_t inst_head = region.start;
1327 rt_ubase_t inst_end = region.end;
1328 rt_ubase_t iter = inst_head;
1329 int pages_count = (inst_end - inst_head) / ARCH_PAGE_SIZE;
1330 struct installed_page_reg *installed_pgreg =
1331 rt_calloc(1, sizeof(struct installed_page_reg) +
1332 RT_BITMAP_LEN(pages_count) * sizeof(rt_bitmap_t));
1333
1334 if (installed_pgreg)
1335 {
1336 _update_region_list(installed_pgreg, inst_head, inst_end,
1337 (rt_bitmap_t *)(installed_pgreg + 1));
1338
1339 if ((rt_ubase_t)iter & shadow_mask)
1340 {
1341 iter = RT_ALIGN((rt_ubase_t)inst_head, _PAGE_STRIPE);
                err = _get_mpr_ready_n_install(inst_head, iter < inst_end ? iter : inst_end);
1343 }
1344
1345 for (rt_ubase_t next = iter + _PAGE_STRIPE; next < inst_end;
1346 iter = next, next += _PAGE_STRIPE)
1347 {
                err = _get_mpr_ready_n_install(iter, next);
1349 }
1350
1351 if (iter < inst_end)
1352 {
                err = _get_mpr_ready_n_install(iter, inst_end);
1354 }
1355 }
1356 }
1357 return err;
1358 }
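/*
 * Usage sketch (illustrative; va_start and va_end are placeholder kernel
 * virtual addresses of a hot-plugged, page-aligned memory range):
 *
 *   rt_region_t reg = { .start = va_start, .end = va_end };
 *   rt_page_install(reg);
 */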
1359
void rt_page_cleanup(void)
1361 {
1362 early_offset = 0;
1363 pages_alloc_handler = _pages_alloc;
1364 }
1365