/*
 * Copyright (c) 2006-2019, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-11-01     Jesven       The first version
 * 2022-12-13     WangXiaoyao  Hot-pluggable, extensible
 *                             page management algorithm
 */
#ifndef __MM_PAGE_H__
#define __MM_PAGE_H__

#include <rthw.h>
#include <rtthread.h>
#include <stdint.h>

#define GET_FLOOR(type) \
    (1ul << (8 * sizeof(rt_size_t) - __builtin_clzl(2 * sizeof(type) - 1) - 1))
#define DEF_PAGE_T(fields) \
    typedef struct rt_page {\
        union {struct {fields}; char _padding[GET_FLOOR(struct {fields})];};\
    } *rt_page_t
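
/*
 * Note on the helpers above: despite its name, GET_FLOOR(type) evaluates to
 * the smallest power of two that is >= sizeof(type), because 2*sizeof(type)-1
 * has its highest set bit at exactly that power and the __builtin_clzl()
 * expression extracts that bit. DEF_PAGE_T() then pads struct rt_page to that
 * size through the _padding member. Illustrative values for a 64-bit build
 * (an assumption used for this example only): sizeof(type) == 24 yields 32,
 * sizeof(type) == 16 stays 16.
 */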

/**
 * @brief PAGE ALLOC FLAGS
 *
 * @info PAGE_ANY_AVAILABLE
 * Page allocation defaults to the lower region; this behavior can be changed
 * by setting PAGE_ANY_AVAILABLE.
 */

#define PAGE_ANY_AVAILABLE 0x1ul

#define RT_PAGE_PICK_AFFID(ptr) \
    ((((long)(ptr)) & (RT_PAGE_AFFINITY_BLOCK_SIZE - 1)) / ARCH_PAGE_SIZE)
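
/*
 * RT_PAGE_PICK_AFFID(ptr) yields the page index of ptr within its affinity
 * block. Usage sketch with assumed values of ARCH_PAGE_SIZE == 4 KiB and
 * RT_PAGE_AFFINITY_BLOCK_SIZE == 64 KiB (illustrative only):
 *
 *     RT_PAGE_PICK_AFFID((void *)0x80012000);    // -> 2
 */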

#ifdef RT_DEBUGGING_PAGE_LEAK
#define DEBUG_FIELD struct {    \
    /* trace list */            \
    struct rt_page *tl_next;    \
    struct rt_page *tl_prev;    \
    void *caller;               \
    size_t trace_size;          \
}
#else
#define DEBUG_FIELD
#endif

DEF_PAGE_T(
    struct rt_page *next;   /* next page at the same level */
    struct rt_page *pre;    /* previous page at the same level */

    DEBUG_FIELD;

    rt_uint32_t size_bits;     /* if it equals ARCH_ADDRESS_WIDTH_BITS, the page is not free */
    rt_uint32_t ref_cnt;       /* page group reference count */
);
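
/*
 * For reference, the invocation above expands to roughly:
 *
 *     typedef struct rt_page {
 *         union {
 *             struct { ...fields above... };
 *             char _padding[GET_FLOOR(...)];
 *         };
 *     } *rt_page_t;
 *
 * so each page frame descriptor occupies a power-of-two number of bytes
 * (32 bytes on a typical 64-bit build without RT_DEBUGGING_PAGE_LEAK; that
 * figure is an estimate for illustration, not part of the API).
 */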

#undef GET_FLOOR
#undef DEF_PAGE_T
#undef DEBUG_FIELD

typedef struct tag_region
{
    rt_size_t start;
    rt_size_t end;

    const char *name;
} rt_region_t;

extern const rt_size_t rt_mpr_size;
extern void *rt_mpr_start;

void rt_page_init(rt_region_t reg);

void rt_page_cleanup(void);

void *rt_pages_alloc(rt_uint32_t size_bits);

void *rt_pages_alloc_ext(rt_uint32_t size_bits, size_t flags);

void *rt_pages_alloc_tagged(rt_uint32_t size_bits, long tag, size_t flags);
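
/*
 * Allocation usage sketch (illustrative, not part of this header): each call
 * hands out a block of 2^size_bits pages, and the block is released with the
 * same size_bits.
 *
 *     void *buf = rt_pages_alloc_ext(2, PAGE_ANY_AVAILABLE);    // 4 pages
 *     if (buf)
 *     {
 *         // ... use the pages ...
 *         rt_pages_free(buf, 2);
 *     }
 */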

rt_bool_t rt_page_is_member(rt_base_t page_pa);

void rt_page_ref_inc(void *addr, rt_uint32_t size_bits);

int rt_page_ref_get(void *addr, rt_uint32_t size_bits);

int rt_pages_free(void *addr, rt_uint32_t size_bits);
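
/*
 * Reference-counting sketch, assuming the contract suggested by the
 * declarations above (rt_pages_free() drops one reference and only returns
 * the pages when the count reaches zero); illustrative only:
 *
 *     void *pg = rt_pages_alloc(0);    // one page
 *     rt_page_ref_inc(pg, 0);          // share it: rt_page_ref_get(pg, 0) == 2
 *     rt_pages_free(pg, 0);            // drop one reference, still usable
 *     rt_pages_free(pg, 0);            // last reference gone, page is freed
 */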

int rt_page_list(void);

rt_size_t rt_page_bits(rt_size_t size);
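
/*
 * rt_page_bits() pairs a byte size with the size_bits order used by the
 * allocators above. Sketch assuming 4 KiB pages (illustrative only):
 *
 *     void *buf = rt_pages_alloc(rt_page_bits(8192));    // order 1, 2 pages
 */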

void rt_page_get_info(rt_size_t *total_nr, rt_size_t *free_nr);

void rt_page_high_get_info(rt_size_t *total_nr, rt_size_t *free_nr);
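
/*
 * Statistics query sketch (the counts are presumably page-frame counts;
 * illustrative only):
 *
 *     rt_size_t total, free_cnt;
 *     rt_page_get_info(&total, &free_cnt);
 *     rt_kprintf("pages: %d total, %d free\n", (int)total, (int)free_cnt);
 */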

void *rt_page_page2addr(struct rt_page *p);

struct rt_page *rt_page_addr2page(void *addr);
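
/*
 * The two converters above map between a page frame descriptor and the page
 * address it manages; for a page-aligned address returned by rt_pages_alloc()
 * the round trip is expected to be the identity (illustrative sketch):
 *
 *     struct rt_page *pg = rt_page_addr2page(buf);
 *     RT_ASSERT(rt_page_page2addr(pg) == buf);
 */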

/**
 * @brief Install page frames at run-time
 *
 * The region size must currently be aligned to
 * 2^(RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1) bytes (typically 2 MB).
 *
 * !WARNING this API does NOT check whether the region is valid or already
 * present in the list
 *
 * @param region region.start is the first page frame (inclusive),
 *               region.end is the first page frame after the free region
 * @return int 0 on success
 */
int rt_page_install(rt_region_t region);
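
/*
 * Hot-plug installation sketch; the addresses below are made up for
 * illustration and the region must honor the alignment constraint documented
 * above:
 *
 *     rt_region_t hot = {
 *         .start = 0x42000000UL,              // first free page frame
 *         .end   = 0x42000000UL + 0x200000UL, // one 2 MB block past start
 *         .name  = "hotplug",
 *     };
 *     if (rt_page_install(hot) != 0)
 *     {
 *         // installation failed
 *     }
 */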

void rt_page_leak_trace_start(void);

void rt_page_leak_trace_stop(void);

#endif /* __MM_PAGE_H__ */