1 /*
2  * Copyright (c) 2006-2022, RT-Thread Development Team
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Change Logs:
7  * Date           Author       Notes
8  * 2022-11-14     WangXiaoyao  the first version
9  */
10 #ifndef __MM_PRIVATE_H__
11 #define __MM_PRIVATE_H__
12 
13 #include "mm_aspace.h"
14 #include "mm_fault.h"
15 #include "mm_flag.h"
16 #include "mm_page.h"
17 
18 #include <rtdef.h>
19 #include <stddef.h>
20 
21 /**
22  * @brief DATA STRUCTURE & API USED INTERNALLY
23  *
24  * This is mainly a wrapper layer to actual data structure.
25  * In this way, we can switch to any BST we like by adding new
26  * wrapper code.
27  * Every BST must satisfy the API to support MM
28  *
29  * *INFO: varea range convention
30  * For API, a range is specified by a base and its length.
31  * This provides a clear interface without ambiguity.
32  * For implementation, a range is specified by [start, end] tuple
33  * where both start and end are inclusive.
34  */
35 
/* Nonzero when the varea was not created with MMF_STATIC_ALLOC
 * (presumably: its storage was allocated by MM rather than supplied by
 * the caller — confirm against varea creation paths). */
#define VAREA_NOT_STATIC(varea) (!((varea)->flag & MMF_STATIC_ALLOC))
/* Best-effort human-readable name of a varea: delegates to the backing
 * mem_obj's get_name() when available, otherwise the fallback string
 * "unknow" (sic — kept byte-identical for existing log output).
 * Fix: macro parameter is now parenthesized at every use, consistent
 * with VAREA_NOT_STATIC, so expressions like VAREA_NAME(p + 1) expand
 * correctly. */
#define VAREA_NAME(varea)                                                      \
  ((!(varea)->mem_obj || !(varea)->mem_obj->get_name)                          \
       ? "unknow"                                                              \
       : (varea)->mem_obj->get_name(varea))
41 
/* Only user addresses use the COW technique, so user permission is always
 * checked together with write permission.
 * Fix: macro parameter is now parenthesized, consistent with
 * VAREA_NOT_STATIC, so non-trivial argument expressions expand correctly. */
#define VAREA_IS_WRITABLE(varea)                                               \
  (rt_hw_mmu_attr_test_perm((varea)->attr,                                     \
                            RT_HW_MMU_PROT_USER | RT_HW_MMU_PROT_WRITE))
/* Translate a virtual address inside the varea to its offset within the
 * backing memory object: the varea's base offset plus the distance of va
 * from varea->start, converted by MM_PA_TO_OFF (presumably bytes to
 * page units — confirm in mm_page.h). */
#define VAREA_VA_TO_OFFSET(varea, va)                                          \
    ((varea)->offset + MM_PA_TO_OFF((long)(va) - (long)(varea)->start))
48 
/* Inclusive address range, following the varea range convention noted at
 * the top of this header: both start and end lie inside the range. */
struct _mm_range
{
    void *start; /* first byte of the range */
    void *end;   /* last byte of the range (inclusive) */
};
54 
/**
 * @brief Initialize the varea BST bookkeeping of an address space
 *
 * @param aspace the address space whose BST is initialized
 * @return rt_err_t
 */
61 rt_err_t _aspace_bst_init(struct rt_aspace *aspace);
62 
63 /**
64  * @brief Retrieve any varea if start in [varea->start, varea->end]
65  *
66  * @param aspace
67  * @param start
68  * @return struct rt_varea*
69  */
70 struct rt_varea *_aspace_bst_search(struct rt_aspace *aspace, void *start);
71 
/**
 * @brief Retrieve the lowest varea satisfying (varea->start >= start)
 *
 * @param aspace
 * @param start
 * @return struct rt_varea*
 */
80 struct rt_varea *_aspace_bst_search_exceed(struct rt_aspace *aspace,
81                                            void *start);
82 
83 /**
84  * @brief Retrieve any varea overlaps a specified address range
85  *
86  * @param aspace
87  * @param start
88  * @param length
89  * @return struct rt_varea*
90  */
91 struct rt_varea *_aspace_bst_search_overlap(struct rt_aspace *aspace,
92                                             struct _mm_range range);
93 
94 /**
95  * @brief Insert a varea into the bst
96  *
97  * @param aspace
98  * @param varea
99  */
100 void _aspace_bst_insert(struct rt_aspace *aspace, struct rt_varea *varea);
101 
102 /**
103  * @brief Remove a varea from the bst
104  *
105  * @param aspace
106  * @param varea
107  */
108 void _aspace_bst_remove(struct rt_aspace *aspace, struct rt_varea *varea);
109 
110 int rt_varea_fix_private_locked(rt_varea_t ex_varea, void *pa,
111                                 struct rt_aspace_fault_msg *msg,
112                                 rt_bool_t dont_copy);
113 
114 int rt_varea_map_with_msg(rt_varea_t varea, struct rt_aspace_fault_msg *msg);
115 
116 void _varea_uninstall_locked(rt_varea_t varea);
117 
118 int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t *pvarea, void **addr,
119                    rt_size_t length, rt_size_t attr, mm_flag_t flags,
120                    rt_mem_obj_t mem_obj, rt_size_t offset);
121 
rt_varea_is_private_locked(rt_varea_t varea)122 rt_inline rt_bool_t rt_varea_is_private_locked(rt_varea_t varea)
123 {
124     rt_base_t flags = varea->flag;
125     return !!(
126         (flags & (MMF_MAP_PRIVATE | MMF_MAP_PRIVATE_DONT_SYNC))
127         && (varea->aspace->private_object != varea->mem_obj)
128     );
129 }
130 
131 rt_err_t rt_aspace_anon_ref_dec(rt_mem_obj_t aobj);
132 rt_err_t rt_aspace_page_get_phy(rt_aspace_t aspace, void *page_va, void *buffer);
133 rt_err_t rt_aspace_page_put_phy(rt_aspace_t aspace, void *page_va, void *buffer);
134 
135 #endif /* __MM_PRIVATE_H__ */
136