1 /*
2 * Copyright (c) 2006-2022, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2022-12-06 WangXiaoyao the first version
9 * 2023-08-19 Shell Support PRIVATE mapping and COW
10 */
11 #include <rtthread.h>
12
13 #ifdef RT_USING_SMART
14 #define DBG_TAG "mm.fault"
15 #define DBG_LVL DBG_INFO
16 #include <rtdbg.h>
17
18 #include <lwp.h>
19 #include <lwp_syscall.h>
20 #include "mm_aspace.h"
21 #include "mm_fault.h"
22 #include "mm_flag.h"
23 #include "mm_private.h"
24 #include <mmu.h>
25 #include <tlb.h>
26
/**
 * Ask the varea's backing memory object to supply the missing page, then
 * map it into the address space according to the fault message.
 *
 * @param varea the virtual area the fault landed in
 * @param msg   fault descriptor; the backend fills msg->response
 * @return MM_FAULT_FIXABLE_TRUE when the page was mapped successfully,
 *         MM_FAULT_FIXABLE_FALSE otherwise (including when the varea has
 *         no backing object or no on_page_fault handler).
 */
static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
{
    if (!(varea->mem_obj && varea->mem_obj->on_page_fault))
        return MM_FAULT_FIXABLE_FALSE;

    varea->mem_obj->on_page_fault(varea, msg);

    /* success of the whole fix hinges on the mapping step */
    return (rt_varea_map_with_msg(varea, msg) == RT_EOK)
               ? MM_FAULT_FIXABLE_TRUE
               : MM_FAULT_FIXABLE_FALSE;
}
38
/**
 * Handle a fault raised by a read access.
 *
 * Only a plain page-miss (MM_FAULT_TYPE_PAGE_FAULT) is recoverable for
 * reads; any other fault type is reported as unfixable.
 *
 * @param varea the virtual area the fault landed in
 * @param pa    current physical translation of the fault address
 * @param msg   fault descriptor
 * @return MM_FAULT_FIXABLE_TRUE / MM_FAULT_FIXABLE_FALSE
 */
static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    if (msg->fault_type != MM_FAULT_TYPE_PAGE_FAULT)
    {
        /* signal a fault to user? */
        return MM_FAULT_FIXABLE_FALSE;
    }

    /* a genuine page miss: nothing mapped yet, and prefetched areas
     * should never reach the fault path */
    RT_ASSERT(pa == ARCH_MAP_FAILED);
    RT_ASSERT(!(varea->flag & MMF_PREFETCH));
    return _fetch_page(varea, msg);
}
54
/**
 * Handle a fault raised by a write access.
 *
 * Two fixable situations:
 *  1. The varea is a PRIVATE mapping (copy-on-write): the write is resolved
 *     by rt_varea_fix_private_locked(), which needs the aspace write lock.
 *  2. An ordinary shared mapping with a plain page miss: fetch the page
 *     from the backing object.
 *
 * @param varea the virtual area the fault landed in
 * @param pa    current physical translation of the fault address
 * @param msg   fault descriptor
 * @return MM_FAULT_FIXABLE_TRUE / MM_FAULT_FIXABLE_FALSE
 */
static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    rt_aspace_t aspace = varea->aspace;
    int err = MM_FAULT_FIXABLE_FALSE;

    if (rt_varea_is_private_locked(varea))
    {
        /* COW path: both a permission fault on an existing mapping and a
         * plain page miss are fixable, provided the varea is writable */
        if (VAREA_IS_WRITABLE(varea) && (
            msg->fault_type == MM_FAULT_TYPE_RWX_PERM ||
            msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT))
        {
            /* caller holds the read lock (see rt_aspace_fault_try_fix);
             * upgrade to the write lock around the private fix */
            RDWR_LOCK(aspace);
            err = rt_varea_fix_private_locked(varea, pa, msg, RT_FALSE);
            RDWR_UNLOCK(aspace);
            if (err == MM_FAULT_FIXABLE_FALSE)
                LOG_I("%s: fix private failure", __func__);
        }
        else
        {
            LOG_I("%s: No permission on %s(attr=0x%lx,writable=%s,fault_type=%d)",
                  __func__, VAREA_NAME(varea), varea->attr,
                  VAREA_IS_WRITABLE(varea) ? "True" : "False", msg->fault_type);
        }
    }
    else if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        /* shared mapping, plain page miss: nothing mapped yet, and
         * prefetched areas should never reach the fault path */
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
        err = _fetch_page(varea, msg);
        if (err == MM_FAULT_FIXABLE_FALSE)
            LOG_I("%s: page fault failure", __func__);
    }
    else
    {
        LOG_D("%s: can not fix", __func__);
        /* signal a fault to user? */
    }
    return err;
}
94
/**
 * Handle a fault raised by an instruction fetch.
 *
 * As with reads, only a plain page miss is recoverable; permission faults
 * on execute are left unfixed.
 *
 * @param varea the virtual area the fault landed in
 * @param pa    current physical translation of the fault address
 * @param msg   fault descriptor
 * @return MM_FAULT_FIXABLE_TRUE / MM_FAULT_FIXABLE_FALSE
 */
static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    if (msg->fault_type != MM_FAULT_TYPE_PAGE_FAULT)
        return MM_FAULT_FIXABLE_FALSE;

    /* page miss: no mapping exists yet, prefetched areas never fault */
    RT_ASSERT(pa == ARCH_MAP_FAILED);
    RT_ASSERT(!(varea->flag & MMF_PREFETCH));
    return _fetch_page(varea, msg);
}
106
/**
 * Refine a generic MMU fault into a precise type.
 *
 * Architectures that can only report MM_FAULT_TYPE_GENERIC_MMU get the
 * fault classified here: if the varea's attributes already grant the
 * requested user access, the fault is either a page miss (no physical
 * translation) or a dynamic-permission fault (translation exists, e.g. COW).
 * Otherwise msg->fault_type is left as GENERIC_MMU, which no handler fixes.
 *
 * @param msg   fault descriptor; msg->fault_type may be rewritten in place
 * @param pa    physical translation of the fault address, or ARCH_MAP_FAILED
 * @param varea the virtual area the fault landed in
 */
static void _determine_precise_fault_type(struct rt_aspace_fault_msg *msg, rt_ubase_t pa, rt_varea_t varea)
{
    if (msg->fault_type == MM_FAULT_TYPE_GENERIC_MMU)
    {
        rt_base_t requesting_perm;
        switch (msg->fault_op)
        {
        case MM_FAULT_OP_READ:
            requesting_perm = RT_HW_MMU_PROT_READ | RT_HW_MMU_PROT_USER;
            break;
        case MM_FAULT_OP_WRITE:
            requesting_perm = RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER;
            break;
        case MM_FAULT_OP_EXECUTE:
            requesting_perm = RT_HW_MMU_PROT_EXECUTE | RT_HW_MMU_PROT_USER;
            break;
        default:
            /* Unknown fault op: previously requesting_perm was read
             * uninitialized here (UB). Leave the fault as GENERIC_MMU so
             * the dispatcher reports it unfixable. */
            return;
        }

        /**
         * always checking the user privileges since dynamic permission is not
         * supported in kernel. So those faults are never fixable. Hence, adding
         * permission check never changes the result of checking. In other
         * words, { 0 && (expr) } is always false.
         */
        if (rt_hw_mmu_attr_test_perm(varea->attr, requesting_perm))
        {
            if (pa == (rt_ubase_t)ARCH_MAP_FAILED)
            {
                msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
            }
            else
            {
                msg->fault_type = MM_FAULT_TYPE_RWX_PERM;
            }
        }
    }
}
144
/**
 * Try to fix a memory fault described by msg inside the given aspace.
 *
 * The fault address is aligned down to a page boundary, the owning varea is
 * looked up under the aspace read lock, the fault type is refined, and the
 * fault is dispatched to the read/write/execute handler. A fault whose page
 * is already mapped (fixed concurrently by another CPU/thread) is reported
 * fixable without further work.
 *
 * @param aspace the address space the fault occurred in (may be RT_NULL)
 * @param msg    fault descriptor; fault_vaddr is page-aligned in place and
 *               msg->off / msg->response are filled during handling
 * @return MM_FAULT_FIXABLE_TRUE if the fault was (or already is) resolved,
 *         MM_FAULT_FIXABLE_FALSE otherwise
 */
int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
{
    int err = MM_FAULT_FIXABLE_FALSE;
    uintptr_t va = (uintptr_t)msg->fault_vaddr;
    va &= ~ARCH_PAGE_MASK;
    msg->fault_vaddr = (void *)va;
    rt_mm_fault_res_init(&msg->response);

    /* fault fixing may block (page allocation, backend I/O) */
    RT_DEBUG_SCHEDULER_AVAILABLE(1);

    if (aspace)
    {
        rt_varea_t varea;

        RD_LOCK(aspace);
        varea = _aspace_bst_search(aspace, msg->fault_vaddr);
        if (varea)
        {
            void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
            _determine_precise_fault_type(msg, (rt_ubase_t)pa, varea);

            if (pa != ARCH_MAP_FAILED && msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
            {
                /* another context mapped the page before we got here */
                LOG_D("%s(fault=%p) has already fixed", __func__, msg->fault_vaddr);
                err = MM_FAULT_FIXABLE_TRUE;
            }
            else
            {
                LOG_D("%s(varea=%s,fault=%p,fault_op=%d,phy=%p)", __func__, VAREA_NAME(varea), msg->fault_vaddr, msg->fault_op, pa);
                /* page offset of the fault within the backing object */
                msg->off = varea->offset + ((long)msg->fault_vaddr - (long)varea->start) / ARCH_PAGE_SIZE;

                /* permission checked by fault op */
                switch (msg->fault_op)
                {
                case MM_FAULT_OP_READ:
                    err = _read_fault(varea, pa, msg);
                    break;
                case MM_FAULT_OP_WRITE:
                    err = _write_fault(varea, pa, msg);
                    break;
                case MM_FAULT_OP_EXECUTE:
                    err = _exec_fault(varea, pa, msg);
                    break;
                default:
                    LOG_D("Unhandle exception");
                    break;
                }
            }
        }
        else
        {
            /* %p matches the void * argument; the old %lx was a
             * format/argument mismatch (UB in fprintf) */
            LOG_W("%s: varea not found at %p", __func__, msg->fault_vaddr);
        }
        RD_UNLOCK(aspace);
    }
    else
    {
        LOG_W("No aspace found");
    }

    return err;
}
207
208 #endif /* RT_USING_SMART */
209