/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-12-06     WangXiaoyao  the first version
 */
#ifndef __MM_FAULT_H__
#define __MM_FAULT_H__

#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
/* fast path fault handler: a page frame in kernel space is returned */
#define MM_FAULT_STATUS_OK            0
/* customized fault handler: mapping is done by the handler itself via rt_varea_map_* */
#define MM_FAULT_STATUS_OK_MAPPED     1
#define MM_FAULT_STATUS_UNRECOVERABLE 4
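
/*
 * Illustration (a hedged sketch, not part of this header's API): a memory
 * object's page-fault callback reports how it resolved the fault through
 * these status codes.  A handler that only provides a page frame in kernel
 * space lets the fast path perform the mapping:
 *
 *     msg->response.vaddr  = frame_kaddr;        // hypothetical kernel-space frame
 *     msg->response.size   = ARCH_PAGE_SIZE;
 *     msg->response.status = MM_FAULT_STATUS_OK;
 *
 * A handler that maps the page itself (e.g. via rt_varea_map_page) reports
 * MM_FAULT_STATUS_OK_MAPPED instead, while a handler that cannot resolve the
 * fault leaves MM_FAULT_STATUS_UNRECOVERABLE in place.
 */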

#define MM_FAULT_FIXABLE_FALSE  0
#define MM_FAULT_FIXABLE_TRUE   1

enum rt_mm_fault_op
{
    MM_FAULT_OP_READ = 1,
    MM_FAULT_OP_WRITE,
    MM_FAULT_OP_EXECUTE,
};

enum rt_mm_fault_type
{
    /**
     * Occurs when an instruction attempts to access a memory address
     * without the required R/W/X permission
     */
    MM_FAULT_TYPE_RWX_PERM,

    /* Occurs when the access lacks the required privilege level (e.g. user code accessing kernel memory) */
    MM_FAULT_TYPE_NO_PRIVILEGES,

    /**
     * Occurs when a load or store instruction accesses a virtual memory
     * address that is not currently mapped to a physical memory page
     */
    MM_FAULT_TYPE_PAGE_FAULT,

    /**
     * Occurs on a bus error, comparable to a SIGBUS
     */
    MM_FAULT_TYPE_BUS_ERROR,

    /**
     * Occurs when a page table walk fails, a permission check fails, or a
     * write hits a non-dirty page
     */
    MM_FAULT_TYPE_GENERIC_MMU,

    MM_FAULT_TYPE_GENERIC,
    __PRIVATE_PAGE_INSERT,
};

enum rt_mm_hint_prefetch
{
    MM_FAULT_HINT_PREFETCH_NONE,
    MM_FAULT_HINT_PREFETCH_READY,
};

struct rt_mm_fault_res
{
    void *vaddr;
    rt_size_t size;
    int status;

    /* hint for prefetch strategy */
    enum rt_mm_hint_prefetch hint;
};

struct rt_aspace_fault_msg
{
    enum rt_mm_fault_op fault_op;
    enum rt_mm_fault_type fault_type;
    rt_size_t off;
    void *fault_vaddr;

    struct rt_mm_fault_res response;
};

struct rt_aspace_io_msg
{
    /* offset in varea */
    rt_size_t off;
    /* fault address in target address space */
    void *fault_vaddr;
    /* read/write buffer in kernel space */
    void *buffer_vaddr;

    struct rt_mm_fault_res response;
};

rt_inline void rt_mm_fault_res_init(struct rt_mm_fault_res *res)
{
    res->vaddr = RT_NULL;
    res->size = 0;
    res->hint = MM_FAULT_HINT_PREFETCH_NONE;
    res->status = MM_FAULT_STATUS_UNRECOVERABLE;
}

rt_inline void rt_mm_io_msg_init(struct rt_aspace_io_msg *io, rt_size_t off,
                                 void *fault_vaddr, void *buffer_vaddr)
{
    io->off = off;
    io->fault_vaddr = fault_vaddr;
    io->buffer_vaddr = buffer_vaddr;
    rt_mm_fault_res_init(&io->response);
}

struct rt_aspace;
/* MMU-based page fault handler; returns MM_FAULT_FIXABLE_TRUE or MM_FAULT_FIXABLE_FALSE */
int rt_aspace_fault_try_fix(struct rt_aspace *aspace, struct rt_aspace_fault_msg *msg);
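
/*
 * A minimal usage sketch, for illustration only: an architecture-level page
 * fault handler could fill an rt_aspace_fault_msg and ask the address space
 * to fix the fault.  The function name and the is_write parameter below are
 * assumptions made for this example, not part of the RT-Thread API.
 */
rt_inline int example_aspace_page_fault(struct rt_aspace *aspace,
                                        void *fault_va, rt_bool_t is_write)
{
    struct rt_aspace_fault_msg msg;

    /* describe the faulting access */
    msg.fault_op = is_write ? MM_FAULT_OP_WRITE : MM_FAULT_OP_READ;
    msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
    msg.fault_vaddr = fault_va;
    msg.off = 0; /* assumption: the mm layer derives the varea offset itself */

    /* response defaults to MM_FAULT_STATUS_UNRECOVERABLE until a handler fills it */
    rt_mm_fault_res_init(&msg.response);

    /* MM_FAULT_FIXABLE_TRUE means the fault was resolved and execution may resume */
    return rt_aspace_fault_try_fix(aspace, &msg);
}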

#endif /* __MM_FAULT_H__ */