1 /*
2 * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved.
3 */
4
5 #include <xen/init.h>
6 #include <xen/kernel.h>
7 #include <xen/mm.h>
8 #include <xen/rcupdate.h>
9 #include <xen/sections.h>
10 #include <xen/spinlock.h>
11 #include <xen/virtual_region.h>
12
/*
 * Linker-provided bounds of the bug_frame arrays.  One start/stop symbol
 * pair per bug frame severity (0..3), referenced by the built-in region
 * initialisers below.
 */
extern const struct bug_frame
    __start_bug_frames_0[], __stop_bug_frames_0[],
    __start_bug_frames_1[], __stop_bug_frames_1[],
    __start_bug_frames_2[], __stop_bug_frames_2[],
    __start_bug_frames_3[], __stop_bug_frames_3[];

/*
 * For the built-in regions, the doubly linked list can be constructed at
 * build time.  Forward-declare the elements and their initialisers.
 */
static struct list_head virtual_region_list;
static struct virtual_region core, core_init;

/*
 * Build-time list links forming the circular list:
 *   virtual_region_list -> core -> core_init -> virtual_region_list
 */
#define LIST_ENTRY_HEAD() { .next = &core.list, .prev = &core_init.list }
#define LIST_ENTRY_CORE() { .next = &core_init.list, .prev = &virtual_region_list }
#define LIST_ENTRY_INIT() { .next = &virtual_region_list, .prev = &core.list }
29
/* Built-in region covering the main (non-init) hypervisor image. */
static struct virtual_region core __read_mostly = {
    .list = LIST_ENTRY_CORE(),
    .text_start = _stext,
    .text_end = _etext,
    .rodata_start = _srodata,
    .rodata_end = _erodata,

    /* All bug frame severities for the whole image. */
    .frame = {
        { __start_bug_frames_0, __stop_bug_frames_0 },
        { __start_bug_frames_1, __stop_bug_frames_1 },
        { __start_bug_frames_2, __stop_bug_frames_2 },
        { __start_bug_frames_3, __stop_bug_frames_3 },
    },

#ifdef CONFIG_HAS_EX_TABLE
    .ex = __start___ex_table,
    .ex_end = __stop___ex_table,
#endif
};
49
/*
 * Built-in region covering .init text.  Becomes irrelevant when __init
 * sections are cleared, at which point it is dropped from the list via
 * unregister_init_virtual_region().
 */
static struct virtual_region core_init __initdata = {
    .list = LIST_ENTRY_INIT(),
    .text_start = _sinittext,
    .text_end = _einittext,

    /*
     * Same bug frame / extable bounds as the core region; lookups are
     * narrowed by the text_start/text_end check first.
     */
    .frame = {
        { __start_bug_frames_0, __stop_bug_frames_0 },
        { __start_bug_frames_1, __stop_bug_frames_1 },
        { __start_bug_frames_2, __stop_bug_frames_2 },
        { __start_bug_frames_3, __stop_bug_frames_3 },
    },

#ifdef CONFIG_HAS_EX_TABLE
    .ex = __start___ex_table,
    .ex_end = __stop___ex_table,
#endif
};
68
/*
 * RCU locking.  Modifications to the list must be done in exclusive mode,
 * and hence need to hold the spinlock.
 *
 * All readers of virtual_region_list MUST use list_for_each_entry_rcu,
 * inside an rcu_read_lock()/rcu_read_unlock() pair on
 * rcu_virtual_region_lock.
 */
static struct list_head virtual_region_list = LIST_ENTRY_HEAD();
static DEFINE_SPINLOCK(virtual_region_lock);
static DEFINE_RCU_READ_LOCK(rcu_virtual_region_lock);
78
find_text_region(unsigned long addr)79 const struct virtual_region *find_text_region(unsigned long addr)
80 {
81 const struct virtual_region *iter, *region = NULL;
82
83 rcu_read_lock(&rcu_virtual_region_lock);
84 list_for_each_entry_rcu ( iter, &virtual_region_list, list )
85 {
86 if ( (void *)addr >= iter->text_start &&
87 (void *)addr < iter->text_end )
88 {
89 region = iter;
90 break;
91 }
92 }
93 rcu_read_unlock(&rcu_virtual_region_lock);
94
95 return region;
96 }
97
98 /*
99 * Suggest inline so when !CONFIG_LIVEPATCH the function is not left
100 * unreachable after init code is removed.
101 */
remove_virtual_region(struct virtual_region * r)102 static void inline remove_virtual_region(struct virtual_region *r)
103 {
104 unsigned long flags;
105
106 spin_lock_irqsave(&virtual_region_lock, flags);
107 list_del_rcu(&r->list);
108 spin_unlock_irqrestore(&virtual_region_lock, flags);
109 }
110
111 #ifdef CONFIG_LIVEPATCH
register_virtual_region(struct virtual_region * r)112 void register_virtual_region(struct virtual_region *r)
113 {
114 unsigned long flags;
115
116 spin_lock_irqsave(&virtual_region_lock, flags);
117 list_add_tail_rcu(&r->list, &virtual_region_list);
118 spin_unlock_irqrestore(&virtual_region_lock, flags);
119 }
120
void unregister_virtual_region(struct virtual_region *r)
{
    remove_virtual_region(r);

    /*
     * Ensure no CPU might still be using the removed region: wait for
     * in-flight RCU readers/callbacks to complete before returning, so
     * the caller may safely free @r's storage.
     */
    rcu_barrier();
}
128
129 #ifdef CONFIG_X86
relax_virtual_region_perms(void)130 void relax_virtual_region_perms(void)
131 {
132 const struct virtual_region *region;
133
134 rcu_read_lock(&rcu_virtual_region_lock);
135 list_for_each_entry_rcu( region, &virtual_region_list, list )
136 {
137 modify_xen_mappings_lite((unsigned long)region->text_start,
138 (unsigned long)region->text_end,
139 PAGE_HYPERVISOR_RWX);
140 if ( region->rodata_start )
141 modify_xen_mappings_lite((unsigned long)region->rodata_start,
142 (unsigned long)region->rodata_end,
143 PAGE_HYPERVISOR_RW);
144 }
145 rcu_read_unlock(&rcu_virtual_region_lock);
146 }
147
tighten_virtual_region_perms(void)148 void tighten_virtual_region_perms(void)
149 {
150 const struct virtual_region *region;
151
152 rcu_read_lock(&rcu_virtual_region_lock);
153 list_for_each_entry_rcu( region, &virtual_region_list, list )
154 {
155 modify_xen_mappings_lite((unsigned long)region->text_start,
156 (unsigned long)region->text_end,
157 PAGE_HYPERVISOR_RX);
158 if ( region->rodata_start )
159 modify_xen_mappings_lite((unsigned long)region->rodata_start,
160 (unsigned long)region->rodata_end,
161 PAGE_HYPERVISOR_RO);
162 }
163 rcu_read_unlock(&rcu_virtual_region_lock);
164 }
165 #endif /* CONFIG_X86 */
166 #endif /* CONFIG_LIVEPATCH */
167
/*
 * Drop the built-in .init region from the lookup list, once the system has
 * finished booting and .init text is about to become invalid.
 */
void __init unregister_init_virtual_region(void)
{
    /* Must only run once boot has completed. */
    BUG_ON(system_state != SYS_STATE_active);

    remove_virtual_region(&core_init);
}
174
175 /*
176 * Local variables:
177 * mode: C
178 * c-file-style: "BSD"
179 * c-basic-offset: 4
180 * tab-width: 4
181 * indent-tabs-mode: nil
182 * End:
183 */
184