
#include <xen/init.h>
#include <xen/list.h>
#include <xen/perfc.h>
#include <xen/rcupdate.h>
#include <xen/sort.h>
#include <xen/spinlock.h>
#include <asm/uaccess.h>
#include <xen/domain_page.h>
#include <xen/virtual_region.h>
#include <xen/livepatch.h>

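/*
 * Exception table entries hold offsets relative to their own location
 * rather than absolute addresses.  EX_FIELD() turns such a self-relative
 * field back into the absolute address it refers to.
 */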
#define EX_FIELD(ptr, field) ((unsigned long)&(ptr)->field + (ptr)->field)

static inline unsigned long ex_addr(const struct exception_table_entry *x)
{
    return EX_FIELD(x, addr);
}

static inline unsigned long ex_cont(const struct exception_table_entry *x)
{
    return EX_FIELD(x, cont);
}

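/*
 * sort() comparator: order entries by their resolved faulting address.
 * The addresses are compared rather than subtracted to avoid overflow.
 */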
static int init_or_livepatch cmp_ex(const void *a, const void *b)
{
    const struct exception_table_entry *l = a, *r = b;
    unsigned long lip = ex_addr(l);
    unsigned long rip = ex_addr(r);

    /* avoid overflow */
    if ( lip > rip )
        return 1;
    if ( lip < rip )
        return -1;
    return 0;
}

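/*
 * sort() swap callback.  Since addr and cont are stored relative to the
 * entry itself, moving an entry by 'delta' bytes requires adjusting both
 * offsets by the same amount so they keep pointing at the same targets.
 */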
#ifndef swap_ex
static void init_or_livepatch swap_ex(void *a, void *b, int size)
{
    struct exception_table_entry *l = a, *r = b, tmp;
    long delta = b - a;

    tmp = *l;
    l->addr = r->addr + delta;
    l->cont = r->cont + delta;
    r->addr = tmp.addr - delta;
    r->cont = tmp.cont - delta;
}
#endif

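/*
 * Sort one exception table by faulting address so that
 * search_one_extable() can binary-search it.  Tagged init_or_livepatch
 * since livepatch payloads bring their own tables to be sorted on load.
 */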
void init_or_livepatch sort_exception_table(struct exception_table_entry *start,
                                            const struct exception_table_entry *stop)
{
    sort(start, stop - start,
         sizeof(struct exception_table_entry), cmp_ex, swap_ex);
}

void __init sort_exception_tables(void)
{
    sort_exception_table(__start___ex_table, __stop___ex_table);
    sort_exception_table(__start___pre_ex_table, __stop___pre_ex_table);
}

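/*
 * Binary-search a sorted exception table for an entry matching @value
 * (a faulting instruction address).  Returns the absolute continuation
 * address to resume at, or 0 if no entry matches.
 */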
static unsigned long
search_one_extable(const struct exception_table_entry *first,
                   const struct exception_table_entry *last,
                   unsigned long value)
{
    const struct exception_table_entry *mid;
    long diff;

    while ( first <= last )
    {
        mid = (last - first) / 2 + first;
        diff = ex_addr(mid) - value;
        if ( diff == 0 )
            return ex_cont(mid);
        else if ( diff < 0 )
            first = mid + 1;
        else
            last = mid - 1;
    }
    return 0;
}

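/*
 * Find a fixup address for the fault described by @regs.  Faults in an
 * ordinary text region (the hypervisor image or a livepatch) are resolved
 * via that region's own exception table.  Faults inside the per-CPU
 * emulation stub buffer are instead resolved against the table entry
 * attached to the call site which invoked the stub: on a hit, the return
 * address on the stack is replaced with a stub_exception_token carrying
 * the trap number and error code for the recovery code to consume.
 */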
unsigned long
search_exception_table(const struct cpu_user_regs *regs)
{
    const struct virtual_region *region = find_text_region(regs->rip);
    unsigned long stub = this_cpu(stubs.addr);

    if ( region && region->ex )
        return search_one_extable(region->ex, region->ex_end - 1, regs->rip);

    if ( regs->rip >= stub + STUB_BUF_SIZE / 2 &&
         regs->rip < stub + STUB_BUF_SIZE &&
         regs->rsp > (unsigned long)regs &&
         regs->rsp < (unsigned long)get_cpu_info() )
    {
        unsigned long retptr = *(unsigned long *)regs->rsp;

        region = find_text_region(retptr);
        retptr = region && region->ex
                 ? search_one_extable(region->ex, region->ex_end - 1, retptr)
                 : 0;
        if ( retptr )
        {
            /*
             * Put trap number and error code on the stack (in place of the
             * original return address) for recovery code to pick up.
             */
            union stub_exception_token token = {
                .fields.ec = regs->error_code,
                .fields.trapnr = regs->entry_vector,
            };

            *(unsigned long *)regs->rsp = token.raw;
            return retptr;
        }
    }

    return 0;
}

#ifndef NDEBUG
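/*
 * Boot-time self-test for the stub recovery path above: copy a few
 * deliberately faulting instruction sequences into the per-CPU stub
 * buffer, call into them, and check that the fixup reports the expected
 * trap number via the stub_exception_token written onto the stack.
 */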
static int __init stub_selftest(void)
{
    static const struct {
        uint8_t opc[4];
        uint64_t rax;
        union stub_exception_token res;
    } tests[] __initconst = {
        { .opc = { 0x0f, 0xb9, 0xc3, 0xc3 }, /* ud1 */
          .res.fields.trapnr = TRAP_invalid_op },
        { .opc = { 0x90, 0x02, 0x00, 0xc3 }, /* nop; add (%rax),%al */
          .rax = 0x0123456789abcdef,
          .res.fields.trapnr = TRAP_gp_fault },
        { .opc = { 0x02, 0x04, 0x04, 0xc3 }, /* add (%rsp,%rax),%al */
          .rax = 0xfedcba9876543210,
          .res.fields.trapnr = TRAP_stack_error },
        { .opc = { 0xcc, 0xc3, 0xc3, 0xc3 }, /* int3 */
          .res.fields.trapnr = TRAP_int3 },
    };
    unsigned long addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2;
    unsigned int i;

    printk("Running stub recovery selftests...\n");

    for ( i = 0; i < ARRAY_SIZE(tests); ++i )
    {
        uint8_t *ptr = map_domain_page(_mfn(this_cpu(stubs.mfn))) +
                       (addr & ~PAGE_MASK);
        unsigned long res = ~0;

        memset(ptr, 0xcc, STUB_BUF_SIZE / 2);
        memcpy(ptr, tests[i].opc, ARRAY_SIZE(tests[i].opc));
        unmap_domain_page(ptr);

        asm volatile ( "call *%[stb]\n"
                       ".Lret%=:\n\t"
                       ".pushsection .fixup,\"ax\"\n"
                       ".Lfix%=:\n\t"
                       "pop %[exn]\n\t"
                       "jmp .Lret%=\n\t"
                       ".popsection\n\t"
                       _ASM_EXTABLE(.Lret%=, .Lfix%=)
                       : [exn] "+m" (res)
                       : [stb] "rm" (addr), "a" (tests[i].rax) );
        ASSERT(res == tests[i].res.raw);
    }

    return 0;
}
__initcall(stub_selftest);
#endif

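/*
 * As above, but against the pre-exception table, whose fixups are applied
 * directly from the exception entry path.  Hits are logged and counted.
 */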
unsigned long
search_pre_exception_table(struct cpu_user_regs *regs)
{
    unsigned long addr = regs->rip;
    unsigned long fixup = search_one_extable(
        __start___pre_ex_table, __stop___pre_ex_table - 1, addr);
    if ( fixup )
    {
        dprintk(XENLOG_INFO, "Pre-exception: %p -> %p\n", _p(addr), _p(fixup));
        perfc_incr(exception_fixed);
    }
    return fixup;
}