#include <asm/page-bits.h>

/*
 * CLZERO: zero out the cache line containing the address held in %rax.
 * Encoded as raw opcode bytes so the macro assembles even with toolchains
 * that don't know the mnemonic.
 */
.macro clzero
    .byte 0x0f, 0x01, 0xfc
.endm

/*
 * Call a noreturn function.  This could be JMP, but CALL results in a more
 * helpful backtrace.  BUG is to catch functions which do decide to return...
 */
.macro tailcall fn:req
    call  \fn
    BUG   /* Shouldn't return */
.endm
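
/*
 * Illustrative use (the handler name below is only an example of a noreturn
 * function, not one the surrounding code defines):
 *
 *     tailcall fatal_error_handler
 *
 * emits "call fatal_error_handler" followed by BUG, so an unexpected return
 * is caught instead of falling through into whatever comes next.
 */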

.macro INDIRECT_CALL arg:req
/*
 * Create an indirect call.  arg is a single register.
 *
 * With no compiler support, this degrades into a plain indirect call.
 * With compiler support, dispatch to the correct __x86_indirect_thunk_*
 */
    .if CONFIG_INDIRECT_THUNK == 1

        $done = 0
        .irp reg, ax, cx, dx, bx, bp, si, di, 8, 9, 10, 11, 12, 13, 14, 15
        .ifeqs "\arg", "%r\reg"
            call __x86_indirect_thunk_r\reg
            $done = 1
           .exitm
        .endif
        .endr

        .if $done != 1
            .error "Bad register arg \arg"
        .endif

    .else
        call *\arg
    .endif
.endm
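
/*
 * Illustrative expansion, taking %rdx as the (assumed) operand:
 *
 *     INDIRECT_CALL %rdx
 *
 * becomes "call __x86_indirect_thunk_rdx" when CONFIG_INDIRECT_THUNK is
 * enabled and a plain "call *%rdx" otherwise.  Registers missing from the
 * .irp list (e.g. %rsp) fail the build via the .error directive.
 */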

#ifdef CONFIG_RETURN_THUNK
# define RET jmp __x86_return_thunk
#else
# define RET ret
#endif
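
/*
 * Assembly functions are expected to end with RET rather than a bare ret, so
 * that returns are routed through __x86_return_thunk whenever
 * CONFIG_RETURN_THUNK is enabled.
 */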

#ifdef CONFIG_XEN_IBT
# define ENDBR64 endbr64
#else
# define ENDBR64
#endif
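
/*
 * Illustrative sketch of both helpers in an assembly routine (the label name
 * is made up):
 *
 *     example_helper:
 *             ENDBR64
 *             ...
 *             RET
 *
 * ENDBR64 marks the entry point as a valid indirect-branch target when Xen
 * is built with CET-IBT (CONFIG_XEN_IBT) and expands to nothing otherwise,
 * so it can be written unconditionally.
 */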

.macro guest_access_mask_ptr ptr:req, scratch1:req, scratch2:req
#if defined(CONFIG_SPECULATIVE_HARDEN_GUEST_ACCESS)
    /*
     * Here we want to adjust \ptr such that
     * - if it's within Xen range, it becomes non-canonical,
     * - otherwise if it's (non-)canonical on input, it retains that property,
     * - if the result is non-canonical, bit 47 is clear (to avoid
     *   potentially populating the cache with Xen data on AMD-like hardware),
     * but guaranteed without any conditional branches (hence in assembly).
     *
     * To achieve this we determine which bit to forcibly clear: Either bit 47
     * (in case the address is below HYPERVISOR_VIRT_END) or bit 63.  Further
     * we determine whether to forcibly set bit 63: In case we first cleared
     * it, we'll merely restore the original address.  In case we ended up
     * clearing bit 47 (i.e. the address was either non-canonical or within Xen
     * range), setting the bit will yield a guaranteed non-canonical address.
     * If we didn't clear a bit, we also won't set one: The address was in the
     * low half of address space in that case with bit 47 already clear.  The
     * address can thus be left unchanged, whether canonical or not.
     */
    mov $(HYPERVISOR_VIRT_END - 1), \scratch1
    mov $(VADDR_BITS - 1), \scratch2
    cmp \ptr, \scratch1
    /*
     * Not needed: The value we have in \scratch1 will be truncated to 6 bits,
     * thus yielding the value we need.
    mov $63, \scratch1
     */
    cmovnb \scratch2, \scratch1
    xor \scratch2, \scratch2
    btr \scratch1, \ptr
    rcr $1, \scratch2
    or \scratch2, \ptr
#elif defined(CONFIG_DEBUG) && defined(CONFIG_PV)
    xor $~\@, \scratch1
    xor $~\@, \scratch2
#endif
.endm
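
/*
 * Sketch of how the three cases play out, following the sequence above:
 * - \ptr below HYPERVISOR_VIRT_END with bit 47 set (Xen range, or
 *   non-canonical with bit 47 set): btr clears bit 47 (CF=1), and rcr/or
 *   then set bit 63, yielding a non-canonical address with bit 47 clear.
 * - \ptr in the low, guest half: bit 47 is already clear, so btr changes
 *   nothing (CF=0) and the address is left untouched.
 * - \ptr at or above HYPERVISOR_VIRT_END: the low 6 bits of
 *   HYPERVISOR_VIRT_END - 1 select bit 63, which btr clears and the rcr/or
 *   pair restores, again leaving the address unchanged.
 */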