1 /******************************************************************************
2  * current.h
3  *
4  * Information structure that lives at the bottom of the per-cpu Xen stack.
5  */
6 
7 #ifndef __X86_CURRENT_H__
8 #define __X86_CURRENT_H__
9 
10 #include <xen/percpu.h>
11 #include <public/xen.h>
12 #include <asm/page.h>
13 
14 /*
15  * Xen's cpu stacks are 8 pages (8-page aligned), arranged as:
16  *
17  * 7 - Primary stack (with a struct cpu_info at the top)
18  * 6 - Primary stack
 * 5 - Optionally not present (MEMORY_GUARD)
20  * 4 - unused
21  * 3 - Syscall trampolines
22  * 2 - MCE IST stack
23  * 1 - NMI IST stack
24  * 0 - Double Fault IST stack
25  */
26 
27 /*
28  * Identify which stack page the stack pointer is on.  Returns an index
29  * as per the comment above.
30  */
get_stack_page(unsigned long sp)31 static inline unsigned int get_stack_page(unsigned long sp)
32 {
33     return (sp & (STACK_SIZE-1)) >> PAGE_SHIFT;
34 }
35 
36 struct vcpu;
37 
/*
 * Per-CPU bookkeeping living at the top of each CPU's primary stack;
 * located at runtime by rounding %rsp up (see get_cpu_info()).
 */
struct cpu_info {
    /* Saved guest register state; get_stack_bottom() points into this. */
    struct cpu_user_regs guest_cpu_user_regs;
    unsigned int processor_id;    /* Xen CPU id, set via set_processor_id(). */
    struct vcpu *current_vcpu;    /* vCPU set via set_current(). */
    /* Cached __per_cpu_offset[processor_id] (see set_processor_id()). */
    unsigned long per_cpu_offset;
    /* NOTE(review): presumably a cached copy of this CPU's %cr4 — confirm with users. */
    unsigned long cr4;
    /* get_stack_bottom() must be 16-byte aligned */
};
46 
/*
 * Locate this CPU's struct cpu_info from the stack pointer: round %rsp up
 * to the top of the STACK_SIZE-aligned stack, then step back one struct.
 */
static inline struct cpu_info *get_cpu_info(void)
{
#ifdef __clang__
    /* Clang complains that sp in the else case is not initialised. */
    unsigned long sp;
    asm ( "mov %%rsp, %0" : "=r" (sp) );
#else
    /* GCC: bind sp directly to the %rsp register, no explicit read needed. */
    register unsigned long sp asm("rsp");
#endif

    /* (sp | (STACK_SIZE - 1)) + 1 == one-past-the-end of this stack. */
    return (struct cpu_info *)((sp | (STACK_SIZE - 1)) + 1) - 1;
}
59 
/* Accessors for the current_vcpu field of this CPU's cpu_info. */
#define get_current()         (get_cpu_info()->current_vcpu)
#define set_current(vcpu)     (get_cpu_info()->current_vcpu = (vcpu))
#define current               (get_current())

#define get_processor_id()    (get_cpu_info()->processor_id)
/* Sets processor_id and refreshes the cached per-cpu offset in one go. */
#define set_processor_id(id)  do {                                      \
    struct cpu_info *ci__ = get_cpu_info();                             \
    ci__->per_cpu_offset = __per_cpu_offset[ci__->processor_id = (id)]; \
} while (0)

/* Pointer to the saved guest register frame on this CPU's stack. */
#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
71 
/*
 * Get the bottom-of-stack, as stored in the per-CPU TSS. This actually points
 * into the middle of cpu_info.guest_cpu_user_regs, at the section (starting
 * at the 'es' field) that precisely corresponds to a CPU trap frame.
 */
#define get_stack_bottom()                      \
    ((unsigned long)&get_cpu_info()->guest_cpu_user_regs.es)
79 
/*
 * Get the reasonable stack bounds for stack traces and stack dumps.  Stack
 * dumps have a slightly larger range to include exception frames in the
 * printed information.  The returned word is inside the interesting range.
 */
/* Bound for stack traces on the stack containing @sp. */
unsigned long get_stack_trace_bottom(unsigned long sp);
/* Bound for stack dumps (wider: includes exception frames). */
unsigned long get_stack_dump_bottom (unsigned long sp);
87 
/*
 * Asm fragment spliced into reset_stack_and_jump(): with livepatching
 * enabled, call out to apply any pending livepatch work; otherwise empty.
 */
#ifdef CONFIG_LIVEPATCH
# define CHECK_FOR_LIVEPATCH_WORK "call check_for_livepatch_work;"
#else
# define CHECK_FOR_LIVEPATCH_WORK ""
#endif
93 
/*
 * Discard the current call stack: repoint %rsp at the guest register frame
 * at the top of this CPU's stack, optionally run pending livepatch work,
 * then jump to __fn.  Never returns (hence unreachable()).
 */
#define reset_stack_and_jump(__fn)                                      \
    ({                                                                  \
        __asm__ __volatile__ (                                          \
            "mov %0,%%"__OP"sp;"                                        \
            CHECK_FOR_LIVEPATCH_WORK                                      \
             "jmp %c1"                                                  \
            : : "r" (guest_cpu_user_regs()), "i" (__fn) : "memory" );   \
        unreachable();                                                  \
    })
103 
/*
 * Which VCPU's state is currently running on each CPU?
 * This is not necessarily the same as 'current' as a CPU may be
 * executing a lazy state switch.
 */
109 DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
110 
111 #endif /* __X86_CURRENT_H__ */
112