1 /*
2 * Generate definitions needed by assembly language modules.
3 * This code generates raw asm output which is post-processed
4 * to extract and format the required data.
5 */
6 #define COMPILE_OFFSETS
7
8 #include <xen/perfc.h>
9 #include <xen/sched.h>
10 #include <xen/bitops.h>
11 #include <compat/xen.h>
12 #include <asm/fixmap.h>
13 #include <asm/hardirq.h>
14 #include <xen/multiboot.h>
15 #include <xen/multiboot2.h>
16
/*
 * DEFINE() emits a marker line into the generated assembly of the form
 *     ==>#define _sym <value> /* _val */<==
 * The "i" (immediate) constraint forces _val to be an integer constant
 * expression evaluated by the compiler; a post-processing step extracts
 * the text between the ==> <== sentinels to build the asm-offsets header.
 * Nothing here ever executes -- this file is only compiled to assembly.
 */
#define DEFINE(_sym, _val) \
    asm volatile ("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \
        : : "i" (_val) )
/* BLANK() emits an empty marker, which becomes a blank separator line. */
#define BLANK() \
    asm volatile ( "\n.ascii\"==><==\"" : : )
/* OFFSET() defines _sym as the byte offset of member _mem within _str. */
#define OFFSET(_sym, _str, _mem) \
    DEFINE(_sym, offsetof(_str, _mem));
24
/*
 * Never called.  Its only purpose is to be compiled so that the
 * DEFINE()/OFFSET() invocations below emit their extraction markers
 * into the assembly output.  Every symbol name and value emitted here
 * is consumed by assembly modules -- do not rename or remove entries
 * without auditing the asm users.
 */
void __dummy__(void)
{
    /* Guest register frame layout (struct cpu_user_regs). */
    OFFSET(UREGS_r15, struct cpu_user_regs, r15);
    OFFSET(UREGS_r14, struct cpu_user_regs, r14);
    OFFSET(UREGS_r13, struct cpu_user_regs, r13);
    OFFSET(UREGS_r12, struct cpu_user_regs, r12);
    OFFSET(UREGS_rbp, struct cpu_user_regs, rbp);
    OFFSET(UREGS_rbx, struct cpu_user_regs, rbx);
    OFFSET(UREGS_r11, struct cpu_user_regs, r11);
    OFFSET(UREGS_r10, struct cpu_user_regs, r10);
    OFFSET(UREGS_r9, struct cpu_user_regs, r9);
    OFFSET(UREGS_r8, struct cpu_user_regs, r8);
    OFFSET(UREGS_rax, struct cpu_user_regs, rax);
    OFFSET(UREGS_rcx, struct cpu_user_regs, rcx);
    OFFSET(UREGS_rdx, struct cpu_user_regs, rdx);
    OFFSET(UREGS_rsi, struct cpu_user_regs, rsi);
    OFFSET(UREGS_rdi, struct cpu_user_regs, rdi);
    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
    OFFSET(UREGS_rip, struct cpu_user_regs, rip);
    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
    /* Note: asm symbol keeps the legacy "eflags" name for the rflags field. */
    OFFSET(UREGS_eflags, struct cpu_user_regs, rflags);
    OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
    OFFSET(UREGS_ds, struct cpu_user_regs, ds);
    OFFSET(UREGS_es, struct cpu_user_regs, es);
    OFFSET(UREGS_fs, struct cpu_user_regs, fs);
    OFFSET(UREGS_gs, struct cpu_user_regs, gs);
    /*
     * Deliberate: the "kernel" frame size is the offset of es, i.e. the
     * frame up to but excluding the data segment selector slots --
     * presumably because kernel-mode frames never save those; confirm
     * against the asm consumers before changing.
     */
    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
    OFFSET(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    BLANK();

    /* struct domain / struct vcpu fields accessed from assembly. */
    OFFSET(irq_caps_offset, struct domain, irq_caps);
    OFFSET(next_in_list_offset, struct domain, next_in_list);
    OFFSET(VCPU_processor, struct vcpu, processor);
    OFFSET(VCPU_domain, struct vcpu, domain);
    OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
    /* PV guest callback/trap state (arch.pv_vcpu). */
    OFFSET(VCPU_trap_bounce, struct vcpu, arch.pv_vcpu.trap_bounce);
    OFFSET(VCPU_int80_bounce, struct vcpu, arch.pv_vcpu.int80_bounce);
    OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
    OFFSET(VCPU_event_addr, struct vcpu, arch.pv_vcpu.event_callback_eip);
    OFFSET(VCPU_event_sel, struct vcpu, arch.pv_vcpu.event_callback_cs);
    OFFSET(VCPU_failsafe_addr, struct vcpu,
           arch.pv_vcpu.failsafe_callback_eip);
    OFFSET(VCPU_failsafe_sel, struct vcpu,
           arch.pv_vcpu.failsafe_callback_cs);
    OFFSET(VCPU_syscall_addr, struct vcpu,
           arch.pv_vcpu.syscall_callback_eip);
    OFFSET(VCPU_syscall32_addr, struct vcpu,
           arch.pv_vcpu.syscall32_callback_eip);
    OFFSET(VCPU_syscall32_sel, struct vcpu,
           arch.pv_vcpu.syscall32_callback_cs);
    OFFSET(VCPU_syscall32_disables_events, struct vcpu,
           arch.pv_vcpu.syscall32_disables_events);
    OFFSET(VCPU_sysenter_addr, struct vcpu,
           arch.pv_vcpu.sysenter_callback_eip);
    OFFSET(VCPU_sysenter_sel, struct vcpu,
           arch.pv_vcpu.sysenter_callback_cs);
    OFFSET(VCPU_sysenter_disables_events, struct vcpu,
           arch.pv_vcpu.sysenter_disables_events);
    OFFSET(VCPU_trap_ctxt, struct vcpu, arch.pv_vcpu.trap_ctxt);
    OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv_vcpu.kernel_sp);
    OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv_vcpu.kernel_ss);
    OFFSET(VCPU_iopl, struct vcpu, arch.pv_vcpu.iopl);
    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.vgc_flags);
    /* Pending async exception (NMI/MCE) delivery state. */
    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
    OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
    OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
    OFFSET(VCPU_mce_old_mask, struct vcpu, mce_state.old_mask);
    OFFSET(VCPU_async_exception_mask, struct vcpu, async_exception_mask);
    /* Re-export C constants under the same names for asm use. */
    DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
    DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
    DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
    DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events);
    BLANK();

    /* SVM (AMD) per-vcpu state. */
    OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
    OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
    BLANK();

    /* VMX (Intel) per-vcpu state. */
    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm_vmx.vmx_realmode);
    OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm_vmx.vmx_emulate);
    OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm_vmx.vm86_segment_mask);
    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
    BLANK();

    /* Nested-virtualization state. */
    OFFSET(VCPU_nhvm_guestmode, struct vcpu, arch.hvm_vcpu.nvcpu.nv_guestmode);
    OFFSET(VCPU_nhvm_p2m, struct vcpu, arch.hvm_vcpu.nvcpu.nv_p2m);
    OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, arch.hvm_vcpu.nvcpu.u.nsvm.ns_hap_enabled);
    BLANK();

    OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
    BLANK();

    /* Guest register fields cached directly in the SVM VMCB. */
    OFFSET(VMCB_rax, struct vmcb_struct, rax);
    OFFSET(VMCB_rip, struct vmcb_struct, rip);
    OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
    OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
    BLANK();

    /* Event-channel upcall flags, native and 32-bit compat layouts. */
    OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
    OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
    BLANK();

    OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, evtchn_upcall_pending);
    OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, evtchn_upcall_mask);
    BLANK();

    /* Per-cpu info block at the top of each hypervisor stack. */
    OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
    OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
    OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
    OFFSET(CPUINFO_cr4, struct cpu_info, cr4);
    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
    BLANK();

    /* PV trap table entries and trap-bounce scratch area. */
    OFFSET(TRAPINFO_eip, struct trap_info, address);
    OFFSET(TRAPINFO_cs, struct trap_info, cs);
    OFFSET(TRAPINFO_flags, struct trap_info, flags);
    DEFINE(TRAPINFO_sizeof, sizeof(struct trap_info));
    BLANK();

    OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
    OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
    OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
    OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
    BLANK();

#ifdef CONFIG_PERF_COUNTERS
    DEFINE(ASM_PERFC_exceptions, PERFC_exceptions);
    BLANK();
#endif

    /* Shift for indexing the per-cpu irq_cpustat_t array from asm. */
    DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
    OFFSET(IRQSTAT_softirq_pending, irq_cpustat_t, __softirq_pending);
    BLANK();

    OFFSET(CPUINFO_features, struct cpuinfo_x86, x86_capability);
    BLANK();

    /* Multiboot (v1) info block fields, used by early boot asm. */
    OFFSET(MB_flags, multiboot_info_t, flags);
    OFFSET(MB_cmdline, multiboot_info_t, cmdline);
    OFFSET(MB_mem_lower, multiboot_info_t, mem_lower);
    BLANK();

    /* Multiboot2 fixed header and tag fields. */
    DEFINE(MB2_fixed_sizeof, sizeof(multiboot2_fixed_t));
    OFFSET(MB2_fixed_total_size, multiboot2_fixed_t, total_size);
    OFFSET(MB2_tag_type, multiboot2_tag_t, type);
    OFFSET(MB2_tag_size, multiboot2_tag_t, size);
    OFFSET(MB2_load_base_addr, multiboot2_tag_load_base_addr_t, load_base_addr);
    OFFSET(MB2_mem_lower, multiboot2_tag_basic_meminfo_t, mem_lower);
    OFFSET(MB2_efi64_st, multiboot2_tag_efi64_t, pointer);
    OFFSET(MB2_efi64_ih, multiboot2_tag_efi64_ih_t, pointer);
    BLANK();

    DEFINE(l2_identmap_sizeof, sizeof(l2_identmap));
    BLANK();

    OFFSET(DOMAIN_vm_assist, struct domain, vm_assist);
}
187