// SPDX-License-Identifier: MIT
/*
 * hypervisor.c
 *
 * Communication to/from the hypervisor.
 *
 * Copyright (c) 2002-2003, K A Fraser
 * Copyright (c) 2005, Grzegorz Milos, gm281@cam.ac.uk, Intel Research Cambridge
 * Copyright (c) 2020, EPAM Systems Inc.
 */
#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>

#include <asm/io.h>
#include <asm/armv8/mmu.h>
#include <asm/xen/system.h>

#include <linux/bug.h>

#include <xen/hvm.h>
#include <xen/events.h>
#include <xen/gnttab.h>
#include <xen/xenbus.h>
#include <xen/interface/memory.h>

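/*
 * Event channels that are both pending and not masked in the idx'th word
 * of the shared-info bitmaps (the cpu argument is currently unused).
 */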
#define active_evtchns(cpu, sh, idx)	\
	((sh)->evtchn_pending[idx] &	\
	 ~(sh)->evtchn_mask[idx])

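/* Non-zero while do_hypervisor_callback() is executing. */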
int in_callback;

/*
 * Shared page for communicating with the hypervisor.
 * Event flags go here, for example.
 */
struct shared_info *HYPERVISOR_shared_info;
static const char *param_name(int op)
{
#define PARAM(x) [HVM_PARAM_##x] = #x
	static const char *const names[] = {
		PARAM(CALLBACK_IRQ),
		PARAM(STORE_PFN),
		PARAM(STORE_EVTCHN),
		PARAM(PAE_ENABLED),
		PARAM(IOREQ_PFN),
		PARAM(VPT_ALIGN),
		PARAM(CONSOLE_PFN),
		PARAM(CONSOLE_EVTCHN),
	};
#undef PARAM

	if (op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}

/**
 * hvm_get_parameter_maintain_dcache - obtain an HVM parameter value
 * @idx: HVM parameter index
 * @value: Value to fill in
 *
 * According to the Xen on ARM ABI (xen/include/public/arch-arm.h),
 * all memory which is shared with other entities in the system
 * (including the hypervisor and other guests) must reside in memory
 * which is mapped as Normal Inner Write-Back Outer Write-Back
 * Inner-Shareable.
 *
 * Thus, page attributes must be set identically for all entities
 * working with that page.
 *
 * Before MMU setup the data cache is turned off, so manual data cache
 * maintenance is required, because the page attributes seen by the
 * hypervisor and by this (not yet cacheable) mapping differ.
 */
int hvm_get_parameter_maintain_dcache(int idx, uint64_t *value)
{
	struct xen_hvm_param xhv;
	int ret;

	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));
	xhv.domid = DOMID_SELF;
	xhv.index = idx;
	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));

	ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
	if (ret < 0) {
		pr_err("Cannot get hvm parameter %s (%d): %d!\n",
		       param_name(idx), idx, ret);
		BUG();
	}
	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));

	*value = xhv.value;

	return ret;
}

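/*
 * Usage sketch for hvm_get_parameter_maintain_dcache() above (illustrative,
 * not taken from this file): an early console setup running before the MMU
 * and dcache are enabled might read its event channel like this, relying on
 * the manual cache maintenance done inside the helper:
 *
 *	uint64_t evtchn = 0;
 *
 *	hvm_get_parameter_maintain_dcache(HVM_PARAM_CONSOLE_EVTCHN, &evtchn);
 */
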
int hvm_get_parameter(int idx, uint64_t *value)
{
	struct xen_hvm_param xhv;
	int ret;

	xhv.domid = DOMID_SELF;
	xhv.index = idx;
	ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
	if (ret < 0) {
		pr_err("Cannot get hvm parameter %s (%d): %d!\n",
		       param_name(idx), idx, ret);
		BUG();
	}

	*value = xhv.value;

	return ret;
}

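/*
 * Usage sketch for hvm_get_parameter() above (illustrative): once the dcache
 * is configured, callers such as the xenstore initialization can use the
 * plain variant, e.g.:
 *
 *	uint64_t store_pfn = 0, store_evtchn = 0;
 *
 *	hvm_get_parameter(HVM_PARAM_STORE_PFN, &store_pfn);
 *	hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &store_evtchn);
 */
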
struct shared_info *map_shared_info(void *p)
{
	struct xen_add_to_physmap xatp;

	HYPERVISOR_shared_info = (struct shared_info *)memalign(PAGE_SIZE,
								PAGE_SIZE);
	if (!HYPERVISOR_shared_info)
		BUG();

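	/* Ask Xen to place its shared info frame at the gfn backing this page. */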
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0)
		BUG();

	return HYPERVISOR_shared_info;
}

void unmap_shared_info(void)
{
	xen_pfn_t shared_info_pfn = virt_to_pfn(HYPERVISOR_shared_info);
	struct xen_remove_from_physmap xrfp = {0};
	struct xen_memory_reservation reservation = {0};
	xen_ulong_t nr_exts = 1;

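	/* Drop the shared info frame from this domain's physmap. */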
	xrfp.domid = DOMID_SELF;
	xrfp.gpfn = shared_info_pfn;
	if (HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrfp) != 0)
		panic("Failed to unmap HYPERVISOR_shared_info\n");

	/*
	 * Removing the page from the physmap leaves a hole in the address
	 * space at HYPERVISOR_shared_info. To free the memory allocated with
	 * memalign and to prevent faults on any later access to this page,
	 * fill the 4KB hole with XENMEM_populate_physmap before jumping to
	 * Linux.
	 */
	reservation.domid = DOMID_SELF;
	reservation.extent_order = 0;
	reservation.address_bits = 0;
	set_xen_guest_handle(reservation.extent_start, &shared_info_pfn);
	reservation.nr_extents = nr_exts;
	if (HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation) != nr_exts)
		panic("Failed to populate memory on HYPERVISOR_shared_info addr\n");

	/* Now we can return this page to the memory allocator */
	free(HYPERVISOR_shared_info);
}

void do_hypervisor_callback(struct pt_regs *regs)
{
	unsigned long l1, l2, l1i, l2i;
	unsigned int port;
	int cpu = 0;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = &s->vcpu_info[cpu];

	in_callback = 1;

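	/*
	 * Acknowledge the upcall and atomically fetch-and-clear the per-vCPU
	 * selector word; each set bit identifies a word of evtchn_pending[]
	 * with events to service.
	 */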
	vcpu_info->evtchn_upcall_pending = 0;
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);

	while (l1 != 0) {
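		/* Pick the next group with pending events and clear its bit. */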
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);
			l2 &= ~(1UL << l2i);

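			/* Translate (word index, bit index) into a port number. */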
			port = (l1i * (sizeof(unsigned long) * 8)) + l2i;
			do_event(port, regs);
		}
	}

	in_callback = 0;
}

void force_evtchn_callback(void)
{
#ifdef XEN_HAVE_PV_UPCALL_MASK
	int save;
#endif
	struct vcpu_info *vcpu;

	vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];
#ifdef XEN_HAVE_PV_UPCALL_MASK
	save = vcpu->evtchn_upcall_mask;
#endif

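	/*
	 * Re-run the callback until no upcall is pending, masking PV upcalls
	 * around each pass when the interface supports it.
	 */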
	while (vcpu->evtchn_upcall_pending) {
#ifdef XEN_HAVE_PV_UPCALL_MASK
		vcpu->evtchn_upcall_mask = 1;
#endif
		do_hypervisor_callback(NULL);
#ifdef XEN_HAVE_PV_UPCALL_MASK
		vcpu->evtchn_upcall_mask = save;
#endif
	}
}

void mask_evtchn(uint32_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;

	synch_set_bit(port, &s->evtchn_mask[0]);
}

void unmask_evtchn(uint32_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = &s->vcpu_info[smp_processor_id()];

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * Just like a real IO-APIC we 'lose the interrupt edge' if the
	 * channel is masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / (sizeof(unsigned long) * 8),
				    &vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
#ifdef XEN_HAVE_PV_UPCALL_MASK
		if (!vcpu_info->evtchn_upcall_mask)
#endif
			force_evtchn_callback();
	}
}

void clear_evtchn(uint32_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;

	synch_clear_bit(port, &s->evtchn_pending[0]);
}

int xen_init(void)
{
	int el = current_el();

	debug("%s\n", __func__);

	if (el != 1) {
		puts("XEN:\tnot running from EL1\n");
		return 0;
	}

	map_shared_info(NULL);
	init_events();
	init_xenbus();
	init_gnttab();

	return 0;
}

void xen_fini(void)
{
	debug("%s\n", __func__);

	fini_gnttab();
	fini_xenbus();
	fini_events();
	unmap_shared_info();
}