/* SPDX-License-Identifier: MIT */
/*
 ****************************************************************************
 * (C) 2006 - Cambridge University
 * (C) 2021-2024 - EPAM Systems
 ****************************************************************************
 *
 *        File: gnttab.c
 *      Author: Steven Smith (sos22@cam.ac.uk)
 *     Changes: Grzegorz Milos (gm281@cam.ac.uk)
 *
 *        Date: July 2006
 *
 * Environment: Xen Minimal OS
 * Description: Simple grant tables implementation. About as stupid as it's
 *  possible to be and still work.
 *
 ****************************************************************************
 */
#include <zephyr/arch/arm64/hypercall.h>
#include <zephyr/xen/generic.h>
#include <zephyr/xen/gnttab.h>
#include <zephyr/xen/public/grant_table.h>
#include <zephyr/xen/public/memory.h>
#include <zephyr/xen/public/xen.h>
#include <zephyr/sys/barrier.h>

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/device_mmio.h>

LOG_MODULE_REGISTER(xen_gnttab);

/* Timeout for grant table ops retrying */
#define GOP_RETRY_DELAY 200
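/*
 * gop_eagain_retry() below retries an operation with linearly growing sleeps
 * (10 ms, 20 ms, ...) and gives up once the next sleep would reach this
 * value, i.e. after roughly 1.9 s of accumulated sleeping.
 */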

#define GNTTAB_GREF_USED (UINT32_MAX - 1)
#define GNTTAB_SIZE (CONFIG_NR_GRANT_FRAMES * XEN_PAGE_SIZE)
#define NR_GRANT_ENTRIES (GNTTAB_SIZE / sizeof(grant_entry_v1_t))
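/*
 * For scale: with the 4 KiB Xen page size and 8-byte grant_entry_v1_t
 * entries, each grant frame holds 512 entries, so e.g. CONFIG_NR_GRANT_FRAMES=1
 * gives NR_GRANT_ENTRIES = 512 (the first GNTTAB_NR_RESERVED_ENTRIES of which
 * are reserved by Xen).
 */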

BUILD_ASSERT(GNTTAB_SIZE <= DT_REG_SIZE_BY_IDX(DT_INST(0, xen_xen), 0),
	     "Number of grant frames is bigger than grant table DT region!");
BUILD_ASSERT(GNTTAB_SIZE <= CONFIG_KERNEL_VM_SIZE);

static struct gnttab {
	struct k_sem sem;
	grant_entry_v1_t *table;
	grant_ref_t gref_list[NR_GRANT_ENTRIES];
} gnttab;

static grant_ref_t get_free_entry(void)
{
	grant_ref_t gref;
	unsigned int flags;

	k_sem_take(&gnttab.sem, K_FOREVER);

	flags = irq_lock();
	gref = gnttab.gref_list[0];
	__ASSERT((gref >= GNTTAB_NR_RESERVED_ENTRIES &&
		gref < NR_GRANT_ENTRIES), "Invalid gref = %d", gref);
	gnttab.gref_list[0] = gnttab.gref_list[gref];
	gnttab.gref_list[gref] = GNTTAB_GREF_USED;
	irq_unlock(flags);

	return gref;
}

static void put_free_entry(grant_ref_t gref)
{
	unsigned int flags;

	flags = irq_lock();
	if (gnttab.gref_list[gref] != GNTTAB_GREF_USED) {
		LOG_WRN("Trying to put already free gref = %u", gref);
		irq_unlock(flags);

		return;
	}

	gnttab.gref_list[gref] = gnttab.gref_list[0];
	gnttab.gref_list[0] = gref;

	irq_unlock(flags);

	k_sem_give(&gnttab.sem);
}

static void gnttab_grant_permit_access(grant_ref_t gref, domid_t domid,
				       unsigned long gfn, bool readonly)
{
	uint16_t flags = GTF_permit_access;

	if (readonly) {
		flags |= GTF_readonly;
	}

	gnttab.table[gref].frame = gfn;
	gnttab.table[gref].domid = domid;
	/* Make sure gfn and domid are written before the flags are set */
	barrier_dmem_fence_full();

	gnttab.table[gref].flags = flags;
}

grant_ref_t gnttab_grant_access(domid_t domid, unsigned long gfn,
				bool readonly)
{
	grant_ref_t gref = get_free_entry();

	gnttab_grant_permit_access(gref, domid, gfn, readonly);

	return gref;
}

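/*
 * Example usage (illustrative sketch, not part of this driver): share an
 * already allocated page with a backend domain and revoke access once the
 * backend is done with it. The backend domid (0, i.e. Dom0), the 'page'
 * buffer and the way gref is communicated to the backend are assumptions
 * made only for this example.
 *
 *	grant_ref_t gref = gnttab_grant_access(0, xen_virt_to_gfn(page), false);
 *
 *	// publish gref to the backend (e.g. via Xenstore) and perform I/O
 *
 *	if (gnttab_end_access(gref) != 0) {
 *		// the backend still maps the page, the grant was not released
 *	}
 */
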
/* Reset flags to zero in order to stop using the grant */
static int gnttab_reset_flags(grant_ref_t gref)
{
	uint16_t flags, nflags;
	uint16_t *pflags;

	pflags = &gnttab.table[gref].flags;
	nflags = *pflags;

	do {
		flags = nflags;
		if (flags & (GTF_reading | GTF_writing)) {
			LOG_WRN("gref = %u still in use! (0x%x)\n",
				gref, flags);
			return 1;
		}
		nflags = synch_cmpxchg(pflags, flags, 0);
	} while (nflags != flags);

	return 0;
}

int gnttab_end_access(grant_ref_t gref)
{
	int rc;

	__ASSERT((gref >= GNTTAB_NR_RESERVED_ENTRIES &&
		gref < NR_GRANT_ENTRIES), "Invalid gref = %d", gref);

	rc = gnttab_reset_flags(gref);
	if (rc) {
		/* Grant is still in use by the remote domain, do not free it */
		return rc;
	}

	put_free_entry(gref);

	return 0;
}

int32_t gnttab_alloc_and_grant(void **map, bool readonly)
{
	void *page;
	unsigned long gfn;
	grant_ref_t gref;

	__ASSERT_NO_MSG(map != NULL);

	page = k_aligned_alloc(XEN_PAGE_SIZE, XEN_PAGE_SIZE);
	if (page == NULL) {
		return -ENOMEM;
	}

	gfn = xen_virt_to_gfn(page);
	gref = gnttab_grant_access(0, gfn, readonly);

	*map = page;

	return gref;
}

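/*
 * Example usage (illustrative sketch): let the driver allocate the shared
 * page itself. The return value is either a negative errno or the grant
 * reference handed out to Dom0; the local mapping is stored in 'ring', a
 * placeholder name used only for this example.
 *
 *	void *ring;
 *	int32_t ret = gnttab_alloc_and_grant(&ring, false);
 *
 *	if (ret < 0) {
 *		// -ENOMEM: no free page available
 *	} else {
 *		grant_ref_t gref = (grant_ref_t)ret;
 *		// use 'ring' locally, pass 'gref' to the other domain
 *	}
 */
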
static void gop_eagain_retry(int cmd, struct gnttab_map_grant_ref *gref)
{
	unsigned int step = 10, delay = step;
	int16_t *status = &gref->status;

	do {
		HYPERVISOR_grant_table_op(cmd, gref, 1);
		if (*status == GNTST_eagain) {
			k_sleep(K_MSEC(delay));
		}

		delay += step;
	} while ((*status == GNTST_eagain) && (delay < GOP_RETRY_DELAY));

	/* Report failure only if the op still returns GNTST_eagain after timeout */
	if (*status == GNTST_eagain) {
		LOG_ERR("Failed to map grant, timeout reached\n");
		*status = GNTST_bad_page;
	}
}

void *gnttab_get_page(void)
{
	int ret;
	void *page_addr;
	struct xen_remove_from_physmap rfpm;

	page_addr = k_aligned_alloc(XEN_PAGE_SIZE, XEN_PAGE_SIZE);
	if (!page_addr) {
		LOG_WRN("Failed to allocate memory for gnttab page!\n");
		return NULL;
	}

	rfpm.domid = DOMID_SELF;
	rfpm.gpfn = xen_virt_to_gfn(page_addr);

	/*
	 * GNTTABOP_map_grant_ref will simply replace the entry in the P2M
	 * and not release any RAM that may have been associated with
	 * page_addr, so we release this memory before mapping.
	 */
	ret = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &rfpm);
	if (ret) {
		LOG_WRN("Failed to remove gnttab page from physmap, ret = %d\n", ret);
		/* Removal failed, so the page is still normal RAM and can be freed */
		k_free(page_addr);
		return NULL;
	}

	return page_addr;
}

void gnttab_put_page(void *page_addr)
{
	int ret, nr_extents = 1;
	struct xen_memory_reservation reservation;
	xen_pfn_t page = xen_virt_to_gfn(page_addr);

	/*
	 * After unmapping, a 4KB hole is left in the address space at
	 * 'page_addr'. To keep the address space contiguous and be able to
	 * return this address to the memory allocator, we populate memory
	 * back into the unmapped position here.
	 */
	memset(&reservation, 0, sizeof(reservation));
	reservation.domid = DOMID_SELF;
	reservation.extent_order = 0;
	reservation.nr_extents = nr_extents;
	set_xen_guest_handle(reservation.extent_start, &page);

	ret = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (ret != nr_extents) {
		LOG_WRN("failed to populate physmap on gfn = 0x%llx, ret = %d\n",
			page, ret);
		return;
	}

	k_free(page_addr);
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret) {
		return ret;
	}

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_no_device_space:
			LOG_WRN("map_grant_ref failed, no device space for page #%d\n", i);
			break;

		case GNTST_eagain:
			/* Operation not done; need to try again */
			gop_eagain_retry(GNTTABOP_map_grant_ref, &map_ops[i]);
			/* Need to re-check status for current page */
			i--;

			break;

		default:
			break;
		}
	}

	return 0;
}

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, unsigned int count)
{
	return HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
}

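/*
 * Example usage (illustrative sketch, assumptions marked): map one foreign
 * grant 'ref' from domain 'otherend_id' into a backing page taken from
 * gnttab_get_page(). Both identifiers are placeholders that a real frontend
 * would normally obtain via Xenstore; host_addr is derived from the gfn,
 * assuming the linear mapping this driver itself relies on.
 *
 *	void *addr = gnttab_get_page();
 *	struct gnttab_map_grant_ref map = {
 *		.host_addr = (uint64_t)xen_virt_to_gfn(addr) * XEN_PAGE_SIZE,
 *		.flags = GNTMAP_host_map,
 *		.ref = ref,
 *		.dom = otherend_id,
 *	};
 *
 *	gnttab_map_refs(&map, 1);
 *	if (map.status != GNTST_okay) {
 *		LOG_ERR("map failed: %s", gnttabop_error(map.status));
 *	} else {
 *		// access the foreign page through 'addr', then unmap it
 *		struct gnttab_unmap_grant_ref unmap = {
 *			.host_addr = (uint64_t)xen_virt_to_gfn(addr) * XEN_PAGE_SIZE,
 *			.handle = map.handle,
 *		};
 *		gnttab_unmap_refs(&unmap, 1);
 *	}
 *	gnttab_put_page(addr);
 */
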

static const char * const gnttab_error_msgs[] = GNTTABOP_error_msgs;

const char *gnttabop_error(int16_t status)
{
	status = -status;
	if (status < 0 || (uint16_t) status >= ARRAY_SIZE(gnttab_error_msgs)) {
		return "bad status";
	} else {
		return gnttab_error_msgs[status];
	}
}

/* Picked from Linux implementation */
#define LEGACY_MAX_GNT_FRAMES_SUPPORTED 4
static unsigned long gnttab_get_max_frames(void)
{
	int ret;
	struct gnttab_query_size q = {
		.dom = DOMID_SELF,
	};

	ret = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &q, 1);
	if ((ret < 0) || (q.status != GNTST_okay)) {
		return LEGACY_MAX_GNT_FRAMES_SUPPORTED;
	}

	return q.max_nr_frames;
}

static int gnttab_init(void)
{
	grant_ref_t gref;
	struct xen_add_to_physmap xatp;
	int rc = 0, i;
	unsigned long xen_max_grant_frames;
	uintptr_t gnttab_base = DT_REG_ADDR_BY_IDX(DT_INST(0, xen_xen), 0);
	mm_reg_t gnttab_reg;

	xen_max_grant_frames = gnttab_get_max_frames();
	if (xen_max_grant_frames < CONFIG_NR_GRANT_FRAMES) {
		LOG_ERR("Xen max_grant_frames is less than CONFIG_NR_GRANT_FRAMES!");
		k_panic();
	}

	/* Will be taken/given during gnt_refs allocation/release */
	k_sem_init(&gnttab.sem, NR_GRANT_ENTRIES - GNTTAB_NR_RESERVED_ENTRIES,
		   NR_GRANT_ENTRIES - GNTTAB_NR_RESERVED_ENTRIES);

	/*
	 * Initialize the O(1) allocator: gref_list[0] always holds the index
	 * of the first free entry.
	 */
	gnttab.gref_list[0] = GNTTAB_NR_RESERVED_ENTRIES;
	gnttab.gref_list[NR_GRANT_ENTRIES - 1] = 0;
	for (gref = GNTTAB_NR_RESERVED_ENTRIES; gref < NR_GRANT_ENTRIES - 1; gref++) {
		gnttab.gref_list[gref] = gref + 1;
	}
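	/*
	 * Resulting list layout, e.g. with GNTTAB_NR_RESERVED_ENTRIES = 8 and
	 * a single grant frame (NR_GRANT_ENTRIES = 512):
	 *   gref_list[0]   = 8     <- head: first free entry
	 *   gref_list[8]   = 9
	 *   ...
	 *   gref_list[510] = 511
	 *   gref_list[511] = 0     <- tail of the free list
	 */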

	for (i = CONFIG_NR_GRANT_FRAMES - 1; i >= 0; i--) {
		xatp.domid = DOMID_SELF;
		xatp.size = 0;
		xatp.space = XENMAPSPACE_grant_table;
		xatp.idx = i;
		xatp.gpfn = xen_virt_to_gfn(gnttab_base) + i;
		rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
		__ASSERT(!rc, "add_to_physmap failed; status = %d\n", rc);
	}

	/*
	 * The Xen DT region reserved for the grant table (the first reg entry
	 * in the hypervisor node) may be much bigger than CONFIG_NR_GRANT_FRAMES
	 * multiplied by the page size. Thus, we map only the part of the region
	 * that is limited by the config. The size of this part is calculated by
	 * the GNTTAB_SIZE macro and passed to device_map().
	 */
	device_map(&gnttab_reg, gnttab_base, GNTTAB_SIZE, K_MEM_CACHE_WB | K_MEM_PERM_RW);
	gnttab.table = (grant_entry_v1_t *)gnttab_reg;

	LOG_DBG("%s: grant table mapped\n", __func__);

	return 0;
}

SYS_INIT(gnttab_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);