/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#ifndef _XENMEM_RESERVATION_H
#define _XENMEM_RESERVATION_H

#include <linux/highmem.h>

#include <xen/page.h>

extern bool xen_scrub_pages;

xenmem_reservation_scrub_page(struct page * page)22 static inline void xenmem_reservation_scrub_page(struct page *page)
23 {
24 	if (xen_scrub_pages)
25 		clear_highpage(page);
26 }

#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames);

void __xenmem_reservation_va_mapping_reset(unsigned long count,
					   struct page **pages);
#endif

/*
 * Re-establish the kernel virtual mappings of @count pages to the
 * machine frames in @frames after a reservation increase.
 *
 * Only PV-MMU guests need this: auto-translated guests (those with
 * XENFEAT_auto_translated_physmap set) have their translations managed
 * for them, so this is a no-op there and when CONFIG_XEN_HAVE_PVMMU
 * is not built in.
 */
static inline void xenmem_reservation_va_mapping_update(unsigned long count,
							struct page **pages,
							xen_pfn_t *frames)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		__xenmem_reservation_va_mapping_update(count, pages, frames);
#endif
}
/*
 * Tear down the kernel virtual mappings of @count pages before their
 * frames are returned to the hypervisor (reservation decrease).
 *
 * As with the update path, this only applies to PV-MMU guests; it is a
 * no-op for auto-translated guests and when CONFIG_XEN_HAVE_PVMMU is
 * not built in.
 */
static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
						       struct page **pages)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		__xenmem_reservation_va_mapping_reset(count, pages);
#endif
}

int xenmem_reservation_increase(int count, xen_pfn_t *frames);

int xenmem_reservation_decrease(int count, xen_pfn_t *frames);

#endif