1 /******************************************************************************
2 * include/xen/mm.h
3 *
4 * Definitions for memory pages, frame numbers, addresses, allocations, etc.
5 *
6 * Copyright (c) 2002-2006, K A Fraser <keir@xensource.com>
7 *
8 * +---------------------+
9 * Xen Memory Management
10 * +---------------------+
11 *
12 * Xen has to handle many different address spaces. It is important not to
13 * get these spaces mixed up. The following is a consistent terminology which
14 * should be adhered to.
15 *
16 * mfn: Machine Frame Number
17 * The values Xen puts into its own pagetables. This is the host physical
18 * memory address space with RAM, MMIO etc.
19 *
20 * gfn: Guest Frame Number
21 * The values a guest puts in its own pagetables. For an auto-translated
22 * guest (hardware assisted with 2nd stage translation, or shadowed), gfn !=
23 * mfn. For a non-translated guest which is aware of Xen, gfn == mfn.
24 *
25 * pfn: Pseudophysical Frame Number
26 * A linear idea of a guest physical address space. For an auto-translated
27 * guest, pfn == gfn while for a non-translated guest, pfn != gfn.
28 *
29 * WARNING: Some of these terms have changed over time while others have been
30 * used inconsistently, meaning that a lot of existing code does not match the
31 * definitions above. New code should use these terms as described here, and
32 * over time older code should be corrected to be consistent.
33 *
34 * An incomplete list of larger work area:
35 * - Phase out the use of 'pfn' from the x86 pagetable code. Callers should
36 * know explicitly whether they are talking about mfns or gfns.
37 * - Phase out the use of 'pfn' from the ARM mm code. A cursory glance
38 * suggests that 'mfn' and 'pfn' are currently used interchangeably, where
39 * 'mfn' is the appropriate term to use.
40 * - Phase out the use of gpfn/gmfn where pfn/mfn are meant. This excludes
41 * the x86 shadow code, which uses gmfn/smfn pairs with different,
42 * documented, meanings.
43 */
44
45 #ifndef __XEN_MM_H__
46 #define __XEN_MM_H__
47
48 #include <xen/compiler.h>
49 #include <xen/types.h>
50 #include <xen/list.h>
51 #include <xen/spinlock.h>
52 #include <xen/typesafe.h>
53 #include <xen/kernel.h>
54 #include <xen/perfc.h>
55 #include <public/memory.h>
56
57 TYPE_SAFE(unsigned long, mfn);
58 #define PRI_mfn "05lx"
59 #define INVALID_MFN _mfn(~0UL)
/*
 * To be used for global variable initialization. This works around a bug
 * in GCC < 5.0: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64856
 */
64 #define INVALID_MFN_INITIALIZER { ~0UL }
65
66 #ifndef mfn_t
67 #define mfn_t /* Grep fodder: mfn_t, _mfn() and mfn_x() are defined above */
68 #define _mfn
69 #define mfn_x
70 #undef mfn_t
71 #undef _mfn
72 #undef mfn_x
73 #endif
74
/* Return the MFN @i frames beyond @mfn. */
static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
{
    unsigned long raw = mfn_x(mfn);

    return _mfn(raw + i);
}
79
/* Return the larger of two MFNs. */
static inline mfn_t mfn_max(mfn_t x, mfn_t y)
{
    unsigned long a = mfn_x(x), b = mfn_x(y);

    return _mfn(a > b ? a : b);
}
84
/* Return the smaller of two MFNs. */
static inline mfn_t mfn_min(mfn_t x, mfn_t y)
{
    unsigned long a = mfn_x(x), b = mfn_x(y);

    return _mfn(a < b ? a : b);
}
89
mfn_eq(mfn_t x,mfn_t y)90 static inline bool_t mfn_eq(mfn_t x, mfn_t y)
91 {
92 return mfn_x(x) == mfn_x(y);
93 }
94
95 TYPE_SAFE(unsigned long, gfn);
96 #define PRI_gfn "05lx"
97 #define INVALID_GFN _gfn(~0UL)
/*
 * To be used for global variable initialization. This works around a bug
 * in GCC < 5.0: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64856
 */
102 #define INVALID_GFN_INITIALIZER { ~0UL }
103
104 #ifndef gfn_t
105 #define gfn_t /* Grep fodder: gfn_t, _gfn() and gfn_x() are defined above */
106 #define _gfn
107 #define gfn_x
108 #undef gfn_t
109 #undef _gfn
110 #undef gfn_x
111 #endif
112
/* Return the GFN @i frames beyond @gfn. */
static inline gfn_t gfn_add(gfn_t gfn, unsigned long i)
{
    unsigned long raw = gfn_x(gfn);

    return _gfn(raw + i);
}
117
/* Return the larger of two GFNs. */
static inline gfn_t gfn_max(gfn_t x, gfn_t y)
{
    unsigned long a = gfn_x(x), b = gfn_x(y);

    return _gfn(a > b ? a : b);
}
122
/* Return the smaller of two GFNs. */
static inline gfn_t gfn_min(gfn_t x, gfn_t y)
{
    unsigned long a = gfn_x(x), b = gfn_x(y);

    return _gfn(a < b ? a : b);
}
127
gfn_eq(gfn_t x,gfn_t y)128 static inline bool_t gfn_eq(gfn_t x, gfn_t y)
129 {
130 return gfn_x(x) == gfn_x(y);
131 }
132
133 TYPE_SAFE(unsigned long, pfn);
134 #define PRI_pfn "05lx"
135 #define INVALID_PFN (~0UL)
136
137 #ifndef pfn_t
138 #define pfn_t /* Grep fodder: pfn_t, _pfn() and pfn_x() are defined above */
139 #define _pfn
140 #define pfn_x
141 #undef pfn_t
142 #undef _pfn
143 #undef pfn_x
144 #endif
145
146 struct page_info;
147
148 void put_page(struct page_info *);
149 int get_page(struct page_info *, struct domain *);
150 struct domain *__must_check page_get_owner_and_reference(struct page_info *);
151
152 /* Boot-time allocator. Turns into generic allocator after bootstrap. */
153 void init_boot_pages(paddr_t ps, paddr_t pe);
154 mfn_t alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
155 void end_boot_allocator(void);
156
157 /* Xen suballocator. These functions are interrupt-safe. */
158 void init_xenheap_pages(paddr_t ps, paddr_t pe);
159 void xenheap_max_mfn(unsigned long mfn);
160 void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
161 void free_xenheap_pages(void *v, unsigned int order);
162 bool scrub_free_pages(void);
163 #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
164 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
165 /* Map machine page range in Xen virtual address space. */
166 int map_pages_to_xen(
167 unsigned long virt,
168 unsigned long mfn,
169 unsigned long nr_mfns,
170 unsigned int flags);
171 /* Alter the permissions of a range of Xen virtual address space. */
172 int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int flags);
173 int destroy_xen_mappings(unsigned long v, unsigned long e);
174 /*
175 * Create only non-leaf page table entries for the
176 * page range in Xen virtual address space.
177 */
178 int populate_pt_range(unsigned long virt, unsigned long mfn,
179 unsigned long nr_mfns);
180 /* Claim handling */
181 unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
182 int domain_set_outstanding_pages(struct domain *d, unsigned long pages);
183 void get_outstanding_claims(uint64_t *free_pages, uint64_t *outstanding_pages);
184
185 /* Domain suballocator. These functions are *not* interrupt-safe.*/
186 void init_domheap_pages(paddr_t ps, paddr_t pe);
187 struct page_info *alloc_domheap_pages(
188 struct domain *d, unsigned int order, unsigned int memflags);
189 void free_domheap_pages(struct page_info *pg, unsigned int order);
190 unsigned long avail_domheap_pages_region(
191 unsigned int node, unsigned int min_width, unsigned int max_width);
192 unsigned long avail_domheap_pages(void);
193 unsigned long avail_node_heap_pages(unsigned int);
194 #define alloc_domheap_page(d,f) (alloc_domheap_pages(d,0,f))
195 #define free_domheap_page(p) (free_domheap_pages(p,0))
196 unsigned int online_page(unsigned long mfn, uint32_t *status);
197 int offline_page(unsigned long mfn, int broken, uint32_t *status);
198 int query_page_offline(unsigned long mfn, uint32_t *status);
199 unsigned long total_free_pages(void);
200
201 void heap_init_late(void);
202
203 int assign_pages(
204 struct domain *d,
205 struct page_info *pg,
206 unsigned int order,
207 unsigned int memflags);
208
209 /* Dump info to serial console */
210 void arch_dump_shared_mem_info(void);
211
212 /*
213 * Extra fault info types which are used to further describe
214 * the source of an access violation.
215 */
216 typedef enum {
217 npfec_kind_unknown, /* must be first */
218 npfec_kind_in_gpt, /* violation in guest page table */
219 npfec_kind_with_gla /* violation with guest linear address */
220 } npfec_kind_t;
221
222 /*
223 * Nested page fault exception codes.
224 */
225 struct npfec {
226 unsigned int read_access:1;
227 unsigned int write_access:1;
228 unsigned int insn_fetch:1;
229 unsigned int present:1;
230 unsigned int gla_valid:1;
231 unsigned int kind:2; /* npfec_kind_t */
232 };
233
234 /* memflags: */
235 #define _MEMF_no_refcount 0
236 #define MEMF_no_refcount (1U<<_MEMF_no_refcount)
237 #define _MEMF_populate_on_demand 1
238 #define MEMF_populate_on_demand (1U<<_MEMF_populate_on_demand)
239 #define _MEMF_tmem 2
240 #define MEMF_tmem (1U<<_MEMF_tmem)
241 #define _MEMF_no_dma 3
242 #define MEMF_no_dma (1U<<_MEMF_no_dma)
243 #define _MEMF_exact_node 4
244 #define MEMF_exact_node (1U<<_MEMF_exact_node)
245 #define _MEMF_no_owner 5
246 #define MEMF_no_owner (1U<<_MEMF_no_owner)
247 #define _MEMF_no_tlbflush 6
248 #define MEMF_no_tlbflush (1U<<_MEMF_no_tlbflush)
249 #define _MEMF_no_icache_flush 7
250 #define MEMF_no_icache_flush (1U<<_MEMF_no_icache_flush)
251 #define _MEMF_no_scrub 8
252 #define MEMF_no_scrub (1U<<_MEMF_no_scrub)
253 #define _MEMF_node 16
254 #define MEMF_node_mask ((1U << (8 * sizeof(nodeid_t))) - 1)
255 #define MEMF_node(n) ((((n) + 1) & MEMF_node_mask) << _MEMF_node)
256 #define MEMF_get_node(f) ((((f) >> _MEMF_node) - 1) & MEMF_node_mask)
257 #define _MEMF_bits 24
258 #define MEMF_bits(n) ((n)<<_MEMF_bits)
259
260 #ifdef CONFIG_PAGEALLOC_MAX_ORDER
261 #define MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
262 #else
263 #define MAX_ORDER 20 /* 2^20 contiguous pages */
264 #endif
265
266 #define page_list_entry list_head
267
268 #include <asm/mm.h>
269
270 #ifndef page_list_entry
271 struct page_list_head
272 {
273 struct page_info *next, *tail;
274 };
275 /* These must only have instances in struct page_info. */
276 # define page_list_entry
277
278 # define PAGE_LIST_NULL ((typeof(((struct page_info){}).list.next))~0)
279
280 # if !defined(pdx_to_page) && !defined(page_to_pdx)
281 # if defined(__page_to_mfn) || defined(__mfn_to_page)
282 # define page_to_pdx __page_to_mfn
283 # define pdx_to_page __mfn_to_page
284 # else
285 # define page_to_pdx page_to_mfn
286 # define pdx_to_page mfn_to_page
287 # endif
288 # endif
289
290 # define PAGE_LIST_HEAD_INIT(name) { NULL, NULL }
291 # define PAGE_LIST_HEAD(name) \
292 struct page_list_head name = PAGE_LIST_HEAD_INIT(name)
293 # define INIT_PAGE_LIST_HEAD(head) ((head)->tail = (head)->next = NULL)
294 # define INIT_PAGE_LIST_ENTRY(ent) ((ent)->prev = (ent)->next = PAGE_LIST_NULL)
295
296 static inline bool_t
page_list_empty(const struct page_list_head * head)297 page_list_empty(const struct page_list_head *head)
298 {
299 return !head->next;
300 }
/* Return the first page on @head, or NULL if the list is empty. */
static inline struct page_info *
page_list_first(const struct page_list_head *head)
{
    return head->next;
}
/* Return the last page on @head, or NULL if the list is empty. */
static inline struct page_info *
page_list_last(const struct page_list_head *head)
{
    return head->tail;
}
/*
 * Return the page after @page on @head, or NULL if @page is the tail.
 * The link fields store compressed (pdx) values, hence the conversion.
 */
static inline struct page_info *
page_list_next(const struct page_info *page,
               const struct page_list_head *head)
{
    return page != head->tail ? pdx_to_page(page->list.next) : NULL;
}
/*
 * Return the page before @page on @head, or NULL if @page is the head.
 * The link fields store compressed (pdx) values, hence the conversion.
 */
static inline struct page_info *
page_list_prev(const struct page_info *page,
               const struct page_list_head *head)
{
    return page != head->next ? pdx_to_page(page->list.prev) : NULL;
}
/*
 * Insert @page at the front of @head.
 * The list is doubly linked via pdx values in page->list.{next,prev},
 * terminated by PAGE_LIST_NULL at either end.
 */
static inline void
page_list_add(struct page_info *page, struct page_list_head *head)
{
    if ( head->next )
    {
        /* Non-empty list: link @page ahead of the current first element. */
        page->list.next = page_to_pdx(head->next);
        head->next->list.prev = page_to_pdx(page);
    }
    else
    {
        /* Empty list: @page becomes both head and tail. */
        head->tail = page;
        page->list.next = PAGE_LIST_NULL;
    }
    page->list.prev = PAGE_LIST_NULL;
    head->next = page;
}
/*
 * Insert @page at the end of @head.
 * The list is doubly linked via pdx values in page->list.{next,prev},
 * terminated by PAGE_LIST_NULL at either end.
 */
static inline void
page_list_add_tail(struct page_info *page, struct page_list_head *head)
{
    page->list.next = PAGE_LIST_NULL;
    if ( head->next )
    {
        /* Non-empty list: link @page after the current last element. */
        page->list.prev = page_to_pdx(head->tail);
        head->tail->list.next = page_to_pdx(page);
    }
    else
    {
        /* Empty list: @page becomes both head and tail. */
        page->list.prev = PAGE_LIST_NULL;
        head->next = page;
    }
    head->tail = page;
}
/*
 * Handle removal of @page from @head for the cases where @page sits at
 * the head and/or tail of the list. @next/@prev are @page's neighbours,
 * already converted from their pdx form by the caller.
 *
 * Returns 1 if removal was fully handled here, 0 if @page is an interior
 * element and the caller still needs to stitch its neighbours together.
 */
static inline bool_t
__page_list_del_head(struct page_info *page, struct page_list_head *head,
                     struct page_info *next, struct page_info *prev)
{
    if ( head->next == page )
    {
        if ( head->tail != page )
        {
            /* @page was the head: @next becomes the new first element. */
            next->list.prev = PAGE_LIST_NULL;
            head->next = next;
        }
        else
            /* @page was the sole element: the list becomes empty. */
            head->tail = head->next = NULL;
        return 1;
    }

    if ( head->tail == page )
    {
        /* @page was the tail: @prev becomes the new last element. */
        prev->list.next = PAGE_LIST_NULL;
        head->tail = prev;
        return 1;
    }

    return 0;
}
/* Remove @page from @head; @page must currently be on that list. */
static inline void
page_list_del(struct page_info *page, struct page_list_head *head)
{
    struct page_info *next = pdx_to_page(page->list.next);
    struct page_info *prev = pdx_to_page(page->list.prev);

    if ( !__page_list_del_head(page, head, next, prev) )
    {
        /* Interior element: stitch the two neighbours together. */
        next->list.prev = page->list.prev;
        prev->list.next = page->list.next;
    }
}
/*
 * Remove @page from whichever of @head1/@head2 it is currently on, for
 * callers which cannot tell which of the two lists holds the page.
 */
static inline void
page_list_del2(struct page_info *page, struct page_list_head *head1,
               struct page_list_head *head2)
{
    struct page_info *next = pdx_to_page(page->list.next);
    struct page_info *prev = pdx_to_page(page->list.prev);

    if ( !__page_list_del_head(page, head1, next, prev) &&
         !__page_list_del_head(page, head2, next, prev) )
    {
        /* Interior element: stitch the two neighbours together. */
        next->list.prev = page->list.prev;
        prev->list.next = page->list.next;
    }
}
406 static inline struct page_info *
page_list_remove_head(struct page_list_head * head)407 page_list_remove_head(struct page_list_head *head)
408 {
409 struct page_info *page = head->next;
410
411 if ( page )
412 page_list_del(page, head);
413
414 return page;
415 }
416 static inline void
page_list_move(struct page_list_head * dst,struct page_list_head * src)417 page_list_move(struct page_list_head *dst, struct page_list_head *src)
418 {
419 if ( !page_list_empty(src) )
420 {
421 *dst = *src;
422 INIT_PAGE_LIST_HEAD(src);
423 }
424 }
/*
 * Prepend all entries of @list onto @head. The @list head itself is not
 * re-initialised, so the caller must not use it afterwards without
 * re-initialising it.
 */
static inline void
page_list_splice(struct page_list_head *list, struct page_list_head *head)
{
    struct page_info *first, *last, *at;

    if ( page_list_empty(list) )
        return;

    if ( page_list_empty(head) )
    {
        /* Destination empty: simply adopt @list's bounds. */
        head->next = list->next;
        head->tail = list->tail;
        return;
    }

    first = list->next;
    last = list->tail;
    at = head->next;

    /* Both first elements must carry a PAGE_LIST_NULL back link. */
    ASSERT(first->list.prev == PAGE_LIST_NULL);
    ASSERT(first->list.prev == at->list.prev);
    head->next = first;

    /* Join @list's tail to @head's former first element. */
    last->list.next = page_to_pdx(at);
    at->list.prev = page_to_pdx(last);
}
451
/* Iterate @pos over all pages on @head; entries must not be removed. */
#define page_list_for_each(pos, head) \
    for ( pos = (head)->next; pos; pos = page_list_next(pos, head) )
/* As above, but safe against removal of the current entry @pos. */
#define page_list_for_each_safe(pos, tmp, head) \
    for ( pos = (head)->next; \
          pos ? (tmp = page_list_next(pos, head), 1) : 0; \
          pos = tmp )
/* Removal-safe iteration from tail towards head. */
#define page_list_for_each_safe_reverse(pos, tmp, head) \
    for ( pos = (head)->tail; \
          pos ? (tmp = page_list_prev(pos, head), 1) : 0; \
          pos = tmp )
462 #else
463 # define page_list_head list_head
464 # define PAGE_LIST_HEAD_INIT LIST_HEAD_INIT
465 # define PAGE_LIST_HEAD LIST_HEAD
466 # define INIT_PAGE_LIST_HEAD INIT_LIST_HEAD
467 # define INIT_PAGE_LIST_ENTRY INIT_LIST_HEAD
468
469 static inline bool_t
page_list_empty(const struct page_list_head * head)470 page_list_empty(const struct page_list_head *head)
471 {
472 return !!list_empty(head);
473 }
/* Return the first page on @head (undefined if the list is empty). */
static inline struct page_info *
page_list_first(const struct page_list_head *head)
{
    return list_first_entry(head, struct page_info, list);
}
/* Return the last page on @head (undefined if the list is empty). */
static inline struct page_info *
page_list_last(const struct page_list_head *head)
{
    return list_last_entry(head, struct page_info, list);
}
/*
 * Return the entry after @page. @head is unused in this variant, but is
 * kept for interface parity with the pdx-based implementation above.
 */
static inline struct page_info *
page_list_next(const struct page_info *page,
               const struct page_list_head *head)
{
    return list_entry(page->list.next, struct page_info, list);
}
/*
 * Return the entry before @page. @head is unused in this variant, but is
 * kept for interface parity with the pdx-based implementation above.
 */
static inline struct page_info *
page_list_prev(const struct page_info *page,
               const struct page_list_head *head)
{
    return list_entry(page->list.prev, struct page_info, list);
}
/* Insert @page at the front of @head. */
static inline void
page_list_add(struct page_info *page, struct page_list_head *head)
{
    list_add(&page->list, head);
}
/* Insert @page at the end of @head. */
static inline void
page_list_add_tail(struct page_info *page, struct page_list_head *head)
{
    list_add_tail(&page->list, head);
}
/* Remove @page from the list it is on; @head is unused in this variant. */
static inline void
page_list_del(struct page_info *page, struct page_list_head *head)
{
    list_del(&page->list);
}
/*
 * Remove @page from whichever list it is on; both head arguments are
 * unused in this variant (list_del() needs no head).
 */
static inline void
page_list_del2(struct page_info *page, struct page_list_head *head1,
               struct page_list_head *head2)
{
    list_del(&page->list);
}
517 static inline struct page_info *
page_list_remove_head(struct page_list_head * head)518 page_list_remove_head(struct page_list_head *head)
519 {
520 struct page_info *pg;
521
522 if ( page_list_empty(head) )
523 return NULL;
524
525 pg = page_list_first(head);
526 list_del(&pg->list);
527 return pg;
528 }
/* Transfer all entries of @src to @dst (which is overwritten), emptying @src. */
static inline void
page_list_move(struct page_list_head *dst, struct page_list_head *src)
{
    if ( list_empty(src) )
        return;

    list_replace_init(src, dst);
}
/* Prepend all entries of @list onto @head. */
static inline void
page_list_splice(struct page_list_head *list, struct page_list_head *head)
{
    list_splice(list, head);
}
540
/* In this configuration the iterators map directly onto <xen/list.h>. */
# define page_list_for_each(pos, head) list_for_each_entry(pos, head, list)
# define page_list_for_each_safe(pos, tmp, head) \
    list_for_each_entry_safe(pos, tmp, head, list)
# define page_list_for_each_safe_reverse(pos, tmp, head) \
    list_for_each_entry_safe_reverse(pos, tmp, head, list)
546 #endif
547
/*
 * Return the minimum allocation order covering @size bytes
 * (@size is rounded up to a whole number of pages).
 * NOTE(review): for size == 0 the unsigned subtraction wraps and a very
 * large order results — callers are presumably expected to pass size > 0.
 */
static inline unsigned int get_order_from_bytes(paddr_t size)
{
    unsigned int order = 0;

    size = (size - 1) >> PAGE_SHIFT;
    while ( size )
    {
        size >>= 1;
        ++order;
    }

    return order;
}
558
/*
 * Return the minimum allocation order covering @nr_pages pages.
 * NOTE(review): for nr_pages == 0 the decrement wraps and a very large
 * order results — callers are presumably expected to pass nr_pages > 0.
 */
static inline unsigned int get_order_from_pages(unsigned long nr_pages)
{
    unsigned int order = 0;

    for ( nr_pages--; nr_pages; nr_pages >>= 1 )
        ++order;

    return order;
}
569
570 void scrub_one_page(struct page_info *);
571
572 #ifndef arch_free_heap_page
573 #define arch_free_heap_page(d, pg) \
574 page_list_del(pg, is_xen_heap_page(pg) ? \
575 &(d)->xenpage_list : &(d)->page_list)
576 #endif
577
578 int xenmem_add_to_physmap_one(struct domain *d, unsigned int space,
579 union xen_add_to_physmap_batch_extra extra,
580 unsigned long idx, gfn_t gfn);
581
582 /* Return 0 on success, or negative on error. */
583 int __must_check guest_remove_page(struct domain *d, unsigned long gmfn);
584 int __must_check steal_page(struct domain *d, struct page_info *page,
585 unsigned int memflags);
586 int __must_check donate_page(struct domain *d, struct page_info *page,
587 unsigned int memflags);
588
589 #define RAM_TYPE_CONVENTIONAL 0x00000001
590 #define RAM_TYPE_RESERVED 0x00000002
591 #define RAM_TYPE_UNUSABLE 0x00000004
592 #define RAM_TYPE_ACPI 0x00000008
593 /* TRUE if the whole page at @mfn is of the requested RAM type(s) above. */
594 int page_is_ram_type(unsigned long mfn, unsigned long mem_type);
595
/* Prepare/destroy a ring for a dom0 helper. The helper will talk
 * with Xen on behalf of this domain. */
598 int prepare_ring_for_helper(struct domain *d, unsigned long gmfn,
599 struct page_info **_page, void **_va);
600 void destroy_ring_for_helper(void **_va, struct page_info *page);
601
602 /* Return the upper bound of MFNs, including hotplug memory. */
603 unsigned long get_upper_mfn_bound(void);
604
605 #include <asm/flushtlb.h>
606
/*
 * Accumulate TLB-flush state across a batch of pages.
 *
 * If @page still requires a TLB flush (its free-state flag is set and its
 * timestamp has not been invalidated by wrapping past the current time),
 * record the most recent such timestamp seen so far in *@tlbflush_timestamp
 * and set *@need_tlbflush, so the caller can issue a single filtered flush
 * afterwards (see filtered_flush_tlb_mask()).
 */
static inline void accumulate_tlbflush(bool *need_tlbflush,
                                       const struct page_info *page,
                                       uint32_t *tlbflush_timestamp)
{
    if ( page->u.free.need_tlbflush &&
         page->tlbflush_timestamp <= tlbflush_current_time() &&
         (!*need_tlbflush ||
          page->tlbflush_timestamp > *tlbflush_timestamp) )
    {
        *need_tlbflush = true;
        *tlbflush_timestamp = page->tlbflush_timestamp;
    }
}
620
/*
 * Flush the TLBs of those online CPUs which tlbflush_filter() reports as
 * not yet having flushed since @tlbflush_timestamp. No-op if the filter
 * leaves the mask empty.
 */
static inline void filtered_flush_tlb_mask(uint32_t tlbflush_timestamp)
{
    /* NOTE(review): cpumask_t lives on the stack here — can be large. */
    cpumask_t mask;

    cpumask_copy(&mask, &cpu_online_map);
    tlbflush_filter(&mask, tlbflush_timestamp);
    if ( !cpumask_empty(&mask) )
    {
        perfc_incr(need_flush_tlb_flush);
        flush_tlb_mask(&mask);
    }
}
633
634 #endif /* __XEN_MM_H__ */
635