/******************************************************************************
 * flushtlb.h
 *
 * TLB flushes are timestamped using a global virtual 'clock' which ticks
 * on any TLB flush on any processor.
 *
 * Copyright (c) 2003-2004, K A Fraser
 */

#ifndef __FLUSHTLB_H__
#define __FLUSHTLB_H__

#include <xen/mm.h>
#include <xen/percpu.h>
#include <xen/smp.h>
#include <xen/types.h>

/* The current time as shown by the virtual TLB clock. */
extern u32 tlbflush_clock;

/* Time at which each CPU's TLB was last flushed. */
DECLARE_PER_CPU(u32, tlbflush_time);

#define tlbflush_current_time() tlbflush_clock
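
/*
 * Illustrative sketch only (the actual update logic lives in flushtlb.c,
 * not in this header): a flushing CPU samples the clock, performs the
 * hardware flush, and then records the sampled value in its own
 * tlbflush_time, roughly:
 *
 *     u32 t = tlbflush_clock;          // sampled around the flush
 *     ...                              // hardware TLB flush happens here
 *     this_cpu(tlbflush_time) = t;     // this CPU is now clean up to t
 */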

static inline void page_set_tlbflush_timestamp(struct page_info *page)
{
    /*
     * Prevent storing a stale time stamp, which could happen if an update
     * to tlbflush_clock plus a subsequent flush IPI happen between the
     * reading of tlbflush_clock and the writing of the struct page_info
     * field.
     */
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    page->tlbflush_timestamp = tlbflush_current_time();
    local_irq_enable();
}

/*
 * @cpu_stamp is the timestamp at last TLB flush for the CPU we are testing.
 * @lastuse_stamp is a timestamp taken when the PFN we are testing was last
 * used for a purpose that may have caused the CPU's TLB to become tainted.
 */
static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
{
    u32 curr_time = tlbflush_current_time();
    /*
     * Two cases:
     *  1. During a wrap, the clock ticks over to 0 while CPUs catch up. For
     *     safety during this period, we force a flush if @curr_time == 0.
     *  2. Otherwise, we look to see if @cpu_stamp <= @lastuse_stamp.
     *     To detect false positives because @cpu_stamp has wrapped, we
     *     also check @curr_time. If less than @lastuse_stamp we definitely
     *     wrapped, so there's no need for a flush (one is forced every wrap).
     */
    return ((curr_time == 0) ||
            ((cpu_stamp <= lastuse_stamp) &&
             (lastuse_stamp <= curr_time)));
}
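
/*
 * Worked example (illustrative numbers, not from the original source): with
 * tlbflush_current_time() == 100, a CPU whose last flush was at
 * @cpu_stamp == 90 still needs a flush for a page stamped at
 * @lastuse_stamp == 95, since 90 <= 95 <= 100.  For a page stamped at 80,
 * the flush at 90 already covered it, so NEED_FLUSH(90, 80) is 0.
 */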

/*
 * Filter the given set of CPUs, removing those that definitely flushed their
 * TLB since @page_timestamp.
 */
static inline void tlbflush_filter(cpumask_t *mask, uint32_t page_timestamp)
{
    unsigned int cpu;

    for_each_cpu ( cpu, mask )
        if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) )
            __cpumask_clear_cpu(cpu, mask);
}
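
/*
 * Typical usage sketch (hypothetical caller; the real call sites are in the
 * mm and page-allocator code, not in this header): start from the CPUs that
 * might hold a stale mapping, drop those already flushed since the page's
 * timestamp, and only IPI the remainder:
 *
 *     cpumask_t mask;
 *
 *     cpumask_copy(&mask, &cpu_online_map);
 *     tlbflush_filter(&mask, page->tlbflush_timestamp);
 *     if ( !cpumask_empty(&mask) )
 *         flush_tlb_mask(&mask);
 */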

void new_tlbflush_clock_period(void);

/* Read pagetable base. */
static inline unsigned long read_cr3(void)
{
    unsigned long cr3;
    __asm__ __volatile__ (
        "mov %%cr3, %0" : "=r" (cr3) : );
    return cr3;
}

/* Write pagetable base and implicitly tick the tlbflush clock. */
void write_cr3(unsigned long cr3);

/* flush_* flag fields: */
 /*
  * Area to flush: 2^flush_order pages. Default is flush entire address space.
  * NB. Multi-page areas do not need to have been mapped with a superpage.
  */
#define FLUSH_ORDER_MASK 0xff
#define FLUSH_ORDER(x)   ((x)+1)
 /* Flush TLBs (or parts thereof) */
#define FLUSH_TLB        0x100
 /* Flush TLBs (or parts thereof) including global mappings */
#define FLUSH_TLB_GLOBAL 0x200
 /* Flush data caches */
#define FLUSH_CACHE      0x400
 /* VA for the flush has a valid mapping */
#define FLUSH_VA_VALID   0x800
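
/*
 * Example flag combination (illustrative): to flush a 4-page (order-2)
 * region around @va from the local TLB, without touching the data caches
 * or global mappings, a caller could pass:
 *
 *     flush_area_local(va, FLUSH_TLB | FLUSH_ORDER(2));
 *
 * FLUSH_ORDER(0) likewise selects a single page, as the flush_tlb_one_*
 * wrappers below do.
 */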

/* Flush local TLBs/caches. */
unsigned int flush_area_local(const void *va, unsigned int flags);
#define flush_local(flags) flush_area_local(NULL, flags)

/* Flush specified CPUs' TLBs/caches */
void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
#define flush_mask(mask, flags) flush_area_mask(mask, NULL, flags)

/* Flush all CPUs' TLBs/caches */
#define flush_area_all(va, flags) flush_area_mask(&cpu_online_map, va, flags)
#define flush_all(flags) flush_mask(&cpu_online_map, flags)

/* Flush local TLBs */
#define flush_tlb_local()                       \
    flush_local(FLUSH_TLB)
#define flush_tlb_one_local(v)                  \
    flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))

/* Flush specified CPUs' TLBs */
#define flush_tlb_mask(mask)                    \
    flush_mask(mask, FLUSH_TLB)
#define flush_tlb_one_mask(mask,v)              \
    flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))

/* Flush all CPUs' TLBs */
#define flush_tlb_all()                         \
    flush_tlb_mask(&cpu_online_map)
#define flush_tlb_one_all(v)                    \
    flush_tlb_one_mask(&cpu_online_map, v)
static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {}
static inline int invalidate_dcache_va_range(const void *p,
                                             unsigned long size)
{ return -EOPNOTSUPP; }
static inline int clean_and_invalidate_dcache_va_range(const void *p,
                                                       unsigned long size)
{
    unsigned int order = get_order_from_bytes(size);
    /* sub-page granularity support needs to be added if necessary */
    flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
    return 0;
}
static inline int clean_dcache_va_range(const void *p, unsigned long size)
{
    return clean_and_invalidate_dcache_va_range(p, size);
}

#endif /* __FLUSHTLB_H__ */