#ifndef _ARM_GUEST_ATOMICS_H
#define _ARM_GUEST_ATOMICS_H

#include <xen/bitops.h>
#include <xen/sched.h>

/*
 * The guest atomics helpers share the same logic. We first try the
 * *_timeout version of the operation. If it didn't time out, then we
 * successfully updated the memory and there is nothing else to do.
 *
 * If it did time out, then we didn't manage to update the memory. This
 * is possibly because the guest is misbehaving (e.g. a tight store loop)
 * but can also happen for other reasons (e.g. nested Xen). In that
 * case, pause the domain and retry the operation, this time without a
 * timeout.
 *
 * Note that these helpers rely on other parts of the code to prevent
 * sharing a page between Xen and multiple domains.
 */
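
/*
 * A minimal usage sketch (hypothetical caller, not part of this
 * interface): code updating a bitmap on a page shared with a guest
 * should use these helpers rather than the raw bitops, so that a guest
 * hammering the same memory cannot live-lock Xen:
 *
 *     static void mark_pending(struct domain *d, volatile void *bitmap,
 *                              int irq)
 *     {
 *         guest_set_bit(d, irq, bitmap);
 *     }
 *
 * If the timeout is hit, guest_set_bit() pauses the domain, performs a
 * plain set_bit() and unpauses it.
 */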

DECLARE_PER_CPU(unsigned int, guest_safe_atomic_max);

#define guest_bitop(name)                                                    \
static inline void guest_##name(struct domain *d, int nr, volatile void *p) \
{                                                                            \
    perfc_incr(atomics_guest);                                               \
                                                                             \
    if ( name##_timeout(nr, p, this_cpu(guest_safe_atomic_max)) )           \
        return;                                                              \
                                                                             \
    perfc_incr(atomics_guest_paused);                                        \
                                                                             \
    domain_pause_nosync(d);                                                  \
    (name)(nr, p);                                                           \
    domain_unpause(d);                                                       \
}

#define guest_testop(name)                                                   \
static inline int guest_##name(struct domain *d, int nr, volatile void *p)  \
{                                                                            \
    bool succeed;                                                            \
    int oldbit;                                                              \
                                                                             \
    perfc_incr(atomics_guest);                                               \
                                                                             \
    succeed = name##_timeout(nr, p, &oldbit,                                 \
                             this_cpu(guest_safe_atomic_max));               \
    if ( succeed )                                                           \
        return oldbit;                                                       \
                                                                             \
    perfc_incr(atomics_guest_paused);                                        \
                                                                             \
    domain_pause_nosync(d);                                                  \
    oldbit = (name)(nr, p);                                                  \
    domain_unpause(d);                                                       \
                                                                             \
    return oldbit;                                                           \
}

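/*
 * Each instantiation below generates an inline helper; e.g.
 * guest_bitop(set_bit) and guest_testop(test_and_set_bit) produce:
 *
 *     static inline void guest_set_bit(struct domain *d, int nr,
 *                                      volatile void *p);
 *     static inline int guest_test_and_set_bit(struct domain *d, int nr,
 *                                              volatile void *p);
 */
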
guest_bitop(set_bit)
guest_bitop(clear_bit)
guest_bitop(change_bit)

#undef guest_bitop

/*
 * test_bit does not use load-store atomic operations; 'd' is evaluated
 * and ignored so that the interface matches the other helpers.
 */
#define guest_test_bit(d, nr, p) ((void)(d), test_bit(nr, p))

guest_testop(test_and_set_bit)
guest_testop(test_and_clear_bit)
guest_testop(test_and_change_bit)

#undef guest_testop

static inline void guest_clear_mask16(struct domain *d, uint16_t mask,
                                      volatile uint16_t *p)
{
    perfc_incr(atomics_guest);

    if ( clear_mask16_timeout(mask, p, this_cpu(guest_safe_atomic_max)) )
        return;

    perfc_incr(atomics_guest_paused);

    domain_pause_nosync(d);
    clear_mask16(mask, p);
    domain_unpause(d);
}

static always_inline unsigned long __guest_cmpxchg(struct domain *d,
                                                   volatile void *ptr,
                                                   unsigned long old,
                                                   unsigned long new,
                                                   unsigned int size)
{
    unsigned long oldval = old;

    perfc_incr(atomics_guest);

    if ( __cmpxchg_timeout(ptr, &oldval, new, size,
                           this_cpu(guest_safe_atomic_max)) )
        return oldval;

    perfc_incr(atomics_guest_paused);

    domain_pause_nosync(d);
    oldval = __cmpxchg(ptr, old, new, size);
    domain_unpause(d);

    return oldval;
}

#define guest_cmpxchg(d, ptr, o, n)                         \
    ((__typeof__(*(ptr)))__guest_cmpxchg(d, ptr,            \
                                         (unsigned long)(o),\
                                         (unsigned long)(n),\
                                         sizeof(*(ptr))))
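
/*
 * Example (hypothetical caller; 'field' and 'FLAG' are placeholder
 * names): atomically set a flag in a 32-bit word on a guest-shared
 * page, retrying until the compare-and-swap wins:
 *
 *     uint32_t old, cur = ACCESS_ONCE(*field);
 *
 *     do {
 *         old = cur;
 *         cur = guest_cmpxchg(d, field, old, old | FLAG);
 *     } while ( cur != old );
 *
 * guest_cmpxchg() returns the value observed in memory, cast back to
 * the pointed-to type.
 */

/*
 * 64-bit variant, kept separate because on 32-bit Arm 'unsigned long'
 * is only 32 bits wide, so __guest_cmpxchg() cannot carry a 64-bit
 * value.
 */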
static inline uint64_t guest_cmpxchg64(struct domain *d,
                                       volatile uint64_t *ptr,
                                       uint64_t old,
                                       uint64_t new)
{
    uint64_t oldval = old;

    perfc_incr(atomics_guest);

    if ( __cmpxchg64_timeout(ptr, &oldval, new,
                             this_cpu(guest_safe_atomic_max)) )
        return oldval;

    perfc_incr(atomics_guest_paused);

    domain_pause_nosync(d);
    oldval = cmpxchg64(ptr, old, new);
    domain_unpause(d);

    return oldval;
}

#endif /* _ARM_GUEST_ATOMICS_H */
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */