/*
 * Helpers to emulate co-processor and system registers
 */
#ifndef __ASM_ARM_VREG__
#define __ASM_ARM_VREG__

/*
 * Emulation callback for a 32-bit / 64-bit register access.
 *
 * On a guest read, the callback writes the emulated value through 'r';
 * on a guest write, 'r' already holds the value the guest is storing.
 * 'read' distinguishes the two cases. The callback returns true when the
 * access was handled successfully.
 */
typedef bool (*vreg_reg32_fn_t)(struct cpu_user_regs *regs, uint32_t *r,
                                bool read);
typedef bool (*vreg_reg64_fn_t)(struct cpu_user_regs *regs, uint64_t *r,
                                bool read);
11
/*
 * Emulate a trapped 32-bit co-processor access (MCR/MRC) by dispatching
 * to 'fn'. Returns whether the handler dealt with the access.
 */
static inline bool vreg_emulate_cp32(struct cpu_user_regs *regs, union hsr hsr,
                                     vreg_reg32_fn_t fn)
{
    struct hsr_cp32 cp32 = hsr.cp32;
    bool is_read = cp32.read;
    bool handled;
    /*
     * Zero-initialize so that a buggy handler which fails to set the
     * value cannot leak stale data to the guest on a read.
     */
    uint32_t val = 0;

    /* A guest write supplies the value from the trapped GPR. */
    if ( !is_read )
        val = get_user_reg(regs, cp32.reg);

    handled = fn(regs, &val, is_read);

    /* A successful read hands the emulated value back to the guest. */
    if ( handled && is_read )
        set_user_reg(regs, cp32.reg, val);

    return handled;
}
34
/*
 * Emulate a trapped 64-bit co-processor access (MCRR/MRRC), which moves
 * a value split across two 32-bit GPRs (reg1 = low half, reg2 = high
 * half). Returns whether the handler dealt with the access.
 */
static inline bool vreg_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr,
                                     vreg_reg64_fn_t fn)
{
    struct hsr_cp64 cp64 = hsr.cp64;
    bool handled;
    /*
     * Zero-initialize so that a buggy handler which fails to set the
     * value cannot leak stale data to the guest on a read.
     */
    uint64_t val = 0;

    if ( !cp64.read )
    {
        /* Assemble the 64-bit value from the two 32-bit source GPRs. */
        uint64_t lo = (uint32_t)get_user_reg(regs, cp64.reg1);
        uint64_t hi = (uint32_t)get_user_reg(regs, cp64.reg2);

        val = lo | (hi << 32);
    }

    handled = fn(regs, &val, cp64.read);

    if ( handled && cp64.read )
    {
        /* Scatter the emulated value back across the two GPRs. */
        set_user_reg(regs, cp64.reg1, (uint32_t)val);
        set_user_reg(regs, cp64.reg2, val >> 32);
    }

    return handled;
}
65
66 #ifdef CONFIG_ARM_64
/*
 * Emulate a trapped AArch64 system-register access with 32-bit payload
 * by dispatching to 'fn'. Returns whether the handler dealt with it.
 */
static inline bool vreg_emulate_sysreg32(struct cpu_user_regs *regs, union hsr hsr,
                                         vreg_reg32_fn_t fn)
{
    struct hsr_sysreg sysreg = hsr.sysreg;
    bool handled;
    /*
     * Zero-initialize so that a buggy handler which fails to set the
     * value cannot leak stale data to the guest on a read.
     */
    uint32_t val = 0;

    if ( !sysreg.read )
        val = get_user_reg(regs, sysreg.reg);

    handled = fn(regs, &val, sysreg.read);

    if ( handled && sysreg.read )
        set_user_reg(regs, sysreg.reg, val);

    return handled;
}
84
/*
 * Emulate a trapped AArch64 system-register access with 64-bit payload
 * by dispatching to 'fn'. Returns whether the handler dealt with it.
 */
static inline bool vreg_emulate_sysreg64(struct cpu_user_regs *regs, union hsr hsr,
                                         vreg_reg64_fn_t fn)
{
    struct hsr_sysreg sysreg = hsr.sysreg;
    bool handled;
    /*
     * Zero-initialize so that a buggy handler which fails to set the
     * value cannot leak stale data to the guest on a read.
     */
    uint64_t val = 0;

    if ( !sysreg.read )
        val = get_user_reg(regs, sysreg.reg);

    handled = fn(regs, &val, sysreg.read);

    if ( handled && sysreg.read )
        set_user_reg(regs, sysreg.reg, val);

    return handled;
}
107
108 #endif
109
/*
 * Mask covering the low ((1 << size) * 8) bits of an unsigned long,
 * i.e. the bytes touched by an access of the given size. 'size' is
 * presumably an enum dabt_size value encoding log2 of the access width
 * in bytes (so size == 3 yields a full 64-bit mask) — the shift would
 * be undefined for wider accesses, so callers must pre-validate size
 * (see the comment below on vreg_regN_*).
 */
#define VREG_REG_MASK(size) ((~0UL) >> (BITS_PER_LONG - ((1 << (size)) * 8)))
111
/*
 * The check on the size supported by the register has to be done by
 * the caller of vreg_regN_*.
 *
 * vreg_reg_* should never be called directly. Instead use the vreg_regN_*
 * according to the size of the emulated register.
 *
 * Note that the alignment fault will always be taken in the guest
 * (see B3.12.7 DDI0406.b).
 */
vreg_reg_extract(unsigned long reg,unsigned int offset,enum dabt_size size)122 static inline register_t vreg_reg_extract(unsigned long reg,
123 unsigned int offset,
124 enum dabt_size size)
125 {
126 reg >>= 8 * offset;
127 reg &= VREG_REG_MASK(size);
128
129 return reg;
130 }
131
/*
 * Replace the 'size'-wide field starting at byte 'offset' of *reg with
 * 'val'; bytes outside the field are preserved.
 */
static inline void vreg_reg_update(unsigned long *reg, register_t val,
                                   unsigned int offset,
                                   enum dabt_size size)
{
    const int shift = offset * 8;
    const unsigned long mask = VREG_REG_MASK(size) << shift;

    /* Clear the destination field, then merge in the masked new value. */
    *reg = (*reg & ~mask) | (((unsigned long)val << shift) & mask);
}
142
/*
 * OR 'bits' into the 'size'-wide field starting at byte 'offset' of
 * *reg; bits outside the accessed field are ignored.
 */
static inline void vreg_reg_setbits(unsigned long *reg, register_t bits,
                                    unsigned int offset,
                                    enum dabt_size size)
{
    const int shift = offset * 8;

    /* Only bits falling inside the accessed field may be set. */
    *reg |= ((unsigned long)bits & VREG_REG_MASK(size)) << shift;
}
152
/*
 * Clear 'bits' in the 'size'-wide field starting at byte 'offset' of
 * *reg; bits outside the accessed field are ignored.
 */
static inline void vreg_reg_clearbits(unsigned long *reg, register_t bits,
                                      unsigned int offset,
                                      enum dabt_size size)
{
    const int shift = offset * 8;

    /* Only bits falling inside the accessed field may be cleared. */
    *reg &= ~(((unsigned long)bits & VREG_REG_MASK(size)) << shift);
}
162
163 /* N-bit register helpers */
/*
 * Generate the per-width helpers vreg_reg<sz>_{extract,update,setbits,
 * clearbits}. 'sz' is the register width in bits; 'offmask' masks the
 * low bits of info->gpa to get the byte offset of the access within the
 * register. Each helper bounces through an unsigned long temporary so
 * the generic vreg_reg_* routines above can be reused for any width.
 */
#define VREG_REG_HELPERS(sz, offmask)                                   \
static inline register_t vreg_reg##sz##_extract(uint##sz##_t reg,       \
                                                const mmio_info_t *info)\
{                                                                       \
    return vreg_reg_extract(reg, info->gpa & offmask,                   \
                            info->dabt.size);                           \
}                                                                       \
                                                                        \
static inline void vreg_reg##sz##_update(uint##sz##_t *reg,             \
                                         register_t val,                \
                                         const mmio_info_t *info)       \
{                                                                       \
    unsigned long tmp = *reg;                                           \
                                                                        \
    vreg_reg_update(&tmp, val, info->gpa & offmask,                     \
                    info->dabt.size);                                   \
                                                                        \
    *reg = tmp;                                                         \
}                                                                       \
                                                                        \
static inline void vreg_reg##sz##_setbits(uint##sz##_t *reg,            \
                                          register_t bits,              \
                                          const mmio_info_t *info)      \
{                                                                       \
    unsigned long tmp = *reg;                                           \
                                                                        \
    vreg_reg_setbits(&tmp, bits, info->gpa & offmask,                   \
                     info->dabt.size);                                  \
                                                                        \
    *reg = tmp;                                                         \
}                                                                       \
                                                                        \
static inline void vreg_reg##sz##_clearbits(uint##sz##_t *reg,          \
                                            register_t bits,            \
                                            const mmio_info_t *info)    \
{                                                                       \
    unsigned long tmp = *reg;                                           \
                                                                        \
    vreg_reg_clearbits(&tmp, bits, info->gpa & offmask,                 \
                       info->dabt.size);                                \
                                                                        \
    *reg = tmp;                                                         \
}
207
/*
 * 64-bit registers are only supported on platforms with a 64-bit long.
 * This also allows us to optimize the 32-bit case by using
 * unsigned long rather than uint64_t.
 */
#if BITS_PER_LONG == 64
VREG_REG_HELPERS(64, 0x7); /* 64-bit registers: offset is gpa modulo 8 */
#endif
VREG_REG_HELPERS(32, 0x3); /* 32-bit registers: offset is gpa modulo 4 */

#undef VREG_REG_HELPERS
219
220 #endif /* __ASM_ARM_VREG__ */
221