/*
 * xen/arch/arm/vtimer.c
 *
 * ARM Virtual Timer emulation support
 *
 * Ian Campbell <ian.campbell@citrix.com>
 * Copyright (c) 2011 Citrix Systems.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <xen/lib.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/timer.h>

#include <asm/cpregs.h>
#include <asm/div64.h>
#include <asm/gic.h>
#include <asm/irq.h>
#include <asm/regs.h>
#include <asm/time.h>
#include <asm/vgic.h>
#include <asm/vreg.h>

/*
 * Check if regs is allowed access; user_gate is the tail end of a
 * CNTKCTL_EL1_ bit name which gates user access.
 */
#define ACCESS_ALLOWED(regs, user_gate) \
    ( !psr_mode_is_user(regs) || \
      (READ_SYSREG(CNTKCTL_EL1) & CNTKCTL_EL1_##user_gate) )

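/*
 * Xen timer callback for the emulated physical timer: mark the timer as
 * pending and inject its PPI into the owning vCPU unless the guest has
 * masked it.
 */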
static void phys_timer_expired(void *data)
{
    struct vtimer *t = data;
    t->ctl |= CNTx_CTL_PENDING;
    if ( !(t->ctl & CNTx_CTL_MASK) )
    {
        perfc_incr(vtimer_phys_inject);
        vgic_vcpu_inject_irq(t->v, t->irq);
    }
    else
        perfc_incr(vtimer_phys_masked);
}

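/*
 * Xen timer callback for the virtual timer. This timer is only armed
 * while the vCPU is descheduled (see virt_timer_save()), so mask the
 * shadowed control state and inject the PPI so that it is pending when
 * the vCPU runs again.
 */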
static void virt_timer_expired(void *data)
{
    struct vtimer *t = data;
    t->ctl |= CNTx_CTL_MASK;
    vgic_vcpu_inject_irq(t->v, t->irq);
    perfc_incr(vtimer_virt_inject);
}

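/*
 * Per-domain timer setup: record the offsets used to virtualise the
 * physical and virtual counters, record the timer frequency in the
 * domain config and reserve the timer PPIs in the vGIC.
 */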
int domain_vtimer_init(struct domain *d, struct xen_arch_domainconfig *config)
{
    d->arch.phys_timer_base.offset = NOW();
    d->arch.virt_timer_base.offset = READ_SYSREG64(CNTPCT_EL0);
    d->time_offset_seconds = ticks_to_ns(d->arch.virt_timer_base.offset - boot_count);
    do_div(d->time_offset_seconds, 1000000000);

    config->clock_frequency = timer_dt_clock_frequency;

    /* At this stage vgic_reserve_virq can't fail */
    if ( is_hardware_domain(d) )
    {
        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_PHYS_SECURE_PPI)) )
            BUG();

        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_PHYS_NONSECURE_PPI)) )
            BUG();

        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_VIRT_PPI)) )
            BUG();
    }
    else
    {
        if ( !vgic_reserve_virq(d, GUEST_TIMER_PHYS_S_PPI) )
            BUG();

        if ( !vgic_reserve_virq(d, GUEST_TIMER_PHYS_NS_PPI) )
            BUG();

        if ( !vgic_reserve_virq(d, GUEST_TIMER_VIRT_PPI) )
            BUG();
    }

    return 0;
}

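/*
 * Per-vCPU timer setup: create the Xen timers backing the emulated
 * physical timer and the shadowed virtual timer, and select the PPIs to
 * inject (hardware numbers for the hardware domain, the static guest
 * numbers otherwise).
 */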
int vcpu_vtimer_init(struct vcpu *v)
{
    struct vtimer *t = &v->arch.phys_timer;
    bool d0 = is_hardware_domain(v->domain);

    /*
     * Hardware domain uses the hardware interrupts, guests get the virtual
     * platform.
     */

    init_timer(&t->timer, phys_timer_expired, t, v->processor);
    t->ctl = 0;
    t->cval = NOW();
    t->irq = d0
        ? timer_get_irq(TIMER_PHYS_NONSECURE_PPI)
        : GUEST_TIMER_PHYS_NS_PPI;
    t->v = v;

    t = &v->arch.virt_timer;
    init_timer(&t->timer, virt_timer_expired, t, v->processor);
    t->ctl = 0;
    t->irq = d0
        ? timer_get_irq(TIMER_VIRT_PPI)
        : GUEST_TIMER_VIRT_PPI;
    t->v = v;

    v->arch.vtimer_initialized = 1;

    return 0;
}

void vcpu_timer_destroy(struct vcpu *v)
{
    if ( !v->arch.vtimer_initialized )
        return;

    kill_timer(&v->arch.virt_timer.timer);
    kill_timer(&v->arch.phys_timer.timer);
}

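/*
 * Called when the vCPU is scheduled out: save the virtual timer state,
 * disable it in hardware and, if it was enabled and unmasked, arm a Xen
 * timer so the interrupt can still be delivered while the vCPU is not
 * running.
 */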
int virt_timer_save(struct vcpu *v)
{
    ASSERT(!is_idle_vcpu(v));

    v->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0);
    WRITE_SYSREG32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL_EL0);
    v->arch.virt_timer.cval = READ_SYSREG64(CNTV_CVAL_EL0);
    if ( (v->arch.virt_timer.ctl & CNTx_CTL_ENABLE) &&
         !(v->arch.virt_timer.ctl & CNTx_CTL_MASK))
    {
        set_timer(&v->arch.virt_timer.timer, ticks_to_ns(v->arch.virt_timer.cval +
                  v->domain->arch.virt_timer_base.offset - boot_count));
    }
    return 0;
}

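/*
 * Called when the vCPU is scheduled in: cancel the Xen timer armed by
 * virt_timer_save(), migrate both per-vCPU timers to the new physical
 * CPU and reload the virtual timer state into hardware.
 */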
int virt_timer_restore(struct vcpu *v)
{
    ASSERT(!is_idle_vcpu(v));

    stop_timer(&v->arch.virt_timer.timer);
    migrate_timer(&v->arch.virt_timer.timer, v->processor);
    migrate_timer(&v->arch.phys_timer.timer, v->processor);

    WRITE_SYSREG64(v->domain->arch.virt_timer_base.offset, CNTVOFF_EL2);
    WRITE_SYSREG64(v->arch.virt_timer.cval, CNTV_CVAL_EL0);
    WRITE_SYSREG32(v->arch.virt_timer.ctl, CNTV_CTL_EL0);
    return 0;
}

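/*
 * Emulate CNTP_CTL, the physical timer control register. The ISTATUS
 * (pending) bit is read-only to the guest; writes arm or stop the Xen
 * timer backing the emulated physical timer.
 */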
static bool vtimer_cntp_ctl(struct cpu_user_regs *regs, uint32_t *r, bool read)
{
    struct vcpu *v = current;

    if ( !ACCESS_ALLOWED(regs, EL0PTEN) )
        return false;

    if ( read )
    {
        *r = v->arch.phys_timer.ctl;
    }
    else
    {
        uint32_t ctl = *r & ~CNTx_CTL_PENDING;
        if ( ctl & CNTx_CTL_ENABLE )
            ctl |= v->arch.phys_timer.ctl & CNTx_CTL_PENDING;
        v->arch.phys_timer.ctl = ctl;

        if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE )
        {
            set_timer(&v->arch.phys_timer.timer,
                      v->arch.phys_timer.cval + v->domain->arch.phys_timer_base.offset);
        }
        else
            stop_timer(&v->arch.phys_timer.timer);
    }
    return true;
}

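/*
 * Emulate CNTP_TVAL, the physical timer value register: a 32-bit value
 * relative to the current (virtualised) physical counter, converted to
 * and from the absolute compare value held in cval.
 */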
static bool vtimer_cntp_tval(struct cpu_user_regs *regs, uint32_t *r,
                             bool read)
{
    struct vcpu *v = current;
    s_time_t now;

    if ( !ACCESS_ALLOWED(regs, EL0PTEN) )
        return false;

    now = NOW() - v->domain->arch.phys_timer_base.offset;

    if ( read )
    {
        *r = (uint32_t)(ns_to_ticks(v->arch.phys_timer.cval - now) & 0xffffffffull);
    }
    else
    {
        v->arch.phys_timer.cval = now + ticks_to_ns(*r);
        if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE )
        {
            v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING;
            set_timer(&v->arch.phys_timer.timer,
                      v->arch.phys_timer.cval +
                      v->domain->arch.phys_timer_base.offset);
        }
    }
    return true;
}

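/*
 * Emulate CNTP_CVAL, the physical timer compare value register: the
 * 64-bit absolute compare value, held internally in nanoseconds.
 */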
static bool vtimer_cntp_cval(struct cpu_user_regs *regs, uint64_t *r,
                             bool read)
{
    struct vcpu *v = current;

    if ( !ACCESS_ALLOWED(regs, EL0PTEN) )
        return false;

    if ( read )
    {
        *r = ns_to_ticks(v->arch.phys_timer.cval);
    }
    else
    {
        v->arch.phys_timer.cval = ticks_to_ns(*r);
        if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE )
        {
            v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING;
            set_timer(&v->arch.phys_timer.timer,
                      v->arch.phys_timer.cval +
                      v->domain->arch.phys_timer_base.offset);
        }
    }
    return true;
}

static bool vtimer_emulate_cp32(struct cpu_user_regs *regs, union hsr hsr)
{
    struct hsr_cp32 cp32 = hsr.cp32;

    if ( cp32.read )
        perfc_incr(vtimer_cp32_reads);
    else
        perfc_incr(vtimer_cp32_writes);

    switch ( hsr.bits & HSR_CP32_REGS_MASK )
    {
    case HSR_CPREG32(CNTP_CTL):
        return vreg_emulate_cp32(regs, hsr, vtimer_cntp_ctl);

    case HSR_CPREG32(CNTP_TVAL):
        return vreg_emulate_cp32(regs, hsr, vtimer_cntp_tval);

    default:
        return false;
    }
}

static bool vtimer_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr)
{
    struct hsr_cp64 cp64 = hsr.cp64;

    if ( cp64.read )
        perfc_incr(vtimer_cp64_reads);
    else
        perfc_incr(vtimer_cp64_writes);

    switch ( hsr.bits & HSR_CP64_REGS_MASK )
    {
    case HSR_CPREG64(CNTP_CVAL):
        return vreg_emulate_cp64(regs, hsr, vtimer_cntp_cval);

    default:
        return false;
    }
}

#ifdef CONFIG_ARM_64
static bool vtimer_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr)
{
    struct hsr_sysreg sysreg = hsr.sysreg;

    if ( sysreg.read )
        perfc_incr(vtimer_sysreg_reads);
    else
        perfc_incr(vtimer_sysreg_writes);

    switch ( hsr.bits & HSR_SYSREG_REGS_MASK )
    {
    case HSR_SYSREG_CNTP_CTL_EL0:
        return vreg_emulate_sysreg32(regs, hsr, vtimer_cntp_ctl);
    case HSR_SYSREG_CNTP_TVAL_EL0:
        return vreg_emulate_sysreg32(regs, hsr, vtimer_cntp_tval);
    case HSR_SYSREG_CNTP_CVAL_EL0:
        return vreg_emulate_sysreg64(regs, hsr, vtimer_cntp_cval);

    default:
        return false;
    }

}
#endif

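/*
 * Top-level handler for trapped generic timer accesses: dispatch on the
 * exception class to the 32-bit CP15, 64-bit CP15 or (on arm64) system
 * register emulation path. Returns false if the access is not handled.
 */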
bool vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr)
{
    switch ( hsr.ec )
    {
    case HSR_EC_CP15_32:
        return vtimer_emulate_cp32(regs, hsr);
    case HSR_EC_CP15_64:
        return vtimer_emulate_cp64(regs, hsr);
#ifdef CONFIG_ARM_64
    case HSR_EC_SYSREG:
        return vtimer_emulate_sysreg(regs, hsr);
#endif
    default:
        return false;
    }
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */