/*
 * vpt.c: Virtual Platform Timer
 *
 * Copyright (c) 2006, Xiaowei Yang, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/time.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/event.h>
#include <asm/apic.h>
#include <asm/mc146818rtc.h>

#define mode_is(d, name) \
    ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)

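/*
 * Initialise the per-domain platform time state.  stime_offset is set so
 * that guest platform time starts at zero relative to the current Xen
 * system time.
 */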
void hvm_init_guest_time(struct domain *d)
{
    struct pl_time *pl = d->arch.hvm_domain.pl_time;

    spin_lock_init(&pl->pl_time_lock);
    pl->stime_offset = -(u64)get_s_time();
    pl->last_guest_time = 0;
}

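/*
 * Return guest platform time, either sampled now (at_tsc == 0) or derived
 * from the given host TSC value.  For the "now" case, last_guest_time keeps
 * the domain-wide time component monotonically increasing; the per-vCPU
 * stime_offset is then applied on top.
 */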
u64 hvm_get_guest_time_fixed(struct vcpu *v, u64 at_tsc)
{
    struct pl_time *pl = v->domain->arch.hvm_domain.pl_time;
    u64 now;

    /* Called from device models shared with PV guests. Be careful. */
    ASSERT(is_hvm_vcpu(v));

    spin_lock(&pl->pl_time_lock);
    now = get_s_time_fixed(at_tsc) + pl->stime_offset;

    if ( !at_tsc )
    {
        if ( (int64_t)(now - pl->last_guest_time) > 0 )
            pl->last_guest_time = now;
        else
            now = ++pl->last_guest_time;
    }
    spin_unlock(&pl->pl_time_lock);

    return now + v->arch.hvm_vcpu.stime_offset;
}

void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
{
    u64 offset = guest_time - hvm_get_guest_time(v);

    if ( offset )
    {
        v->arch.hvm_vcpu.stime_offset += offset;
        /*
         * If hvm_vcpu.stime_offset is updated, make sure to also update the
         * vcpu system time, since that value is used to calculate the TSC.
         */
        if ( v == current )
            update_vcpu_system_time(v);
    }
}

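/*
 * Translate a periodic timer's IRQ into the vector the guest will see for
 * the given interrupt source.  For LAPIC-driven timers pt->irq already
 * holds the vector; for ISA IRQs it is looked up in the vPIC or vIO-APIC
 * as appropriate.
 */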
static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;
    int vector;

    if ( pt->source == PTSRC_lapic )
        return pt->irq;

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);

    if ( src == hvm_intsrc_pic )
        return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
                + (isa_irq & 7));

    ASSERT(src == hvm_intsrc_lapic);
    vector = vioapic_get_vector(v->domain, gsi);
    if ( vector < 0 )
    {
        dprintk(XENLOG_WARNING, "d%u: invalid GSI (%u) for platform timer\n",
                v->domain->domain_id, gsi);
        domain_crash(v->domain);
        return -1;
    }

    return vector;
}

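/*
 * Return whether the timer's interrupt is currently masked at its delivery
 * point: the LVTT mask bit for LAPIC timers, or the combination of the
 * vPIC IMR, LAPIC ExtINT acceptance and the IO-APIC RTE mask for ISA IRQs.
 */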
static int pt_irq_masked(struct periodic_time *pt)
{
    struct vcpu *v = pt->vcpu;
    unsigned int gsi, isa_irq;
    int mask;
    uint8_t pic_imr;

    if ( pt->source == PTSRC_lapic )
    {
        struct vlapic *vlapic = vcpu_vlapic(v);
        return (!vlapic_enabled(vlapic) ||
                (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_MASKED));
    }

    isa_irq = pt->irq;
    gsi = hvm_isa_irq_to_gsi(isa_irq);
    pic_imr = v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr;
    mask = vioapic_get_mask(v->domain, gsi);
    if ( mask < 0 )
    {
        dprintk(XENLOG_WARNING, "d%u: invalid GSI (%u) for platform timer\n",
                v->domain->domain_id, gsi);
        domain_crash(v->domain);
        return -1;
    }

    return (((pic_imr & (1 << (isa_irq & 7))) || !vlapic_accept_pic_intr(v)) &&
            mask);
}

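/*
 * Acquire the tm_lock of the vCPU a timer is currently bound to.  The
 * owning vCPU can change under our feet (see pt_adjust_vcpu()), so re-check
 * pt->vcpu after taking the lock and retry if it moved.
 */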
static void pt_lock(struct periodic_time *pt)
{
    struct vcpu *v;

    for ( ; ; )
    {
        v = pt->vcpu;
        spin_lock(&v->arch.hvm_vcpu.tm_lock);
        if ( likely(pt->vcpu == v) )
            break;
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    }
}

static void pt_unlock(struct periodic_time *pt)
{
    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
}

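/*
 * Account for periods that elapsed while a periodic timer was not being
 * serviced (one-shot timers are left alone).  Depending on the domain's
 * timer mode the missed ticks are either added to pending_intr_nr for later
 * delivery or dropped, and the next expiry is pushed forward past NOW().
 */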
static void pt_process_missed_ticks(struct periodic_time *pt)
{
    s_time_t missed_ticks, now = NOW();

    if ( pt->one_shot )
        return;

    missed_ticks = now - pt->scheduled;
    if ( missed_ticks <= 0 )
        return;

    missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
    if ( mode_is(pt->vcpu->domain, no_missed_ticks_pending) )
        pt->do_not_freeze = !pt->pending_intr_nr;
    else
        pt->pending_intr_nr += missed_ticks;
    pt->scheduled += missed_ticks * pt->period;
}

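/*
 * In delay_for_missed_ticks mode, guest time is saved while the vCPU is
 * descheduled (pt_freeze_time) and restored from the saved value when it
 * runs again (pt_thaw_time), so the guest does not observe time passing
 * while it could not service its timer interrupts.
 */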
static void pt_freeze_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
}

static void pt_thaw_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    if ( v->arch.hvm_vcpu.guest_time == 0 )
        return;

    hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    v->arch.hvm_vcpu.guest_time = 0;
}

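/*
 * Called when a vCPU is descheduled: stop the Xen timers backing its
 * virtual timers (unless marked do_not_freeze) and, if applicable, freeze
 * guest time.  A merely blocked vCPU keeps its timers running so it can
 * still be woken by them.  pt_restore_timer() undoes this when the vCPU is
 * scheduled again, first catching up on any missed ticks.
 */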
void pt_save_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    if ( v->pause_flags & VPF_blocked )
        return;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        if ( !pt->do_not_freeze )
            stop_timer(&pt->timer);

    pt_freeze_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void pt_restore_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        if ( pt->pending_intr_nr == 0 )
        {
            pt_process_missed_ticks(pt);
            set_timer(&pt->timer, pt->scheduled);
        }
    }

    pt_thaw_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

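/*
 * Xen timer callback: record one more pending tick, advance the next
 * expiry by one period and kick the vCPU so it can inject the interrupt.
 */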
static void pt_timer_fn(void *data)
{
    struct periodic_time *pt = data;

    pt_lock(pt);

    pt->pending_intr_nr++;
    pt->scheduled += pt->period;
    pt->do_not_freeze = 0;

    vcpu_kick(pt->vcpu);

    pt_unlock(pt);
}

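/*
 * Pick the pending timer with the earliest deadline whose interrupt is not
 * masked and inject its interrupt.  Returns the LAPIC vector used when the
 * interrupt is delivered through the (IO-)APIC, or -1 when nothing was
 * injected or the interrupt went via the PIC.
 */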
int pt_update_irq(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt, *temp, *earliest_pt;
    uint64_t max_lag;
    int irq, is_lapic, pt_vector;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    earliest_pt = NULL;
    max_lag = -1ULL;
    list_for_each_entry_safe ( pt, temp, head, list )
    {
        if ( pt->pending_intr_nr )
        {
            /* RTC code takes care of disabling the timer itself. */
            if ( (pt->irq != RTC_IRQ || !pt->priv) && pt_irq_masked(pt) )
            {
                /* suspend timer emulation */
                list_del(&pt->list);
                pt->on_list = 0;
            }
            else
            {
                if ( (pt->last_plt_gtime + pt->period) < max_lag )
                {
                    max_lag = pt->last_plt_gtime + pt->period;
                    earliest_pt = pt;
                }
            }
        }
    }

    if ( earliest_pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return -1;
    }

    earliest_pt->irq_issued = 1;
    irq = earliest_pt->irq;
    is_lapic = (earliest_pt->source == PTSRC_lapic);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    /*
     * If the periodic timer interrupt is handled by the LAPIC, its vector in
     * the IRR is returned and used to set eoi_exit_bitmap for the virtual
     * interrupt delivery case.  Otherwise return -1 to do nothing.
     */
    if ( is_lapic )
    {
        vlapic_set_irq(vcpu_vlapic(v), irq, 0);
        pt_vector = irq;
    }
    else
    {
        hvm_isa_irq_deassert(v->domain, irq);
        if ( platform_legacy_irq(irq) && vlapic_accept_pic_intr(v) &&
             v->domain->arch.hvm_domain.vpic[irq >> 3].int_output )
        {
            hvm_isa_irq_assert(v->domain, irq, NULL);
            pt_vector = -1;
        }
        else
        {
            pt_vector = hvm_isa_irq_assert(v->domain, irq, vioapic_get_vector);
            /*
             * hvm_isa_irq_assert may not set the corresponding bit in vIRR
             * when mask field of IOAPIC RTE is set. Check it again.
             */
            if ( pt_vector < 0 || !vlapic_test_irq(vcpu_vlapic(v), pt_vector) )
                pt_vector = -1;
        }
    }

    return pt_vector;
}

static struct periodic_time *is_pt_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    list_for_each_entry ( pt, head, list )
    {
        if ( pt->pending_intr_nr && pt->irq_issued &&
             (intack.vector == pt_irq_vector(pt, intack.source)) )
            return pt;
    }

    return NULL;
}

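/*
 * Called once the guest has acknowledged a timer interrupt.  Update the
 * timer's accounting according to the domain's timer mode (collapsing or
 * delivering outstanding ticks), re-arm the underlying Xen timer if needed,
 * and finally invoke the device model callback outside the lock.
 */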
void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
{
    struct periodic_time *pt;
    time_cb *cb;
    void *cb_priv;

    if ( intack.source == hvm_intsrc_vector )
        return;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt = is_pt_irq(v, intack);
    if ( pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    pt->irq_issued = 0;

    if ( pt->one_shot )
    {
        if ( pt->on_list )
            list_del(&pt->list);
        pt->on_list = 0;
        pt->pending_intr_nr = 0;
    }
    else if ( mode_is(v->domain, one_missed_tick_pending) ||
              mode_is(v->domain, no_missed_ticks_pending) )
    {
        pt->last_plt_gtime = hvm_get_guest_time(v);
        pt_process_missed_ticks(pt);
        pt->pending_intr_nr = 0; /* 'collapse' all missed ticks */
        set_timer(&pt->timer, pt->scheduled);
    }
    else
    {
        pt->last_plt_gtime += pt->period;
        if ( --pt->pending_intr_nr == 0 )
        {
            pt_process_missed_ticks(pt);
            if ( pt->pending_intr_nr == 0 )
                set_timer(&pt->timer, pt->scheduled);
        }
    }

    if ( mode_is(v->domain, delay_for_missed_ticks) &&
         (hvm_get_guest_time(v) < pt->last_plt_gtime) )
        hvm_set_guest_time(v, pt->last_plt_gtime);

    cb = pt->cb;
    cb_priv = pt->priv;

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( cb != NULL )
        cb(v, cb_priv);
}

void pt_migrate(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        migrate_timer(&pt->timer, v->processor);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

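/*
 * (Re)initialise and start a virtual timer bound to vCPU v.  A zero period
 * creates a one-shot timer firing after 'delta' ns; a non-zero period
 * creates a periodic timer (clamped to at least 100us) with its first
 * expiry after 'delta' ns.  The caller must have set pt->source beforehand.
 * A rough usage sketch (callback and argument names are illustrative, not
 * taken from this file):
 *
 *     pt->source = PTSRC_isa;
 *     create_periodic_time(v, pt, period_ns, period_ns, irq, my_cb, priv);
 */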
void create_periodic_time(
    struct vcpu *v, struct periodic_time *pt, uint64_t delta,
    uint64_t period, uint8_t irq, time_cb *cb, void *data)
{
    ASSERT(pt->source != 0);

    destroy_periodic_time(pt);

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt->pending_intr_nr = 0;
    pt->do_not_freeze = 0;
    pt->irq_issued = 0;

    /* Periodic timer must be at least 0.1ms. */
    if ( (period < 100000) && period )
    {
        if ( !test_and_set_bool(pt->warned_timeout_too_short) )
            gdprintk(XENLOG_WARNING, "HVM_PlatformTime: program too "
                     "small period %"PRIu64"\n", period);
        period = 100000;
    }

    pt->period = period;
    pt->vcpu = v;
    pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
    pt->irq = irq;
    pt->one_shot = !period;
    pt->scheduled = NOW() + delta;

    if ( !pt->one_shot )
    {
        if ( v->domain->arch.hvm_domain.params[HVM_PARAM_VPT_ALIGN] )
        {
            pt->scheduled = align_timer(pt->scheduled, pt->period);
        }
        else if ( pt->source == PTSRC_lapic )
        {
            /*
             * Offset LAPIC ticks from other timer ticks. Otherwise guests
             * which use LAPIC ticks for process accounting can see long
             * sequences of process ticks incorrectly accounted to interrupt
             * processing (seen with RHEL3 guest).
             */
            pt->scheduled += delta >> 1;
        }
    }

    pt->cb = cb;
    pt->priv = data;

    pt->on_list = 1;
    list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);

    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
    set_timer(&pt->timer, pt->scheduled);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

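/*
 * Stop and tear down a timer previously set up by create_periodic_time().
 * Safe to call on a (zero-initialised) structure that was never set up.
 */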
void destroy_periodic_time(struct periodic_time *pt)
{
    /* Was this structure previously initialised by create_periodic_time()? */
    if ( pt->vcpu == NULL )
        return;

    pt_lock(pt);
    if ( pt->on_list )
        list_del(&pt->list);
    pt->on_list = 0;
    pt->pending_intr_nr = 0;
    pt_unlock(pt);

    /*
     * pt_timer_fn() can run until this kill_timer() returns. We must do this
     * outside pt_lock() otherwise we can deadlock with pt_timer_fn().
     */
    kill_timer(&pt->timer);
}

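/*
 * Rebind an ISA-routed timer to a different vCPU, moving it onto the new
 * vCPU's timer list and migrating the underlying Xen timer to that vCPU's
 * processor.  Used by pt_adjust_global_vcpu_target() when the target of
 * domain-wide timers (PIT, RTC, HPET) changes.
 */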
static void pt_adjust_vcpu(struct periodic_time *pt, struct vcpu *v)
{
    int on_list;

    ASSERT(pt->source == PTSRC_isa);

    if ( pt->vcpu == NULL )
        return;

    pt_lock(pt);
    on_list = pt->on_list;
    if ( pt->on_list )
        list_del(&pt->list);
    pt->on_list = 0;
    pt_unlock(pt);

    spin_lock(&v->arch.hvm_vcpu.tm_lock);
    pt->vcpu = v;
    if ( on_list )
    {
        pt->on_list = 1;
        list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);

        migrate_timer(&pt->timer, v->processor);
    }
    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void pt_adjust_global_vcpu_target(struct vcpu *v)
{
    struct PITState *vpit;
    struct pl_time *pl_time;
    int i;

    if ( !v || !has_vpit(v->domain) )
        return;

    vpit = &v->domain->arch.vpit;

    spin_lock(&vpit->lock);
    pt_adjust_vcpu(&vpit->pt0, v);
    spin_unlock(&vpit->lock);

    pl_time = v->domain->arch.hvm_domain.pl_time;

    spin_lock(&pl_time->vrtc.lock);
    pt_adjust_vcpu(&pl_time->vrtc.pt, v);
    spin_unlock(&pl_time->vrtc.lock);

    write_lock(&pl_time->vhpet.lock);
    for ( i = 0; i < HPET_TIMER_NUM; i++ )
        pt_adjust_vcpu(&pl_time->vhpet.pt[i], v);
    write_unlock(&pl_time->vhpet.lock);
}


static void pt_resume(struct periodic_time *pt)
{
    if ( pt->vcpu == NULL )
        return;

    pt_lock(pt);
    if ( pt->pending_intr_nr && !pt->on_list )
    {
        pt->on_list = 1;
        list_add(&pt->list, &pt->vcpu->arch.hvm_vcpu.tm_list);
        vcpu_kick(pt->vcpu);
    }
    pt_unlock(pt);
}

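/*
 * Called when a timer interrupt may have become unmasked (e.g. after a
 * guest write to a PIC/IO-APIC/LAPIC mask bit): put any timers that were
 * taken off the list by pt_update_irq() back on it so their pending ticks
 * can be delivered.
 */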
void pt_may_unmask_irq(struct domain *d, struct periodic_time *vlapic_pt)
{
    int i;

    if ( d )
    {
        pt_resume(&d->arch.vpit.pt0);
        pt_resume(&d->arch.hvm_domain.pl_time->vrtc.pt);
        for ( i = 0; i < HPET_TIMER_NUM; i++ )
            pt_resume(&d->arch.hvm_domain.pl_time->vhpet.pt[i]);
    }

    if ( vlapic_pt )
        pt_resume(vlapic_pt);
}