/******************************************************************************
 * arch/x86/hpet.c
 *
 * HPET management.
 */

#include <xen/errno.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/irq.h>
#include <xen/numa.h>
#include <asm/fixmap.h>
#include <asm/div64.h>
#include <asm/hpet.h>
#include <asm/msi.h>
#include <mach_apic.h>
#include <xen/cpuidle.h>

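/*
 * Deltas programmed into a channel are clamped to the range
 * [MIN_DELTA_NS, MAX_DELTA_NS], i.e. 20us ... 10s, before being converted
 * to ticks (see reprogram_hpet_evt_channel()).
 */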
#define MAX_DELTA_NS MILLISECS(10*1000)
#define MIN_DELTA_NS MICROSECS(20)

#define HPET_EVT_USED_BIT    0
#define HPET_EVT_USED       (1 << HPET_EVT_USED_BIT)
#define HPET_EVT_DISABLE_BIT 1
#define HPET_EVT_DISABLE    (1 << HPET_EVT_DISABLE_BIT)
#define HPET_EVT_LEGACY_BIT  2
#define HPET_EVT_LEGACY     (1 << HPET_EVT_LEGACY_BIT)

struct hpet_event_channel
{
    unsigned long mult;
    int           shift;
    s_time_t      next_event;
    cpumask_var_t cpumask;
    spinlock_t    lock;
    void          (*event_handler)(struct hpet_event_channel *);

    unsigned int idx;   /* physical channel idx */
    unsigned int cpu;   /* msi target */
    struct msi_desc msi;/* msi state */
    unsigned int flags; /* HPET_EVT_x */
} __cacheline_aligned;
static struct hpet_event_channel *__read_mostly hpet_events;

/* msi hpet channels used for broadcast */
static unsigned int __read_mostly num_hpets_used;

DEFINE_PER_CPU(struct hpet_event_channel *, cpu_bc_channel);

unsigned long __initdata hpet_address;
u8 __initdata hpet_blockid;
u8 __initdata hpet_flags;

/*
 * force_hpet_broadcast: by default, legacy HPET broadcast will be stopped
 * if RTC interrupts are enabled.  Enable this option if you want legacy
 * HPET broadcast always enabled for deep C-states.
 */
static bool __initdata force_hpet_broadcast;
boolean_param("hpetbroadcast", force_hpet_broadcast);
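/* e.g. booting Xen with "hpetbroadcast" on the command line sets this. */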

/*
 * Calculate a multiplication factor for scaled math, which is used to convert
 * nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 *
 * div_sc is the rearranged equation to calculate a factor from a given clock
 * ticks / nanoseconds ratio:
 *
 * factor = (clock_ticks << shift) / nanoseconds
 */
static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
                                   int shift)
{
    uint64_t tmp = ((uint64_t)ticks) << shift;

    do_div(tmp, nsec);
    return (unsigned long) tmp;
}

/*
 * Convert nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 */
static inline unsigned long ns2ticks(unsigned long nsec, int shift,
                                     unsigned long factor)
{
    uint64_t tmp = ((uint64_t)nsec * factor) >> shift;

    return (unsigned long) tmp;
}
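
/*
 * Worked example (illustrative numbers, not probed from hardware): for a
 * 14.318180 MHz HPET and shift = 32, as set up in hpet_broadcast_init():
 *
 *   factor = div_sc(14318180, 1000000000, 32)
 *          = (14318180 << 32) / 10^9 ~= 61496114
 *
 * and converting one millisecond back to ticks gives
 *
 *   ns2ticks(1000000, 32, 61496114) = (10^6 * 61496114) >> 32 = 14318
 *
 * i.e. the expected ~14318 ticks/ms.
 */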
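/*
 * Program channel 'timer' to fire 'delta' ticks from now.  The counter is
 * read again after writing the comparator: with 32-bit wraparound
 * arithmetic, (cmp + 2 - cnt) > delta means the deadline may already have
 * passed (or be about to) by the time the write landed, e.g. delta = 5
 * with the counter having advanced 4 ticks meanwhile gives 4 + 2 = 6 > 5,
 * so -ETIME tells the caller to retry with a larger delta.
 */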
static int hpet_next_event(unsigned long delta, int timer)
{
    uint32_t cnt, cmp;
    unsigned long flags;

    local_irq_save(flags);
    cnt = hpet_read32(HPET_COUNTER);
    cmp = cnt + delta;
    hpet_write32(cmp, HPET_Tn_CMP(timer));
    cmp = hpet_read32(HPET_COUNTER);
    local_irq_restore(flags);

    /* Are we within two ticks of the deadline passing? Then we may miss. */
    return ((cmp + 2 - cnt) > delta) ? -ETIME : 0;
}

static int reprogram_hpet_evt_channel(
    struct hpet_event_channel *ch,
    s_time_t expire, s_time_t now, int force)
{
    int64_t delta;
    int ret;

    if ( (ch->flags & HPET_EVT_DISABLE) || (expire == 0) )
        return 0;

    if ( unlikely(expire < 0) )
    {
        printk(KERN_DEBUG "reprogram: expire < 0\n");
        return -ETIME;
    }

    delta = expire - now;
    if ( (delta <= 0) && !force )
        return -ETIME;

    ch->next_event = expire;

    if ( expire == STIME_MAX )
    {
        /*
         * We assume it will take a long time for the timer to wrap (at a
         * typical ~14.3 MHz HPET rate, a 32-bit counter wraps only every
         * ~300s).
         */
        hpet_write32(0, HPET_Tn_CMP(ch->idx));
        return 0;
    }

    delta = min_t(int64_t, delta, MAX_DELTA_NS);
    delta = max_t(int64_t, delta, MIN_DELTA_NS);
    delta = ns2ticks(delta, ch->shift, ch->mult);

    ret = hpet_next_event(delta, ch->idx);
    while ( ret && force )
    {
        delta += delta;
        ret = hpet_next_event(delta, ch->idx);
    }

    return ret;
}

static void evt_do_broadcast(cpumask_t *mask)
{
    unsigned int cpu = smp_processor_id();

    if ( __cpumask_test_and_clear_cpu(cpu, mask) )
        raise_softirq(TIMER_SOFTIRQ);

    cpuidle_wakeup_mwait(mask);

    if ( !cpumask_empty(mask) )
        cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
}

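/*
 * Broadcast handler: scan the CPUs sharing this channel, wake those whose
 * timer_deadline has passed, and reprogram the channel for the earliest
 * remaining deadline.  The re-test of ch->cpumask inside the loop guards
 * against CPUs that may leave the mask concurrently (cf.
 * hpet_broadcast_exit()); if reprogramming fails, the scan is redone.
 */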
static void handle_hpet_broadcast(struct hpet_event_channel *ch)
{
    cpumask_t mask;
    s_time_t now, next_event;
    unsigned int cpu;
    unsigned long flags;

    spin_lock_irqsave(&ch->lock, flags);

again:
    ch->next_event = STIME_MAX;

    spin_unlock_irqrestore(&ch->lock, flags);

    next_event = STIME_MAX;
    cpumask_clear(&mask);
    now = NOW();

    /* find all expired events */
    for_each_cpu(cpu, ch->cpumask)
    {
        s_time_t deadline;

        if ( !cpumask_test_cpu(cpu, ch->cpumask) )
            continue;

        deadline = ACCESS_ONCE(per_cpu(timer_deadline, cpu));

        if ( deadline <= now )
            __cpumask_set_cpu(cpu, &mask);
        else if ( deadline < next_event )
            next_event = deadline;
    }

    /* wakeup the cpus which have an expired event. */
    evt_do_broadcast(&mask);

    if ( next_event != STIME_MAX )
    {
        spin_lock_irqsave(&ch->lock, flags);

        if ( next_event < ch->next_event &&
             reprogram_hpet_evt_channel(ch, next_event, now, 0) )
            goto again;

        spin_unlock_irqrestore(&ch->lock, flags);
    }
}

static void hpet_interrupt_handler(int irq, void *data,
        struct cpu_user_regs *regs)
{
    struct hpet_event_channel *ch = (struct hpet_event_channel *)data;

    this_cpu(irq_count)--;

    if ( !ch->event_handler )
    {
        printk(XENLOG_WARNING
               "Spurious HPET timer interrupt on HPET timer %u\n", ch->idx);
        return;
    }

    ch->event_handler(ch);
}

static void hpet_msi_unmask(struct irq_desc *desc)
{
    u32 cfg;
    struct hpet_event_channel *ch = desc->action->dev_id;

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg |= HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
    ch->msi.msi_attrib.host_masked = 0;
}

static void hpet_msi_mask(struct irq_desc *desc)
{
    u32 cfg;
    struct hpet_event_channel *ch = desc->action->dev_id;

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg &= ~HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
    ch->msi.msi_attrib.host_masked = 1;
}

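/*
 * Write an MSI message to the channel's FSB interrupt route register: per
 * the HPET specification, the low dword of HPET_Tn_ROUTE holds the message
 * value (data) and the high dword the address it is written to.
 */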
static int hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
{
    ch->msi.msg = *msg;

    if ( iommu_intremap )
    {
        int rc = iommu_update_ire_from_msi(&ch->msi, msg);

        if ( rc )
            return rc;
    }

    hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
    hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);

    return 0;
}

static void __maybe_unused
hpet_msi_read(struct hpet_event_channel *ch, struct msi_msg *msg)
{
    msg->data = hpet_read32(HPET_Tn_ROUTE(ch->idx));
    msg->address_lo = hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4);
    msg->address_hi = MSI_ADDR_BASE_HI;
    if ( iommu_intremap )
        iommu_read_msi_from_ire(&ch->msi, msg);
}

static unsigned int hpet_msi_startup(struct irq_desc *desc)
{
    hpet_msi_unmask(desc);
    return 0;
}

#define hpet_msi_shutdown hpet_msi_mask

static void hpet_msi_ack(struct irq_desc *desc)
{
    irq_complete_move(desc);
    move_native_irq(desc);
    ack_APIC_irq();
}

static void hpet_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
    struct hpet_event_channel *ch = desc->action->dev_id;
    struct msi_msg msg = ch->msi.msg;

    msg.dest32 = set_desc_affinity(desc, mask);
    if ( msg.dest32 == BAD_APICID )
        return;

    msg.data &= ~MSI_DATA_VECTOR_MASK;
    msg.data |= MSI_DATA_VECTOR(desc->arch.vector);
    msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
    msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
    if ( msg.data != ch->msi.msg.data || msg.dest32 != ch->msi.msg.dest32 )
        hpet_msi_write(ch, &msg);
}

/*
 * IRQ chip for MSI HPET devices.
 */
static hw_irq_controller hpet_msi_type = {
    .typename   = "HPET-MSI",
    .startup    = hpet_msi_startup,
    .shutdown   = hpet_msi_shutdown,
    .enable     = hpet_msi_unmask,
    .disable    = hpet_msi_mask,
    .ack        = hpet_msi_ack,
    .set_affinity   = hpet_msi_set_affinity,
};

static int __hpet_setup_msi_irq(struct irq_desc *desc)
{
    struct msi_msg msg;

    msi_compose_msg(desc->arch.vector, desc->arch.cpu_mask, &msg);
    return hpet_msi_write(desc->action->dev_id, &msg);
}

static int __init hpet_setup_msi_irq(struct hpet_event_channel *ch)
{
    int ret;
    u32 cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    irq_desc_t *desc = irq_to_desc(ch->msi.irq);

    if ( iommu_intremap )
    {
        ch->msi.hpet_id = hpet_blockid;
        ret = iommu_setup_hpet_msi(&ch->msi);
        if ( ret )
            return ret;
    }

    /* set HPET Tn as oneshot */
    cfg &= ~(HPET_TN_LEVEL | HPET_TN_PERIODIC);
    cfg |= HPET_TN_FSB | HPET_TN_32BIT;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));

    desc->handler = &hpet_msi_type;
    ret = request_irq(ch->msi.irq, 0, hpet_interrupt_handler, "HPET", ch);
    if ( ret >= 0 )
        ret = __hpet_setup_msi_irq(desc);
    if ( ret < 0 )
    {
        if ( iommu_intremap )
            iommu_update_ire_from_msi(&ch->msi, NULL);
        return ret;
    }

    desc->msi_desc = &ch->msi;

    return 0;
}

static int __init hpet_assign_irq(struct hpet_event_channel *ch)
{
    int irq;

    if ( (irq = create_irq(NUMA_NO_NODE)) < 0 )
        return irq;

    ch->msi.irq = irq;
    if ( hpet_setup_msi_irq(ch) )
    {
        destroy_irq(irq);
        return -EINVAL;
    }

    return 0;
}

static void __init hpet_fsb_cap_lookup(void)
{
    u32 id;
    unsigned int i, num_chs;

    if ( unlikely(acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) )
        return;

    id = hpet_read32(HPET_ID);

    num_chs = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
    num_chs++; /* The field holds the index of the last timer, so add 1. */

    hpet_events = xzalloc_array(struct hpet_event_channel, num_chs);
    if ( !hpet_events )
        return;

    for ( i = 0; i < num_chs && num_hpets_used < nr_cpu_ids; i++ )
    {
        struct hpet_event_channel *ch = &hpet_events[num_hpets_used];
        u32 cfg = hpet_read32(HPET_Tn_CFG(i));

        /* Only consider HPET timers with MSI support */
        if ( !(cfg & HPET_TN_FSB_CAP) )
            continue;

        if ( !zalloc_cpumask_var(&ch->cpumask) )
        {
            if ( !num_hpets_used )
            {
                xfree(hpet_events);
                hpet_events = NULL;
            }
            break;
        }

        ch->flags = 0;
        ch->idx = i;

        if ( hpet_assign_irq(ch) == 0 )
            num_hpets_used++;
    }

    printk(XENLOG_INFO "HPET: %u timers usable for broadcast (%u total)\n",
           num_hpets_used, num_chs);
}

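/*
 * Pick a broadcast channel for @cpu: with no MSI-capable channels, fall
 * back to the single legacy channel; with at least one channel per CPU,
 * use a static assignment; otherwise advance a lock-free round-robin
 * cursor (cmpxchg on next_channel) and prefer a channel not yet marked
 * HPET_EVT_USED before sharing an in-use one.
 */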
static struct hpet_event_channel *hpet_get_channel(unsigned int cpu)
{
    static unsigned int next_channel;
    unsigned int i, next;
    struct hpet_event_channel *ch;

    if ( num_hpets_used == 0 )
        return hpet_events;

    if ( num_hpets_used >= nr_cpu_ids )
        return &hpet_events[cpu];

    do {
        next = next_channel;
        if ( (i = next + 1) == num_hpets_used )
            i = 0;
    } while ( cmpxchg(&next_channel, next, i) != next );

    /* try an unused channel first */
    for ( i = next; i < next + num_hpets_used; i++ )
    {
        ch = &hpet_events[i % num_hpets_used];
        if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        {
            ch->cpu = cpu;
            return ch;
        }
    }

    /* share an in-use channel */
    ch = &hpet_events[next];
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    return ch;
}
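/*
 * Rebind the channel's MSI to ch->cpu.  Called with interrupts disabled
 * and ch->lock held; drops ch->lock before returning.
 */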
static void set_channel_irq_affinity(struct hpet_event_channel *ch)
{
    struct irq_desc *desc = irq_to_desc(ch->msi.irq);

    ASSERT(!local_irq_is_enabled());
    spin_lock(&desc->lock);
    hpet_msi_mask(desc);
    hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
    hpet_msi_unmask(desc);
    spin_unlock(&desc->lock);

    spin_unlock(&ch->lock);

    /* We may have missed an interrupt due to the temporary masking. */
    if ( ch->event_handler && ch->next_event < NOW() )
        ch->event_handler(ch);
}

static void hpet_attach_channel(unsigned int cpu,
                                struct hpet_event_channel *ch)
{
    ASSERT(!local_irq_is_enabled());
    spin_lock(&ch->lock);

    per_cpu(cpu_bc_channel, cpu) = ch;

    /* try to be the channel owner again while holding the lock */
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    if ( ch->cpu != cpu )
        spin_unlock(&ch->lock);
    else
        set_channel_irq_affinity(ch);
}

static void hpet_detach_channel(unsigned int cpu,
                                struct hpet_event_channel *ch)
{
    spin_lock_irq(&ch->lock);

    ASSERT(ch == per_cpu(cpu_bc_channel, cpu));

    per_cpu(cpu_bc_channel, cpu) = NULL;

    if ( cpu != ch->cpu )
        spin_unlock_irq(&ch->lock);
    else if ( cpumask_empty(ch->cpumask) )
    {
        ch->cpu = -1;
        clear_bit(HPET_EVT_USED_BIT, &ch->flags);
        spin_unlock_irq(&ch->lock);
    }
    else
    {
        ch->cpu = cpumask_first(ch->cpumask);
        set_channel_irq_affinity(ch);
        local_irq_enable();
    }
}

#include <asm/mc146818rtc.h>

void (*__read_mostly pv_rtc_handler)(uint8_t index, uint8_t value);

static void handle_rtc_once(uint8_t index, uint8_t value)
{
    if ( index != RTC_REG_B )
        return;

    /* RTC Reg B contains PIE/AIE/UIE */
    if ( value & (RTC_PIE | RTC_AIE | RTC_UIE ) )
    {
        cpuidle_disable_deep_cstate();
        pv_rtc_handler = NULL;
    }
}

void __init hpet_broadcast_init(void)
{
    u64 hpet_rate = hpet_setup();
    u32 hpet_id, cfg;
    unsigned int i, n;

    if ( hpet_rate == 0 || hpet_broadcast_is_available() )
        return;

    cfg = hpet_read32(HPET_CFG);

    hpet_fsb_cap_lookup();
    if ( num_hpets_used > 0 )
    {
        /* Stop HPET legacy interrupts */
        cfg &= ~HPET_CFG_LEGACY;
        n = num_hpets_used;
    }
    else
    {
        hpet_id = hpet_read32(HPET_ID);
        if ( !(hpet_id & HPET_ID_LEGSUP) )
            return;

        if ( !hpet_events )
            hpet_events = xzalloc(struct hpet_event_channel);
        if ( !hpet_events || !zalloc_cpumask_var(&hpet_events->cpumask) )
            return;
        hpet_events->msi.irq = -1;

        /* Start HPET legacy interrupts */
        cfg |= HPET_CFG_LEGACY;
        n = 1;

        if ( !force_hpet_broadcast )
            pv_rtc_handler = handle_rtc_once;
    }

    hpet_write32(cfg, HPET_CFG);

    for ( i = 0; i < n; i++ )
    {
        if ( i == 0 && (cfg & HPET_CFG_LEGACY) )
        {
            /* set HPET T0 as oneshot */
            cfg = hpet_read32(HPET_Tn_CFG(0));
            cfg &= ~(HPET_TN_LEVEL | HPET_TN_PERIODIC);
            cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
            hpet_write32(cfg, HPET_Tn_CFG(0));
        }

        /*
         * The period is a femtoseconds value. We need to calculate the
         * scaled math multiplication factor for nanosecond to hpet tick
         * conversion.
         */
        hpet_events[i].mult = div_sc((unsigned long)hpet_rate,
                                     1000000000ul, 32);
        hpet_events[i].shift = 32;
        hpet_events[i].next_event = STIME_MAX;
        spin_lock_init(&hpet_events[i].lock);
        wmb();
        hpet_events[i].event_handler = handle_hpet_broadcast;

        hpet_events[i].msi.msi_attrib.maskbit = 1;
        hpet_events[i].msi.msi_attrib.pos = MSI_TYPE_HPET;
    }

    if ( !num_hpets_used )
        hpet_events->flags = HPET_EVT_LEGACY;
}

void hpet_broadcast_resume(void)
{
    u32 cfg;
    unsigned int i, n;

    if ( !hpet_events )
        return;

    hpet_resume(NULL);

    cfg = hpet_read32(HPET_CFG);

    if ( num_hpets_used > 0 )
    {
        /* Stop HPET legacy interrupts */
        cfg &= ~HPET_CFG_LEGACY;
        n = num_hpets_used;
    }
    else if ( hpet_events->flags & HPET_EVT_DISABLE )
        return;
    else
    {
        /* Start HPET legacy interrupts */
        cfg |= HPET_CFG_LEGACY;
        n = 1;
    }

    hpet_write32(cfg, HPET_CFG);

    for ( i = 0; i < n; i++ )
    {
        if ( hpet_events[i].msi.irq >= 0 )
            __hpet_setup_msi_irq(irq_to_desc(hpet_events[i].msi.irq));

        /* set HPET Tn as oneshot */
        cfg = hpet_read32(HPET_Tn_CFG(hpet_events[i].idx));
        cfg &= ~(HPET_TN_LEVEL | HPET_TN_PERIODIC);
        cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
        if ( !(hpet_events[i].flags & HPET_EVT_LEGACY) )
            cfg |= HPET_TN_FSB;
        hpet_write32(cfg, HPET_Tn_CFG(hpet_events[i].idx));

        hpet_events[i].next_event = STIME_MAX;
    }
}

void hpet_disable_legacy_broadcast(void)
{
    u32 cfg;
    unsigned long flags;

    if ( !hpet_events || !(hpet_events->flags & HPET_EVT_LEGACY) )
        return;

    spin_lock_irqsave(&hpet_events->lock, flags);

    hpet_events->flags |= HPET_EVT_DISABLE;

    /* disable HPET T0 */
    cfg = hpet_read32(HPET_Tn_CFG(0));
    cfg &= ~HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_Tn_CFG(0));

    /* Stop HPET legacy interrupts */
    cfg = hpet_read32(HPET_CFG);
    cfg &= ~HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    spin_unlock_irqrestore(&hpet_events->lock, flags);

    smp_send_event_check_mask(&cpu_online_map);
}

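/*
 * Called on the idle path before entering a deep C-state in which the
 * local APIC timer may stop: the CPU joins a broadcast channel, its LAPIC
 * timer is disabled, and the channel is reprogrammed if this CPU's
 * deadline is the nearest.  hpet_broadcast_exit() undoes this on wakeup.
 */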
void hpet_broadcast_enter(void)
{
    unsigned int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
    s_time_t deadline = per_cpu(timer_deadline, cpu);

    if ( deadline == 0 )
        return;

    if ( !ch )
        ch = hpet_get_channel(cpu);

    ASSERT(!local_irq_is_enabled());

    if ( !(ch->flags & HPET_EVT_LEGACY) )
        hpet_attach_channel(cpu, ch);

    /* Disable LAPIC timer interrupts. */
    disable_APIC_timer();
    cpumask_set_cpu(cpu, ch->cpumask);

    spin_lock(&ch->lock);
    /*
     * Reprogram if current cpu expire time is nearer.  deadline is never
     * written by a remote cpu, so the value read earlier is still valid.
     */
    if ( deadline < ch->next_event )
        reprogram_hpet_evt_channel(ch, deadline, NOW(), 1);
    spin_unlock(&ch->lock);
}

void hpet_broadcast_exit(void)
{
    unsigned int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
    s_time_t deadline = per_cpu(timer_deadline, cpu);

    if ( deadline == 0 )
        return;

    if ( !ch )
        ch = hpet_get_channel(cpu);

    /* Reprogram the deadline; trigger timer work now if it has passed. */
    enable_APIC_timer();
    if ( !reprogram_timer(deadline) )
        raise_softirq(TIMER_SOFTIRQ);

    cpumask_clear_cpu(cpu, ch->cpumask);

    if ( !(ch->flags & HPET_EVT_LEGACY) )
        hpet_detach_channel(cpu, ch);
}

int hpet_broadcast_is_available(void)
{
    return ((hpet_events && (hpet_events->flags & HPET_EVT_LEGACY))
            || num_hpets_used > 0);
}

int hpet_legacy_irq_tick(void)
{
    this_cpu(irq_count)--;

    if ( !hpet_events ||
         (hpet_events->flags & (HPET_EVT_DISABLE|HPET_EVT_LEGACY)) !=
         HPET_EVT_LEGACY )
        return 0;
    hpet_events->event_handler(hpet_events);
    return 1;
}

static u32 *hpet_boot_cfg;

u64 __init hpet_setup(void)
{
    static u64 __initdata hpet_rate;
    u32 hpet_id, hpet_period;
    unsigned int last;

    if ( hpet_rate )
        return hpet_rate;

    if ( hpet_address == 0 )
        return 0;

    set_fixmap_nocache(FIX_HPET_BASE, hpet_address);

    hpet_id = hpet_read32(HPET_ID);
    if ( (hpet_id & HPET_ID_REV) == 0 )
    {
        printk("BAD HPET revision id.\n");
        return 0;
    }

    /* Check for sane period (100ps <= period <= 100ns). */
    hpet_period = hpet_read32(HPET_PERIOD);
    if ( (hpet_period > 100000000) || (hpet_period < 100000) )
    {
        printk("BAD HPET period %u.\n", hpet_period);
        return 0;
    }

    last = (hpet_id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
    hpet_boot_cfg = xmalloc_array(u32, 2 + last);
    hpet_resume(hpet_boot_cfg);

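    /*
     * HPET_PERIOD is in femtoseconds.  E.g. for the common 14.31818 MHz
     * HPET, hpet_period = 69841279fs, giving 10^15 / 69841279 ~= 14318180
     * ticks/s below.
     */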
    hpet_rate = 1000000000000000ULL; /* 10^15 */
    (void)do_div(hpet_rate, hpet_period);

    return hpet_rate;
}

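/*
 * (Re)initialise the HPET: stop the main counter, clear the legacy route,
 * disable every channel while sanitising reserved config bits, and restart
 * the counter.  When a boot_cfg buffer is supplied, the prior global and
 * per-channel configuration is saved there for hpet_disable() to restore.
 */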
void hpet_resume(u32 *boot_cfg)
{
    static u32 system_reset_latch;
    u32 hpet_id, cfg;
    unsigned int i, last;

    if ( system_reset_latch == system_reset_counter )
        return;
    system_reset_latch = system_reset_counter;

    cfg = hpet_read32(HPET_CFG);
    if ( boot_cfg )
        *boot_cfg = cfg;
    cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
    if ( cfg )
    {
        printk(XENLOG_WARNING
               "HPET: reserved bits %#x set in global config register\n",
               cfg);
        cfg = 0;
    }
    hpet_write32(cfg, HPET_CFG);

    hpet_id = hpet_read32(HPET_ID);
    last = (hpet_id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
    for ( i = 0; i <= last; ++i )
    {
        cfg = hpet_read32(HPET_Tn_CFG(i));
        if ( boot_cfg )
            boot_cfg[i + 1] = cfg;
        cfg &= ~HPET_TN_ENABLE;
        if ( cfg & HPET_TN_RESERVED )
        {
            printk(XENLOG_WARNING
                   "HPET: reserved bits %#x set in channel %u config register\n",
                   cfg & HPET_TN_RESERVED, i);
            cfg &= ~HPET_TN_RESERVED;
        }
        hpet_write32(cfg, HPET_Tn_CFG(i));
    }

    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_ENABLE;
    hpet_write32(cfg, HPET_CFG);
}

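/*
 * Undo hpet_setup(): restore the boot-time configuration saved by
 * hpet_resume(), re-enabling the main counter only if it was enabled at
 * boot.  Without a saved configuration, just quiesce legacy broadcast.
 */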
void hpet_disable(void)
{
    unsigned int i;
    u32 id;

    if ( !hpet_boot_cfg )
    {
        if ( hpet_broadcast_is_available() )
            hpet_disable_legacy_broadcast();
        return;
    }

    hpet_write32(*hpet_boot_cfg & ~HPET_CFG_ENABLE, HPET_CFG);

    id = hpet_read32(HPET_ID);
    for ( i = 0; i <= ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); ++i )
        hpet_write32(hpet_boot_cfg[i + 1], HPET_Tn_CFG(i));

    if ( *hpet_boot_cfg & HPET_CFG_ENABLE )
        hpet_write32(*hpet_boot_cfg, HPET_CFG);
}