// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2009 Corey Tabaka
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <sys/types.h>

#include <assert.h>
#include <debug.h>
#include <err.h>
#include <inttypes.h>
#include <reg.h>
#include <trace.h>

#include <arch/x86.h>
#include <arch/x86/apic.h>
#include <arch/x86/feature.h>
#include <arch/x86/pvclock.h>
#include <arch/x86/timer_freq.h>
#include <dev/interrupt.h>
#include <fbl/algorithm.h>
#include <kernel/cmdline.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <lib/fixed_point.h>
#include <lk/init.h>
#include <platform.h>
#include <platform/console.h>
#include <platform/pc.h>
#include <platform/pc/acpi.h>
#include <platform/pc/hpet.h>
#include <platform/pc/timer.h>
#include <platform/timer.h>
#include <pow2.h>
#include <zircon/time.h>
#include <zircon/types.h>

#include "platform_p.h"
// Current timer scheme:
// The HPET is used to calibrate the local APIC timers and the TSC.  If the
// HPET is not present, we will fall back to calibrating using the PIT.
//
// For wall-time, we use the following mechanisms, in order of preference:
// 1) TSC: If the CPU advertises an invariant TSC, then we will use the TSC for
// tracking wall time in a tickless manner.
// 2) HPET: If there is an HPET present, we will use its count to track wall
// time in a tickless manner.
// 3) PIT: We will use periodic interrupts to update wall time.
//
// The local APICs are responsible for handling timer callbacks
// sent from the scheduler.
enum clock_source {
    CLOCK_PIT,
    CLOCK_HPET,
    CLOCK_TSC,

    CLOCK_COUNT
};

const char* clock_name[] = {
        [CLOCK_PIT] = "PIT",
        [CLOCK_HPET] = "HPET",
        [CLOCK_TSC] = "TSC",
};
static_assert(fbl::count_of(clock_name) == CLOCK_COUNT, "");

// PIT time accounting info
static struct fp_32_64 us_per_pit;
static volatile uint64_t pit_ticks;
static uint16_t pit_divisor;
static uint32_t ns_per_pit_rounded_up;

// Whether or not we have an Invariant TSC (controls whether we use the PIT or
// not after initialization).  The Invariant TSC is rate-invariant under P-, C-,
// and T-state transitions.
static bool invariant_tsc;
// Whether or not we have a Constant TSC (controls whether we bother calibrating
// the TSC).  Constant TSC predates the Invariant TSC.  The Constant TSC is
// rate-invariant under P-state transitions.
static bool constant_tsc;

static enum clock_source wall_clock;
static enum clock_source calibration_clock;

// APIC timer calibration values
static bool use_tsc_deadline;
static uint32_t apic_ticks_per_ms = 0;
static struct fp_32_64 apic_ticks_per_ns;
static uint8_t apic_divisor = 0;

// TSC timer calibration values
static uint64_t tsc_ticks_per_ms;
static struct fp_32_64 ns_per_tsc;
static struct fp_32_64 tsc_per_ns;
static uint32_t ns_per_tsc_rounded_up;

// HPET calibration values
static struct fp_32_64 ns_per_hpet;
static uint32_t ns_per_hpet_rounded_up;

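// The PIT's input clock runs at 1.193182 MHz (the PC's 14.31818 MHz crystal
// divided by 12).  INTERNAL_FREQ_3X is simply three times that value and is
// used by the divisor math in set_pit_frequency() below.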
#define INTERNAL_FREQ 1193182U
#define INTERNAL_FREQ_3X 3579546U

#define INTERNAL_FREQ_TICKS_PER_MS (INTERNAL_FREQ / 1000)

/* Maximum amount of time that can be programmed on the timer to schedule the
 * next interrupt */
#define MAX_TIMER_INTERVAL ZX_MSEC(55)

#define LOCAL_TRACE 0

zx_time_t current_time(void) {
    zx_time_t time;

    switch (wall_clock) {
    case CLOCK_TSC: {
        uint64_t tsc = rdtsc();
        time = ticks_to_nanos(tsc);
        break;
    }
    case CLOCK_HPET: {
        uint64_t counter = hpet_get_value();
        time = u64_mul_u64_fp32_64(counter, ns_per_hpet);
        break;
    }
    case CLOCK_PIT: {
        time = u64_mul_u64_fp32_64(pit_ticks, us_per_pit) * 1000;
        break;
    }
    default:
        panic("Invalid wall clock source\n");
    }

    return time;
}

// Round up t to a clock tick, so that when the APIC timer fires, the wall time
// will have elapsed.
static zx_time_t discrete_time_roundup(zx_time_t t) {
    zx_duration_t value;
    switch (wall_clock) {
    case CLOCK_TSC: {
        value = ns_per_tsc_rounded_up;
        break;
    }
    case CLOCK_HPET: {
        value = ns_per_hpet_rounded_up;
        break;
    }
    case CLOCK_PIT: {
        value = ns_per_pit_rounded_up;
        break;
    }
    default:
        panic("Invalid wall clock source\n");
    }

    return zx_time_add_duration(t, value);
}

zx_ticks_t ticks_per_second(void) {
    return tsc_ticks_per_ms * 1000;
}

zx_ticks_t current_ticks(void) {
    return rdtsc();
}

zx_time_t ticks_to_nanos(zx_ticks_t ticks) {
    return u64_mul_u64_fp32_64(ticks, ns_per_tsc);
}

// The PIT timer will keep track of wall time if we aren't using the TSC
static interrupt_eoi pit_timer_tick(void* arg) {
    pit_ticks += 1;
    return IRQ_EOI_DEACTIVATE;
}

// The APIC timers will call this when they fire
void platform_handle_apic_timer_tick(void) {
    timer_tick(current_time());
}

static void set_pit_frequency(uint32_t frequency) {
    uint32_t count, remainder;

    /* figure out the correct pit_divisor for the desired frequency */
    if (frequency <= 18) {
        count = 0xffff;
    } else if (frequency >= INTERNAL_FREQ) {
        count = 1;
    } else {
        count = INTERNAL_FREQ_3X / frequency;
        remainder = INTERNAL_FREQ_3X % frequency;

        if (remainder >= INTERNAL_FREQ_3X / 2) {
            count += 1;
        }

        count /= 3;
        remainder = count % 3;

        if (remainder >= 1) {
            count += 1;
        }
    }

    pit_divisor = count & 0xffff;

    /*
     * Microseconds per PIT interrupt, as a 32.32 fixed-point value:
     *   us_per_pit = count / INTERNAL_FREQ seconds
     *              = (1000 * 1000 * 3 * count) / INTERNAL_FREQ_3X microseconds
     */
    fp_32_64_div_32_32(&us_per_pit, 1000 * 1000 * 3 * count, INTERNAL_FREQ_3X);

    // Add 1us to the PIT tick period to deal with rounding
    ns_per_pit_rounded_up = (u32_mul_u64_fp32_64(1, us_per_pit) + 1) * 1000;

    //dprintf(DEBUG, "set_pit_frequency: pit_divisor=%04x\n", pit_divisor);

    /*
     * setup the Programmable Interval Timer
     * timer 0, mode 2, binary counter, LSB followed by MSB
     */
    outp(I8253_CONTROL_REG, 0x34);
    outp(I8253_DATA_REG, static_cast<uint8_t>(pit_divisor));      // LSB
    outp(I8253_DATA_REG, static_cast<uint8_t>(pit_divisor >> 8)); // MSB
}

static inline void pit_calibration_cycle_preamble(uint16_t ms) {
    // Make the PIT run for |ms| milliseconds: write the LSB of the count here;
    // writing the MSB in pit_calibration_cycle() starts the countdown.
    const uint16_t init_pic_count = static_cast<uint16_t>(INTERNAL_FREQ_TICKS_PER_MS * ms);
    // Program the PIT in the interrupt-on-terminal-count configuration;
    // this makes it count down and set the output high when it hits 0.
    outp(I8253_CONTROL_REG, 0x30);
    outp(I8253_DATA_REG, static_cast<uint8_t>(init_pic_count)); // LSB
}

static inline void pit_calibration_cycle(uint16_t ms) {
    // Make the PIT run for ms millis, see comments in the preamble
    const uint16_t init_pic_count = static_cast<uint16_t>(INTERNAL_FREQ_TICKS_PER_MS * ms);
    outp(I8253_DATA_REG, static_cast<uint8_t>(init_pic_count >> 8)); // MSB

    uint8_t status = 0;
    do {
        // Send a read-back command that latches the status of ch0
        outp(I8253_CONTROL_REG, 0xe2);
        status = inp(I8253_DATA_REG);
        // Wait for bit 7 (output) to go high and for bit 6 (null count) to go low
    } while ((status & 0xc0) != 0x80);
}

static inline void pit_calibration_cycle_cleanup(void) {
    // Stop the PIT by starting a mode change but not writing a counter
    outp(I8253_CONTROL_REG, 0x38);
}

static inline void hpet_calibration_cycle_preamble(void) {
    hpet_enable();
}

static inline void hpet_calibration_cycle(uint16_t ms) {
    hpet_wait_ms(ms);
}

static inline void hpet_calibration_cycle_cleanup(void) {
    hpet_disable();
}

static void calibrate_apic_timer(void) {
    ASSERT(arch_ints_disabled());

    const uint64_t apic_freq = x86_lookup_core_crystal_freq();
    if (apic_freq != 0) {
        ASSERT(apic_freq / 1000 <= UINT32_MAX);
        apic_ticks_per_ms = static_cast<uint32_t>(apic_freq / 1000);
        apic_divisor = 1;
        fp_32_64_div_32_32(&apic_ticks_per_ns, apic_ticks_per_ms, 1000 * 1000);
        printf("APIC frequency: %" PRIu32 " ticks/ms\n", apic_ticks_per_ms);
        return;
    }

    printf("Could not find APIC frequency: Calibrating APIC with %s\n",
           clock_name[calibration_clock]);

    apic_divisor = 1;
outer:
    while (apic_divisor != 0) {
        uint32_t best_time[2] = {UINT32_MAX, UINT32_MAX};
        const uint16_t duration_ms[2] = {2, 4};
        for (int trial = 0; trial < 2; ++trial) {
            for (int tries = 0; tries < 3; ++tries) {
                switch (calibration_clock) {
                case CLOCK_HPET:
                    hpet_calibration_cycle_preamble();
                    break;
                case CLOCK_PIT:
                    pit_calibration_cycle_preamble(duration_ms[trial]);
                    break;
                default:
                    PANIC_UNIMPLEMENTED;
                }

                // Setup APIC timer to count down with interrupt masked
                zx_status_t status = apic_timer_set_oneshot(
                    UINT32_MAX,
                    apic_divisor,
                    true);
                ASSERT(status == ZX_OK);

                switch (calibration_clock) {
                case CLOCK_HPET:
                    hpet_calibration_cycle(duration_ms[trial]);
                    break;
                case CLOCK_PIT:
                    pit_calibration_cycle(duration_ms[trial]);
                    break;
                default:
                    PANIC_UNIMPLEMENTED;
                }

                uint32_t apic_ticks = UINT32_MAX - apic_timer_current_count();
                if (apic_ticks < best_time[trial]) {
                    best_time[trial] = apic_ticks;
                }
                LTRACEF("Calibration trial %d found %u ticks/ms\n",
                        tries, apic_ticks);

                switch (calibration_clock) {
                case CLOCK_HPET:
                    hpet_calibration_cycle_cleanup();
                    break;
                case CLOCK_PIT:
                    pit_calibration_cycle_cleanup();
                    break;
                default:
                    PANIC_UNIMPLEMENTED;
                }
            }

            // If the APIC ran out of time every time, try again with a higher
            // divisor
            if (best_time[trial] == UINT32_MAX) {
                apic_divisor = static_cast<uint8_t>(apic_divisor * 2);
                goto outer;
            }
        }
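        // Two trials of different durations were measured above; taking the
        // slope between them cancels any fixed per-measurement overhead
        // (programming the PIT/HPET, starting and stopping the APIC timer).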
        apic_ticks_per_ms = (best_time[1] - best_time[0]) / (duration_ms[1] - duration_ms[0]);
        fp_32_64_div_32_32(&apic_ticks_per_ns, apic_ticks_per_ms, 1000 * 1000);
        break;
    }
    ASSERT(apic_divisor != 0);

    printf("APIC timer calibrated: %" PRIu32 " ticks/ms, divisor %d\n",
           apic_ticks_per_ms, apic_divisor);
}

static uint64_t calibrate_tsc_count(uint16_t duration_ms) {
    uint64_t best_time = UINT64_MAX;

    for (int tries = 0; tries < 3; ++tries) {
        switch (calibration_clock) {
        case CLOCK_HPET:
            hpet_calibration_cycle_preamble();
            break;
        case CLOCK_PIT:
            pit_calibration_cycle_preamble(duration_ms);
            break;
        default:
            PANIC_UNIMPLEMENTED;
        }

        // Use CPUID to serialize the instruction stream
        uint32_t _ignored;
        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);
        uint64_t start = rdtsc();
        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);

        switch (calibration_clock) {
        case CLOCK_HPET:
            hpet_calibration_cycle(duration_ms);
            break;
        case CLOCK_PIT:
            pit_calibration_cycle(duration_ms);
            break;
        default:
            PANIC_UNIMPLEMENTED;
        }

        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);
        zx_ticks_t end = rdtsc();
        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);

        zx_ticks_t tsc_ticks = end - start;
        if (tsc_ticks < best_time) {
            best_time = tsc_ticks;
        }
        LTRACEF("Calibration trial %d found %" PRIu64 " ticks/ms\n",
                tries, tsc_ticks);
        switch (calibration_clock) {
        case CLOCK_HPET:
            hpet_calibration_cycle_cleanup();
            break;
        case CLOCK_PIT:
            pit_calibration_cycle_cleanup();
            break;
        default:
            PANIC_UNIMPLEMENTED;
        }
    }

    return best_time;
}

static void calibrate_tsc(bool has_pvclock) {
    ASSERT(arch_ints_disabled());

    const uint64_t tsc_freq = has_pvclock ? pvclock_get_tsc_freq() : x86_lookup_tsc_freq();
    if (tsc_freq != 0) {
        tsc_ticks_per_ms = tsc_freq / 1000;
        printf("TSC frequency: %" PRIu64 " ticks/ms\n", tsc_ticks_per_ms);
    } else {
        printf("Could not find TSC frequency: Calibrating TSC with %s\n",
               clock_name[calibration_clock]);

        uint32_t duration_ms[2] = {2, 4};
        uint64_t best_time[2] = {
            calibrate_tsc_count(static_cast<uint16_t>(duration_ms[0])),
            calibrate_tsc_count(static_cast<uint16_t>(duration_ms[1]))};

        while (best_time[0] >= best_time[1] && 2 * duration_ms[1] < MAX_TIMER_INTERVAL) {
            duration_ms[0] = duration_ms[1];
            duration_ms[1] *= 2;
            best_time[0] = best_time[1];
            best_time[1] = calibrate_tsc_count(static_cast<uint16_t>(duration_ms[1]));
        }

        ASSERT(best_time[0] < best_time[1]);

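        // As with the APIC calibration, use the slope between the two trial
        // durations so that any fixed per-measurement overhead cancels out.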
        tsc_ticks_per_ms = (best_time[1] - best_time[0]) / (duration_ms[1] - duration_ms[0]);

        printf("TSC calibrated: %" PRIu64 " ticks/ms\n", tsc_ticks_per_ms);
    }

    ASSERT(tsc_ticks_per_ms <= UINT32_MAX);
    fp_32_64_div_32_32(&ns_per_tsc, 1000 * 1000, static_cast<uint32_t>(tsc_ticks_per_ms));
    fp_32_64_div_32_32(&tsc_per_ns, static_cast<uint32_t>(tsc_ticks_per_ms), 1000 * 1000);
    // Add 1ns to conservatively deal with rounding
    ns_per_tsc_rounded_up = u32_mul_u64_fp32_64(1, ns_per_tsc) + 1;

    LTRACEF("ns_per_tsc: %08x.%08x%08x\n", ns_per_tsc.l0, ns_per_tsc.l32, ns_per_tsc.l64);
}

static void pc_init_timer(uint level) {
    const struct x86_model_info* cpu_model = x86_get_model();

    constant_tsc = false;
    if (x86_vendor == X86_VENDOR_INTEL) {
        /* This condition taken from Intel 3B 17.15 (Time-Stamp Counter).  This
         * is the negation of the non-Constant TSC section, since the Constant
         * TSC section is incomplete (the behavior is architectural going
         * forward, and modern CPUs are not on the list). */
        constant_tsc = !((cpu_model->family == 0x6 && cpu_model->model == 0x9) ||
                         (cpu_model->family == 0x6 && cpu_model->model == 0xd) ||
                         (cpu_model->family == 0xf && cpu_model->model < 0x3));
    }
    invariant_tsc = x86_feature_test(X86_FEATURE_INVAR_TSC);

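    // When running as a guest with a paravirtual clock, defer to the
    // hypervisor's report of TSC stability rather than the CPUID bit.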
    bool has_pvclock = pvclock_is_present();
    if (has_pvclock) {
        zx_status_t status = pvclock_init();
        if (status == ZX_OK) {
            invariant_tsc = pvclock_is_stable();
        } else {
            has_pvclock = false;
        }
    }

    bool has_hpet = hpet_is_present();
    if (has_hpet) {
        calibration_clock = CLOCK_HPET;
        const uint64_t hpet_ms_rate = hpet_ticks_per_ms();
        ASSERT(hpet_ms_rate <= UINT32_MAX);
        printf("HPET frequency: %" PRIu64 " ticks/ms\n", hpet_ms_rate);
        fp_32_64_div_32_32(&ns_per_hpet, 1000 * 1000, static_cast<uint32_t>(hpet_ms_rate));
        // Add 1ns to conservatively deal with rounding
        ns_per_hpet_rounded_up = u32_mul_u64_fp32_64(1, ns_per_hpet) + 1;
    } else {
        calibration_clock = CLOCK_PIT;
    }

    const char* force_wallclock = cmdline_get("kernel.wallclock");
    bool use_invariant_tsc = invariant_tsc && (!force_wallclock || !strcmp(force_wallclock, "tsc"));

    use_tsc_deadline = use_invariant_tsc &&
                       x86_feature_test(X86_FEATURE_TSC_DEADLINE);
    if (!use_tsc_deadline) {
        calibrate_apic_timer();
    }

    if (use_invariant_tsc) {
        calibrate_tsc(has_pvclock);

        // Program PIT in the software strobe configuration, but do not load
        // the count.  This will pause the PIT.
        outp(I8253_CONTROL_REG, 0x38);
        wall_clock = CLOCK_TSC;
    } else {
        if (constant_tsc || invariant_tsc) {
            // Calibrate the TSC even though it's not as good as we want, so
            // folks can still use it for cheap timing.
            calibrate_tsc(has_pvclock);
        }

        if (has_hpet && (!force_wallclock || !strcmp(force_wallclock, "hpet"))) {
            wall_clock = CLOCK_HPET;
            hpet_set_value(0);
            hpet_enable();
        } else {
            if (force_wallclock && strcmp(force_wallclock, "pit")) {
                panic("Could not satisfy kernel.wallclock choice\n");
            }

            wall_clock = CLOCK_PIT;

            set_pit_frequency(1000); // ~1ms granularity

            uint32_t irq = apic_io_isa_to_global(ISA_IRQ_PIT);
            zx_status_t status = register_int_handler(irq, &pit_timer_tick, NULL);
            DEBUG_ASSERT(status == ZX_OK);
            unmask_interrupt(irq);
        }
    }

    printf("timer features: constant_tsc %d invariant_tsc %d tsc_deadline %d\n",
           constant_tsc, invariant_tsc, use_tsc_deadline);
    printf("Using %s as wallclock\n", clock_name[wall_clock]);
}
LK_INIT_HOOK(timer, &pc_init_timer, LK_INIT_LEVEL_VM + 3);

zx_status_t platform_set_oneshot_timer(zx_time_t deadline) {
    DEBUG_ASSERT(arch_ints_disabled());

    if (deadline < 0) {
        deadline = 0;
    }
    deadline = discrete_time_roundup(deadline);
    DEBUG_ASSERT(deadline > 0);

    if (use_tsc_deadline) {
        // Check if the deadline would overflow the TSC.
        const uint64_t tsc_ticks_per_ns = tsc_ticks_per_ms / ZX_MSEC(1);
        if (UINT64_MAX / deadline < tsc_ticks_per_ns) {
            return ZX_ERR_INVALID_ARGS;
        }

        // We rounded up to the tick after above.
        const uint64_t tsc_deadline = u64_mul_u64_fp32_64(deadline, tsc_per_ns);
        LTRACEF("Scheduling oneshot timer: %" PRIu64 " deadline\n", tsc_deadline);
        apic_timer_set_tsc_deadline(tsc_deadline, false /* unmasked */);
        return ZX_OK;
    }

    const zx_time_t now = current_time();
    if (now >= deadline) {
        // Deadline has already passed. We still need to schedule a timer so that
        // the interrupt fires.
        LTRACEF("Scheduling oneshot timer for min duration\n");
        return apic_timer_set_oneshot(1, 1, false /* unmasked */);
    }
    const zx_duration_t interval = zx_time_sub_time(deadline, now);
    DEBUG_ASSERT(interval > 0);

    uint64_t apic_ticks_needed = u64_mul_u64_fp32_64(interval, apic_ticks_per_ns);
    if (apic_ticks_needed == 0) {
        apic_ticks_needed = 1;
    }

    // Find the shift needed for this timeout, since count is 32-bit.
    const uint highest_set_bit = log2_ulong_floor(apic_ticks_needed);
    uint8_t extra_shift = (highest_set_bit <= 31) ? 0 : static_cast<uint8_t>(highest_set_bit - 31);
    if (extra_shift > 8) {
        extra_shift = 8;
    }

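    // The local APIC timer's divide configuration register only supports
    // power-of-two divisors from 1 to 128.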
    uint32_t divisor = apic_divisor << extra_shift;
    uint32_t count;
    // If the divisor is too large, we're at our maximum timeout.  Saturate the
    // timer.  It'll fire earlier than requested, but the scheduler will notice
    // and ask us to set the timer up again.
    if (divisor <= 128) {
        count = (uint32_t)(apic_ticks_needed >> extra_shift);
        DEBUG_ASSERT((apic_ticks_needed >> extra_shift) <= UINT32_MAX);
    } else {
        divisor = 128;
        count = UINT32_MAX;
    }

    // Make sure we're not underflowing
    if (count == 0) {
        DEBUG_ASSERT(divisor == 1);
        count = 1;
    }

    LTRACEF("Scheduling oneshot timer: %u count, %u div\n", count, divisor);
    return apic_timer_set_oneshot(count, static_cast<uint8_t>(divisor), false /* unmasked */);
}

void platform_stop_timer(void) {
    /* Enable interrupt mode that will stop the decreasing counter of the PIT */
    //outp(I8253_CONTROL_REG, 0x30);
    apic_timer_stop();
}

void platform_shutdown_timer(void) {
    DEBUG_ASSERT(arch_ints_disabled());

    // TODO(maniscalco): What should we do here?  Anything?
}

static uint64_t saved_hpet_val;
void pc_prep_suspend_timer(void) {
    if (hpet_is_present()) {
        saved_hpet_val = hpet_get_value();
    }
}

void pc_resume_timer(void) {
    switch (wall_clock) {
    case CLOCK_HPET:
        hpet_set_value(saved_hpet_val);
        hpet_enable();
        break;
    case CLOCK_PIT: {
        set_pit_frequency(1000); // ~1ms granularity

        uint32_t irq = apic_io_isa_to_global(ISA_IRQ_PIT);
        unmask_interrupt(irq);
        break;
    }
    default:
        break;
    }
}