// Copyright 2018 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include "pvclock_priv.h"
#include <arch/hypervisor.h>
#include <arch/x86/pvclock.h>
#include <bits.h>
#include <fbl/atomic.h>
#include <fbl/auto_lock.h>
#include <fbl/mutex.h>
#include <hypervisor/guest_physical_address_space.h>
#include <platform.h>
#include <string.h>
#include <vm/physmap.h>
#include <zircon/types.h>

namespace {

void calculate_scale_factor(uint64_t tsc_freq, uint32_t* mul, int8_t* shift) {
    // Guests convert TSC ticks to nanoseconds using this formula:
    //   ns = #TSCticks * mul * 2^(shift - 32).
    // mul * 2^(shift - 32) is a fractional number used as the scale factor in the conversion.
    // It's very similar to how floating-point numbers are usually represented in memory.
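    // For example, with a hypothetical tsc_freq of 2 GHz this produces mul = 0x80000000 and
    // shift = 0, i.e. a scale factor of 2^31 * 2^-32 = 0.5, so each TSC tick counts as 0.5 ns.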
    static const uint64_t target_freq = 1000000000ul;

    DEBUG_ASSERT(tsc_freq != 0);

    // We maintain the following invariant: 2^(exponent - 32) * x / y ~ target_freq / tsc_freq.
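    // Halving y or doubling x doubles x / y, so exponent is decremented to compensate; halving
    // x does the opposite.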
    int8_t exponent = 32;
    uint64_t x = target_freq;
    uint64_t y = tsc_freq;

    // First make y small enough so that (y << 31) doesn't overflow in the next step. Adjust
    // exponent along the way to maintain the invariant.
    while (y >= (1ull << 31)) {
        y >>= 1;
        exponent--;
    }

    // We scale x/y by multiplying x by 2 until it gets big enough or we run out of bits.
    while (x < (y << 31) && BIT(x, 63) == 0) {
        x <<= 1;
        exponent--;
    }

    // Though it's very unlikely, let's also consider the situation where x/y is still too small.
    while (x < y) {
        y >>= 1;
        exponent--;
    }

    // Finally make sure that x/y fits within 32 bits.
    while (x >= (y << 32)) {
        x >>= 1;
        exponent++;
    }

    *shift = static_cast<int8_t>(exponent);
    *mul = static_cast<uint32_t>(x / y);
}

} // namespace

extern fbl::atomic<int64_t> utc_offset;

zx_status_t pvclock_update_boot_time(hypervisor::GuestPhysicalAddressSpace* gpas,
                                     zx_vaddr_t guest_paddr) {
    // KVM doesn't provide any protection against concurrent wall time requests from different
    // VCPUs, yet its documentation doesn't say that such requests cannot happen, and it does
    // properly protect the per-VCPU system time. Therefore, to be on the safe side, we use one
    // global mutex for protection.
    static fbl::Mutex mutex;
    static uint32_t version __TA_GUARDED(mutex);

    hypervisor::GuestPtr guest_ptr;
    zx_status_t status = gpas->CreateGuestPtr(guest_paddr, sizeof(pvclock_boot_time),
                                              "pvclock-boot-time-guest-mapping", &guest_ptr);
    if (status != ZX_OK) {
        return status;
    }
    auto boot_time = guest_ptr.as<pvclock_boot_time>();
    ZX_DEBUG_ASSERT(boot_time != nullptr);
    memset(boot_time, 0, sizeof(*boot_time));

    fbl::AutoLock lock(&mutex);
    zx_time_t time = utc_offset.load();
    // See the comment for the pvclock_boot_time structure in arch/x86/pvclock.h.
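    // The version field acts as a seqlock: it is bumped to an odd value before the fields are
    // written and back to an even value afterwards, so a guest that reads an odd or changing
    // version knows to retry.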
    atomic_store_relaxed_u32(&boot_time->version, version + 1);
    atomic_fence();
    boot_time->seconds = static_cast<uint32_t>(time / ZX_SEC(1));
    boot_time->nseconds = static_cast<uint32_t>(time % ZX_SEC(1));
    atomic_fence();
    atomic_store_relaxed_u32(&boot_time->version, version + 2);
    version += 2;
    return ZX_OK;
}

zx_status_t pvclock_reset_clock(PvClockState* pvclock, hypervisor::GuestPhysicalAddressSpace* gpas,
                                zx_vaddr_t guest_paddr) {
    zx_status_t status =
        gpas->CreateGuestPtr(guest_paddr, sizeof(pvclock_system_time),
                             "pvclock-system-time-guest-mapping", &pvclock->guest_ptr);
    if (status != ZX_OK) {
        return status;
    }
    pvclock->system_time = pvclock->guest_ptr.as<pvclock_system_time>();
    ZX_DEBUG_ASSERT(pvclock->system_time != nullptr);
    memset(pvclock->system_time, 0, sizeof(*pvclock->system_time));
    return ZX_OK;
}

void pvclock_update_system_time(PvClockState* pvclock,
                                hypervisor::GuestPhysicalAddressSpace* gpas) {
    if (!pvclock->system_time) {
        return;
    }

    uint32_t tsc_mul;
    int8_t tsc_shift;
    calculate_scale_factor(ticks_per_second(), &tsc_mul, &tsc_shift);

    // See the comment for the pvclock_boot_time structure in arch/x86/pvclock.h.
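    // A pvclock guest typically recovers nanoseconds as
    //   system_time + (((rdtsc() - tsc_timestamp) << tsc_shift) * tsc_mul) >> 32,
    // shifting right instead when tsc_shift is negative; the exact algorithm lives on the
    // guest side.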
    pvclock_system_time* system_time = pvclock->system_time;
    atomic_store_relaxed_u32(&system_time->version, pvclock->version + 1);
    atomic_fence();
    system_time->tsc_mul = tsc_mul;
    system_time->tsc_shift = tsc_shift;
    system_time->system_time = current_time();
    system_time->tsc_timestamp = rdtsc();
    system_time->flags = pvclock->is_stable ? kKvmSystemTimeStable : 0;
    atomic_fence();
    atomic_store_relaxed_u32(&system_time->version, pvclock->version + 2);
    pvclock->version += 2;
}

void pvclock_stop_clock(PvClockState* pvclock) {
    pvclock->system_time = nullptr;
    pvclock->guest_ptr.reset();
}

zx_status_t pvclock_populate_offset(hypervisor::GuestPhysicalAddressSpace* gpas,
                                    zx_vaddr_t guest_paddr) {
    hypervisor::GuestPtr guest_ptr;
    zx_status_t status =
        gpas->CreateGuestPtr(guest_paddr, sizeof(PvClockOffset),
                             "pvclock-offset-guest-mapping", &guest_ptr);
    if (status != ZX_OK) {
        return status;
    }
    auto offset = guest_ptr.as<PvClockOffset>();
    ZX_DEBUG_ASSERT(offset != nullptr);
    memset(offset, 0, sizeof(*offset));
    zx_time_t time = utc_offset.load() + current_time();
    uint64_t tsc = rdtsc();
    offset->sec = time / ZX_SEC(1);
    offset->nsec = time % ZX_SEC(1);
    offset->tsc = tsc;
    return ZX_OK;
}