1 /*
2 * hvm/pmtimer.c: emulation of the ACPI PM timer
3 *
4 * Copyright (c) 2007, XenSource inc.
5 * Copyright (c) 2006, Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <asm/hvm/vpt.h>
21 #include <asm/hvm/io.h>
22 #include <asm/hvm/support.h>
23 #include <asm/acpi.h> /* for hvm_acpi_power_button prototype */
24 #include <public/hvm/params.h>
25
26 /* Slightly more readable port I/O addresses for the registers we intercept */
27 #define PM1a_STS_ADDR_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0)
28 #define PM1a_EN_ADDR_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 2)
29 #define TMR_VAL_ADDR_V0 (ACPI_PM_TMR_BLK_ADDRESS_V0)
30 #define PM1a_STS_ADDR_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1)
31 #define PM1a_EN_ADDR_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 2)
32 #define TMR_VAL_ADDR_V1 (ACPI_PM_TMR_BLK_ADDRESS_V1)
33
34 /* The interesting bits of the PM1a_STS register */
35 #define TMR_STS (1 << 0)
36 #define GBL_STS (1 << 5)
37 #define PWRBTN_STS (1 << 8)
38 #define SLPBTN_STS (1 << 9)
39
40 /* The same in PM1a_EN */
41 #define TMR_EN (1 << 0)
42 #define GBL_EN (1 << 5)
43 #define PWRBTN_EN (1 << 8)
44 #define SLPBTN_EN (1 << 9)
45
46 /* Mask of bits in PM1a_STS that can generate an SCI. */
47 #define SCI_MASK (TMR_STS|PWRBTN_STS|SLPBTN_STS|GBL_STS)
48
49 /* SCI IRQ number (must match SCI_INT number in ACPI FADT in hvmloader) */
50 #define SCI_IRQ 9
51
52 /* We provide a 32-bit counter (must match the TMR_VAL_EXT bit in the FADT) */
53 #define TMR_VAL_MASK (0xffffffff)
54 #define TMR_VAL_MSB (0x80000000)
55
56 /* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */
pmt_update_sci(PMTState * s)57 static void pmt_update_sci(PMTState *s)
58 {
59 struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm_domain.acpi;
60
61 ASSERT(spin_is_locked(&s->lock));
62
63 if ( acpi->pm1a_en & acpi->pm1a_sts & SCI_MASK )
64 hvm_isa_irq_assert(s->vcpu->domain, SCI_IRQ, NULL);
65 else
66 hvm_isa_irq_deassert(s->vcpu->domain, SCI_IRQ);
67 }
68
hvm_acpi_power_button(struct domain * d)69 void hvm_acpi_power_button(struct domain *d)
70 {
71 PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
72
73 if ( !has_vpm(d) )
74 return;
75
76 spin_lock(&s->lock);
77 d->arch.hvm_domain.acpi.pm1a_sts |= PWRBTN_STS;
78 pmt_update_sci(s);
79 spin_unlock(&s->lock);
80 }
81
hvm_acpi_sleep_button(struct domain * d)82 void hvm_acpi_sleep_button(struct domain *d)
83 {
84 PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
85
86 if ( !has_vpm(d) )
87 return;
88
89 spin_lock(&s->lock);
90 d->arch.hvm_domain.acpi.pm1a_sts |= PWRBTN_STS;
91 pmt_update_sci(s);
92 spin_unlock(&s->lock);
93 }
94
/* Set the correct value in the timer, accounting for time elapsed
 * since the last time we did that. */
static void pmt_update_time(PMTState *s)
{
    uint64_t curr_gtime, tmp;
    struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm_domain.acpi;
    uint32_t tmr_val = acpi->tmr_val, msb = tmr_val & TMR_VAL_MSB;

    ASSERT(spin_is_locked(&s->lock));

    /* Update the timer */
    curr_gtime = hvm_get_guest_time(s->vcpu);
    /*
     * s->scale is a 32.32 fixed-point count of PM-timer ticks per unit of
     * guest time (see pmtimer_init), so the top 32 bits of 'tmp' are whole
     * ticks and the bottom 32 bits are the fractional remainder, which is
     * carried forward in s->not_accounted so no precision is lost between
     * successive updates.
     */
    tmp = ((curr_gtime - s->last_gtime) * s->scale) + s->not_accounted;
    s->not_accounted = (uint32_t)tmp;
    tmr_val += tmp >> 32;
    tmr_val &= TMR_VAL_MASK;
    s->last_gtime = curr_gtime;

    /* Update timer value atomically wrt lock-free reads in handle_pmt_io(). */
    write_atomic(&acpi->tmr_val, tmr_val);

    /* If the counter's MSB has changed, set the status bit */
    if ( (tmr_val & TMR_VAL_MSB) != msb )
    {
        acpi->pm1a_sts |= TMR_STS;
        pmt_update_sci(s);
    }
}
123
124 /* This function should be called soon after each time the MSB of the
125 * pmtimer register rolls over, to make sure we update the status
126 * registers and SCI at least once per rollover */
pmt_timer_callback(void * opaque)127 static void pmt_timer_callback(void *opaque)
128 {
129 PMTState *s = opaque;
130 uint32_t pmt_cycles_until_flip;
131 uint64_t time_until_flip;
132
133 spin_lock(&s->lock);
134
135 /* Recalculate the timer and make sure we get an SCI if we need one */
136 pmt_update_time(s);
137
138 /* How close are we to the next MSB flip? */
139 pmt_cycles_until_flip = TMR_VAL_MSB -
140 (s->vcpu->domain->arch.hvm_domain.acpi.tmr_val & (TMR_VAL_MSB - 1));
141
142 /* Overall time between MSB flips */
143 time_until_flip = (1000000000ULL << 23) / FREQUENCE_PMTIMER;
144
145 /* Reduced appropriately */
146 time_until_flip = (time_until_flip * pmt_cycles_until_flip) >> 23;
147
148 /* Wake up again near the next bit-flip */
149 set_timer(&s->timer, NOW() + time_until_flip + MILLISECS(1));
150
151 spin_unlock(&s->lock);
152 }
153
/* Handle port I/O to the PM1a_STS and PM1a_EN registers */
static int handle_evt_io(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct vcpu *v = current;
    struct hvm_hw_acpi *acpi = &v->domain->arch.hvm_domain.acpi;
    PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
    uint32_t addr, data, byte;
    int i;

    /*
     * Convert the port number into a byte offset within the 4-byte PM1a
     * event block (STS at offsets 0-1, EN at offsets 2-3); the block's
     * base address depends on which ioport layout (V0 or V1) the domain
     * currently uses.
     */
    addr = port -
        ((v->domain->arch.hvm_domain.params[
            HVM_PARAM_ACPI_IOPORTS_LOCATION] == 0) ?
         PM1a_STS_ADDR_V0 : PM1a_STS_ADDR_V1);

    spin_lock(&s->lock);

    if ( dir == IOREQ_WRITE )
    {
        /* Handle this I/O one byte at a time */
        for ( i = bytes, data = *val;
              i > 0;
              i--, addr++, data >>= 8 )
        {
            byte = data & 0xff;
            switch ( addr )
            {
                /* PM1a_STS register bits are write-to-clear */
            case 0 /* PM1a_STS_ADDR */:
                acpi->pm1a_sts &= ~byte;
                break;
            case 1 /* PM1a_STS_ADDR + 1 */:
                acpi->pm1a_sts &= ~(byte << 8);
                break;
            case 2 /* PM1a_EN_ADDR */:
                acpi->pm1a_en = (acpi->pm1a_en & 0xff00) | byte;
                break;
            case 3 /* PM1a_EN_ADDR + 1 */:
                acpi->pm1a_en = (acpi->pm1a_en & 0xff) | (byte << 8);
                break;
            default:
                gdprintk(XENLOG_WARNING,
                         "Bad ACPI PM register write: %x bytes (%x) at %x\n",
                         bytes, *val, port);
            }
        }
        /* Fix up the SCI state to match the new register state */
        pmt_update_sci(s);
    }
    else /* p->dir == IOREQ_READ */
    {
        /* Reads see STS in the low 16 bits and EN in the high 16. */
        data = acpi->pm1a_sts | ((uint32_t)acpi->pm1a_en << 16);
        data >>= 8 * addr;
        if ( bytes == 1 ) data &= 0xff;
        else if ( bytes == 2 ) data &= 0xffff;
        *val = data;
    }

    spin_unlock(&s->lock);

    return X86EMUL_OKAY;
}
216
217
/* Handle port I/O to the TMR_VAL register */
static int handle_pmt_io(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct vcpu *v = current;
    struct hvm_hw_acpi *acpi = &v->domain->arch.hvm_domain.acpi;
    PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;

    /* TMR_VAL is a read-only 32-bit counter; reject anything else. */
    if ( bytes != 4 || dir != IOREQ_READ )
    {
        gdprintk(XENLOG_WARNING, "HVM_PMT bad access\n");
        *val = ~0;
    }
    else if ( spin_trylock(&s->lock) )
    {
        /* We hold the lock: update timer value and return it. */
        pmt_update_time(s);
        *val = acpi->tmr_val;
        spin_unlock(&s->lock);
    }
    else
    {
        /*
         * Someone else is updating the timer: rather than do the work
         * again ourselves, wait for them to finish and then steal their
         * updated value with a lock-free atomic read.
         */
        spin_barrier(&s->lock);
        *val = read_atomic(&acpi->tmr_val);
    }

    return X86EMUL_OKAY;
}
251
/* Save the ACPI PM register state into the HVM save record for 'd'. */
static int acpi_save(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
    uint32_t x, msb = acpi->tmr_val & TMR_VAL_MSB;
    int rc;

    if ( !has_vpm(d) )
        return 0;

    spin_lock(&s->lock);

    /*
     * Update the counter to the guest's current time.  Make sure it only
     * goes forwards.
     */
    x = (((s->vcpu->arch.hvm_vcpu.guest_time ?: hvm_get_guest_time(s->vcpu)) -
          s->last_gtime) * s->scale) >> 32;
    /* Ignore deltas with the top bit set — those would move time backwards. */
    if ( x < 1UL<<31 )
        acpi->tmr_val += x;
    if ( (acpi->tmr_val & TMR_VAL_MSB) != msb )
        acpi->pm1a_sts |= TMR_STS;
    /* No point in setting the SCI here because we'll already have saved the
     * IRQ and *PIC state; we'll fix it up when we restore the domain */
    rc = hvm_save_entry(PMTIMER, 0, h, acpi);

    spin_unlock(&s->lock);

    return rc;
}
282
/* Restore the ACPI PM register state for 'd' from the HVM save record. */
static int acpi_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
    int rc = 0;

    if ( !has_vpm(d) )
        return -ENODEV;

    spin_lock(&s->lock);

    /* Reload the registers */
    if ( hvm_load_entry(PMTIMER, h, acpi) )
        rc = -EINVAL;
    else
    {
        /* Start counting forward from the current guest time. */
        s->last_gtime = hvm_get_guest_time(s->vcpu);
        s->not_accounted = 0;

        /* Re-derive the SCI line from the restored registers */
        pmt_update_sci(s);
    }

    spin_unlock(&s->lock);

    return rc;
}
311
/* Hook the ACPI PM state into the HVM save/restore machinery.
 * NOTE(review): presumably '1' is the record/instance count and
 * HVMSR_PER_DOM marks this as per-domain state — confirm against the
 * HVM_REGISTER_SAVE_RESTORE definition. */
HVM_REGISTER_SAVE_RESTORE(PMTIMER, acpi_save, acpi_load,
                          1, HVMSR_PER_DOM);
314
pmtimer_change_ioport(struct domain * d,unsigned int version)315 int pmtimer_change_ioport(struct domain *d, unsigned int version)
316 {
317 unsigned int old_version;
318
319 if ( !has_vpm(d) )
320 return -ENODEV;
321
322 /* Check that version is changing. */
323 old_version = d->arch.hvm_domain.params[HVM_PARAM_ACPI_IOPORTS_LOCATION];
324 if ( version == old_version )
325 return 0;
326
327 /* Only allow changes between versions 0 and 1. */
328 if ( (version ^ old_version) != 1 )
329 return -EINVAL;
330
331 if ( version == 1 )
332 {
333 /* Moving from version 0 to version 1. */
334 relocate_portio_handler(d, TMR_VAL_ADDR_V0, TMR_VAL_ADDR_V1, 4);
335 relocate_portio_handler(d, PM1a_STS_ADDR_V0, PM1a_STS_ADDR_V1, 4);
336 }
337 else
338 {
339 /* Moving from version 1 to version 0. */
340 relocate_portio_handler(d, TMR_VAL_ADDR_V1, TMR_VAL_ADDR_V0, 4);
341 relocate_portio_handler(d, PM1a_STS_ADDR_V1, PM1a_STS_ADDR_V0, 4);
342 }
343
344 return 0;
345 }
346
pmtimer_init(struct vcpu * v)347 void pmtimer_init(struct vcpu *v)
348 {
349 PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
350
351 if ( !has_vpm(v->domain) )
352 return;
353
354 spin_lock_init(&s->lock);
355
356 s->scale = ((uint64_t)FREQUENCE_PMTIMER << 32) / SYSTEM_TIME_HZ;
357 s->not_accounted = 0;
358 s->vcpu = v;
359
360 /* Intercept port I/O (need two handlers because PM1a_CNT is between
361 * PM1a_EN and TMR_VAL and is handled by qemu) */
362 register_portio_handler(v->domain, TMR_VAL_ADDR_V0, 4, handle_pmt_io);
363 register_portio_handler(v->domain, PM1a_STS_ADDR_V0, 4, handle_evt_io);
364
365 /* Set up callback to fire SCIs when the MSB of TMR_VAL changes */
366 init_timer(&s->timer, pmt_timer_callback, s, v->processor);
367 pmt_timer_callback(s);
368 }
369
370
pmtimer_deinit(struct domain * d)371 void pmtimer_deinit(struct domain *d)
372 {
373 PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
374
375 if ( !has_vpm(d) )
376 return;
377
378 kill_timer(&s->timer);
379 }
380
pmtimer_reset(struct domain * d)381 void pmtimer_reset(struct domain *d)
382 {
383 if ( !has_vpm(d) )
384 return;
385
386 /* Reset the counter. */
387 d->arch.hvm_domain.acpi.tmr_val = 0;
388 }
389
390 /*
391 * Local variables:
392 * mode: C
393 * c-file-style: "BSD"
394 * c-basic-offset: 4
395 * tab-width: 4
396 * indent-tabs-mode: nil
397 * End:
398 */
399