/*
 * i8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2005 Intel Corporation
 * Copyright (c) 2006 Keir Fraser, XenSource Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <xen/types.h>
#include <xen/event.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/trace.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>

#define vpic_domain(v) (container_of((v), struct domain, \
                        arch.hvm_domain.vpic[!(v)->is_master]))
#define __vpic_lock(v) &container_of((v), struct hvm_domain, \
                                        vpic[!(v)->is_master])->irq_lock
#define vpic_lock(v)   spin_lock(__vpic_lock(v))
#define vpic_unlock(v) spin_unlock(__vpic_lock(v))
#define vpic_is_locked(v) spin_is_locked(__vpic_lock(v))
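/*
 * ELCR writable bits: on the master, IRQs 0-2 (timer, keyboard, cascade)
 * are always edge-triggered (mask 0xf8); on the slave, IRQ8 (RTC) and
 * IRQ13 (FPU error) are always edge-triggered (mask 0xde).
 */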
#define vpic_elcr_mask(v) ((v)->is_master ? (uint8_t)0xf8 : (uint8_t)0xde)

/* Return the highest priority found in mask. Return 8 if none. */
#define VPIC_PRIO_NONE 8
static int vpic_get_priority(struct hvm_hw_vpic *vpic, uint8_t mask)
{
    int prio;

    ASSERT(vpic_is_locked(vpic));

    if ( mask == 0 )
        return VPIC_PRIO_NONE;

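    /*
     * priority_add rotates the priority order: IR line priority_add has the
     * highest priority and the line just before it (mod 8) the lowest, so
     * rotating the mask right by priority_add and scanning for the lowest
     * set bit yields the priority of the highest-priority pending line.
     */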
    /* prio = ffs(mask ROR vpic->priority_add); */
    asm ( "ror %%cl,%b1 ; rep; bsf %1,%0"
          : "=r" (prio) : "q" ((uint32_t)mask), "c" (vpic->priority_add) );
    return prio;
}

/* Return the PIC's highest priority pending interrupt. Return -1 if none. */
static int vpic_get_highest_priority_irq(struct hvm_hw_vpic *vpic)
{
    int cur_priority, priority, irq;
    uint8_t mask;

    ASSERT(vpic_is_locked(vpic));

    mask = vpic->irr & ~vpic->imr;
    priority = vpic_get_priority(vpic, mask);
    if ( priority == VPIC_PRIO_NONE )
        return -1;

    irq = (priority + vpic->priority_add) & 7;

    /*
     * Compute current priority. If special fully nested mode on the master,
     * the IRQ coming from the slave is not taken into account for the
     * priority computation. In special mask mode, masked interrupts do not
     * block lower-priority interrupts even if their IS bit is set.
     */
    mask = vpic->isr;
    if ( vpic->special_fully_nested_mode && vpic->is_master && (irq == 2) )
        mask &= ~(1 << 2);
    if ( vpic->special_mask_mode )
        mask &= ~vpic->imr;
    cur_priority = vpic_get_priority(vpic, mask);

    /* If a higher priority is found then an irq should be generated. */
    return (priority < cur_priority) ? irq : -1;
}

static void vpic_update_int_output(struct hvm_hw_vpic *vpic)
{
    int irq;

    ASSERT(vpic_is_locked(vpic));

    irq = vpic_get_highest_priority_irq(vpic);
    TRACE_3D(TRC_HVM_EMUL_PIC_INT_OUTPUT, vpic->int_output, vpic->is_master,
             irq);
    if ( vpic->int_output == (irq >= 0) )
        return;

    /* INT line transition L->H or H->L. */
    vpic->int_output = !vpic->int_output;

    if ( vpic->int_output )
    {
        if ( vpic->is_master )
        {
            /* Master INT line is connected in Virtual Wire Mode. */
            struct vcpu *v = vpic_domain(vpic)->arch.hvm_domain.i8259_target;
            if ( v != NULL )
            {
                TRACE_1D(TRC_HVM_EMUL_PIC_KICK, irq);
                vcpu_kick(v);
            }
        }
        else
        {
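            /*
             * The master PIC sits immediately before the slave in the
             * per-domain vpic[] array, hence the pointer decrement below.
             */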
            /* Assert slave line in master PIC. */
            (--vpic)->irr |= 1 << 2;
            vpic_update_int_output(vpic);
        }
    }
    else if ( !vpic->is_master )
    {
        /* Clear slave line in master PIC. */
        (--vpic)->irr &= ~(1 << 2);
        vpic_update_int_output(vpic);
    }
}

static void __vpic_intack(struct hvm_hw_vpic *vpic, int irq)
{
    uint8_t mask = 1 << irq;

    ASSERT(vpic_is_locked(vpic));

    TRACE_2D(TRC_HVM_EMUL_PIC_INTACK, vpic->is_master, irq);
    /* Edge-triggered: clear the IRR (forget the edge). */
    if ( !(vpic->elcr & mask) )
        vpic->irr &= ~mask;

    if ( !vpic->auto_eoi )
        vpic->isr |= mask;
    else if ( vpic->rotate_on_auto_eoi )
        vpic->priority_add = (irq + 1) & 7;

    vpic_update_int_output(vpic);
}

static int vpic_intack(struct hvm_hw_vpic *vpic)
{
    int irq = -1;

    vpic_lock(vpic);

    if ( !vpic->int_output )
        goto out;

    irq = vpic_get_highest_priority_irq(vpic);
    BUG_ON(irq < 0);
    __vpic_intack(vpic, irq);

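    /* IRQ2 on the master is the cascade: the real source is on the slave. */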
    if ( (irq == 2) && vpic->is_master )
    {
        vpic++; /* Slave PIC */
        irq = vpic_get_highest_priority_irq(vpic);
        BUG_ON(irq < 0);
        __vpic_intack(vpic, irq);
        irq += 8;
    }

 out:
    vpic_unlock(vpic);
    return irq;
}

static void vpic_ioport_write(
    struct hvm_hw_vpic *vpic, uint32_t addr, uint32_t val)
{
    int priority, cmd, irq;
    uint8_t mask, unmasked = 0;

    vpic_lock(vpic);

    if ( (addr & 1) == 0 )
    {
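        /* Even port: bit 4 = ICW1; else bit 3 = OCW3; else OCW2. */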
193          if ( val & 0x10 )
194          {
195              /* ICW1 */
196              /* Clear edge-sensing logic. */
197              vpic->irr &= vpic->elcr;
198  
199              unmasked = vpic->imr;
200              /* No interrupts masked or in service. */
201              vpic->imr = vpic->isr = 0;
202  
203              /* IR7 is lowest priority. */
204              vpic->priority_add = 0;
205              vpic->rotate_on_auto_eoi = 0;
206  
207              vpic->special_mask_mode = 0;
208              vpic->readsel_isr = 0;
209              vpic->poll = 0;
210  
211              if ( !(val & 1) )
212              {
213                  /* NO ICW4: ICW4 features are cleared. */
214                  vpic->auto_eoi = 0;
215                  vpic->special_fully_nested_mode = 0;
216              }
217  
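            /*
             * init_state: bits 1:0 track the initialisation sequence
             * (1 = expect ICW2, 2 = expect ICW3, 3 = expect ICW4, 0 = done);
             * bit 2 = ICW4 needed (ICW1 bit 0); bit 3 = single mode, i.e.
             * no ICW3 (ICW1 bit 1).
             */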
            vpic->init_state = ((val & 3) << 2) | 1;
        }
        else if ( val & 0x08 )
        {
            /* OCW3 */
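            /*
             * Bit 2 = poll command; bit 1 = read-register command, with
             * bit 0 selecting ISR vs IRR; bit 6 = act on special-mask bit 5.
             */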
223              if ( val & 0x04 )
224                  vpic->poll = 1;
225              if ( val & 0x02 )
226                  vpic->readsel_isr = val & 1;
227              if ( val & 0x40 )
228                  vpic->special_mask_mode = (val >> 5) & 1;
229          }
230          else
231          {
232              /* OCW2 */
233              cmd = val >> 5;
234              switch ( cmd )
235              {
236              case 0: /* Rotate in AEOI Mode (Clear) */
237              case 4: /* Rotate in AEOI Mode (Set)   */
238                  vpic->rotate_on_auto_eoi = cmd >> 2;
239                  break;
240              case 1: /* Non-Specific EOI            */
241              case 5: /* Non-Specific EOI & Rotate   */
242                  mask = vpic->isr;
243                  if ( vpic->special_mask_mode )
244                      mask &= ~vpic->imr; /* SMM: ignore masked IRs. */
245                  priority = vpic_get_priority(vpic, mask);
246                  if ( priority == VPIC_PRIO_NONE )
247                      break;
248                  irq = (priority + vpic->priority_add) & 7;
249                  vpic->isr &= ~(1 << irq);
250                  if ( cmd == 5 )
251                      vpic->priority_add = (irq + 1) & 7;
252                  break;
253              case 3: /* Specific EOI                */
254              case 7: /* Specific EOI & Rotate       */
255                  irq = val & 7;
256                  vpic->isr &= ~(1 << irq);
257                  if ( cmd == 7 )
258                      vpic->priority_add = (irq + 1) & 7;
259                  /* Release lock and EOI the physical interrupt (if any). */
260                  vpic_update_int_output(vpic);
261                  vpic_unlock(vpic);
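                /*
                 * Port bit 7 distinguishes the slave (0xa0/0xa1) from the
                 * master (0x20/0x21): the slave's IRs map to GSIs 8-15.
                 */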
                hvm_dpci_eoi(current->domain,
                             hvm_isa_irq_to_gsi((addr >> 7) ? (irq|8) : irq),
                             NULL);
                return; /* bail immediately */
            case 6: /* Set Priority                */
                vpic->priority_add = (val + 1) & 7;
                break;
            }
        }
    }
    else
    {
        switch ( vpic->init_state & 3 )
        {
        case 0:
            /* OCW1 */
            unmasked = vpic->imr & (~val);
            vpic->imr = val;
            break;
        case 1:
            /* ICW2 */
            vpic->irq_base = val & 0xf8;
            vpic->init_state++;
            if ( !(vpic->init_state & 8) )
                break; /* CASCADE mode: wait for write to ICW3. */
            /* SNGL mode: fall through (no ICW3). */
        case 2:
            /* ICW3 */
            vpic->init_state++;
            if ( !(vpic->init_state & 4) )
                vpic->init_state = 0; /* No ICW4: init done */
            break;
        case 3:
            /* ICW4 */
            vpic->special_fully_nested_mode = (val >> 4) & 1;
            vpic->auto_eoi = (val >> 1) & 1;
            vpic->init_state = 0;
            break;
        }
    }

    vpic_update_int_output(vpic);

    vpic_unlock(vpic);

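    /* Any newly unmasked lines may let a periodic timer interrupt through. */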
    if ( unmasked )
        pt_may_unmask_irq(vpic_domain(vpic), NULL);
}

static uint32_t vpic_ioport_read(struct hvm_hw_vpic *vpic, uint32_t addr)
{
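    /* Poll mode (OCW3): a read acknowledges and returns the pending IRQ. */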
    if ( vpic->poll )
    {
        vpic->poll = 0;
        return vpic_intack(vpic);
    }

    if ( (addr & 1) == 0 )
        return (vpic->readsel_isr ? vpic->isr : vpic->irr);

    return vpic->imr;
}

static int vpic_intercept_pic_io(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct hvm_hw_vpic *vpic;

    if ( bytes != 1 )
    {
        gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", bytes);
        *val = ~0;
        return X86EMUL_OKAY;
    }

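    /* Port bit 7 selects the PIC: 0x20/0x21 = master, 0xa0/0xa1 = slave. */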
337      vpic = &current->domain->arch.hvm_domain.vpic[port >> 7];
338  
339      if ( dir == IOREQ_WRITE )
340          vpic_ioport_write(vpic, port, (uint8_t)*val);
341      else
342          *val = (uint8_t)vpic_ioport_read(vpic, port);
343  
344      return X86EMUL_OKAY;
345  }
346  
vpic_intercept_elcr_io(int dir,unsigned int port,unsigned int bytes,uint32_t * val)347  static int vpic_intercept_elcr_io(
348      int dir, unsigned int port, unsigned int bytes, uint32_t *val)
349  {
350      struct hvm_hw_vpic *vpic;
351      uint32_t data;
352  
353      BUG_ON(bytes != 1);
354  
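    /* Port 0x4d0 is the master's ELCR, 0x4d1 the slave's. */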
355      vpic = &current->domain->arch.hvm_domain.vpic[port & 1];
356  
357      if ( dir == IOREQ_WRITE )
358      {
359          /* Some IRs are always edge trig. Slave IR is always level trig. */
360          data = *val & vpic_elcr_mask(vpic);
361          if ( vpic->is_master )
362              data |= 1 << 2;
363          vpic->elcr = data;
364      }
365      else
366      {
367          /* Reader should not see hardcoded level-triggered slave IR. */
368          *val = vpic->elcr & vpic_elcr_mask(vpic);
369      }
370  
371      return X86EMUL_OKAY;
372  }
373  
vpic_save(struct domain * d,hvm_domain_context_t * h)374  static int vpic_save(struct domain *d, hvm_domain_context_t *h)
375  {
376      struct hvm_hw_vpic *s;
377      int i;
378  
379      if ( !has_vpic(d) )
380          return 0;
381  
382      /* Save the state of both PICs */
383      for ( i = 0; i < 2 ; i++ )
384      {
385          s = &d->arch.hvm_domain.vpic[i];
386          if ( hvm_save_entry(PIC, i, h, s) )
387              return 1;
388      }
389  
390      return 0;
391  }
392  
static int vpic_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_vpic *s;
    uint16_t inst;

    if ( !has_vpic(d) )
        return -ENODEV;

    /* Which PIC is this? */
    inst = hvm_load_instance(h);
    if ( inst > 1 )
        return -EINVAL;
    s = &d->arch.hvm_domain.vpic[inst];

    /* Load the state */
    if ( hvm_load_entry(PIC, h, s) != 0 )
        return -EINVAL;

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);

void vpic_reset(struct domain *d)
{
    struct hvm_hw_vpic *vpic;

    if ( !has_vpic(d) )
        return;

    /* Master PIC. */
    vpic = &d->arch.hvm_domain.vpic[0];
    memset(vpic, 0, sizeof(*vpic));
    vpic->is_master = 1;
    vpic->elcr      = 1 << 2;

    /* Slave PIC. */
    vpic++;
    memset(vpic, 0, sizeof(*vpic));
}

void vpic_init(struct domain *d)
{
    if ( !has_vpic(d) )
        return;

    vpic_reset(d);

    register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io);
    register_portio_handler(d, 0xa0, 2, vpic_intercept_pic_io);

    register_portio_handler(d, 0x4d0, 1, vpic_intercept_elcr_io);
    register_portio_handler(d, 0x4d1, 1, vpic_intercept_elcr_io);
}

void vpic_irq_positive_edge(struct domain *d, int irq)
{
    struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3];
    uint8_t mask = 1 << (irq & 7);

    ASSERT(has_vpic(d));
    ASSERT(irq <= 15);
    ASSERT(vpic_is_locked(vpic));

    TRACE_1D(TRC_HVM_EMUL_PIC_POSEDGE, irq);
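    /* IRQ2 is the cascade input, driven only by the slave's INT output. */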
    if ( irq == 2 )
        return;

    vpic->irr |= mask;
    if ( !(vpic->imr & mask) )
        vpic_update_int_output(vpic);
}

void vpic_irq_negative_edge(struct domain *d, int irq)
{
    struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3];
    uint8_t mask = 1 << (irq & 7);

    ASSERT(has_vpic(d));
    ASSERT(irq <= 15);
    ASSERT(vpic_is_locked(vpic));

    TRACE_1D(TRC_HVM_EMUL_PIC_NEGEDGE, irq);
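    /* IRQ2 is the cascade input, driven only by the slave's INT output. */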
    if ( irq == 2 )
        return;

    vpic->irr &= ~mask;
    if ( !(vpic->imr & mask) )
        vpic_update_int_output(vpic);
}

int vpic_ack_pending_irq(struct vcpu *v)
{
    int irq, vector;
    struct hvm_hw_vpic *vpic = &v->domain->arch.hvm_domain.vpic[0];

    ASSERT(has_vpic(v->domain));

    TRACE_2D(TRC_HVM_EMUL_PIC_PEND_IRQ_CALL, vlapic_accept_pic_intr(v),
             vpic->int_output);
    if ( !vlapic_accept_pic_intr(v) || !vpic->int_output )
        return -1;

    irq = vpic_intack(vpic);
    if ( irq == -1 )
        return -1;

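    /* IRQs 0-7 come from the master (vpic[0]), 8-15 from the slave. */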
    vector = vpic[irq >> 3].irq_base + (irq & 7);
    return vector;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */