/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <xen/iommu.h>
#include <asm/msi.h>

/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */

#define    DMAR_VER_REG    0x0    /* Arch version supported by this IOMMU */
#define    DMAR_CAP_REG    0x8    /* Hardware supported capabilities */
#define    DMAR_ECAP_REG    0x10    /* Extended capabilities supported */
#define    DMAR_GCMD_REG    0x18    /* Global command register */
#define    DMAR_GSTS_REG    0x1c    /* Global status register */
#define    DMAR_RTADDR_REG    0x20    /* Root entry table */
#define    DMAR_CCMD_REG    0x28    /* Context command reg */
#define    DMAR_FSTS_REG    0x34    /* Fault status register */
#define    DMAR_FECTL_REG    0x38    /* Fault control register */
#define    DMAR_FEDATA_REG    0x3c    /* Fault event interrupt data register */
#define    DMAR_FEADDR_REG    0x40    /* Fault event interrupt addr register */
#define    DMAR_FEUADDR_REG 0x44    /* Fault event upper address register */
#define    DMAR_AFLOG_REG    0x58    /* Advanced fault control */
#define    DMAR_PMEN_REG    0x64    /* Enable protected memory region */
#define    DMAR_PLMBASE_REG 0x68    /* PMRR low base addr */
#define    DMAR_PLMLIMIT_REG 0x6c    /* PMRR low limit */
#define    DMAR_PHMBASE_REG 0x70    /* PMRR high base addr */
#define    DMAR_PHMLIMIT_REG 0x78    /* PMRR high limit */
#define    DMAR_IQH_REG    0x80    /* Invalidation queue head */
#define    DMAR_IQT_REG    0x88    /* Invalidation queue tail */
#define    DMAR_IQA_REG    0x90    /* Invalidation queue addr */
#define    DMAR_IRTA_REG   0xB8    /* Interrupt remapping table addr */

#define OFFSET_STRIDE        (9)
#define dmar_readl(dmar, reg) readl((dmar) + (reg))
#define dmar_readq(dmar, reg) readq((dmar) + (reg))
#define dmar_writel(dmar, reg, val) writel(val, (dmar) + (reg))
#define dmar_writeq(dmar, reg, val) writeq(val, (dmar) + (reg))

#define VER_MAJOR(v)        (((v) & 0xf0) >> 4)
#define VER_MINOR(v)        ((v) & 0x0f)
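
/*
 * Usage sketch (illustrative, not part of the original interface): reading
 * and decoding the version register.  'iommu' stands for a hypothetical,
 * already initialised struct iommu (declared below) whose registers are
 * mapped at iommu->reg:
 *
 *     u32 ver = dmar_readl(iommu->reg, DMAR_VER_REG);
 *
 *     printk("VT-d version %u.%u\n", VER_MAJOR(ver), VER_MINOR(ver));
 */
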
/*
 * Decoding Capability Register
 */
#define cap_intr_post(c)       (((c) >> 59) & 1)
#define cap_read_drain(c)      (((c) >> 55) & 1)
#define cap_write_drain(c)     (((c) >> 54) & 1)
#define cap_max_amask_val(c)   (((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)  ((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)       (((c) >> 39) & 1)

#define cap_super_page_val(c)  (((c) >> 34) & 0xf)
#define cap_super_offset(c)    (((find_first_bit(&cap_super_page_val(c), 4)) \
                                 * OFFSET_STRIDE) + 21)
#define cap_sps_2mb(c)         (((c) >> 34) & 1)
#define cap_sps_1gb(c)         (((c) >> 35) & 1)
#define cap_sps_512gb(c)       (((c) >> 36) & 1)
#define cap_sps_1tb(c)         (((c) >> 37) & 1)

#define cap_fault_reg_offset(c)    ((((c) >> 24) & 0x3ff) * 16)

#define cap_isoch(c)        (((c) >> 23) & 1)
#define cap_qos(c)        (((c) >> 22) & 1)
#define cap_mgaw(c)        ((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)        (((c) >> 8) & 0x1f)
#define cap_caching_mode(c)    (((c) >> 7) & 1)
#define cap_phmr(c)        (((c) >> 6) & 1)
#define cap_plmr(c)        (((c) >> 5) & 1)
#define cap_rwbf(c)        (((c) >> 4) & 1)
#define cap_afl(c)        (((c) >> 3) & 1)
#define cap_ndoms(c)        (1 << (4 + 2 * ((c) & 0x7)))
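
/*
 * Usage sketch (illustrative): deriving commonly used parameters from the
 * capability register of the hypothetical 'iommu' above:
 *
 *     u64 cap = dmar_readq(iommu->reg, DMAR_CAP_REG);
 *     unsigned int nr_dom = cap_ndoms(cap);     // supported domain-ids
 *     unsigned int width = cap_mgaw(cap);       // max guest address width
 *     unsigned long fro = cap_fault_reg_offset(cap);
 *
 * cap_fault_reg_offset() already converts the 10-bit FRO field into a byte
 * offset from the register base, so 'fro' can be used directly with
 * dmar_readl()/dmar_readq().
 */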

/*
 * Extended Capability Register
 */

#define ecap_niotlb_iunits(e)    ((((e) >> 24) & 0xff) + 1)
#define ecap_iotlb_offset(e)     ((((e) >> 8) & 0x3ff) * 16)
#define ecap_coherent(e)         (((e) >> 0) & 0x1)
#define ecap_queued_inval(e)     (((e) >> 1) & 0x1)
#define ecap_dev_iotlb(e)        (((e) >> 2) & 0x1)
#define ecap_intr_remap(e)       (((e) >> 3) & 0x1)
#define ecap_eim(e)              (((e) >> 4) & 0x1)
#define ecap_cache_hints(e)      (((e) >> 5) & 0x1)
#define ecap_pass_thru(e)        (((e) >> 6) & 0x1)
#define ecap_snp_ctl(e)          (((e) >> 7) & 0x1)
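
/*
 * Usage sketch (illustrative): gating optional features on the extended
 * capability register before setting the corresponding GCMD bits:
 *
 *     u64 ecap = dmar_readq(iommu->reg, DMAR_ECAP_REG);
 *
 *     if ( ecap_queued_inval(ecap) )
 *         ...   // queued invalidation (DMA_GCMD_QIE) may be enabled
 *     if ( ecap_intr_remap(ecap) )
 *         ...   // interrupt remapping (DMA_GCMD_IRE) may be enabled
 */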

/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET  60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(x) (((x) >> 60) & 7)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
#define DMA_TLB_DID(x) (((u64)((x) & 0xffff)) << 32)

#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_IVT (((u64)1) << 63)

#define DMA_TLB_IVA_ADDR(x) ((((u64)(x)) >> 12) << 12)
#define DMA_TLB_IVA_HINT(x) ((((u64)(x)) & 1) << 6)
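
/*
 * Usage sketch (illustrative): composing a domain-selective IOTLB flush for
 * a hypothetical domain id 'did'.  This assumes, per the VT-d spec, that
 * the 64-bit IOTLB invalidate register lives 8 bytes past
 * ecap_iotlb_offset():
 *
 *     u64 val = DMA_TLB_IVT | DMA_TLB_DSI_FLUSH | DMA_TLB_DID(did);
 *
 *     dmar_writeq(iommu->reg, ecap_iotlb_offset(iommu->ecap) + 8, val);
 *     // ... then poll the same register until hardware clears DMA_TLB_IVT
 */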

/* GCMD_REG */
#define DMA_GCMD_TE     (((u64)1) << 31)
#define DMA_GCMD_SRTP   (((u64)1) << 30)
#define DMA_GCMD_SFL    (((u64)1) << 29)
#define DMA_GCMD_EAFL   (((u64)1) << 28)
#define DMA_GCMD_WBF    (((u64)1) << 27)
#define DMA_GCMD_QIE    (((u64)1) << 26)
#define DMA_GCMD_IRE    (((u64)1) << 25)
#define DMA_GCMD_SIRTP  (((u64)1) << 24)
#define DMA_GCMD_CFI    (((u64)1) << 23)

/* GSTS_REG */
#define DMA_GSTS_TES    (((u64)1) << 31)
#define DMA_GSTS_RTPS   (((u64)1) << 30)
#define DMA_GSTS_FLS    (((u64)1) << 29)
#define DMA_GSTS_AFLS   (((u64)1) << 28)
#define DMA_GSTS_WBFS   (((u64)1) << 27)
#define DMA_GSTS_QIES   (((u64)1) << 26)
#define DMA_GSTS_IRES   (((u64)1) << 25)
#define DMA_GSTS_SIRTPS (((u64)1) << 24)
#define DMA_GSTS_CFIS   (((u64)1) << 23)
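
/*
 * Usage sketch (illustrative): the GCMD/GSTS handshake.  Software rewrites
 * the one-shot command bits from the current GSTS state plus the desired
 * control bit, then polls GSTS until hardware acknowledges.  E.g. enabling
 * DMA translation:
 *
 *     u32 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
 *
 *     dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_TE);
 *     do {
 *         sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
 *     } while ( !(sts & DMA_GSTS_TES) );
 */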

/* PMEN_REG */
#define DMA_PMEN_EPM    (((u32)1) << 31)
#define DMA_PMEN_PRS    (((u32)1) << 0)

/* CCMD_REG */
#define DMA_CCMD_INVL_GRANU_OFFSET  61
#define DMA_CCMD_ICC   (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_CIRG(x) ((((u64)3) << 61) & (x))
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))

#define DMA_CCMD_CAIG_MASK(x) (((u64)(x)) & (((u64)0x3) << 59))
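
/*
 * Usage sketch (illustrative): a device-selective context-cache
 * invalidation for hypothetical source id 'sid' in domain 'did':
 *
 *     u64 val = DMA_CCMD_ICC | DMA_CCMD_DEVICE_INVL | DMA_CCMD_SID(sid) |
 *               DMA_CCMD_DID(did) | DMA_CCMD_FM(DMA_CCMD_MASK_NOBIT);
 *
 *     dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
 *     // ... then poll DMAR_CCMD_REG until hardware clears DMA_CCMD_ICC
 */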

/* FECTL_REG */
#define DMA_FECTL_IM (((u64)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PFO ((u64)1 << 0)
#define DMA_FSTS_PPF ((u64)1 << 1)
#define DMA_FSTS_AFO ((u64)1 << 2)
#define DMA_FSTS_APF ((u64)1 << 3)
#define DMA_FSTS_IQE ((u64)1 << 4)
#define DMA_FSTS_ICE ((u64)1 << 5)
#define DMA_FSTS_ITE ((u64)1 << 6)
#define DMA_FSTS_FAULTS (DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | \
                         DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | \
                         DMA_FSTS_ITE)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u64)1) << 31)
#define dma_frcd_type(d) (((d) >> 30) & 1)
#define dma_frcd_fault_reason(c) ((c) & 0xff)
#define dma_frcd_source_id(c) ((c) & 0xffff)
#define dma_frcd_page_addr(d) ((d) & (((u64)-1) << 12)) /* low 64 bit */
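
/*
 * Usage sketch (illustrative): decoding one fault record with the 32-bit
 * accesses the comment above prescribes.  'cap' is the value of
 * DMAR_CAP_REG and 'i' a fault record index below cap_num_fault_regs(cap):
 *
 *     unsigned long frcd = cap_fault_reg_offset(cap) + i * 16;
 *     u32 hi = dmar_readl(iommu->reg, frcd + 12);
 *
 *     if ( hi & DMA_FRCD_F )
 *     {
 *         u32 lo = dmar_readl(iommu->reg, frcd + 8);
 *
 *         printk("fault: SID %04x, reason %02x, type %u\n",
 *                dma_frcd_source_id(lo), dma_frcd_fault_reason(hi),
 *                dma_frcd_type(hi));
 *     }
 */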

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
    u64    val;
    u64    rsvd1;
};
#define root_present(root)    ((root).val & 1)
#define set_root_present(root) do {(root).val |= 1;} while(0)
#define get_context_addr(root) ((root).val & PAGE_MASK_4K)
#define set_root_value(root, value) \
    do {(root).val |= ((value) & PAGE_MASK_4K);} while(0)
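
/*
 * Usage sketch (illustrative): pointing a zero-initialised root entry at a
 * context table page with hypothetical 4K-aligned machine address
 * 'ctx_maddr' (set_root_value() only ORs bits in, so the entry must start
 * out clear):
 *
 *     struct root_entry re = { 0, 0 };
 *
 *     set_root_value(re, ctx_maddr);
 *     set_root_present(re);
 */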

struct context_entry {
    u64 lo;
    u64 hi;
};
#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
#define context_present(c) ((c).lo & 1)
#define context_fault_disable(c) (((c).lo >> 1) & 1)
#define context_translation_type(c) (((c).lo >> 2) & 3)
#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
#define context_address_width(c) ((c).hi & 7)
#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))

#define context_set_present(c) do {(c).lo |= 1;} while(0)
#define context_clear_present(c) do {(c).lo &= ~1;} while(0)
#define context_set_fault_enable(c) \
    do {(c).lo &= (((u64)-1) << 2) | 1;} while(0)

#define context_set_translation_type(c, val) do { \
        (c).lo &= (((u64)-1) << 4) | 3; \
        (c).lo |= ((val) & 3) << 2; \
    } while(0)
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB   1
#define CONTEXT_TT_PASS_THRU   2

#define context_set_address_root(c, val) \
    do {(c).lo &= 0xfff; (c).lo |= (val) & PAGE_MASK_4K;} while(0)
#define context_set_address_width(c, val) \
    do {(c).hi &= 0xfffffff8; (c).hi |= (val) & 7;} while(0)
#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while(0)
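
/*
 * Usage sketch (illustrative): initialising a context entry for multi-level
 * translation through a hypothetical page table at 'pgd_maddr', with
 * address width 'agaw' and domain id 'did'.  No DID setter macro exists in
 * this header, so that field is written directly:
 *
 *     struct context_entry ce = { 0, 0 };
 *
 *     context_set_address_width(ce, agaw);
 *     ce.hi |= (u64)(did & 0xffff) << 8;
 *     context_set_translation_type(ce, CONTEXT_TT_MULTI_LEVEL);
 *     context_set_address_root(ce, pgd_maddr);
 *     context_set_fault_enable(ce);
 *     context_set_present(ce);
 */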

/* page table handling */
#define LEVEL_STRIDE       (9)
#define LEVEL_MASK         ((1 << LEVEL_STRIDE) - 1)
#define PTE_NUM            (1 << LEVEL_STRIDE)
#define level_to_agaw(val) ((val) - 2)
#define agaw_to_level(val) ((val) + 2)
#define agaw_to_width(val) (30 + (val) * LEVEL_STRIDE)
#define width_to_agaw(w)   (((w) - 30)/LEVEL_STRIDE)
#define level_to_offset_bits(l) (12 + ((l) - 1) * LEVEL_STRIDE)
#define address_level_offset(addr, level) \
            (((addr) >> level_to_offset_bits(level)) & LEVEL_MASK)
#define offset_level_address(offset, level) \
            ((u64)(offset) << level_to_offset_bits(level))
#define level_mask(l) (((u64)(-1)) << level_to_offset_bits(l))
#define level_size(l) (((u64)1) << level_to_offset_bits(l))
#define align_to_level(addr, l) (((addr) + level_size(l) - 1) & level_mask(l))
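
/*
 * Worked example (illustrative): for a 48-bit address width,
 * width_to_agaw(48) = (48 - 30) / 9 = 2 and agaw_to_level(2) = 4, i.e. a
 * 4-level table.  A walk then indexes a guest address 'gaddr' as:
 *
 *     unsigned int level;
 *
 *     for ( level = agaw_to_level(width_to_agaw(48)); level > 0; level-- )
 *     {
 *         unsigned int offset = address_level_offset(gaddr, level);
 *         // ... descend through the entry at 'offset' in the current page
 *     }
 */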

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-11: available
 * 12-63: Host physical address
 */
struct dma_pte {
    u64 val;
};
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_PROT (DMA_PTE_READ | DMA_PTE_WRITE)
#define DMA_PTE_SP   (1 << 7)
#define DMA_PTE_SNP  (1 << 11)
#define dma_clear_pte(p)    do {(p).val = 0;} while(0)
#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while(0)
#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while(0)
#define dma_set_pte_superpage(p) do {(p).val |= DMA_PTE_SP;} while(0)
#define dma_set_pte_snp(p)  do {(p).val |= DMA_PTE_SNP;} while(0)
#define dma_set_pte_prot(p, prot) do { \
        (p).val = ((p).val & ~DMA_PTE_PROT) | ((prot) & DMA_PTE_PROT); \
    } while (0)
#define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
#define dma_set_pte_addr(p, addr) do {\
            (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
#define dma_pte_present(p) (((p).val & DMA_PTE_PROT) != 0)
#define dma_pte_superpage(p) (((p).val & DMA_PTE_SP) != 0)
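
/*
 * Usage sketch (illustrative): building a leaf PTE mapping the hypothetical
 * 4K-aligned machine address 'maddr' read/write, with snooping when the
 * hardware supports it:
 *
 *     struct dma_pte pte = { 0 };
 *
 *     dma_set_pte_addr(pte, maddr);
 *     dma_set_pte_prot(pte, DMA_PTE_READ | DMA_PTE_WRITE);
 *     if ( ecap_snp_ctl(iommu->ecap) )
 *         dma_set_pte_snp(pte);
 */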

/* interrupt remap entry */
struct iremap_entry {
  union {
    __uint128_t val;
    struct { u64 lo, hi; };
    struct {
        u16 p       : 1,
            fpd     : 1,
            dm      : 1,
            rh      : 1,
            tm      : 1,
            dlm     : 3,
            avail   : 4,
            res_1   : 3,
            im      : 1;
        u8  vector;
        u8  res_2;
        u32 dst;
        u16 sid;
        u16 sq      : 2,
            svt     : 2,
            res_3   : 12;
        u32 res_4;
    } remap;
    struct {
        u16 p       : 1,
            fpd     : 1,
            res_1   : 6,
            avail   : 4,
            res_2   : 2,
            urg     : 1,
            im      : 1;
        u8  vector;
        u8  res_3;
        u32 res_4   : 6,
            pda_l   : 26;
        u16 sid;
        u16 sq      : 2,
            svt     : 2,
            res_5   : 12;
        u32 pda_h;
    } post;
  };
};

/*
 * The posted-interrupt descriptor address is 64 bits wide and 64-byte
 * aligned; only the upper 26 bits of its least significant 32 bits are
 * usable (the low 6 bits are implied zero by the alignment).
 */
#define PDA_LOW_BIT    26

/* Max intr remapping table page order is 8, as max number of IRTEs is 64K */
#define IREMAP_PAGE_ORDER  8

/*
 * The VT-d engine uses 4K pages, while the CPU page size may differ by
 * architecture, e.g. 16K on IPF.
 */
#define IREMAP_ARCH_PAGE_ORDER  (IREMAP_PAGE_ORDER + PAGE_SHIFT_4K - PAGE_SHIFT)
#define IREMAP_ARCH_PAGE_NR     ( IREMAP_ARCH_PAGE_ORDER < 0 ?  \
                                1 :                             \
                                1 << IREMAP_ARCH_PAGE_ORDER )

/* Each entry is 16 bytes, so 2^8 entries per 4K page */
#define IREMAP_ENTRY_ORDER  ( PAGE_SHIFT - 4 )
#define IREMAP_ENTRY_NR     ( 1 << ( IREMAP_PAGE_ORDER + 8 ) )

#define iremap_present(v) ((v).lo & 1)
#define iremap_fault_disable(v) (((v).lo >> 1) & 1)

#define iremap_set_present(v) do {(v).lo |= 1;} while(0)
#define iremap_clear_present(v) do {(v).lo &= ~1;} while(0)

/*
 * Get the intr remap entry:
 * maddr   - machine address of the table
 * index   - index of the entry
 * entries - returns the mapped page holding this entry; the caller must
 *           unmap it again
 * entry   - returns the required entry
 */
#define GET_IREMAP_ENTRY(maddr, index, entries, entry)                        \
do {                                                                          \
    entries = (struct iremap_entry *)map_vtd_domain_page(                     \
              (maddr) + (( (index) >> IREMAP_ENTRY_ORDER ) << PAGE_SHIFT ) ); \
    entry = &entries[(index) % (1 << IREMAP_ENTRY_ORDER)];                    \
} while(0)
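
/*
 * Usage sketch (illustrative): looking up IRTE 'index' in the remapping
 * table at ir_ctrl->iremap_maddr; unmap_vtd_domain_page() is assumed to be
 * the counterpart of map_vtd_domain_page():
 *
 *     struct iremap_entry *entries, *entry;
 *
 *     GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index, entries, entry);
 *     if ( iremap_present(*entry) )
 *         ...   // inspect entry->remap or entry->post
 *     unmap_vtd_domain_page(entries);
 */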

/* queue invalidation entry */
struct qinval_entry {
    union {
        struct {
            u64 lo;
            u64 hi;
        }val;
        struct {
            struct {
                u64 type    : 4,
                    granu   : 2,
                    res_1   : 10,
                    did     : 16,
                    sid     : 16,
                    fm      : 2,
                    res_2   : 14;
            }lo;
            struct {
                u64 res;
            }hi;
        }cc_inv_dsc;
        struct {
            struct {
                u64 type    : 4,
                    granu   : 2,
                    dw      : 1,
                    dr      : 1,
                    res_1   : 8,
                    did     : 16,
                    res_2   : 32;
            }lo;
            struct {
                u64 am      : 6,
                    ih      : 1,
                    res_1   : 5,
                    addr    : 52;
            }hi;
        }iotlb_inv_dsc;
        struct {
            struct {
                u64 type    : 4,
                    res_1   : 12,
                    max_invs_pend: 5,
                    res_2   : 11,
                    sid     : 16,
                    res_3   : 16;
            }lo;
            struct {
                u64 size    : 1,
                    res_1   : 11,
                    addr    : 52;
            }hi;
        }dev_iotlb_inv_dsc;
        struct {
            struct {
                u64 type    : 4,
                    granu   : 1,
                    res_1   : 22,
                    im      : 5,
                    iidx    : 16,
                    res_2   : 16;
            }lo;
            struct {
                u64 res;
            }hi;
        }iec_inv_dsc;
        struct {
            struct {
                u64 type    : 4,
                    iflag   : 1,
                    sw      : 1,
                    fn      : 1,
                    res_1   : 25,
                    sdata   : 32;
            }lo;
            struct {
                u64 res_1   : 2,
                    saddr   : 62;
            }hi;
        }inv_wait_dsc;
    }q;
};

/* Order of queue invalidation pages (max is 8) */
#define QINVAL_PAGE_ORDER   2

#define QINVAL_ARCH_PAGE_ORDER  (QINVAL_PAGE_ORDER + PAGE_SHIFT_4K - PAGE_SHIFT)
#define QINVAL_ARCH_PAGE_NR     ( QINVAL_ARCH_PAGE_ORDER < 0 ?  \
                                1 :                             \
                                1 << QINVAL_ARCH_PAGE_ORDER )

/* Each entry is 16 bytes, so 2^8 entries per 4K page */
#define QINVAL_ENTRY_ORDER  ( PAGE_SHIFT - 4 )
#define QINVAL_ENTRY_NR     (1 << (QINVAL_PAGE_ORDER + 8))

/* Status data flag */
#define QINVAL_STAT_INIT  0
#define QINVAL_STAT_DONE  1

/* Queue invalidation head/tail shift */
#define QINVAL_INDEX_SHIFT 4

#define qinval_present(v) ((v).lo & 1)
#define qinval_fault_disable(v) (((v).lo >> 1) & 1)

#define qinval_set_present(v) do {(v).lo |= 1;} while(0)
#define qinval_clear_present(v) do {(v).lo &= ~1;} while(0)

#define RESERVED_VAL        0

#define TYPE_INVAL_CONTEXT      0x1
#define TYPE_INVAL_IOTLB        0x2
#define TYPE_INVAL_DEVICE_IOTLB 0x3
#define TYPE_INVAL_IEC          0x4
#define TYPE_INVAL_WAIT         0x5

#define NOTIFY_TYPE_POLL        1
#define NOTIFY_TYPE_INTR        1
#define INTERRUTP_FLAG          1
#define STATUS_WRITE            1
#define FENCE_FLAG              1
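
/*
 * Usage sketch (illustrative): an invalidation wait descriptor that writes
 * QINVAL_STAT_DONE to the hypothetical status address 'status_maddr'
 * (4-byte aligned) once all earlier descriptors have completed:
 *
 *     struct qinval_entry qe;
 *
 *     qe.q.inv_wait_dsc.lo.type  = TYPE_INVAL_WAIT;
 *     qe.q.inv_wait_dsc.lo.iflag = 0;             // poll rather than interrupt
 *     qe.q.inv_wait_dsc.lo.sw    = STATUS_WRITE;  // write status data
 *     qe.q.inv_wait_dsc.lo.fn    = FENCE_FLAG;    // fence earlier descriptors
 *     qe.q.inv_wait_dsc.lo.res_1 = RESERVED_VAL;
 *     qe.q.inv_wait_dsc.lo.sdata = QINVAL_STAT_DONE;
 *     qe.q.inv_wait_dsc.hi.res_1 = RESERVED_VAL;
 *     qe.q.inv_wait_dsc.hi.saddr = status_maddr >> 2;   // bits 63:2
 */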

#define IEC_GLOBAL_INVL         0
#define IEC_INDEX_INVL          1
#define IRTA_EIME               (((u64)1) << 11)

/* 2^(IRTA_REG_TABLE_SIZE + 1) = IREMAP_ENTRY_NR */
#define IRTA_REG_TABLE_SIZE     ( IREMAP_PAGE_ORDER + 7 )

#define VTD_PAGE_TABLE_LEVEL_3  3
#define VTD_PAGE_TABLE_LEVEL_4  4

#define MAX_IOMMU_REGS 0xc0

extern struct list_head acpi_drhd_units;
extern struct list_head acpi_rmrr_units;
extern struct list_head acpi_ioapic_units;

struct qi_ctrl {
    u64 qinval_maddr;  /* queue invalidation page machine address */
};

struct ir_ctrl {
    u64 iremap_maddr;            /* interrupt remap table machine address */
    int iremap_num;              /* total number of used interrupt remap entries */
    spinlock_t iremap_lock;      /* lock for irq remapping table */
};

struct iommu_flush {
    int __must_check (*context)(void *iommu, u16 did, u16 source_id,
                                u8 function_mask, u64 type,
                                bool_t non_present_entry_flush);
    int __must_check (*iotlb)(void *iommu, u16 did, u64 addr,
                              unsigned int size_order, u64 type,
                              bool_t flush_non_present_entry,
                              bool_t flush_dev_iotlb);
};

struct intel_iommu {
    struct qi_ctrl qi_ctrl;
    struct ir_ctrl ir_ctrl;
    struct iommu_flush flush;
    struct acpi_drhd_unit *drhd;
};

struct iommu {
    struct list_head list;
    void __iomem *reg; /* Pointer to hardware regs, virtual addr */
    u32 index;         /* Sequence number of iommu */
    u32 nr_pt_levels;
    u64 cap;
    u64 ecap;
    spinlock_t lock; /* protect context, domain ids */
    spinlock_t register_lock; /* protect iommu register handling */
    u64 root_maddr; /* root entry machine address */
    struct msi_desc msi;
    struct intel_iommu *intel;
    struct list_head ats_devices;
    unsigned long *domid_bitmap;  /* domain id bitmap */
    u16 *domid_map;               /* domain id mapping array */
};

static inline struct qi_ctrl *iommu_qi_ctrl(struct iommu *iommu)
{
    return iommu ? &iommu->intel->qi_ctrl : NULL;
}

static inline struct ir_ctrl *iommu_ir_ctrl(struct iommu *iommu)
{
    return iommu ? &iommu->intel->ir_ctrl : NULL;
}

static inline struct iommu_flush *iommu_get_flush(struct iommu *iommu)
{
    return iommu ? &iommu->intel->flush : NULL;
}

#define INTEL_IOMMU_DEBUG(fmt, args...) \
    do  \
    {   \
        if ( iommu_debug )  \
            dprintk(XENLOG_WARNING VTDPREFIX, fmt, ## args);    \
    } while(0)

#endif