1 /*
2  * Copyright (C) 2007 Advanced Micro Devices, Inc.
3  * Author: Leo Duran <leo.duran@amd.com>
4  * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef _ASM_X86_64_AMD_IOMMU_PROTO_H
21 #define _ASM_X86_64_AMD_IOMMU_PROTO_H
22 
23 #include <xen/sched.h>
24 #include <asm/amd-iommu.h>
25 #include <asm/apicdef.h>
26 #include <xen/domain_page.h>
27 
struct acpi_ivrs_hardware;

/* Iterate over every discovered AMD IOMMU (global amd_iommu_head list). */
#define for_each_amd_iommu(amd_iommu) \
    list_for_each_entry(amd_iommu, \
        &amd_iommu_head, list)

#define DMA_32BIT_MASK  0x00000000ffffffffULL

/* Debug logging for AMD-Vi, gated on the global iommu_debug flag. */
#define AMD_IOMMU_DEBUG(fmt, args...) \
    do  \
    {   \
        if ( iommu_debug )  \
            printk(XENLOG_INFO "AMD-Vi: " fmt, ## args);    \
    } while(0)
42 
/* amd-iommu-detect functions */
int amd_iommu_get_ivrs_dev_entries(void);
int amd_iommu_get_supported_ivhd_type(void);
int amd_iommu_detect_one_acpi(const struct acpi_ivrs_hardware *);
int amd_iommu_detect_acpi(void);
void get_iommu_features(struct amd_iommu *iommu);

/* amd-iommu-init functions */
int amd_iommu_init(void);
int amd_iommu_update_ivrs_mapping_acpi(void);

/* mapping functions */
int __must_check amd_iommu_map_page(struct domain *d, unsigned long gfn,
                                    unsigned long mfn, unsigned int flags);
int __must_check amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
u64 amd_iommu_get_next_table_from_pte(u32 *entry);
int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
/* iw/ir: presumably write/read permission flags — confirm at call sites. */
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
                                       u64 phys_addr, unsigned long size,
                                       int iw, int ir);

/* Share p2m table with iommu */
void amd_iommu_share_p2m(struct domain *d);
66 
/* device table functions */
int get_dma_requestor_id(u16 seg, u16 bdf);
void amd_iommu_set_intremap_table(
    u32 *dte, u64 intremap_ptr, u8 int_valid);
void amd_iommu_set_root_page_table(
    u32 *dte, u64 root_ptr, u16 domain_id, u8 paging_mode, u8 valid);
void iommu_dte_set_iotlb(u32 *dte, u8 i);
void iommu_dte_add_device_entry(u32 *dte, struct ivrs_mappings *ivrs_dev);
void iommu_dte_set_guest_cr3(u32 *dte, u16 dom_id, u64 gcr3,
                             int gv, unsigned int glx);

/* send cmd to iommu */
void amd_iommu_flush_all_pages(struct domain *d);
void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
                           unsigned int order);
void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
                           uint64_t gaddr, unsigned int order);
void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf);
void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf);
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);

/* find iommu for bdf */
struct amd_iommu *find_iommu_for_device(int seg, int bdf);

/* interrupt remapping */
int amd_iommu_setup_ioapic_remapping(void);
void *amd_iommu_alloc_intremap_table(unsigned long **);
int amd_iommu_free_intremap_table(u16 seg, struct ivrs_mappings *);
void amd_iommu_ioapic_update_ire(
    unsigned int apic, unsigned int reg, unsigned int value);
unsigned int amd_iommu_read_ioapic_from_ire(
    unsigned int apic, unsigned int reg);
int amd_iommu_msi_msg_update_ire(
    struct msi_desc *msi_desc, struct msi_msg *msg);
void amd_iommu_read_msi_from_ire(
    struct msi_desc *msi_desc, struct msi_msg *msg);
int amd_setup_hpet_msi(struct msi_desc *msi_desc);
104 
/* Per-IOAPIC source identification (seg/bdf) from IVRS or command line. */
extern struct ioapic_sbdf {
    u16 bdf, seg;
    u8 id;
    bool cmdline;   /* entry originated from the command line, not IVHD */
    u16 *pin_2_idx; /* per-pin mapping — presumably into the intremap table */
} ioapic_sbdf[MAX_IO_APICS];

extern unsigned int nr_ioapic_sbdf;
unsigned int ioapic_id_to_index(unsigned int apic_id);
unsigned int get_next_ioapic_sbdf_index(void);

/* HPET source identification; init records where the mapping came from. */
extern struct hpet_sbdf {
    u16 bdf, seg, id;
    enum {
        HPET_NONE,
        HPET_CMDL,
        HPET_IVHD,
    } init;
} hpet_sbdf;

extern void *shared_intremap_table;
extern unsigned long *shared_intremap_inuse;
127 
/* power management support */
void amd_iommu_resume(void);
int __must_check amd_iommu_suspend(void);
void amd_iommu_crash_shutdown(void);

/* guest iommu support */
void amd_iommu_send_guest_cmd(struct amd_iommu *iommu, u32 cmd[]);
void guest_iommu_add_ppr_log(struct domain *d, u32 entry[]);
void guest_iommu_add_event_log(struct domain *d, u32 entry[]);
int guest_iommu_init(struct domain* d);
void guest_iommu_destroy(struct domain *d);
int guest_iommu_set_base(struct domain *d, uint64_t base);
140 
/*
 * Extract a bit field from a 32-bit register value.
 *
 * @reg_value: raw register contents
 * @mask:      mask selecting the field, in its in-register position
 * @shift:     right shift that moves the field down to bit 0
 *
 * Return: the isolated field value.
 *
 * Fixed-width uint32_t is used for consistency with the other accessors
 * in this header (u32 is the same underlying type).
 */
static inline uint32_t get_field_from_reg_u32(uint32_t reg_value,
                                              uint32_t mask, uint32_t shift)
{
    return (reg_value & mask) >> shift;
}
147 
/*
 * Insert @field into the masked position of a 32-bit register value.
 *
 * @field:     new field value (pre-shift, i.e. right-aligned)
 * @reg_value: current register contents
 * @mask:      mask selecting the field, in its in-register position
 * @shift:     left shift that moves @field into place
 * @reg:       optional out-pointer; updated with the result when non-NULL
 *
 * Return: the updated register value.
 */
static inline uint32_t set_field_in_reg_u32(uint32_t field, uint32_t reg_value,
                                            uint32_t mask, uint32_t shift,
                                            uint32_t *reg)
{
    reg_value = (reg_value & ~mask) | ((field << shift) & mask);
    if ( reg )
        *reg = reg_value;
    return reg_value;
}
157 
/*
 * Extract a bit field from a byte without an explicit shift count.
 *
 * @mask must be non-zero (mask == 0 would divide by zero).  mask & -mask
 * isolates the lowest set bit of the mask; dividing by it shifts the
 * masked field down to bit 0.  Operands are promoted to int, so the
 * negation is well-defined.
 */
static inline uint8_t get_field_from_byte(uint8_t value, uint8_t mask)
{
    return (value & mask) / (mask & -mask);
}
162 
region_to_pages(unsigned long addr,unsigned long size)163 static inline unsigned long region_to_pages(unsigned long addr, unsigned long size)
164 {
165     return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
166 }
167 
alloc_amd_iommu_pgtable(void)168 static inline struct page_info* alloc_amd_iommu_pgtable(void)
169 {
170     struct page_info *pg;
171     void *vaddr;
172 
173     pg = alloc_domheap_page(NULL, 0);
174     if ( pg == NULL )
175         return 0;
176     vaddr = __map_domain_page(pg);
177     memset(vaddr, 0, PAGE_SIZE);
178     unmap_domain_page(vaddr);
179     return pg;
180 }
181 
free_amd_iommu_pgtable(struct page_info * pg)182 static inline void free_amd_iommu_pgtable(struct page_info *pg)
183 {
184     if ( pg != 0 )
185         free_domheap_page(pg);
186 }
187 
/*
 * Allocate 2^order contiguous xenheap pages for IOMMU tables.
 * Return: the allocation, or NULL on failure.
 */
static inline void* __alloc_amd_iommu_tables(int order)
{
    return alloc_xenheap_pages(order, 0);
}
194 
/* Free a xenheap allocation made by __alloc_amd_iommu_tables(order). */
static inline void __free_amd_iommu_tables(void *table, int order)
{
    free_xenheap_pages(table, order);
}
199 
/* Set bit @bit in *reg to the "enabled" control value (in place). */
static inline void iommu_set_bit(uint32_t *reg, uint32_t bit)
{
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, *reg, 1U << bit, bit, reg);
}
204 
/* Set bit @bit in *reg to the "disabled" control value (in place). */
static inline void iommu_clear_bit(uint32_t *reg, uint32_t bit)
{
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *reg, 1U << bit, bit, reg);
}
209 
/*
 * Read a single bit from a register value (0 or 1).
 * Equivalent to get_field_from_reg_u32(reg, 1U << bit, bit).
 */
static inline uint32_t iommu_get_bit(uint32_t reg, uint32_t bit)
{
    return (reg >> bit) & 1U;
}
214 
/* Test bit @bit of the IOMMU's PCI capability header; returns 0 or 1. */
static inline int iommu_has_cap(struct amd_iommu *iommu, uint32_t bit)
{
    return (iommu->cap.header >> bit) & 1;
}
219 
amd_iommu_has_feature(struct amd_iommu * iommu,uint32_t bit)220 static inline int amd_iommu_has_feature(struct amd_iommu *iommu, uint32_t bit)
221 {
222     if ( !iommu_has_cap(iommu, PCI_CAP_EFRSUP_SHIFT) )
223         return 0;
224     return !!(iommu->features & (1U << bit));
225 }
226 
227 /* access tail or head pointer of ring buffer */
/* Extract the head/tail pointer field from a ring buffer register value. */
static inline uint32_t iommu_get_rb_pointer(uint32_t reg)
{
    return (reg & IOMMU_RING_BUFFER_PTR_MASK) >> IOMMU_RING_BUFFER_PTR_SHIFT;
}
233 
/* Store @val into the head/tail pointer field of *reg, in place. */
static inline void iommu_set_rb_pointer(uint32_t *reg, uint32_t val)
{
    *reg = (*reg & ~IOMMU_RING_BUFFER_PTR_MASK) |
           ((val << IOMMU_RING_BUFFER_PTR_SHIFT) & IOMMU_RING_BUFFER_PTR_MASK);
}
239 
240 /* access device id field from iommu cmd */
iommu_get_devid_from_cmd(uint32_t cmd)241 static inline uint16_t iommu_get_devid_from_cmd(uint32_t cmd)
242 {
243     return get_field_from_reg_u32(cmd, IOMMU_CMD_DEVICE_ID_MASK,
244                                   IOMMU_CMD_DEVICE_ID_SHIFT);
245 }
246 
/* Store device id @id into the command word pointed to by @cmd. */
static inline void iommu_set_devid_to_cmd(uint32_t *cmd, uint16_t id)
{
    set_field_in_reg_u32(id, *cmd, IOMMU_CMD_DEVICE_ID_MASK,
                         IOMMU_CMD_DEVICE_ID_SHIFT, cmd);
}
252 
253 /* access address field from iommu cmd */
/* Extract the low address bits from an IOMMU command word. */
static inline uint32_t iommu_get_addr_lo_from_cmd(uint32_t cmd)
{
    return get_field_from_reg_u32(cmd, IOMMU_CMD_ADDR_LOW_MASK,
                                  IOMMU_CMD_ADDR_LOW_SHIFT);
}
259 
iommu_get_addr_hi_from_cmd(uint32_t cmd)260 static inline uint32_t iommu_get_addr_hi_from_cmd(uint32_t cmd)
261 {
262     return get_field_from_reg_u32(cmd, IOMMU_CMD_ADDR_LOW_MASK,
263                                   IOMMU_CMD_ADDR_HIGH_SHIFT);
264 }
265 
/* access device id field from event log entry */
267 #define iommu_get_devid_from_event          iommu_get_devid_from_cmd
268 
269 /* access iommu base addresses field from mmio regs */
/* Store the low base-address bits into the MMIO register image at *reg. */
static inline void iommu_set_addr_lo_to_reg(uint32_t *reg, uint32_t addr)
{
    set_field_in_reg_u32(addr, *reg, IOMMU_REG_BASE_ADDR_LOW_MASK,
                         IOMMU_REG_BASE_ADDR_LOW_SHIFT, reg);
}
275 
/* Store the high base-address bits into the MMIO register image at *reg. */
static inline void iommu_set_addr_hi_to_reg(uint32_t *reg, uint32_t addr)
{
    set_field_in_reg_u32(addr, *reg, IOMMU_REG_BASE_ADDR_HIGH_MASK,
                         IOMMU_REG_BASE_ADDR_HIGH_SHIFT, reg);
}
281 
iommu_is_pte_present(const u32 * entry)282 static inline int iommu_is_pte_present(const u32 *entry)
283 {
284     return get_field_from_reg_u32(entry[0],
285                                   IOMMU_PDE_PRESENT_MASK,
286                                   IOMMU_PDE_PRESENT_SHIFT);
287 }
288 
/* Extract the next-level field of a page directory entry (first word). */
static inline unsigned int iommu_next_level(const u32 *entry)
{
    return get_field_from_reg_u32(entry[0],
                                  IOMMU_PDE_NEXT_LEVEL_MASK,
                                  IOMMU_PDE_NEXT_LEVEL_SHIFT);
}
295 
296 #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */
297