/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <logmsg.h>
#include <asm/io.h>
#include <asm/lib/spinlock.h>
#include <asm/cpu_caps.h>
#include <pci.h>
#include <asm/vtd.h>
#include <acpi.h>

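/* DRHD units and their device scopes parsed from the ACPI DMAR table */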
static uint32_t dmar_unit_cnt;
static struct dmar_drhd drhd_info_array[MAX_DRHDS];
static struct dmar_dev_scope drhd_dev_scope[MAX_DRHDS][MAX_DRHD_DEVSCOPES];

/*
 * @post return != NULL
 */
static void *get_dmar_table(void)
{
	return get_acpi_tbl(ACPI_SIG_DMAR);
}

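/*
 * Read the secondary bus number of the PCI-to-PCI bridge at bus:dev.func
 * through the PCI configuration address/data I/O ports. The dword at config
 * offset 0x18 of a type-1 header holds the primary/secondary/subordinate bus
 * numbers; bits 15:8 are the secondary bus number.
 */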
static uint8_t get_secondary_bus(uint8_t bus, uint8_t dev, uint8_t func)
{
	uint32_t data;

	pio_write32(PCI_CFG_ENABLE | ((uint32_t)bus << 16U) | ((uint32_t)dev << 11U) |
			((uint32_t)func << 8U) | 0x18U, PCI_CONFIG_ADDR);

	data = pio_read32(PCI_CONFIG_DATA);

	return (data >> 8U) & 0xffU;
}

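/*
 * Resolve the final BDF of a device scope: start on 'busno' and, for each
 * additional path entry, follow the secondary bus of the bridge identified
 * by the previous (device, function) hop.
 */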
static union pci_bdf dmar_path_bdf(int32_t path_len, uint8_t busno, const struct acpi_dmar_pci_path *path)
{
	int32_t i;
	union pci_bdf dmar_bdf;

	dmar_bdf.bits.b = busno;
	dmar_bdf.bits.d = path->device;
	dmar_bdf.bits.f = path->function;

	for (i = 1; i < path_len; i++) {
		dmar_bdf.bits.b = get_secondary_bus(dmar_bdf.bits.b, dmar_bdf.bits.d, dmar_bdf.bits.f);
		dmar_bdf.bits.d = path[i].device;
		dmar_bdf.bits.f = path[i].function;
	}
	return dmar_bdf;
}

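/*
 * Parse a single ACPI DMAR device-scope entry at 'addr' into 'dev_scope'.
 * Return the length of the entry in bytes, or -1 if the entry does not fit
 * within the 'remaining' bytes of the enclosing structure.
 */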
static int32_t handle_dmar_devscope(struct dmar_dev_scope *dev_scope, void *addr, int32_t remaining)
{
	int32_t path_len, ret = -1;
	union pci_bdf dmar_bdf;
	struct acpi_dmar_pci_path *path;
	struct acpi_dmar_device_scope *acpi_devscope = addr;

	if ((remaining >= (int32_t)sizeof(struct acpi_dmar_device_scope)) &&
			(remaining >= (int32_t)acpi_devscope->length)) {
		path = (struct acpi_dmar_pci_path *)(acpi_devscope + 1);
		path_len = (int32_t)((acpi_devscope->length - sizeof(struct acpi_dmar_device_scope)) /
				sizeof(struct acpi_dmar_pci_path));

		dmar_bdf = dmar_path_bdf(path_len, acpi_devscope->bus, path);
		dev_scope->id = acpi_devscope->enumeration_id;
		dev_scope->type = acpi_devscope->entry_type;
		dev_scope->bus = dmar_bdf.fields.bus;
		dev_scope->devfun = dmar_bdf.fields.devfun;
		ret = (int32_t)acpi_devscope->length;
	}

	return ret;
}

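/*
 * Count the device-scope entries of a DRHD structure whose type is handled,
 * i.e. neither "not used" nor in the reserved range.
 */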
static uint32_t get_drhd_dev_scope_cnt(struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_device_scope *scope;
	char *start;
	char *end;
	uint32_t count = 0U;

	start = (char *)drhd + sizeof(struct acpi_dmar_hardware_unit);
	end = (char *)drhd + drhd->header.length;

	while (start < end) {
		scope = (struct acpi_dmar_device_scope *)start;
		if ((scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NOT_USED) &&
				(scope->entry_type < ACPI_DMAR_SCOPE_TYPE_RESERVED)) {
			count++;
		}
		start += scope->length;
	}
	return count;
}

/**
 * @Application constraint: The dedicated DMAR unit for Intel integrated GPU
 * shall be available on the physical platform.
 */
static int32_t handle_one_drhd(struct acpi_dmar_hardware_unit *acpi_drhd, struct dmar_drhd *drhd)
{
	struct dmar_dev_scope *dev_scope;
	struct acpi_dmar_device_scope *ads;
	int32_t remaining, consumed;
	char *cp;
	uint32_t dev_count;

	drhd->segment = acpi_drhd->segment;
	drhd->flags = acpi_drhd->flags;
	drhd->reg_base_addr = acpi_drhd->address;

	dev_count = get_drhd_dev_scope_cnt(acpi_drhd);
	ASSERT(dev_count <= MAX_DRHD_DEVSCOPES, "parsed dev_count > MAX_DRHD_DEVSCOPES");

	drhd->dev_cnt = dev_count;

	remaining = (int32_t)(acpi_drhd->header.length - sizeof(struct acpi_dmar_hardware_unit));

	dev_scope = drhd->devices;

	while (remaining > 0) {
		cp = (char *)acpi_drhd + acpi_drhd->header.length - remaining;

		consumed = handle_dmar_devscope(dev_scope, cp, remaining);

		/* Ignore the GPU DMAR unit since GVT-d has not been enabled on APL yet. */
		if (is_apl_platform()) {
			if ((((uint32_t)drhd->segment << 16U) |
					((uint32_t)dev_scope->bus << 8U) |
					dev_scope->devfun) == CONFIG_IGD_SBDF) {
				drhd->ignore = true;
			}
		}

		if (consumed <= 0) {
			break;
		}

		remaining -= consumed;
		/* skip IOAPIC & HPET */
		ads = (struct acpi_dmar_device_scope *)cp;
		if ((ads->entry_type != ACPI_DMAR_SCOPE_TYPE_NOT_USED) &&
				(ads->entry_type < ACPI_DMAR_SCOPE_TYPE_RESERVED)) {
			dev_scope++;
		} else {
			pr_dbg("drhd: skip dev_scope type %d", ads->entry_type);
		}
	}

	return 0;
}

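/*
 * Parse the ACPI DMAR table into 'plat_dmar_info': one dmar_drhd entry is
 * filled per hardware-unit (DRHD) structure, together with its device scopes.
 * Only a single PCI segment is supported, and a DRHD carrying the
 * INCLUDE_PCI_ALL flag is expected to be the last one reported by the BIOS.
 */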
int32_t parse_dmar_table(struct dmar_info *plat_dmar_info)
{
	struct acpi_table_dmar *dmar_tbl;
	struct acpi_dmar_header *dmar_header;
	struct acpi_dmar_hardware_unit *acpi_drhd;
	char *ptr, *ptr_end;
	uint32_t include_all_idx = ~0U;
	uint16_t segment = 0;

	dmar_tbl = (struct acpi_table_dmar *)get_dmar_table();
	ASSERT(dmar_tbl != NULL, "");

	ptr = (char *)dmar_tbl + sizeof(*dmar_tbl);
	ptr_end = (char *)dmar_tbl + dmar_tbl->header.length;

	plat_dmar_info->drhd_units = drhd_info_array;
	for (; ptr < ptr_end; ptr += dmar_header->length) {
		dmar_header = (struct acpi_dmar_header *)ptr;
		ASSERT(dmar_header->length >= sizeof(struct acpi_dmar_header), "corrupted DMAR table");

		if (dmar_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			acpi_drhd = (struct acpi_dmar_hardware_unit *)dmar_header;
			/* A valid DRHD must have a non-zero register base address */
			ASSERT(acpi_drhd->address != 0UL, "a zero base address DRHD. Please fix the BIOS.");

			if (dmar_unit_cnt == 0U) {
				segment = acpi_drhd->segment;
			} else {
				/* Only a single PCI segment is supported */
				if (segment != acpi_drhd->segment) {
					panic("Only support single PCI Segment.");
				}
			}

			if (acpi_drhd->flags & DRHD_FLAG_INCLUDE_PCI_ALL_MASK) {
				/* Remember this DRHD; one with INCLUDE_PCI_ALL must be the last DRHD */
				include_all_idx = dmar_unit_cnt;
			}

			dmar_unit_cnt++;
			plat_dmar_info->drhd_units[dmar_unit_cnt - 1].devices = drhd_dev_scope[dmar_unit_cnt - 1];
			handle_one_drhd(acpi_drhd, &(plat_dmar_info->drhd_units[dmar_unit_cnt - 1]));
		}
	}

	if ((include_all_idx != ~0U) && (dmar_unit_cnt != (include_all_idx + 1U))) {
		pr_err("DRHD%d with INCLUDE_PCI_ALL flag is NOT the last one. Please fix the BIOS.", include_all_idx);
	}

	ASSERT(dmar_unit_cnt <= MAX_DRHDS, "parsed dmar_unit_cnt > MAX_DRHDS");
	plat_dmar_info->drhd_count = dmar_unit_cnt;

	return 0;
}