1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2016, Semihalf
4  *	Author: Tomasz Nowicki <tn@semihalf.com>
5  *
6  * This file implements early detection/parsing of I/O mapping
7  * reported to OS through firmware via I/O Remapping Table (IORT)
8  * IORT document number: ARM DEN 0049A
9  */
10 
11 #define pr_fmt(fmt)	"ACPI: IORT: " fmt
12 
13 #include <linux/acpi_iort.h>
14 #include <linux/bitfield.h>
15 #include <linux/iommu.h>
16 #include <linux/kernel.h>
17 #include <linux/list.h>
18 #include <linux/pci.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 #include <linux/dma-map-ops.h>
22 
23 #define IORT_TYPE_MASK(type)	(1 << (type))
24 #define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
25 #define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
26 				(1 << ACPI_IORT_NODE_SMMU_V3))
27 
28 struct iort_its_msi_chip {
29 	struct list_head	list;
30 	struct fwnode_handle	*fw_node;
31 	phys_addr_t		base_addr;
32 	u32			translation_id;
33 };
34 
35 struct iort_fwnode {
36 	struct list_head list;
37 	struct acpi_iort_node *iort_node;
38 	struct fwnode_handle *fwnode;
39 };
40 static LIST_HEAD(iort_fwnode_list);
41 static DEFINE_SPINLOCK(iort_fwnode_lock);
42 
43 /**
44  * iort_set_fwnode() - Create iort_fwnode and use it to register
45  *		       iommu data in the iort_fwnode_list
46  *
47  * @iort_node: IORT table node associated with the IOMMU
48  * @fwnode: fwnode associated with the IORT node
49  *
50  * Returns: 0 on success
51  *          <0 on failure
52  */
53 static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
54 				  struct fwnode_handle *fwnode)
55 {
56 	struct iort_fwnode *np;
57 
58 	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
59 
60 	if (WARN_ON(!np))
61 		return -ENOMEM;
62 
63 	INIT_LIST_HEAD(&np->list);
64 	np->iort_node = iort_node;
65 	np->fwnode = fwnode;
66 
67 	spin_lock(&iort_fwnode_lock);
68 	list_add_tail(&np->list, &iort_fwnode_list);
69 	spin_unlock(&iort_fwnode_lock);
70 
71 	return 0;
72 }
73 
74 /**
75  * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
76  *
77  * @node: IORT table node to be looked-up
78  *
79  * Returns: fwnode_handle pointer on success, NULL on failure
80  */
81 static inline struct fwnode_handle *iort_get_fwnode(
82 			struct acpi_iort_node *node)
83 {
84 	struct iort_fwnode *curr;
85 	struct fwnode_handle *fwnode = NULL;
86 
87 	spin_lock(&iort_fwnode_lock);
88 	list_for_each_entry(curr, &iort_fwnode_list, list) {
89 		if (curr->iort_node == node) {
90 			fwnode = curr->fwnode;
91 			break;
92 		}
93 	}
94 	spin_unlock(&iort_fwnode_lock);
95 
96 	return fwnode;
97 }
98 
99 /**
100  * iort_delete_fwnode() - Delete fwnode associated with an IORT node
101  *
102  * @node: IORT table node associated with fwnode to delete
103  */
104 static inline void iort_delete_fwnode(struct acpi_iort_node *node)
105 {
106 	struct iort_fwnode *curr, *tmp;
107 
108 	spin_lock(&iort_fwnode_lock);
109 	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
110 		if (curr->iort_node == node) {
111 			list_del(&curr->list);
112 			kfree(curr);
113 			break;
114 		}
115 	}
116 	spin_unlock(&iort_fwnode_lock);
117 }
118 
119 /**
120  * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
121  *
122  * @fwnode: fwnode associated with device to be looked-up
123  *
124  * Returns: iort_node pointer on success, NULL on failure
125  */
126 static inline struct acpi_iort_node *iort_get_iort_node(
127 			struct fwnode_handle *fwnode)
128 {
129 	struct iort_fwnode *curr;
130 	struct acpi_iort_node *iort_node = NULL;
131 
132 	spin_lock(&iort_fwnode_lock);
133 	list_for_each_entry(curr, &iort_fwnode_list, list) {
134 		if (curr->fwnode == fwnode) {
135 			iort_node = curr->iort_node;
136 			break;
137 		}
138 	}
139 	spin_unlock(&iort_fwnode_lock);
140 
141 	return iort_node;
142 }
143 
144 typedef acpi_status (*iort_find_node_callback)
145 	(struct acpi_iort_node *node, void *context);
146 
147 /* Root pointer to the mapped IORT table */
148 static struct acpi_table_header *iort_table;
149 
150 static LIST_HEAD(iort_msi_chip_list);
151 static DEFINE_SPINLOCK(iort_msi_chip_lock);
152 
153 /**
154  * iort_register_domain_token() - register domain token along with related
155  * ITS ID and base address to the list from where we can get it back later on.
156  * @trans_id: ITS ID.
157  * @base: ITS base address.
158  * @fw_node: Domain token.
159  *
160  * Returns: 0 on success, -ENOMEM if memory for the list element cannot be allocated
161  */
162 int iort_register_domain_token(int trans_id, phys_addr_t base,
163 			       struct fwnode_handle *fw_node)
164 {
165 	struct iort_its_msi_chip *its_msi_chip;
166 
167 	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
168 	if (!its_msi_chip)
169 		return -ENOMEM;
170 
171 	its_msi_chip->fw_node = fw_node;
172 	its_msi_chip->translation_id = trans_id;
173 	its_msi_chip->base_addr = base;
174 
175 	spin_lock(&iort_msi_chip_lock);
176 	list_add(&its_msi_chip->list, &iort_msi_chip_list);
177 	spin_unlock(&iort_msi_chip_lock);
178 
179 	return 0;
180 }
181 
182 /**
183  * iort_deregister_domain_token() - Deregister domain token based on ITS ID
184  * @trans_id: ITS ID.
185  *
186  * Returns: none.
187  */
188 void iort_deregister_domain_token(int trans_id)
189 {
190 	struct iort_its_msi_chip *its_msi_chip, *t;
191 
192 	spin_lock(&iort_msi_chip_lock);
193 	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
194 		if (its_msi_chip->translation_id == trans_id) {
195 			list_del(&its_msi_chip->list);
196 			kfree(its_msi_chip);
197 			break;
198 		}
199 	}
200 	spin_unlock(&iort_msi_chip_lock);
201 }
202 
203 /**
204  * iort_find_domain_token() - Find domain token based on given ITS ID
205  * @trans_id: ITS ID.
206  *
207  * Returns: domain token when found on the list, NULL otherwise
208  */
209 struct fwnode_handle *iort_find_domain_token(int trans_id)
210 {
211 	struct fwnode_handle *fw_node = NULL;
212 	struct iort_its_msi_chip *its_msi_chip;
213 
214 	spin_lock(&iort_msi_chip_lock);
215 	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
216 		if (its_msi_chip->translation_id == trans_id) {
217 			fw_node = its_msi_chip->fw_node;
218 			break;
219 		}
220 	}
221 	spin_unlock(&iort_msi_chip_lock);
222 
223 	return fw_node;
224 }
225 
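/*
 * Walk every node of the given type in the IORT and return the first
 * one for which @callback reports a successful match, or NULL if no
 * node matches (or the table is malformed).
 */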
226 static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
227 					     iort_find_node_callback callback,
228 					     void *context)
229 {
230 	struct acpi_iort_node *iort_node, *iort_end;
231 	struct acpi_table_iort *iort;
232 	int i;
233 
234 	if (!iort_table)
235 		return NULL;
236 
237 	/* Get the first IORT node */
238 	iort = (struct acpi_table_iort *)iort_table;
239 	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
240 				 iort->node_offset);
241 	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
242 				iort_table->length);
243 
244 	for (i = 0; i < iort->node_count; i++) {
245 		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
246 			       "IORT node pointer overflows, bad table!\n"))
247 			return NULL;
248 
249 		if (iort_node->type == type &&
250 		    ACPI_SUCCESS(callback(iort_node, context)))
251 			return iort_node;
252 
253 		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
254 					 iort_node->length);
255 	}
256 
257 	return NULL;
258 }
259 
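/*
 * Match callback for iort_scan_node(): a Named Component node matches
 * when its device_name equals the device's ACPI full path name, a PCI
 * Root Complex node matches when its segment number equals the PCI
 * domain of the device's bus.
 */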
260 static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
261 					    void *context)
262 {
263 	struct device *dev = context;
264 	acpi_status status = AE_NOT_FOUND;
265 
266 	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
267 		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
268 		struct acpi_device *adev;
269 		struct acpi_iort_named_component *ncomp;
270 		struct device *nc_dev = dev;
271 
272 		/*
273 		 * Walk the device tree to find a device with an
274 		 * ACPI companion; there is no point in scanning
275 		 * IORT for a device matching a named component if
276 		 * the device does not have an ACPI companion to
277 		 * start with.
278 		 */
279 		do {
280 			adev = ACPI_COMPANION(nc_dev);
281 			if (adev)
282 				break;
283 
284 			nc_dev = nc_dev->parent;
285 		} while (nc_dev);
286 
287 		if (!adev)
288 			goto out;
289 
290 		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
291 		if (ACPI_FAILURE(status)) {
292 			dev_warn(nc_dev, "Can't get device full path name\n");
293 			goto out;
294 		}
295 
296 		ncomp = (struct acpi_iort_named_component *)node->node_data;
297 		status = !strcmp(ncomp->device_name, buf.pointer) ?
298 							AE_OK : AE_NOT_FOUND;
299 		acpi_os_free(buf.pointer);
300 	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
301 		struct acpi_iort_root_complex *pci_rc;
302 		struct pci_bus *bus;
303 
304 		bus = to_pci_bus(dev);
305 		pci_rc = (struct acpi_iort_root_complex *)node->node_data;
306 
307 		/*
308 		 * It is assumed that PCI segment numbers map one-to-one
309 		 * with root complexes. Each segment number can represent only
310 		 * one root complex.
311 		 */
312 		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
313 							AE_OK : AE_NOT_FOUND;
314 	}
315 out:
316 	return status;
317 }
318 
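/*
 * Translate @rid_in through a single ID mapping entry, writing the
 * result to @rid_out. Returns 0 on success, -ENXIO if the input ID is
 * outside the mapping range, or -EAGAIN if the match falls on the
 * ambiguous last ID of the range (see the off-by-one note below).
 */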
319 static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
320 		       u32 *rid_out, bool check_overlap)
321 {
322 	/* Single mapping does not care for input id */
323 	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
324 		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
325 		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
326 			*rid_out = map->output_base;
327 			return 0;
328 		}
329 
330 		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
331 			map, type);
332 		return -ENXIO;
333 	}
334 
335 	if (rid_in < map->input_base ||
336 	    (rid_in > map->input_base + map->id_count))
337 		return -ENXIO;
338 
339 	if (check_overlap) {
340 		/*
341 		 * We already found a mapping for this input ID at the end of
342 		 * another region. If it coincides with the start of this
343 		 * region, we assume the prior match was due to the off-by-1
344 		 * issue mentioned below, and allow it to be superseded.
345 		 * Otherwise, things are *really* broken, and we just disregard
346 		 * duplicate matches entirely to retain compatibility.
347 		 */
348 		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
349 		       map, rid_in);
350 		if (rid_in != map->input_base)
351 			return -ENXIO;
352 
353 		pr_err(FW_BUG "applying workaround.\n");
354 	}
355 
356 	*rid_out = map->output_base + (rid_in - map->input_base);
357 
358 	/*
359 	 * Due to confusion regarding the meaning of the id_count field (which
360 	 * carries the number of IDs *minus 1*), we may have to disregard this
361 	 * match if it is at the end of the range, and overlaps with the start
362 	 * of another one.
363 	 */
364 	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
365 		return -EAGAIN;
366 	return 0;
367 }
368 
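/*
 * Return the parent node and output ID of the @index-th ID mapping of
 * @node. Only single mappings of NC, root complex, SMMUv3 and PMCG
 * nodes are handled; NULL is returned otherwise.
 */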
369 static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
370 					       u32 *id_out, int index)
371 {
372 	struct acpi_iort_node *parent;
373 	struct acpi_iort_id_mapping *map;
374 
375 	if (!node->mapping_offset || !node->mapping_count ||
376 				     index >= node->mapping_count)
377 		return NULL;
378 
379 	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
380 			   node->mapping_offset + index * sizeof(*map));
381 
382 	/* Firmware bug! */
383 	if (!map->output_reference) {
384 		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
385 		       node, node->type);
386 		return NULL;
387 	}
388 
389 	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
390 			       map->output_reference);
391 
392 	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
393 		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
394 		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
395 		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
396 		    node->type == ACPI_IORT_NODE_PMCG) {
397 			*id_out = map->output_base;
398 			return parent;
399 		}
400 	}
401 
402 	return NULL;
403 }
404 
405 #ifndef ACPI_IORT_SMMU_V3_DEVICEID_VALID
406 #define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1 << 4)
407 #endif
408 
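/*
 * Return the index of the special device ID mapping for SMMUv3 and
 * PMCG nodes, or -EINVAL if the node does not have one.
 */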
409 static int iort_get_id_mapping_index(struct acpi_iort_node *node)
410 {
411 	struct acpi_iort_smmu_v3 *smmu;
412 	struct acpi_iort_pmcg *pmcg;
413 
414 	switch (node->type) {
415 	case ACPI_IORT_NODE_SMMU_V3:
416 		/*
417 		 * The SMMUv3 dev ID mapping index was introduced in the
418 		 * revision 1 table; it is not available in revision 0.
419 		 */
420 		if (node->revision < 1)
421 			return -EINVAL;
422 
423 		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
424 		/*
425 		 * Until IORT E.e (node rev. 5), the ID mapping index was
426 		 * defined to be valid unless all interrupts are GSIV-based.
427 		 */
428 		if (node->revision < 5) {
429 			if (smmu->event_gsiv && smmu->pri_gsiv &&
430 			    smmu->gerr_gsiv && smmu->sync_gsiv)
431 				return -EINVAL;
432 		} else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) {
433 			return -EINVAL;
434 		}
435 
436 		if (smmu->id_mapping_index >= node->mapping_count) {
437 			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
438 			       node, node->type);
439 			return -EINVAL;
440 		}
441 
442 		return smmu->id_mapping_index;
443 	case ACPI_IORT_NODE_PMCG:
444 		pmcg = (struct acpi_iort_pmcg *)node->node_data;
445 		if (pmcg->overflow_gsiv || node->mapping_count == 0)
446 			return -EINVAL;
447 
448 		return 0;
449 	default:
450 		return -EINVAL;
451 	}
452 }
453 
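/*
 * Walk the ID mapping tree starting at @node, translating @id_in at
 * each step, until a node whose type is in @type_mask is found. On
 * success return that node and, optionally, the translated ID; on
 * failure return NULL and leave *id_out set to @id_in.
 */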
454 static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
455 					       u32 id_in, u32 *id_out,
456 					       u8 type_mask)
457 {
458 	u32 id = id_in;
459 
460 	/* Parse the ID mapping tree to find specified node type */
461 	while (node) {
462 		struct acpi_iort_id_mapping *map;
463 		int i, index, rc = 0;
464 		u32 out_ref = 0, map_id = id;
465 
466 		if (IORT_TYPE_MASK(node->type) & type_mask) {
467 			if (id_out)
468 				*id_out = id;
469 			return node;
470 		}
471 
472 		if (!node->mapping_offset || !node->mapping_count)
473 			goto fail_map;
474 
475 		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
476 				   node->mapping_offset);
477 
478 		/* Firmware bug! */
479 		if (!map->output_reference) {
480 			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
481 			       node, node->type);
482 			goto fail_map;
483 		}
484 
485 		/*
486 		 * Get the special ID mapping index (if any) and skip its
487 		 * associated ID map to prevent erroneous multi-stage
488 		 * IORT ID translations.
489 		 */
490 		index = iort_get_id_mapping_index(node);
491 
492 		/* Do the ID translation */
493 		for (i = 0; i < node->mapping_count; i++, map++) {
494 			/* if it is special mapping index, skip it */
495 			if (i == index)
496 				continue;
497 
498 			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
499 			if (!rc)
500 				break;
501 			if (rc == -EAGAIN)
502 				out_ref = map->output_reference;
503 		}
504 
505 		if (i == node->mapping_count && !out_ref)
506 			goto fail_map;
507 
508 		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
509 				    rc ? out_ref : map->output_reference);
510 	}
511 
512 fail_map:
513 	/* Map input ID to output ID unchanged on mapping failure */
514 	if (id_out)
515 		*id_out = id_in;
516 
517 	return NULL;
518 }
519 
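/*
 * Map a platform device ID: fetch the initial dev ID from the
 * @index-th mapping of @node and, if needed, keep translating it until
 * a parent of the requested type is reached (e.g. NC -> SMMU -> ITS).
 */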
520 static struct acpi_iort_node *iort_node_map_platform_id(
521 		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
522 		int index)
523 {
524 	struct acpi_iort_node *parent;
525 	u32 id;
526 
527 	/* step 1: retrieve the initial dev id */
528 	parent = iort_node_get_id(node, &id, index);
529 	if (!parent)
530 		return NULL;
531 
532 	/*
533 	 * Optional step 2: if the parent of the initial dev id is not of
534 	 * the target type we want, map the id again, to cover use cases
535 	 * such as NC (named component) -> SMMU -> ITS. If the type matches,
536 	 * return the initial dev id and its parent pointer directly.
537 	 */
538 	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
539 		parent = iort_node_map_id(parent, id, id_out, type_mask);
540 	else
541 		if (id_out)
542 			*id_out = id;
543 
544 	return parent;
545 }
546 
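/*
 * Find the IORT node describing @dev: a Root Complex node for PCI
 * devices, otherwise either a cached IORT platform device node or a
 * Named Component node.
 */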
547 static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
548 {
549 	struct pci_bus *pbus;
550 
551 	if (!dev_is_pci(dev)) {
552 		struct acpi_iort_node *node;
553 		/*
554 		 * Scan iort_fwnode_list to see if it's an IORT platform
555 		 * device (such as an SMMU or PMCG); its IORT node was already
556 		 * cached and associated with the fwnode when IORT platform
557 		 * devices were initialized.
558 		 */
559 		node = iort_get_iort_node(dev->fwnode);
560 		if (node)
561 			return node;
562 		/*
563 		 * If not, then it should be a platform device defined in
564 		 * DSDT/SSDT (with a Named Component node in IORT).
565 		 */
566 		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
567 				      iort_match_node_callback, dev);
568 	}
569 
570 	pbus = to_pci_dev(dev)->bus;
571 
572 	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
573 			      iort_match_node_callback, &pbus->dev);
574 }
575 
576 /**
577  * iort_msi_map_id() - Map a MSI input ID for a device
578  * @dev: The device for which the mapping is to be done.
579  * @input_id: The device input ID.
580  *
581  * Returns: mapped MSI ID on success, input ID otherwise
582  */
583 u32 iort_msi_map_id(struct device *dev, u32 input_id)
584 {
585 	struct acpi_iort_node *node;
586 	u32 dev_id;
587 
588 	node = iort_find_dev_node(dev);
589 	if (!node)
590 		return input_id;
591 
592 	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
593 	return dev_id;
594 }
595 
596 /**
597  * iort_pmsi_get_dev_id() - Get the device id for a device
598  * @dev: The device for which the mapping is to be done.
599  * @dev_id: The device ID found.
600  *
601  * Returns: 0 if a dev id was successfully found, -ENODEV on error
602  */
603 int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
604 {
605 	int i, index;
606 	struct acpi_iort_node *node;
607 
608 	node = iort_find_dev_node(dev);
609 	if (!node)
610 		return -ENODEV;
611 
612 	index = iort_get_id_mapping_index(node);
613 	/* if there is a valid index, go get the dev_id directly */
614 	if (index >= 0) {
615 		if (iort_node_get_id(node, dev_id, index))
616 			return 0;
617 	} else {
618 		for (i = 0; i < node->mapping_count; i++) {
619 			if (iort_node_map_platform_id(node, dev_id,
620 						      IORT_MSI_TYPE, i))
621 				return 0;
622 		}
623 	}
624 
625 	return -ENODEV;
626 }
627 
628 static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
629 {
630 	struct iort_its_msi_chip *its_msi_chip;
631 	int ret = -ENODEV;
632 
633 	spin_lock(&iort_msi_chip_lock);
634 	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
635 		if (its_msi_chip->translation_id == its_id) {
636 			*base = its_msi_chip->base_addr;
637 			ret = 0;
638 			break;
639 		}
640 	}
641 	spin_unlock(&iort_msi_chip_lock);
642 
643 	return ret;
644 }
645 
646 /**
647  * iort_dev_find_its_id() - Find the ITS identifier for a device
648  * @dev: The device.
649  * @id: Device's ID
650  * @idx: Index of the ITS identifier list.
651  * @its_id: ITS identifier.
652  *
653  * Returns: 0 on success, appropriate error value otherwise
654  */
655 static int iort_dev_find_its_id(struct device *dev, u32 id,
656 				unsigned int idx, int *its_id)
657 {
658 	struct acpi_iort_its_group *its;
659 	struct acpi_iort_node *node;
660 
661 	node = iort_find_dev_node(dev);
662 	if (!node)
663 		return -ENXIO;
664 
665 	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
666 	if (!node)
667 		return -ENXIO;
668 
669 	/* Move to ITS specific data */
670 	its = (struct acpi_iort_its_group *)node->node_data;
671 	if (idx >= its->its_count) {
672 		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
673 			idx, its->its_count);
674 		return -ENXIO;
675 	}
676 
677 	*its_id = its->identifiers[idx];
678 	return 0;
679 }
680 
681 /**
682  * iort_get_device_domain() - Find MSI domain related to a device
683  * @dev: The device.
684  * @id: Requester ID for the device.
685  * @bus_token: irq domain bus token.
686  *
687  * Returns: the MSI domain for this device, NULL otherwise
688  */
689 struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
690 					  enum irq_domain_bus_token bus_token)
691 {
692 	struct fwnode_handle *handle;
693 	int its_id;
694 
695 	if (iort_dev_find_its_id(dev, id, 0, &its_id))
696 		return NULL;
697 
698 	handle = iort_find_domain_token(its_id);
699 	if (!handle)
700 		return NULL;
701 
702 	return irq_find_matching_fwnode(handle, bus_token);
703 }
704 
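/*
 * For nodes with a special device ID mapping index (SMMUv3, PMCG),
 * resolve the ITS group referenced by that mapping and set the
 * device's MSI domain accordingly.
 */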
705 static void iort_set_device_domain(struct device *dev,
706 				   struct acpi_iort_node *node)
707 {
708 	struct acpi_iort_its_group *its;
709 	struct acpi_iort_node *msi_parent;
710 	struct acpi_iort_id_mapping *map;
711 	struct fwnode_handle *iort_fwnode;
712 	struct irq_domain *domain;
713 	int index;
714 
715 	index = iort_get_id_mapping_index(node);
716 	if (index < 0)
717 		return;
718 
719 	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
720 			   node->mapping_offset + index * sizeof(*map));
721 
722 	/* Firmware bug! */
723 	if (!map->output_reference ||
724 	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
725 		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
726 		       node, node->type);
727 		return;
728 	}
729 
730 	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
731 				  map->output_reference);
732 
733 	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
734 		return;
735 
736 	/* Move to ITS specific data */
737 	its = (struct acpi_iort_its_group *)msi_parent->node_data;
738 
739 	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
740 	if (!iort_fwnode)
741 		return;
742 
743 	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
744 	if (domain)
745 		dev_set_msi_domain(dev, domain);
746 }
747 
748 /**
749  * iort_get_platform_device_domain() - Find MSI domain related to a
750  * platform device
751  * @dev: the dev pointer associated with the platform device
752  *
753  * Returns: the MSI domain for this device, NULL otherwise
754  */
755 static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
756 {
757 	struct acpi_iort_node *node, *msi_parent = NULL;
758 	struct fwnode_handle *iort_fwnode;
759 	struct acpi_iort_its_group *its;
760 	int i;
761 
762 	/* find its associated iort node */
763 	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
764 			      iort_match_node_callback, dev);
765 	if (!node)
766 		return NULL;
767 
768 	/* then find its msi parent node */
769 	for (i = 0; i < node->mapping_count; i++) {
770 		msi_parent = iort_node_map_platform_id(node, NULL,
771 						       IORT_MSI_TYPE, i);
772 		if (msi_parent)
773 			break;
774 	}
775 
776 	if (!msi_parent)
777 		return NULL;
778 
779 	/* Move to ITS specific data */
780 	its = (struct acpi_iort_its_group *)msi_parent->node_data;
781 
782 	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
783 	if (!iort_fwnode)
784 		return NULL;
785 
786 	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
787 }
788 
789 void acpi_configure_pmsi_domain(struct device *dev)
790 {
791 	struct irq_domain *msi_domain;
792 
793 	msi_domain = iort_get_platform_device_domain(dev);
794 	if (msi_domain)
795 		dev_set_msi_domain(dev, msi_domain);
796 }
797 
798 #ifdef CONFIG_IOMMU_API
799 static void iort_rmr_free(struct device *dev,
800 			  struct iommu_resv_region *region)
801 {
802 	struct iommu_iort_rmr_data *rmr_data;
803 
804 	rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
805 	kfree(rmr_data->sids);
806 	kfree(rmr_data);
807 }
808 
809 static struct iommu_iort_rmr_data *iort_rmr_alloc(
810 					struct acpi_iort_rmr_desc *rmr_desc,
811 					int prot, enum iommu_resv_type type,
812 					u32 *sids, u32 num_sids)
813 {
814 	struct iommu_iort_rmr_data *rmr_data;
815 	struct iommu_resv_region *region;
816 	u32 *sids_copy;
817 	u64 addr = rmr_desc->base_address, size = rmr_desc->length;
818 
819 	rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
820 	if (!rmr_data)
821 		return NULL;
822 
823 	/* Create a copy of SIDs array to associate with this rmr_data */
824 	sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
825 	if (!sids_copy) {
826 		kfree(rmr_data);
827 		return NULL;
828 	}
829 	rmr_data->sids = sids_copy;
830 	rmr_data->num_sids = num_sids;
831 
832 	if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
833 		/* PAGE align base addr and size */
834 		addr &= PAGE_MASK;
835 		size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));
836 
837 		pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
838 		       rmr_desc->base_address,
839 		       rmr_desc->base_address + rmr_desc->length - 1,
840 		       addr, addr + size - 1);
841 	}
842 
843 	region = &rmr_data->rr;
844 	INIT_LIST_HEAD(&region->list);
845 	region->start = addr;
846 	region->length = size;
847 	region->prot = prot;
848 	region->type = type;
849 	region->free = iort_rmr_free;
850 
851 	return rmr_data;
852 }
853 
854 static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
855 					u32 count)
856 {
857 	int i, j;
858 
859 	for (i = 0; i < count; i++) {
860 		u64 end, start = desc[i].base_address, length = desc[i].length;
861 
862 		if (!length) {
863 			pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
864 			       start);
865 			continue;
866 		}
867 
868 		end = start + length - 1;
869 
870 		/* Check for address overlap */
871 		for (j = i + 1; j < count; j++) {
872 			u64 e_start = desc[j].base_address;
873 			u64 e_end = e_start + desc[j].length - 1;
874 
875 			if (start <= e_end && end >= e_start)
876 				pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
877 				       start, end);
878 		}
879 	}
880 }
881 
882 /*
883  * Please note, we will keep the already allocated RMR reserve
884  * regions in case of a memory allocation failure.
885  */
886 static void iort_get_rmrs(struct acpi_iort_node *node,
887 			  struct acpi_iort_node *smmu,
888 			  u32 *sids, u32 num_sids,
889 			  struct list_head *head)
890 {
891 	struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
892 	struct acpi_iort_rmr_desc *rmr_desc;
893 	int i;
894 
895 	rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
896 				rmr->rmr_offset);
897 
898 	iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);
899 
900 	for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
901 		struct iommu_iort_rmr_data *rmr_data;
902 		enum iommu_resv_type type;
903 		int prot = IOMMU_READ | IOMMU_WRITE;
904 
905 		if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
906 			type = IOMMU_RESV_DIRECT_RELAXABLE;
907 		else
908 			type = IOMMU_RESV_DIRECT;
909 
910 		if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
911 			prot |= IOMMU_PRIV;
912 
913 		/* Attributes 0x00 - 0x03 represent device memory */
914 		if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
915 				ACPI_IORT_RMR_ATTR_DEVICE_GRE)
916 			prot |= IOMMU_MMIO;
917 		else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
918 				ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
919 			prot |= IOMMU_CACHE;
920 
921 		rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
922 					  sids, num_sids);
923 		if (!rmr_data)
924 			return;
925 
926 		list_add_tail(&rmr_data->rr.list, head);
927 	}
928 }
929 
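/*
 * Grow the @sids array by @new_count entries, filling the new slots
 * with consecutive StreamIDs starting at @id_start. Returns the
 * reallocated array or NULL on allocation failure.
 */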
930 static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
931 				u32 new_count)
932 {
933 	u32 *new_sids;
934 	u32 total_count = count + new_count;
935 	int i;
936 
937 	new_sids = krealloc_array(sids, count + new_count,
938 				  sizeof(*new_sids), GFP_KERNEL);
939 	if (!new_sids)
940 		return NULL;
941 
942 	for (i = count; i < total_count; i++)
943 		new_sids[i] = id_start++;
944 
945 	return new_sids;
946 }
947 
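/*
 * Check whether one of the device's fwspec StreamIDs falls within the
 * [id_start, id_start + id_count] range of an RMR ID mapping. For PCI
 * devices this also requires the boot firmware configuration to have
 * been preserved.
 */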
948 static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
949 			     u32 id_count)
950 {
951 	int i;
952 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
953 
954 	/*
955 	 * Make sure the kernel has preserved the boot firmware PCIe
956 	 * configuration. This is required to ensure that the RMR PCIe
957 	 * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
958 	 */
959 	if (dev_is_pci(dev)) {
960 		struct pci_dev *pdev = to_pci_dev(dev);
961 		struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
962 
963 		if (!host->preserve_config)
964 			return false;
965 	}
966 
967 	for (i = 0; i < fwspec->num_ids; i++) {
968 		if (fwspec->ids[i] >= id_start &&
969 		    fwspec->ids[i] <= id_start + id_count)
970 			return true;
971 	}
972 
973 	return false;
974 }
975 
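/*
 * For an RMR node, collect the StreamIDs of all ID mappings that
 * target @iommu (and, if @dev is set, match one of its StreamIDs),
 * then build the corresponding reserved regions and add them to @head.
 */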
976 static void iort_node_get_rmr_info(struct acpi_iort_node *node,
977 				   struct acpi_iort_node *iommu,
978 				   struct device *dev, struct list_head *head)
979 {
980 	struct acpi_iort_node *smmu = NULL;
981 	struct acpi_iort_rmr *rmr;
982 	struct acpi_iort_id_mapping *map;
983 	u32 *sids = NULL;
984 	u32 num_sids = 0;
985 	int i;
986 
987 	if (!node->mapping_offset || !node->mapping_count) {
988 		pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
989 		       node);
990 		return;
991 	}
992 
993 	rmr = (struct acpi_iort_rmr *)node->node_data;
994 	if (!rmr->rmr_offset || !rmr->rmr_count)
995 		return;
996 
997 	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
998 			   node->mapping_offset);
999 
1000 	/*
1001 	 * Go through the ID mappings and see if we have a match for SMMU
1002 	 * and dev(if !NULL). If found, get the sids for the Node.
1003 	 * Please note, id_count is equal to the number of IDs in the
1004 	 * range minus one.
1005 	 */
1006 	for (i = 0; i < node->mapping_count; i++, map++) {
1007 		struct acpi_iort_node *parent;
1008 
1009 		if (!map->id_count)
1010 			continue;
1011 
1012 		parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
1013 				      map->output_reference);
1014 		if (parent != iommu)
1015 			continue;
1016 
1017 		/* If dev is valid, check whether the RMR node matches a dev SID */
1018 		if (dev && !iort_rmr_has_dev(dev, map->output_base,
1019 					     map->id_count))
1020 			continue;
1021 
1022 		/* Retrieve SIDs associated with the Node. */
1023 		sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
1024 					   map->id_count + 1);
1025 		if (!sids)
1026 			return;
1027 
1028 		num_sids += map->id_count + 1;
1029 	}
1030 
1031 	if (!sids)
1032 		return;
1033 
1034 	iort_get_rmrs(node, smmu, sids, num_sids, head);
1035 	kfree(sids);
1036 }
1037 
1038 static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
1039 			   struct list_head *head)
1040 {
1041 	struct acpi_table_iort *iort;
1042 	struct acpi_iort_node *iort_node, *iort_end;
1043 	int i;
1044 
1045 	/* Only supports ARM DEN 0049E.d onwards */
1046 	if (iort_table->revision < 5)
1047 		return;
1048 
1049 	iort = (struct acpi_table_iort *)iort_table;
1050 
1051 	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1052 				 iort->node_offset);
1053 	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1054 				iort_table->length);
1055 
1056 	for (i = 0; i < iort->node_count; i++) {
1057 		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
1058 			       "IORT node pointer overflows, bad table!\n"))
1059 			return;
1060 
1061 		if (iort_node->type == ACPI_IORT_NODE_RMR)
1062 			iort_node_get_rmr_info(iort_node, iommu, dev, head);
1063 
1064 		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
1065 					 iort_node->length);
1066 	}
1067 }
1068 
1069 /*
1070  * Populate the RMR list associated with a given IOMMU and dev(if provided).
1071  * If dev is NULL, the function populates all the RMRs associated with the
1072  * given IOMMU.
1073  */
1074 static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
1075 					    struct device *dev,
1076 					    struct list_head *head)
1077 {
1078 	struct acpi_iort_node *iommu;
1079 
1080 	iommu = iort_get_iort_node(iommu_fwnode);
1081 	if (!iommu)
1082 		return;
1083 
1084 	iort_find_rmrs(iommu, dev, head);
1085 }
1086 
1087 static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
1088 {
1089 	struct acpi_iort_node *iommu;
1090 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1091 
1092 	iommu = iort_get_iort_node(fwspec->iommu_fwnode);
1093 
1094 	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
1095 		struct acpi_iort_smmu_v3 *smmu;
1096 
1097 		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
1098 		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
1099 			return iommu;
1100 	}
1101 
1102 	return NULL;
1103 }
1104 
1105 /*
1106  * Retrieve platform specific HW MSI reserve regions.
1107  * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
1108  * associated with the device are the HW MSI reserved regions.
1109  */
1110 static void iort_iommu_msi_get_resv_regions(struct device *dev,
1111 					    struct list_head *head)
1112 {
1113 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1114 	struct acpi_iort_its_group *its;
1115 	struct acpi_iort_node *iommu_node, *its_node = NULL;
1116 	int i;
1117 
1118 	iommu_node = iort_get_msi_resv_iommu(dev);
1119 	if (!iommu_node)
1120 		return;
1121 
1122 	/*
1123 	 * Current logic to reserve ITS regions relies on HW topologies
1124 	 * where a given PCI or named component maps its IDs to only one
1125 	 * ITS group; if a PCI or named component can map its IDs to
1126 	 * different ITS groups through IORT mappings this function has
1127 	 * to be reworked to ensure we reserve regions for all ITS groups
1128 	 * a given PCI or named component may map IDs to.
1129 	 */
1130 
1131 	for (i = 0; i < fwspec->num_ids; i++) {
1132 		its_node = iort_node_map_id(iommu_node,
1133 					fwspec->ids[i],
1134 					NULL, IORT_MSI_TYPE);
1135 		if (its_node)
1136 			break;
1137 	}
1138 
1139 	if (!its_node)
1140 		return;
1141 
1142 	/* Move to ITS specific data */
1143 	its = (struct acpi_iort_its_group *)its_node->node_data;
1144 
1145 	for (i = 0; i < its->its_count; i++) {
1146 		phys_addr_t base;
1147 
1148 		if (!iort_find_its_base(its->identifiers[i], &base)) {
1149 			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1150 			struct iommu_resv_region *region;
1151 
1152 			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
1153 							 prot, IOMMU_RESV_MSI,
1154 							 GFP_KERNEL);
1155 			if (region)
1156 				list_add_tail(&region->list, head);
1157 		}
1158 	}
1159 }
1160 
1161 /**
1162  * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
1163  * @dev: Device from iommu_get_resv_regions()
1164  * @head: Reserved region list from iommu_get_resv_regions()
1165  */
1166 void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
1167 {
1168 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1169 
1170 	iort_iommu_msi_get_resv_regions(dev, head);
1171 	iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
1172 }
1173 
1174 /**
1175  * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
1176  *                     associated StreamIDs information.
1177  * @iommu_fwnode: fwnode associated with IOMMU
1178  * @head: Reserved region list
1179  */
1180 void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
1181 		       struct list_head *head)
1182 {
1183 	iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
1184 }
1185 EXPORT_SYMBOL_GPL(iort_get_rmr_sids);
1186 
1187 /**
1188  * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
1189  * @iommu_fwnode: fwnode associated with IOMMU
1190  * @head: Reserved region list
1191  */
1192 void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
1193 		       struct list_head *head)
1194 {
1195 	struct iommu_resv_region *entry, *next;
1196 
1197 	list_for_each_entry_safe(entry, next, head, list)
1198 		entry->free(NULL, entry);
1199 }
1200 EXPORT_SYMBOL_GPL(iort_put_rmr_sids);
1201 
1202 static inline bool iort_iommu_driver_enabled(u8 type)
1203 {
1204 	switch (type) {
1205 	case ACPI_IORT_NODE_SMMU_V3:
1206 		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
1207 	case ACPI_IORT_NODE_SMMU:
1208 		return IS_ENABLED(CONFIG_ARM_SMMU);
1209 	default:
1210 		pr_warn("IORT node type %u does not describe an SMMU\n", type);
1211 		return false;
1212 	}
1213 }
1214 
1215 static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
1216 {
1217 	struct acpi_iort_root_complex *pci_rc;
1218 
1219 	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
1220 	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
1221 }
1222 
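/*
 * Resolve the fwnode of the IOMMU described by @node and initialize
 * the device's IOMMU fwspec with @streamid. Returns -EPROBE_DEFER if
 * the relevant SMMU driver is enabled but not yet probed.
 */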
1223 static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
1224 			    u32 streamid)
1225 {
1226 	const struct iommu_ops *ops;
1227 	struct fwnode_handle *iort_fwnode;
1228 
1229 	if (!node)
1230 		return -ENODEV;
1231 
1232 	iort_fwnode = iort_get_fwnode(node);
1233 	if (!iort_fwnode)
1234 		return -ENODEV;
1235 
1236 	/*
1237 	 * If the ops look-up fails, this means that either
1238 	 * the SMMU drivers have not been probed yet or that
1239 	 * the SMMU drivers are not built into the kernel.
1240 	 * Depending on whether the SMMU drivers are built into
1241 	 * the kernel or not, defer the IOMMU configuration
1242 	 * or just abort it.
1243 	 */
1244 	ops = iommu_ops_from_fwnode(iort_fwnode);
1245 	if (!ops)
1246 		return iort_iommu_driver_enabled(node->type) ?
1247 		       -EPROBE_DEFER : -ENODEV;
1248 
1249 	return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops);
1250 }
1251 
1252 struct iort_pci_alias_info {
1253 	struct device *dev;
1254 	struct acpi_iort_node *node;
1255 };
1256 
1257 static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
1258 {
1259 	struct iort_pci_alias_info *info = data;
1260 	struct acpi_iort_node *parent;
1261 	u32 streamid;
1262 
1263 	parent = iort_node_map_id(info->node, alias, &streamid,
1264 				  IORT_IOMMU_TYPE);
1265 	return iort_iommu_xlate(info->dev, parent, streamid);
1266 }
1267 
1268 static void iort_named_component_init(struct device *dev,
1269 				      struct acpi_iort_node *node)
1270 {
1271 	struct property_entry props[3] = {};
1272 	struct acpi_iort_named_component *nc;
1273 
1274 	nc = (struct acpi_iort_named_component *)node->node_data;
1275 	props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
1276 				      FIELD_GET(ACPI_IORT_NC_PASID_BITS,
1277 						nc->node_flags));
1278 	if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
1279 		props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");
1280 
1281 	if (device_create_managed_software_node(dev, props, NULL))
1282 		dev_warn(dev, "Could not add device properties\n");
1283 }
1284 
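/*
 * Map every platform ID of a Named Component node to its IOMMU parent
 * and set up the corresponding fwspec entry for @dev.
 */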
1285 static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
1286 {
1287 	struct acpi_iort_node *parent;
1288 	int err = -ENODEV, i = 0;
1289 	u32 streamid = 0;
1290 
1291 	do {
1292 
1293 		parent = iort_node_map_platform_id(node, &streamid,
1294 						   IORT_IOMMU_TYPE,
1295 						   i++);
1296 
1297 		if (parent)
1298 			err = iort_iommu_xlate(dev, parent, streamid);
1299 	} while (parent && !err);
1300 
1301 	return err;
1302 }
1303 
1304 static int iort_nc_iommu_map_id(struct device *dev,
1305 				struct acpi_iort_node *node,
1306 				const u32 *in_id)
1307 {
1308 	struct acpi_iort_node *parent;
1309 	u32 streamid;
1310 
1311 	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
1312 	if (parent)
1313 		return iort_iommu_xlate(dev, parent, streamid);
1314 
1315 	return -ENODEV;
1316 }
1317 
1318 
1319 /**
1320  * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
1321  *
1322  * @dev: device to configure
1323  * @id_in: optional input id const value pointer
1324  *
1325  * Returns: 0 on success, <0 on failure
1326  */
1327 int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
1328 {
1329 	struct acpi_iort_node *node;
1330 	int err = -ENODEV;
1331 
1332 	if (dev_is_pci(dev)) {
1333 		struct iommu_fwspec *fwspec;
1334 		struct pci_bus *bus = to_pci_dev(dev)->bus;
1335 		struct iort_pci_alias_info info = { .dev = dev };
1336 
1337 		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
1338 				      iort_match_node_callback, &bus->dev);
1339 		if (!node)
1340 			return -ENODEV;
1341 
1342 		info.node = node;
1343 		err = pci_for_each_dma_alias(to_pci_dev(dev),
1344 					     iort_pci_iommu_init, &info);
1345 
1346 		fwspec = dev_iommu_fwspec_get(dev);
1347 		if (fwspec && iort_pci_rc_supports_ats(node))
1348 			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
1349 	} else {
1350 		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
1351 				      iort_match_node_callback, dev);
1352 		if (!node)
1353 			return -ENODEV;
1354 
1355 		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
1356 			      iort_nc_iommu_map(dev, node);
1357 
1358 		if (!err)
1359 			iort_named_component_init(dev, node);
1360 	}
1361 
1362 	return err;
1363 }
1364 
1365 #else
1366 void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
1367 { }
1368 int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
1369 { return -ENODEV; }
1370 #endif
1371 
1372 static int nc_dma_get_range(struct device *dev, u64 *size)
1373 {
1374 	struct acpi_iort_node *node;
1375 	struct acpi_iort_named_component *ncomp;
1376 
1377 	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
1378 			      iort_match_node_callback, dev);
1379 	if (!node)
1380 		return -ENODEV;
1381 
1382 	ncomp = (struct acpi_iort_named_component *)node->node_data;
1383 
1384 	if (!ncomp->memory_address_limit) {
1385 		pr_warn(FW_BUG "Named component missing memory address limit\n");
1386 		return -EINVAL;
1387 	}
1388 
1389 	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
1390 			1ULL<<ncomp->memory_address_limit;
1391 
1392 	return 0;
1393 }
1394 
1395 static int rc_dma_get_range(struct device *dev, u64 *size)
1396 {
1397 	struct acpi_iort_node *node;
1398 	struct acpi_iort_root_complex *rc;
1399 	struct pci_bus *pbus = to_pci_dev(dev)->bus;
1400 
1401 	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
1402 			      iort_match_node_callback, &pbus->dev);
1403 	if (!node || node->revision < 1)
1404 		return -ENODEV;
1405 
1406 	rc = (struct acpi_iort_root_complex *)node->node_data;
1407 
1408 	if (!rc->memory_address_limit) {
1409 		pr_warn(FW_BUG "Root complex missing memory address limit\n");
1410 		return -EINVAL;
1411 	}
1412 
1413 	*size = rc->memory_address_limit >= 64 ? U64_MAX :
1414 			1ULL<<rc->memory_address_limit;
1415 
1416 	return 0;
1417 }
1418 
1419 /**
1420  * iort_dma_get_ranges() - Look up DMA addressing limit for the device
1421  * @dev: device to lookup
1422  * @size: DMA range size result pointer
1423  *
1424  * Return: 0 on success, an error otherwise.
1425  */
1426 int iort_dma_get_ranges(struct device *dev, u64 *size)
1427 {
1428 	if (dev_is_pci(dev))
1429 		return rc_dma_get_range(dev, size);
1430 	else
1431 		return nc_dma_get_range(dev, size);
1432 }
1433 
1434 static void __init acpi_iort_register_irq(int hwirq, const char *name,
1435 					  int trigger,
1436 					  struct resource *res)
1437 {
1438 	int irq = acpi_register_gsi(NULL, hwirq, trigger,
1439 				    ACPI_ACTIVE_HIGH);
1440 
1441 	if (irq <= 0) {
1442 		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
1443 								      name);
1444 		return;
1445 	}
1446 
1447 	res->start = irq;
1448 	res->end = irq;
1449 	res->flags = IORESOURCE_IRQ;
1450 	res->name = name;
1451 }
1452 
1453 static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
1454 {
1455 	struct acpi_iort_smmu_v3 *smmu;
1456 	/* Always present mem resource */
1457 	int num_res = 1;
1458 
1459 	/* Retrieve SMMUv3 specific data */
1460 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1461 
1462 	if (smmu->event_gsiv)
1463 		num_res++;
1464 
1465 	if (smmu->pri_gsiv)
1466 		num_res++;
1467 
1468 	if (smmu->gerr_gsiv)
1469 		num_res++;
1470 
1471 	if (smmu->sync_gsiv)
1472 		num_res++;
1473 
1474 	return num_res;
1475 }
1476 
1477 static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
1478 {
1479 	/*
1480 	 * The Cavium ThunderX2 implementation doesn't support unique irq
1481 	 * lines. Use a single irq line for all the SMMUv3 interrupts.
1482 	 */
1483 	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
1484 		return false;
1485 
1486 	/*
1487 	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
1488 	 * SPI numbers here.
1489 	 */
1490 	return smmu->event_gsiv == smmu->pri_gsiv &&
1491 	       smmu->event_gsiv == smmu->gerr_gsiv &&
1492 	       smmu->event_gsiv == smmu->sync_gsiv;
1493 }
1494 
1495 static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
1496 {
1497 	/*
1498 	 * Override the size for the Cavium ThunderX2 implementation,
1499 	 * which doesn't support the page 1 SMMU register space.
1500 	 */
1501 	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
1502 		return SZ_64K;
1503 
1504 	return SZ_128K;
1505 }
1506 
1507 static void __init arm_smmu_v3_init_resources(struct resource *res,
1508 					      struct acpi_iort_node *node)
1509 {
1510 	struct acpi_iort_smmu_v3 *smmu;
1511 	int num_res = 0;
1512 
1513 	/* Retrieve SMMUv3 specific data */
1514 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1515 
1516 	res[num_res].start = smmu->base_address;
1517 	res[num_res].end = smmu->base_address +
1518 				arm_smmu_v3_resource_size(smmu) - 1;
1519 	res[num_res].flags = IORESOURCE_MEM;
1520 
1521 	num_res++;
1522 	if (arm_smmu_v3_is_combined_irq(smmu)) {
1523 		if (smmu->event_gsiv)
1524 			acpi_iort_register_irq(smmu->event_gsiv, "combined",
1525 					       ACPI_EDGE_SENSITIVE,
1526 					       &res[num_res++]);
1527 	} else {
1528 
1529 		if (smmu->event_gsiv)
1530 			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
1531 					       ACPI_EDGE_SENSITIVE,
1532 					       &res[num_res++]);
1533 
1534 		if (smmu->pri_gsiv)
1535 			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
1536 					       ACPI_EDGE_SENSITIVE,
1537 					       &res[num_res++]);
1538 
1539 		if (smmu->gerr_gsiv)
1540 			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
1541 					       ACPI_EDGE_SENSITIVE,
1542 					       &res[num_res++]);
1543 
1544 		if (smmu->sync_gsiv)
1545 			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
1546 					       ACPI_EDGE_SENSITIVE,
1547 					       &res[num_res++]);
1548 	}
1549 }
1550 
1551 static void __init arm_smmu_v3_dma_configure(struct device *dev,
1552 					     struct acpi_iort_node *node)
1553 {
1554 	struct acpi_iort_smmu_v3 *smmu;
1555 	enum dev_dma_attr attr;
1556 
1557 	/* Retrieve SMMUv3 specific data */
1558 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1559 
1560 	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
1561 			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1562 
1563 	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
1564 	dev->dma_mask = &dev->coherent_dma_mask;
1565 
1566 	/* Configure DMA for the page table walker */
1567 	acpi_dma_configure(dev, attr);
1568 }
1569 
1570 #if defined(CONFIG_ACPI_NUMA)
1571 /*
1572  * set numa proximity domain for smmuv3 device
1573  */
1574 static int __init arm_smmu_v3_set_proximity(struct device *dev,
1575 					      struct acpi_iort_node *node)
1576 {
1577 	struct acpi_iort_smmu_v3 *smmu;
1578 
1579 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1580 	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
1581 		int dev_node = pxm_to_node(smmu->pxm);
1582 
1583 		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
1584 			return -EINVAL;
1585 
1586 		set_dev_node(dev, dev_node);
1587 		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
1588 			smmu->base_address,
1589 			smmu->pxm);
1590 	}
1591 	return 0;
1592 }
1593 #else
1594 #define arm_smmu_v3_set_proximity NULL
1595 #endif
1596 
1597 static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
1598 {
1599 	struct acpi_iort_smmu *smmu;
1600 
1601 	/* Retrieve SMMU specific data */
1602 	smmu = (struct acpi_iort_smmu *)node->node_data;
1603 
1604 	/*
1605 	 * Only consider the global fault interrupt and ignore the
1606 	 * configuration access interrupt.
1607 	 *
1608 	 * MMIO address and global fault interrupt resources are always
1609 	 * present so add them to the context interrupt count as a static
1610 	 * value.
1611 	 */
1612 	return smmu->context_interrupt_count + 2;
1613 }
1614 
1615 static void __init arm_smmu_init_resources(struct resource *res,
1616 					   struct acpi_iort_node *node)
1617 {
1618 	struct acpi_iort_smmu *smmu;
1619 	int i, hw_irq, trigger, num_res = 0;
1620 	u64 *ctx_irq, *glb_irq;
1621 
1622 	/* Retrieve SMMU specific data */
1623 	smmu = (struct acpi_iort_smmu *)node->node_data;
1624 
1625 	res[num_res].start = smmu->base_address;
1626 	res[num_res].end = smmu->base_address + smmu->span - 1;
1627 	res[num_res].flags = IORESOURCE_MEM;
1628 	num_res++;
1629 
1630 	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
1631 	/* Global IRQs */
1632 	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
1633 	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
1634 
1635 	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
1636 				     &res[num_res++]);
1637 
1638 	/* Context IRQs */
1639 	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
1640 	for (i = 0; i < smmu->context_interrupt_count; i++) {
1641 		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
1642 		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
1643 
1644 		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
1645 				       &res[num_res++]);
1646 	}
1647 }
1648 
1649 static void __init arm_smmu_dma_configure(struct device *dev,
1650 					  struct acpi_iort_node *node)
1651 {
1652 	struct acpi_iort_smmu *smmu;
1653 	enum dev_dma_attr attr;
1654 
1655 	/* Retrieve SMMU specific data */
1656 	smmu = (struct acpi_iort_smmu *)node->node_data;
1657 
1658 	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
1659 			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1660 
1661 	/* We expect the dma masks to be equivalent for SMMU set-ups */
1662 	dev->dma_mask = &dev->coherent_dma_mask;
1663 
1664 	/* Configure DMA for the page table walker */
1665 	acpi_dma_configure(dev, attr);
1666 }
1667 
1668 static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
1669 {
1670 	struct acpi_iort_pmcg *pmcg;
1671 
1672 	/* Retrieve PMCG specific data */
1673 	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1674 
1675 	/*
1676 	 * There are always 2 memory resources.
1677 	 * If the overflow_gsiv is present then add that for a total of 3.
1678 	 */
1679 	return pmcg->overflow_gsiv ? 3 : 2;
1680 }
1681 
1682 static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
1683 						   struct acpi_iort_node *node)
1684 {
1685 	struct acpi_iort_pmcg *pmcg;
1686 
1687 	/* Retrieve PMCG specific data */
1688 	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1689 
1690 	res[0].start = pmcg->page0_base_address;
1691 	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
1692 	res[0].flags = IORESOURCE_MEM;
1693 	/*
1694 	 * The initial version in DEN0049C lacked a way to describe register
1695 	 * page 1, which makes it broken for most PMCG implementations; in
1696 	 * that case, just let the driver fail gracefully if it expects to
1697 	 * find a second memory resource.
1698 	 */
1699 	if (node->revision > 0) {
1700 		res[1].start = pmcg->page1_base_address;
1701 		res[1].end = pmcg->page1_base_address + SZ_4K - 1;
1702 		res[1].flags = IORESOURCE_MEM;
1703 	}
1704 
1705 	if (pmcg->overflow_gsiv)
1706 		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
1707 				       ACPI_EDGE_SENSITIVE, &res[2]);
1708 }
1709 
1710 static struct acpi_platform_list pmcg_plat_info[] __initdata = {
1711 	/* HiSilicon Hip08 Platform */
1712 	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1713 	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
1714 	{ }
1715 };
1716 
1717 static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
1718 {
1719 	u32 model;
1720 	int idx;
1721 
1722 	idx = acpi_match_platform_list(pmcg_plat_info);
1723 	if (idx >= 0)
1724 		model = pmcg_plat_info[idx].data;
1725 	else
1726 		model = IORT_SMMU_V3_PMCG_GENERIC;
1727 
1728 	return platform_device_add_data(pdev, &model, sizeof(model));
1729 }
1730 
struct iort_dev_config {
	const char *name;
	int (*dev_init)(struct acpi_iort_node *node);
	void (*dev_dma_configure)(struct device *dev,
				  struct acpi_iort_node *node);
	int (*dev_count_resources)(struct acpi_iort_node *node);
	void (*dev_init_resources)(struct resource *res,
				     struct acpi_iort_node *node);
	int (*dev_set_proximity)(struct device *dev,
				    struct acpi_iort_node *node);
	int (*dev_add_platdata)(struct platform_device *pdev);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.dev_dma_configure = arm_smmu_v3_dma_configure,
	.dev_count_resources = arm_smmu_v3_count_resources,
	.dev_init_resources = arm_smmu_v3_init_resources,
	.dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.dev_dma_configure = arm_smmu_dma_configure,
	.dev_count_resources = arm_smmu_count_resources,
	.dev_init_resources = arm_smmu_init_resources,
};

static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
	.name = "arm-smmu-v3-pmcg",
	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};

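/**
 * iort_get_dev_cfg() - Look up the device config for an IORT node type
 * @node: Pointer to the ACPI IORT node
 *
 * Returns: iort_dev_config pointer for node types that are instantiated
 *          as platform devices, NULL otherwise
 */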
static __init const struct iort_dev_config *iort_get_dev_cfg(
			struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	case ACPI_IORT_NODE_PMCG:
		return &iort_arm_smmu_v3_pmcg_cfg;
	default:
		return NULL;
	}
}

/**
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 on failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
					   const struct iort_dev_config *ops)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	int ret, count;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	if (ops->dev_set_proximity) {
		ret = ops->dev_set_proximity(&pdev->dev, node);
		if (ret)
			goto dev_put;
	}

	count = ops->dev_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->dev_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Platform devices based on PMCG nodes use platform_data to
	 * pass the hardware model info to the driver. For others, add
	 * a copy of the IORT node pointer to platform_data to be used to
	 * retrieve IORT data information.
	 */
	if (ops->dev_add_platdata)
		ret = ops->dev_add_platdata(pdev);
	else
		ret = platform_device_add_data(pdev, &node, sizeof(node));

	if (ret)
		goto dev_put;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	if (ops->dev_dma_configure)
		ops->dev_dma_configure(&pdev->dev, node);

	iort_set_device_domain(&pdev->dev, node);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	arch_teardown_dma_ops(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

#ifdef CONFIG_PCI
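/*
 * If a PCI root complex node maps its IDs through an SMMU, request that PCI
 * ACS be enabled system-wide so that device isolation can be enforced. This
 * only needs to happen once, hence the static flag.
 */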
static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
	static bool acs_enabled __initdata;

	if (acs_enabled)
		return;

	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_node *parent;
		struct acpi_iort_id_mapping *map;
		int i;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
				   iort_node->mapping_offset);

		for (i = 0; i < iort_node->mapping_count; i++, map++) {
			if (!map->output_reference)
				continue;

			parent = ACPI_ADD_PTR(struct acpi_iort_node,
					iort_table,  map->output_reference);
			/*
			 * If we detect an RC->SMMU mapping, make sure
			 * we enable ACS on the system.
			 */
			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
				pci_request_acs();
				acs_enabled = true;
				return;
			}
		}
	}
}
#else
static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
#endif

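/**
 * iort_init_platform_devices() - Create platform devices for IORT nodes
 *
 * Walk every node in the IORT table and, for node types that have an
 * iort_dev_config, allocate a static fwnode and register a platform
 * device for it. Also requests PCI ACS when an RC->SMMU mapping is found.
 */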
static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;
	const struct iort_dev_config *ops;

	/*
	 * iort_table and iort both point to the start of the IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		iort_enable_acs(iort_node);

		ops = iort_get_dev_cfg(iort_node);
		if (ops) {
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_platform_device(iort_node, ops);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

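/**
 * acpi_iort_init() - Map the IORT table and create IORT platform devices
 *
 * Entry point for early IORT initialisation: map the IORT ACPI table and,
 * if it is present, scan it to register SMMU and PMCG platform devices.
 */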
void __init acpi_iort_init(void)
{
	acpi_status status;

	/*
	 * iort_table will be used at runtime after the iort init,
	 * so we don't need to call acpi_put_table() to release
	 * the IORT table mapping.
	 */
	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}

#ifdef CONFIG_ZONE_DMA
/*
 * Extract the highest CPU physical address accessible to all DMA masters in
 * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
 */
phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
{
	phys_addr_t limit = PHYS_ADDR_MAX;
	struct acpi_iort_node *node, *end;
	struct acpi_table_iort *iort;
	acpi_status status;
	int i;

	if (acpi_disabled)
		return limit;

	status = acpi_get_table(ACPI_SIG_IORT, 0,
				(struct acpi_table_header **)&iort);
	if (ACPI_FAILURE(status))
		return limit;

	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);

	for (i = 0; i < iort->node_count; i++) {
		if (node >= end)
			break;

		switch (node->type) {
			struct acpi_iort_named_component *ncomp;
			struct acpi_iort_root_complex *rc;
			phys_addr_t local_limit;

		case ACPI_IORT_NODE_NAMED_COMPONENT:
			ncomp = (struct acpi_iort_named_component *)node->node_data;
			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;

		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
			if (node->revision < 1)
				break;

			rc = (struct acpi_iort_root_complex *)node->node_data;
			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;
		}
		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
	}
	acpi_put_table(&iort->header);
	return limit;
}
#endif