// SPDX-License-Identifier: GPL-2.0
/*
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "iommu.h"
#include "pasid.h"

/*
 * Intel IOMMU system wide PASID name space:
 */
u32 intel_pasid_max_id = PASID_MAX;

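/*
 * Virtual command interface for PASID allocation. Note: the virtual
 * command register (DMAR_VCMD_REG) is, as the name suggests, typically
 * backed by a virtual IOMMU; a guest uses these helpers to allocate
 * and free PASIDs from the host-managed pool.
 */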
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	int ret = 0;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}

void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG,
		    VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		break;
	case VCMD_VRSP_SC_INVALID_PASID:
		pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
		break;
	default:
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}
}

/*
 * Per device pasid table management:
 */
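
/*
 * In scalable mode the per-device PASID table is two levels deep: a
 * PASID directory whose 64-bit entries each point to a page of PASID
 * table entries. A PASID is split into a directory index
 * (pasid >> PASID_PDE_SHIFT) and a table index (pasid & PASID_PTE_MASK);
 * with PASID_PDE_SHIFT == 6 (its value in pasid.h), PASID 0x12345 lands
 * in directory slot 0x48d, entry 0x05, for example.
 */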

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-threaded context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct page *pages;
	u32 max_pasid = 0;
	int order, size;

	might_sleep();
	info = dev_iommu_priv_get(dev);
	if (WARN_ON(!info || !dev_is_pci(dev)))
		return -ENODEV;
	if (WARN_ON(info->pasid_table))
		return -EEXIST;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);

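	/*
	 * Size of the PASID directory in bytes: one 8-byte directory
	 * entry per 2^PASID_PDE_SHIFT PASIDs, i.e.
	 * (max_pasid >> PASID_PDE_SHIFT) * 8. Conversely, a directory
	 * of 2^(order + PAGE_SHIFT) bytes covers
	 * 2^(order + PAGE_SHIFT + 3) PASIDs, which is how max_pasid is
	 * recomputed below.
	 */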
	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	pages = alloc_pages_node(info->iommu->node,
				 GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
	info->pasid_table = pasid_table;

	if (!ecap_coherent(info->iommu->ecap))
		clflush_cache_range(pasid_table->table, size);

	return 0;
}

void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *table;
	int i, max_pde;

	info = dev_iommu_priv_get(dev);
	if (!info || !dev_is_pci(dev) || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	info->pasid_table = NULL;

	/* Free scalable mode PASID directory tables: */
	dir = pasid_table->table;
	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
	for (i = 0; i < max_pde; i++) {
		table = get_pasid_table_from_pde(&dir[i]);
		free_pgtable_page(table);
	}

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}

struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info)
		return NULL;

	return info->pasid_table;
}

static int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}

static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

	dir = pasid_table->table;
	info = dev_iommu_priv_get(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

retry:
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
		if (!entries)
			return NULL;

		/*
		 * A pasid directory table entry is never freed after
		 * allocation, so there is no race with a free or clear
		 * to worry about. However, another thread may populate
		 * the entry while we are preparing ours; in that case,
		 * free our page and retry to pick up theirs.
		 */
		if (cmpxchg64(&dir[dir_index].val, 0ULL,
			      (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
			free_pgtable_page(entries);
			goto retry;
		}
		if (!ecap_coherent(info->iommu->ecap)) {
			clflush_cache_range(entries, VTD_PAGE_SIZE);
			clflush_cache_range(&dir[dir_index].val, sizeof(*dir));
		}
	}

	return &entries[index];
}

/*
 * Interfaces for PASID table entry manipulation:
 */
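
/*
 * A scalable mode PASID table entry is 512 bits wide, held here as
 * eight u64 words (pe->val[0..7]). WRITE_ONCE()/READ_ONCE() keep each
 * 64-bit update a single, untorn access, since the hardware may walk
 * these tables concurrently with software updates.
 */
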
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	if (fault_ignore && pasid_pte_is_present(pe))
		pasid_clear_entry_with_fpd(pe);
	else
		pasid_clear_entry(pe);
}

static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}
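
/*
 * The field comments below number bits within the full 512-bit entry:
 * bit N lives in val[N / 64] at bit position (N % 64). For example,
 * the DID field at bits 64~79 maps to val[1] bits 15:0.
 */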

/*
 * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}
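
/*
 * The PGTT values used in this file select how a request with this
 * PASID is translated: first level only (PASID_ENTRY_PGTT_FL_ONLY),
 * second level only (PASID_ENTRY_PGTT_SL_ONLY), or pass-through
 * (PASID_ENTRY_PGTT_PT); see the intel_pasid_setup_*() functions below.
 */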

/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}

/*
 * Setup the WPE(Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}

/*
 * Setup the P(Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
 * entry. It is required when the XD bit of the first level page table
 * entry is about to be set.
 */
static inline void pasid_set_nxe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
}

/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the First Level Page table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->ats_enabled)
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/*
	 * PASID 0 means RID2PASID (a DMA request without PASID), for
	 * which a devTLB flush without PASID should be used. For a
	 * non-zero PASID under SVA usage the device may do DMA with
	 * multiple PASIDs, so it is more efficient to flush the devTLB
	 * entries specific to this PASID.
	 */
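	/*
	 * Note: address 0 with size order 64 - VTD_PAGE_SHIFT encodes
	 * a wildcard (S=1) invalidation covering the device's entire
	 * address space.
	 */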
	if (pasid == PASID_RID2PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}

void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return;
	}

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}

/*
 * This function flushes the relevant caches for a newly set up PASID
 * table entry. Its caller must not modify in-use PASID table entries.
 */
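/*
 * In caching mode (typically a virtual IOMMU) the hardware may cache
 * not-present entries, so even a not-present to present transition
 * must be followed by explicit PASID-cache and PASID-based IOTLB
 * invalidation; otherwise flushing the write buffer is sufficient.
 */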
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}

/*
 * Set up the scalable mode pasid table entry for first-level only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
#ifdef CONFIG_X86
		unsigned long cr0 = read_cr0();

		/* CR0.WP is normally set but just to be sure */
		if (unlikely(!(cr0 & X86_CR0_WP))) {
			pr_err("No CPU write protect!\n");
			return -EINVAL;
		}
#endif
		if (!ecap_srs(iommu->ecap)) {
			pr_err("No supervisor request support on %s\n",
			       iommu->name);
			return -EINVAL;
		}
	}

	if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
		pr_err("No 5-level paging support for first-level on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, (u64)__pa(pgd));
	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
		pasid_set_sre(pte);
		pasid_set_wpe(pte);
	}

	if (flags & PASID_FLAG_FL5LP)
		pasid_set_flpm(pte, 1);

	if (flags & PASID_FLAG_PAGE_SNOOP)
		pasid_set_pgsnp(pte);

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	pasid_set_nxe(pte);

	/* Setup Present and PASID Granular Translation Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}
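
/*
 * A simplified sketch (not verbatim from any in-tree caller) of how an
 * SVA-style caller might use the function above, sharing a process's
 * CPU page table root directly with the IOMMU:
 *
 *	int flags = cpu_feature_enabled(X86_FEATURE_LA57) ?
 *		    PASID_FLAG_FL5LP : 0;
 *	ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd,
 *					    pasid, did, flags);
 */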

/*
 * Skip the top levels of the page tables for an iommu whose agaw is
 * smaller than the domain's. Unnecessary for PT mode.
 */
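/*
 * Each iteration below descends through slot 0 of the current top
 * level: every address representable under the smaller agaw falls
 * within the first entry of the larger table, so the lower-level
 * table it points to can serve as the new root.
 */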
static inline int iommu_skip_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu,
				  struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}

/*
 * Set up the scalable mode pasid entry for second-level only translation
 * type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	int agaw;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err(dev, "Invalid domain page table\n");
		return -EINVAL;
	}

	pgd_val = virt_to_phys(pgd);
	did = domain_id_iommu(domain, iommu);

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * Since this is a second level only translation setup, the SRE
	 * bit should be set as well (addresses are expected to be GPAs).
	 */
	if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap))
		pasid_set_sre(pte);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * The SRE bit should be set as well since the addresses are
	 * expected to be GPAs.
	 */
	if (ecap_srs(iommu->ecap))
		pasid_set_sre(pte);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set the page snoop control for a pasid entry which has already been
 * set up.
 */
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	u16 did;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
		spin_unlock(&iommu->lock);
		return;
	}

	pasid_set_pgsnp(pte);
	did = pasid_get_domain_id(pte);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/*
	 * The VT-d spec (rev 3.4, Table 23) gives the following guidance
	 * for cache invalidation here:
	 *
	 * - PASID-selective-within-Domain PASID-cache invalidation
	 * - PASID-selective PASID-based IOTLB invalidation
	 * - If (pasid is RID_PASID)
	 *    - Global Device-TLB invalidation to affected functions
	 *   Else
	 *    - PASID-based Device-TLB invalidation (with S=1 and
	 *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}