xref: /OK3568_Linux_fs/kernel/drivers/iommu/intel/pasid.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "pasid.h"

/*
 * Intel IOMMU system wide PASID name space:
 */
static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;

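/*
 * PASID allocation through the virtual command interface, used when
 * running under a hypervisor that emulates VT-d. The protocol: write
 * the command to DMAR_VCMD_REG, poll DMAR_VCRSP_REG until the In
 * Progress (IP) bit clears, then read the status code (and, for
 * allocation, the resulting PASID) out of the response.
 */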
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	int ret = 0;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}

void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG,
		    VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		break;
	case VCMD_VRSP_SC_INVALID_PASID:
		pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
		break;
	default:
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}
}

/*
 * Per device pasid table management:
 */
static inline void
device_attach_pasid_table(struct device_domain_info *info,
			  struct pasid_table *pasid_table)
{
	info->pasid_table = pasid_table;
	list_add(&info->table, &pasid_table->dev);
}

static inline void
device_detach_pasid_table(struct device_domain_info *info,
			  struct pasid_table *pasid_table)
{
	info->pasid_table = NULL;
	list_del(&info->table);
}

struct pasid_table_opaque {
	struct pasid_table	**pasid_table;
	int			segment;
	int			bus;
	int			devfn;
};

static int search_pasid_table(struct device_domain_info *info, void *opaque)
{
	struct pasid_table_opaque *data = opaque;

	if (info->iommu->segment == data->segment &&
	    info->bus == data->bus &&
	    info->devfn == data->devfn &&
	    info->pasid_table) {
		*data->pasid_table = info->pasid_table;
		return 1;
	}

	return 0;
}

static int get_alias_pasid_table(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct pasid_table_opaque *data = opaque;

	data->segment = pci_domain_nr(pdev->bus);
	data->bus = PCI_BUS_NUM(alias);
	data->devfn = alias & 0xff;

	return for_each_device_domain(&search_pasid_table, data);
}

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context. Devices that alias to the same requester ID
 * share one pasid table, since the IOMMU cannot tell their requests
 * apart.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_table_opaque data;
	struct page *pages;
	u32 max_pasid = 0;
	int ret, order;
	int size;

	might_sleep();
	info = get_domain_info(dev);
	if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
		return -EINVAL;

	/* DMA alias device already has a pasid table, use it: */
	data.pasid_table = &pasid_table;
	ret = pci_for_each_dma_alias(to_pci_dev(dev),
				     &get_alias_pasid_table, &data);
	if (ret)
		goto attach_out;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;
	INIT_LIST_HEAD(&pasid_table->dev);

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);

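	/*
	 * Size of the PASID directory: each directory entry is 8 bytes
	 * and covers 1 << PASID_PDE_SHIFT PASIDs, hence the shift by
	 * (PASID_PDE_SHIFT - 3).
	 */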
	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	pages = alloc_pages_node(info->iommu->node,
				 GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
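	/*
	 * Round max_pasid up to what the allocated directory can
	 * address: 1 << (order + PAGE_SHIFT - 3) entries, each covering
	 * 1 << PASID_PDE_SHIFT (i.e. 64, per pasid.h) PASIDs.
	 */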
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);

attach_out:
	device_attach_pasid_table(info, pasid_table);

	return 0;
}

void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *table;
	int i, max_pde;

	info = get_domain_info(dev);
	if (!info || !dev_is_pci(dev) || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	device_detach_pasid_table(info, pasid_table);

	if (!list_empty(&pasid_table->dev))
		return;

	/* Free scalable mode PASID directory tables: */
	dir = pasid_table->table;
	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
	for (i = 0; i < max_pde; i++) {
		table = get_pasid_table_from_pde(&dir[i]);
		free_pgtable_page(table);
	}

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}

struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = get_domain_info(dev);
	if (!info)
		return NULL;

	return info->pasid_table;
}

int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = get_domain_info(dev);
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}

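/*
 * Look up, and lazily allocate if needed, the PASID table entry for
 * @pasid. The table is two level: a directory whose present entries
 * point to leaf pages of PASID entries. Leaf allocation is serialized
 * by pasid_lock.
 */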
struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

	dir = pasid_table->table;
	info = get_domain_info(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

	spin_lock(&pasid_lock);
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		entries = alloc_pgtable_page(info->iommu->node);
		if (!entries) {
			spin_unlock(&pasid_lock);
			return NULL;
		}

		WRITE_ONCE(dir[dir_index].val,
			   (u64)virt_to_phys(entries) | PASID_PTE_PRESENT);
	}
	spin_unlock(&pasid_lock);

	return &entries[index];
}

/*
 * Interfaces for PASID table entry manipulation:
 */
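/* A scalable mode PASID entry is 512 bits wide: eight 64-bit words. */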
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	if (fault_ignore && pasid_pte_is_present(pe))
		pasid_clear_entry_with_fpd(pe);
	else
		pasid_clear_entry(pe);
}

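/*
 * Read-modify-write helper for one 64-bit word of a PASID entry. The
 * update is not atomic; callers are expected to serialize writes to a
 * given entry.
 */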
static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}

/*
 * Setup the DID (Domain Identifier) field (Bit 64~79) of a scalable
 * mode PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get the domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR (Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW (Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT (PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD (Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Setup the SRE (Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}

/*
 * Setup the P (Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup the PWSNP (Page Walk Snoop) bit (Bit 87) of a scalable mode
 * PASID entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup the PGSNP (Page Snoop) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the FLPTPTR (First Level Page Table Pointer) field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the FLPM (First Level Paging Mode) field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

/*
 * Setup the EAFE (Extended Access Flag Enable) field (Bit 135)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_eafe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}

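/*
 * Issue a PASID-selective PASID cache invalidation for @did/@pasid
 * through the invalidation queue, and wait for it to complete.
 */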
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = get_domain_info(dev);
	if (!info || !info->ats_enabled)
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/*
	 * PASID 0 (RID2PASID) is used for DMA requests without PASID,
	 * so a devTLB flush without PASID matches it. For a non-zero
	 * PASID under SVA usage, a device may do DMA with multiple
	 * PASIDs, and it is more efficient to flush only the devTLB
	 * entries specific to that PASID.
	 */
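	/*
	 * An address mask of 64 - VTD_PAGE_SHIFT covers the whole
	 * address space, i.e. this is a full flush for the target.
	 */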
	if (pasid == PASID_RID2PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}

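/*
 * Tear down the PASID entry for @pasid: clear the entry, then
 * invalidate the PASID cache, the IOTLB (PASID based or domain
 * selective, depending on the translation type), and finally the
 * device TLB, in that order.
 */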
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return;

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);

	intel_pasid_clear_entry(dev, pasid, fault_ignore);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}

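/*
 * Make a newly written PASID entry visible to hardware. In caching
 * mode the hardware may cache not-present entries, so the PASID
 * cache and the PASID-based IOTLB must be invalidated explicitly;
 * otherwise flushing the write buffer is sufficient.
 */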
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}

/*
 * Set up the scalable mode pasid table entry for first level only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return -EINVAL;

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, (u64)__pa(pgd));
	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap)) {
			pr_err("No supervisor request support on %s\n",
			       iommu->name);
			return -EINVAL;
		}
		pasid_set_sre(pte);
	}

	if (flags & PASID_FLAG_FL5LP) {
		if (cap_5lp_support(iommu->cap)) {
			pasid_set_flpm(pte, 1);
		} else {
			pr_err("No 5-level paging support for first-level\n");
			pasid_clear_entry(pte);
			return -EINVAL;
		}
	}

	if (flags & PASID_FLAG_PAGE_SNOOP)
		pasid_set_pgsnp(pte);

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/* Setup Present and PASID Granular Translation Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Skip the top levels of the page tables for an IOMMU whose agaw is
 * smaller than the domain's agaw. Unnecessary for PT mode.
 */
static inline int iommu_skip_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu,
				  struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}

/*
 * Set up the scalable mode pasid entry for second level only translation
 * type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	int agaw;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err(dev, "Invalid domain page table\n");
		return -EINVAL;
	}

	pgd_val = virt_to_phys(pgd);
	did = domain->iommu_did[iommu->seq_id];

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
		return -ENODEV;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
		pasid_set_pgsnp(pte);

	/*
	 * Since it is a second level only translation setup, we should
	 * set the SRE bit as well (addresses are expected to be GPAs).
	 */
	if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap))
		pasid_set_sre(pte);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
		return -ENODEV;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * We should set the SRE bit as well since the addresses are
	 * expected to be GPAs.
	 */
	if (ecap_srs(iommu->ecap))
		pasid_set_sre(pte);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

static int
intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
			    struct iommu_gpasid_bind_data_vtd *pasid_data)
{
	/*
	 * Not all guest PASID table entry fields are passed down during bind,
	 * here we only set up the ones that are dependent on guest settings.
	 * Execution related bits such as NXE, SMEP are not supported.
	 * Other fields, such as snoop related, are set based on host needs
	 * regardless of guest settings.
	 */
	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
		if (!ecap_srs(iommu->ecap)) {
			pr_err_ratelimited("No supervisor request support on %s\n",
					   iommu->name);
			return -EINVAL;
		}
		pasid_set_sre(pte);
	}

	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
		if (!ecap_eafs(iommu->ecap)) {
			pr_err_ratelimited("No extended access flag support on %s\n",
					   iommu->name);
			return -EINVAL;
		}
		pasid_set_eafe(pte);
	}

	/*
	 * Memory type is only applicable to devices inside the processor
	 * coherent domain. MTS support will be added once coherent devices
	 * are available.
	 */
	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) {
		pr_warn_ratelimited("No memory type support on %s\n",
				    iommu->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
 * This can be used for guest shared virtual address. In this case, the
 * first level page tables are used for GVA-GPA translation in the guest,
 * and the second level page tables are used for GPA-HPA translation.
 *
 * @iommu:      IOMMU which the device belongs to
 * @dev:        Device to be set up for translation
 * @gpgd:       FLPTPTR: First Level Page translation pointer in GPA
 * @pasid:      PASID to be programmed in the device PASID table
 * @pasid_data: Additional PASID info from the guest bind request
 * @domain:     Domain info for setting up second level page tables
 * @addr_width: Address width of the first level (guest)
 */
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
			     pgd_t *gpgd, u32 pasid,
			     struct iommu_gpasid_bind_data_vtd *pasid_data,
			     struct dmar_domain *domain, int addr_width)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	int ret = 0;
	u64 pgd_val;
	int agaw;
	u16 did;

	if (!ecap_nest(iommu->ecap)) {
		pr_err_ratelimited("IOMMU: %s: No nested translation support\n",
				   iommu->name);
		return -EINVAL;
	}

	if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) {
		pr_err_ratelimited("Domain is not in nesting mode, %x\n",
				   domain->flags);
		return -EINVAL;
	}

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return -EINVAL;

	/*
	 * Caller must ensure the PASID entry is not in use, i.e. it must
	 * not bind the same PASID to the same device twice.
	 */
	if (pasid_pte_is_present(pte))
		return -EBUSY;

	pasid_clear_entry(pte);

	/*
	 * The caller must have sanity checked that the address widths
	 * match in two dimensions:
	 * 1. CPU vs. IOMMU
	 * 2. Guest vs. Host.
	 */
	switch (addr_width) {
#ifdef CONFIG_X86
	case ADDR_WIDTH_5LEVEL:
		if (!cpu_feature_enabled(X86_FEATURE_LA57) ||
		    !cap_5lp_support(iommu->cap)) {
			dev_err_ratelimited(dev,
					    "5-level paging not supported\n");
			return -EINVAL;
		}

		pasid_set_flpm(pte, 1);
		break;
#endif
	case ADDR_WIDTH_4LEVEL:
		pasid_set_flpm(pte, 0);
		break;
	default:
		dev_err_ratelimited(dev, "Invalid guest address width %d\n",
				    addr_width);
		return -EINVAL;
	}

	/* The first level PGD is in GPA and must be covered by the second level */
	if ((uintptr_t)gpgd > domain->max_addr) {
		dev_err_ratelimited(dev,
				    "Guest PGD %lx not supported, max %llx\n",
				    (uintptr_t)gpgd, domain->max_addr);
		return -EINVAL;
	}
	pasid_set_flptr(pte, (uintptr_t)gpgd);

	ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
	if (ret)
		return ret;

	/* Setup the second level based on the given domain */
	pgd = domain->pgd;

	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err_ratelimited(dev, "Invalid domain page table\n");
		return -EINVAL;
	}
	pgd_val = virt_to_phys(pgd);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_fault_enable(pte);

	did = domain->iommu_did[iommu->seq_id];
	pasid_set_domain_id(pte, did);

	pasid_set_address_width(pte, agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return ret;
}