// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/intel-svm.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
#include <linux/ioasid.h>
#include <asm/page.h>
#include <asm/fpu/api.h>

#include "pasid.h"

static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, u32 pasid);

#define PRQ_ORDER 0

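/*
 * Allocate the page request queue for @iommu, hook up the PRQ interrupt
 * handler and program the queue address/head/tail registers so that the
 * hardware can start posting recoverable (SVM) page faults.
 */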
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
	err:
		free_pages((unsigned long)iommu->prq, PRQ_ORDER);
		iommu->prq = NULL;
		return ret;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		dmar_free_hwirq(irq);
		iommu->pr_irq = 0;
		goto err;
	}
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	init_completion(&iommu->prq_complete);

	return 0;
}

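/*
 * Undo intel_svm_enable_prq(): quiesce the page request queue in hardware,
 * release the PRQ interrupt and free the queue pages.
 */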
int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}

static inline bool intel_svm_capable(struct intel_iommu *iommu)
{
	return iommu->flags & VTD_FLAG_SVM_CAPABLE;
}

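/*
 * Mark @iommu as SVM capable only if its first-level paging capabilities
 * (1GB pages, 5-level paging) are compatible with what the CPU may hand it
 * through a shared mm page table.
 */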
void intel_svm_check(struct intel_iommu *iommu)
{
	if (!pasid_supported(iommu))
		return;

	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
	    !cap_fl1gp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible 1GB page capability\n",
		       iommu->name);
		return;
	}

	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    !cap_5lp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible paging mode\n",
		       iommu->name);
		return;
	}

	iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}

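/*
 * Invalidate the PASID-based IOTLB entries for one bound device and, when
 * ATS is enabled, the matching device TLB entries as well.
 */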
static void __flush_svm_range_dev(struct intel_svm *svm,
				  struct intel_svm_dev *sdev,
				  unsigned long address,
				  unsigned long pages, int ih)
{
	struct device_domain_info *info = get_domain_info(sdev->dev);

	if (WARN_ON(!pages))
		return;

	qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
	if (info->ats_enabled)
		qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
					 svm->pasid, sdev->qdep, address,
					 order_base_2(pages));
}

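/*
 * The invalidation descriptors describe a naturally aligned, power-of-two
 * sized region. Round the requested range up to such a region and emit one
 * flush per aligned chunk rather than over-invalidating in a single shot.
 */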
static void intel_flush_svm_range_dev(struct intel_svm *svm,
				      struct intel_svm_dev *sdev,
				      unsigned long address,
				      unsigned long pages, int ih)
{
	unsigned long shift = ilog2(__roundup_pow_of_two(pages));
	unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
	unsigned long start = ALIGN_DOWN(address, align);
	unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);

	while (start < end) {
		__flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
		start += align;
	}
}

static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
				  unsigned long pages, int ih)
{
	struct intel_svm_dev *sdev;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
	rcu_read_unlock();
}

/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, start,
			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
	struct intel_svm_dev *sdev;

	/* This might end up being called from exit_mmap(), *before* the page
	 * tables are cleared. And __mmu_notifier_release() will delete us from
	 * the list of notifiers so that our invalidate_range() callback doesn't
	 * get called when the page tables are cleared. So we need to protect
	 * against hardware accessing those page tables.
	 *
	 * We do it by clearing the entry in the PASID table and then flushing
	 * the IOTLB and the PASID table caches. This might upset hardware;
	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
	 * page) so that we end up taking a fault that the hardware really
	 * *has* to handle gracefully without affecting other processes.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
					    svm->pasid, true);
	rcu_read_unlock();
}

static const struct mmu_notifier_ops intel_mmuops = {
	.release = intel_mm_release,
	.invalidate_range = intel_invalidate_range,
};

static DEFINE_MUTEX(pasid_mutex);
static LIST_HEAD(global_svm_list);

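/* Iterate over svm->devs, executing the loop body only for the entry bound to @d */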
#define for_each_svm_dev(sdev, svm, d)			\
	list_for_each_entry((sdev), &(svm)->devs, list)	\
		if ((d) != (sdev)->dev) {} else

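/*
 * Look up the SVM bond for @pasid and, within it, the per-device binding for
 * @dev. Returns 0 with *rsvm and/or *rsdev left NULL when no such binding
 * exists; a negative errno indicates an invalid PASID or inconsistent state.
 */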
static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
			     struct intel_svm **rsvm,
			     struct intel_svm_dev **rsdev)
{
	struct intel_svm_dev *d, *sdev = NULL;
	struct intel_svm *svm;

	/* The caller should hold the pasid_mutex lock */
	if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
		return -EINVAL;

	if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
		return -EINVAL;

	svm = ioasid_find(NULL, pasid, NULL);
	if (IS_ERR(svm))
		return PTR_ERR(svm);

	if (!svm)
		goto out;

	/*
	 * If we found svm for the PASID, there must be at least one device
	 * bond.
	 */
	if (WARN_ON(list_empty(&svm->devs)))
		return -EINVAL;

	rcu_read_lock();
	list_for_each_entry_rcu(d, &svm->devs, list) {
		if (d->dev == dev) {
			sdev = d;
			break;
		}
	}
	rcu_read_unlock();

out:
	*rsvm = svm;
	*rsdev = sdev;

	return 0;
}

int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
			  struct iommu_gpasid_bind_data *data)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	struct intel_svm_dev *sdev = NULL;
	struct dmar_domain *dmar_domain;
	struct device_domain_info *info;
	struct intel_svm *svm = NULL;
	unsigned long iflags;
	int ret = 0;

	if (WARN_ON(!iommu) || !data)
		return -EINVAL;

	if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
		return -EINVAL;

	/* IOMMU core ensures argsz is more than the start of the union */
	if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd))
		return -EINVAL;

	/* Make sure no undefined flags are used in vendor data */
	if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1))
		return -EINVAL;

	if (!dev_is_pci(dev))
		return -ENOTSUPP;

	/* VT-d supports devices with full 20 bit PASIDs only */
	if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
		return -EINVAL;

	/*
	 * We only check host PASID range, we have no knowledge to check
	 * guest PASID range.
	 */
	if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
		return -EINVAL;

	info = get_domain_info(dev);
	if (!info)
		return -EINVAL;

	dmar_domain = to_dmar_domain(domain);

	mutex_lock(&pasid_mutex);
	ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
	if (ret)
		goto out;
	if (sdev) {
		/*
		 * Do not allow multiple bindings of the same device-PASID since
		 * there is only one set of second-level page tables per PASID.
		 * We may revisit this once sharing a PGD across domains is
		 * supported.
		 */
		dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
				     svm->pasid);
		ret = -EBUSY;
		goto out;
	}

	if (!svm) {
		/* We come here when the PASID has never been bound to a device. */
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			goto out;
		}
		/* REVISIT: the upper layer/VFIO can track the host process that
		 * binds the PASID. ioasid_set = mm might be sufficient for vfio
		 * to check pasid VMM ownership. We can drop the following line
		 * once VFIO and IOASID set check is in place.
		 */
		svm->mm = get_task_mm(current);
		svm->pasid = data->hpasid;
		if (data->flags & IOMMU_SVA_GPASID_VAL) {
			svm->gpasid = data->gpasid;
			svm->flags |= SVM_FLAG_GUEST_PASID;
		}
		ioasid_set_data(data->hpasid, svm);
		INIT_LIST_HEAD_RCU(&svm->devs);
		mmput(svm->mm);
	}
	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;
	sdev->sid = PCI_DEVID(info->bus, info->devfn);
	sdev->iommu = iommu;

	/* Only count users if device has aux domains */
	if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
		sdev->users = 1;

	/* Set up device context entry for PASID if not enabled already */
	ret = intel_iommu_enable_pasid(iommu, sdev->dev);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
		kfree(sdev);
		goto out;
	}

	/*
	 * PASID table is per device for better security. Therefore, for
	 * each bind of a new device even with an existing PASID, we need to
	 * call the nested mode setup function here.
	 */
	spin_lock_irqsave(&iommu->lock, iflags);
	ret = intel_pasid_setup_nested(iommu, dev,
				       (pgd_t *)(uintptr_t)data->gpgd,
				       data->hpasid, &data->vendor.vtd, dmar_domain,
				       data->addr_width);
	spin_unlock_irqrestore(&iommu->lock, iflags);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
				    data->hpasid, ret);
		/*
		 * PASID entry should be in cleared state if nested mode
		 * set up failed. So we only need to clear IOASID tracking
		 * data such that free call will succeed.
		 */
		kfree(sdev);
		goto out;
	}

	svm->flags |= SVM_FLAG_GUEST_MODE;

	init_rcu_head(&sdev->rcu);
	list_add_rcu(&sdev->list, &svm->devs);
out:
	if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
		ioasid_set_data(data->hpasid, NULL);
		kfree(svm);
	}

	mutex_unlock(&pasid_mutex);
	return ret;
}

int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	struct intel_svm_dev *sdev;
	struct intel_svm *svm;
	int ret;

	if (WARN_ON(!iommu))
		return -EINVAL;

	mutex_lock(&pasid_mutex);
	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
	if (ret)
		goto out;

	if (sdev) {
		if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
			sdev->users--;
		if (!sdev->users) {
			list_del_rcu(&sdev->list);
			intel_pasid_tear_down_entry(iommu, dev,
						    svm->pasid, false);
			intel_svm_drain_prq(dev, svm->pasid);
			kfree_rcu(sdev, rcu);

			if (list_empty(&svm->devs)) {
				/*
				 * We do not free the IOASID here because the
				 * IOMMU driver did not allocate it. Unlike
				 * native SVM, the IOASID for guest use was
				 * allocated prior to the bind call. In any
				 * case, if the free call comes before the
				 * unbind, the IOMMU driver will get notified
				 * and perform cleanup.
				 */
				ioasid_set_data(pasid, NULL);
				kfree(svm);
			}
		}
	}
out:
	mutex_unlock(&pasid_mutex);
	return ret;
}

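/* Runs on each CPU in the mm's cpumask to refresh that CPU's PASID MSR state */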
static void _load_pasid(void *unused)
{
	update_pasid();
}

static void load_pasid(struct mm_struct *mm, u32 pasid)
{
	mutex_lock(&mm->context.lock);

	/* Synchronize with READ_ONCE in update_pasid(). */
	smp_store_release(&mm->pasid, pasid);

	/* Update PASID MSR on all CPUs running the mm's tasks. */
	on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);

	mutex_unlock(&mm->context.lock);
}

/* Caller must hold pasid_mutex, mm reference */
static int
intel_svm_bind_mm(struct device *dev, unsigned int flags,
		  struct svm_dev_ops *ops,
		  struct mm_struct *mm, struct intel_svm_dev **sd)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	struct device_domain_info *info;
	struct intel_svm_dev *sdev;
	struct intel_svm *svm = NULL;
	unsigned long iflags;
	int pasid_max;
	int ret;

	if (!iommu || dmar_disabled)
		return -EINVAL;

	if (!intel_svm_capable(iommu))
		return -ENOTSUPP;

	if (dev_is_pci(dev)) {
		pasid_max = pci_max_pasids(to_pci_dev(dev));
		if (pasid_max < 0)
			return -EINVAL;
	} else
		pasid_max = 1 << 20;

	/* Binding a supervisor PASID should have mm == NULL */
	if (flags & SVM_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap) || mm) {
			pr_err("Supervisor PASID with user provided mm.\n");
			return -EINVAL;
		}
	}

	if (!(flags & SVM_FLAG_PRIVATE_PASID)) {
		struct intel_svm *t;

		list_for_each_entry(t, &global_svm_list, list) {
			if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
				continue;

			svm = t;
			if (svm->pasid >= pasid_max) {
				dev_warn(dev,
					 "Limited PASID width. Cannot use existing PASID %d\n",
					 svm->pasid);
				ret = -ENOSPC;
				goto out;
			}

			/* Find the matching device in svm list */
			for_each_svm_dev(sdev, svm, dev) {
				if (sdev->ops != ops) {
					ret = -EBUSY;
					goto out;
				}
				sdev->users++;
				goto success;
			}

			break;
		}
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;
	sdev->iommu = iommu;

	ret = intel_iommu_enable_pasid(iommu, dev);
	if (ret) {
		kfree(sdev);
		goto out;
	}

	info = get_domain_info(dev);
	sdev->did = FLPT_DEFAULT_DID;
	sdev->sid = PCI_DEVID(info->bus, info->devfn);
	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}

	/* Finish the setup now we know we're keeping it */
	sdev->users = 1;
	sdev->ops = ops;
	init_rcu_head(&sdev->rcu);

	if (!svm) {
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			kfree(sdev);
			goto out;
		}

		if (pasid_max > intel_pasid_max_id)
			pasid_max = intel_pasid_max_id;

		/* Do not use PASID 0, reserved for RID to PASID */
		svm->pasid = ioasid_alloc(NULL, PASID_MIN,
					  pasid_max - 1, svm);
		if (svm->pasid == INVALID_IOASID) {
			kfree(svm);
			kfree(sdev);
			ret = -ENOSPC;
			goto out;
		}
		svm->notifier.ops = &intel_mmuops;
		svm->mm = mm;
		svm->flags = flags;
		INIT_LIST_HEAD_RCU(&svm->devs);
		INIT_LIST_HEAD(&svm->list);
		ret = -ENOMEM;
		if (mm) {
			ret = mmu_notifier_register(&svm->notifier, mm);
			if (ret) {
				ioasid_free(svm->pasid);
				kfree(svm);
				kfree(sdev);
				goto out;
			}
		}

		spin_lock_irqsave(&iommu->lock, iflags);
		ret = intel_pasid_setup_first_level(iommu, dev,
				mm ? mm->pgd : init_mm.pgd,
				svm->pasid, FLPT_DEFAULT_DID,
				(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
				(cpu_feature_enabled(X86_FEATURE_LA57) ?
				 PASID_FLAG_FL5LP : 0));
		spin_unlock_irqrestore(&iommu->lock, iflags);
		if (ret) {
			if (mm)
				mmu_notifier_unregister(&svm->notifier, mm);
			ioasid_free(svm->pasid);
			kfree(svm);
			kfree(sdev);
			goto out;
		}

		list_add_tail(&svm->list, &global_svm_list);
		if (mm) {
			/* The newly allocated pasid is loaded to the mm. */
			load_pasid(mm, svm->pasid);
		}
	} else {
		/*
		 * Binding a new device with an existing PASID, need to set up
		 * the PASID entry.
		 */
		spin_lock_irqsave(&iommu->lock, iflags);
		ret = intel_pasid_setup_first_level(iommu, dev,
				mm ? mm->pgd : init_mm.pgd,
				svm->pasid, FLPT_DEFAULT_DID,
				(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
				(cpu_feature_enabled(X86_FEATURE_LA57) ?
				 PASID_FLAG_FL5LP : 0));
		spin_unlock_irqrestore(&iommu->lock, iflags);
		if (ret) {
			kfree(sdev);
			goto out;
		}
	}
	list_add_rcu(&sdev->list, &svm->devs);
success:
	sdev->pasid = svm->pasid;
	sdev->sva.dev = dev;
	if (sd)
		*sd = sdev;
	ret = 0;
out:
	return ret;
}

/* Caller must hold pasid_mutex */
static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	iommu = device_to_iommu(dev, NULL, NULL);
	if (!iommu)
		goto out;

	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
	if (ret)
		goto out;

	if (sdev) {
		sdev->users--;
		if (!sdev->users) {
			list_del_rcu(&sdev->list);
			/* Flush the PASID cache and IOTLB for this device.
			 * Note that we do depend on the hardware *not* using
			 * the PASID any more. Just as we depend on other
			 * devices never using PASIDs that they have no right
			 * to use. We have a *shared* PASID table, because it's
			 * large and has to be physically contiguous. So it's
			 * hard to be as defensive as we might like. */
			intel_pasid_tear_down_entry(iommu, dev,
						    svm->pasid, false);
			intel_svm_drain_prq(dev, svm->pasid);
			kfree_rcu(sdev, rcu);

			if (list_empty(&svm->devs)) {
				ioasid_free(svm->pasid);
				if (svm->mm) {
					mmu_notifier_unregister(&svm->notifier, svm->mm);
					/* Clear mm's pasid. */
					load_pasid(svm->mm, PASID_DISABLED);
				}
				list_del(&svm->list);
				/* We mandate that no page faults may be outstanding
				 * for the PASID when intel_svm_unbind_mm() is called.
				 * If that is not obeyed, subtle errors will happen.
				 * Let's make them less subtle... */
				memset(svm, 0x6b, sizeof(*svm));
				kfree(svm);
			}
		}
	}
out:
	return ret;
}

/* Page request queue descriptor */
struct page_req_dsc {
	union {
		struct {
			u64 type:8;
			u64 pasid_present:1;
			u64 priv_data_present:1;
			u64 rsvd:6;
			u64 rid:16;
			u64 pasid:20;
			u64 exe_req:1;
			u64 pm_req:1;
			u64 rsvd2:10;
		};
		u64 qw_0;
	};
	union {
		struct {
			u64 rd_req:1;
			u64 wr_req:1;
			u64 lpig:1;
			u64 prg_index:9;
			u64 addr:52;
		};
		u64 qw_1;
	};
	u64 priv_data[2];
};

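/*
 * The queue occupies (4KiB << PRQ_ORDER) bytes and each descriptor is
 * 32 bytes, so this mask wraps a byte offset to a descriptor boundary
 * within the ring.
 */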
#define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)

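/* Return true if the faulting access asks for more than the VMA permits */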
static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
{
	unsigned long requested = 0;

	if (req->exe_req)
		requested |= VM_EXEC;

	if (req->rd_req)
		requested |= VM_READ;

	if (req->wr_req)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

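/*
 * A canonical address has its upper bits equal to the top implemented
 * virtual-address bit; sign-extending through __VIRTUAL_MASK_SHIFT and
 * comparing with the original value checks exactly that.
 */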
static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long) addr;

	return (((saddr << shift) >> shift) == saddr);
}

/**
 * intel_svm_drain_prq - Drain page requests and responses for a pasid
 * @dev: target device
 * @pasid: pasid for draining
 *
 * Drain all pending page requests and responses related to @pasid in both
 * software and hardware. This is supposed to be called after the device
 * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
 * and DevTLB have been invalidated.
 *
 * It waits until all pending page requests for @pasid in the page fault
 * queue are completed by the prq handling thread. Then follow the steps
 * described in VT-d spec CH7.10 to drain all page requests and page
 * responses pending in the hardware.
 */
static void intel_svm_drain_prq(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	struct qi_desc desc[3];
	struct pci_dev *pdev;
	int head, tail;
	u16 sid, did;
	int qdep;

	info = get_domain_info(dev);
	if (WARN_ON(!info || !dev_is_pci(dev)))
		return;

	if (!info->pri_enabled)
		return;

	iommu = info->iommu;
	domain = info->domain;
	pdev = to_pci_dev(dev);
	sid = PCI_DEVID(info->bus, info->devfn);
	did = domain->iommu_did[iommu->seq_id];
	qdep = pci_ats_queue_depth(pdev);

	/*
	 * Check and wait until all pending page requests in the queue are
	 * handled by the prq handling thread.
	 */
prq_retry:
	reinit_completion(&iommu->prq_complete);
	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req;

		req = &iommu->prq[head / sizeof(*req)];
		if (!req->pasid_present || req->pasid != pasid) {
			head = (head + sizeof(*req)) & PRQ_RING_MASK;
			continue;
		}

		wait_for_completion(&iommu->prq_complete);
		goto prq_retry;
	}

	/*
	 * Perform steps described in VT-d spec CH7.10 to drain page
	 * requests and responses in hardware.
	 */
	memset(desc, 0, sizeof(desc));
	desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_FENCE |
			QI_IWD_TYPE;
	desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
			QI_EIOTLB_DID(did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
			QI_EIOTLB_TYPE;
	desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
			QI_DEV_EIOTLB_SID(sid) |
			QI_DEV_EIOTLB_QDEP(qdep) |
			QI_DEIOTLB_TYPE |
			QI_DEV_IOTLB_PFSID(info->pfsid);
qi_retry:
	reinit_completion(&iommu->prq_complete);
	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		wait_for_completion(&iommu->prq_complete);
		goto qi_retry;
	}
}

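/* Translate the R/W/X/privileged bits of a PRQ descriptor to IOMMU_FAULT_PERM_* flags */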
static int prq_to_iommu_prot(struct page_req_dsc *req)
{
	int prot = 0;

	if (req->rd_req)
		prot |= IOMMU_FAULT_PERM_READ;
	if (req->wr_req)
		prot |= IOMMU_FAULT_PERM_WRITE;
	if (req->exe_req)
		prot |= IOMMU_FAULT_PERM_EXEC;
	if (req->pm_req)
		prot |= IOMMU_FAULT_PERM_PRIV;

	return prot;
}

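/*
 * Translate a page request descriptor into an iommu_fault_event and hand it
 * to the fault handler registered for @dev.
 */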
static int
intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
{
	struct iommu_fault_event event;

	if (!dev || !dev_is_pci(dev))
		return -ENODEV;

	/* Fill in event data for device specific processing */
	memset(&event, 0, sizeof(struct iommu_fault_event));
	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
	event.fault.prm.pasid = desc->pasid;
	event.fault.prm.grpid = desc->prg_index;
	event.fault.prm.perm = prq_to_iommu_prot(desc);

	if (desc->lpig)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
	if (desc->pasid_present) {
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
	}
	if (desc->priv_data_present) {
		/*
		 * Set the last-page-in-group bit when private data is present:
		 * a page response is required in that case, just as it is for
		 * LPIG. iommu_report_device_fault() doesn't understand this
		 * vendor specific requirement, so we set last_page as a
		 * workaround.
		 */
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
		memcpy(event.fault.prm.private_data, desc->priv_data,
		       sizeof(desc->priv_data));
	}

	return iommu_report_device_fault(dev, &event);
}

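/*
 * Threaded handler for the page request queue interrupt: walk the ring from
 * head to tail, resolve each recoverable fault (via handle_mm_fault() for
 * native SVM, or by reporting it to the registered fault handler for guest
 * mode), send any required page group responses, then advance the head and
 * clear a pending overflow condition once the queue has drained.
 */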
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_svm_dev *sdev = NULL;
	struct intel_iommu *iommu = d;
	struct intel_svm *svm = NULL;
	int head, tail, handled = 0;

	/* Clear PPR bit before reading head/tail registers, to
	 * ensure that we get a new interrupt if needed. */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct vm_area_struct *vma;
		struct page_req_dsc *req;
		struct qi_desc resp;
		int result;
		vm_fault_t ret;
		u64 address;

		handled = 1;

		req = &iommu->prq[head / sizeof(*req)];

		result = QI_RESP_FAILURE;
		address = (u64)req->addr << VTD_PAGE_SHIFT;
		if (!req->pasid_present) {
			pr_err("%s: Page request without PASID: %08llx %08llx\n",
			       iommu->name, ((unsigned long long *)req)[0],
			       ((unsigned long long *)req)[1]);
			goto no_pasid;
		}
		/* We shall not receive page requests for supervisor SVM */
		if (req->pm_req && (req->rd_req | req->wr_req)) {
			pr_err("Unexpected page request in Privilege Mode");
			/* No need to find the matching sdev as for bad_req */
			goto no_pasid;
		}
		/* DMA read with exec request is not supported. */
		if (req->exe_req && req->rd_req) {
			pr_err("Execution request not supported\n");
			goto no_pasid;
		}
		if (!svm || svm->pasid != req->pasid) {
			rcu_read_lock();
			svm = ioasid_find(NULL, req->pasid, NULL);
			/* It *can't* go away, because the driver is not permitted
			 * to unbind the mm while any page faults are outstanding.
			 * So we only need RCU to protect the internal idr code. */
			rcu_read_unlock();
			if (IS_ERR_OR_NULL(svm)) {
				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
				       ((unsigned long long *)req)[1]);
				goto no_pasid;
			}
		}

		if (!sdev || sdev->sid != req->rid) {
			struct intel_svm_dev *t;

			sdev = NULL;
			rcu_read_lock();
			list_for_each_entry_rcu(t, &svm->devs, list) {
				if (t->sid == req->rid) {
					sdev = t;
					break;
				}
			}
			rcu_read_unlock();
		}

		result = QI_RESP_INVALID;
		/* Since we're using init_mm.pgd directly, we should never take
		 * any faults on kernel addresses. */
		if (!svm->mm)
			goto bad_req;

		/* If address is not canonical, return invalid response */
		if (!is_canonical_address(address))
			goto bad_req;

		/*
		 * If prq is to be handled outside iommu driver via receiver of
		 * the fault notifiers, we skip the page response here.
		 */
		if (svm->flags & SVM_FLAG_GUEST_MODE) {
			if (sdev && !intel_svm_prq_report(sdev->dev, req))
				goto prq_advance;
			else
				goto bad_req;
		}

		/* If the mm is already defunct, don't handle faults. */
		if (!mmget_not_zero(svm->mm))
			goto bad_req;

		mmap_read_lock(svm->mm);
		vma = find_extend_vma(svm->mm, address);
		if (!vma || address < vma->vm_start)
			goto invalid;

		if (access_error(vma, req))
			goto invalid;

		ret = handle_mm_fault(vma, address,
				      req->wr_req ? FAULT_FLAG_WRITE : 0,
				      NULL);
		if (ret & VM_FAULT_ERROR)
			goto invalid;

		result = QI_RESP_SUCCESS;
invalid:
		mmap_read_unlock(svm->mm);
		mmput(svm->mm);
bad_req:
		WARN_ON(!sdev);
		if (sdev && sdev->ops && sdev->ops->fault_cb) {
			int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
				(req->exe_req << 1) | (req->pm_req);
			sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr,
					    req->priv_data, rwxp, result);
		}
		/* We get here in the error case where the PASID lookup failed,
		   and these can be NULL. Do not use them below this point! */
		sdev = NULL;
		svm = NULL;
no_pasid:
		if (req->lpig || req->priv_data_present) {
			/*
			 * Per VT-d spec. v3.0 ch7.7, system software must
			 * respond with page group response if private data
			 * is present (PDP) or last page in group (LPIG) bit
			 * is set. This is an additional VT-d feature beyond
			 * PCI ATS spec.
			 */
			resp.qw0 = QI_PGRP_PASID(req->pasid) |
				QI_PGRP_DID(req->rid) |
				QI_PGRP_PASID_P(req->pasid_present) |
				QI_PGRP_PDP(req->priv_data_present) |
				QI_PGRP_RESP_CODE(result) |
				QI_PGRP_RESP_TYPE;
			resp.qw1 = QI_PGRP_IDX(req->prg_index) |
				QI_PGRP_LPIG(req->lpig);
			resp.qw2 = 0;
			resp.qw3 = 0;

			if (req->priv_data_present)
				memcpy(&resp.qw2, req->priv_data,
				       sizeof(req->priv_data));
			qi_submit_sync(iommu, &resp, 1, 0);
		}
prq_advance:
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	/*
	 * Clear the page request overflow bit and wake up all threads that
	 * are waiting for the completion of this handling.
	 */
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
				    iommu->name);
		head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
		tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
		if (head == tail) {
			writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
			pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
					    iommu->name);
		}
	}

	if (!completion_done(&iommu->prq_complete))
		complete(&iommu->prq_complete);

	return IRQ_RETVAL(handled);
}

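/*
 * iommu_ops SVA entry points: these wrap intel_svm_bind_mm()/unbind_mm()
 * under pasid_mutex and translate between struct iommu_sva handles and the
 * driver's intel_svm_dev bookkeeping.
 */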
#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)

struct iommu_sva *
intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_sva *sva = ERR_PTR(-EINVAL);
	struct intel_svm_dev *sdev = NULL;
	unsigned int flags = 0;
	int ret;

	/*
	 * TODO: Consolidate with generic iommu-sva bind after it is merged.
	 * It will require shared SVM data structures, i.e. combine io_mm
	 * and intel_svm etc.
	 */
	if (drvdata)
		flags = *(unsigned int *)drvdata;
	mutex_lock(&pasid_mutex);
	ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev);
	if (ret)
		sva = ERR_PTR(ret);
	else if (sdev)
		sva = &sdev->sva;
	else
		WARN(!sdev, "SVM bind succeeded with no sdev!\n");

	mutex_unlock(&pasid_mutex);

	return sva;
}

void intel_svm_unbind(struct iommu_sva *sva)
{
	struct intel_svm_dev *sdev;

	mutex_lock(&pasid_mutex);
	sdev = to_intel_svm_dev(sva);
	intel_svm_unbind_mm(sdev->dev, sdev->pasid);
	mutex_unlock(&pasid_mutex);
}

u32 intel_svm_get_pasid(struct iommu_sva *sva)
{
	struct intel_svm_dev *sdev;
	u32 pasid;

	mutex_lock(&pasid_mutex);
	sdev = to_intel_svm_dev(sva);
	pasid = sdev->pasid;
	mutex_unlock(&pasid_mutex);

	return pasid;
}

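/*
 * Send a page group response on behalf of the fault handler that consumed a
 * previously reported page request for @dev.
 */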
int intel_svm_page_response(struct device *dev,
			    struct iommu_fault_event *evt,
			    struct iommu_page_response *msg)
{
	struct iommu_fault_page_request *prm;
	struct intel_svm_dev *sdev = NULL;
	struct intel_svm *svm = NULL;
	struct intel_iommu *iommu;
	bool private_present;
	bool pasid_present;
	bool last_page;
	u8 bus, devfn;
	int ret = 0;
	u16 sid;

	if (!dev || !dev_is_pci(dev))
		return -ENODEV;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!msg || !evt)
		return -EINVAL;

	mutex_lock(&pasid_mutex);

	prm = &evt->fault.prm;
	sid = PCI_DEVID(bus, devfn);
	pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
	last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

	if (!pasid_present) {
		ret = -EINVAL;
		goto out;
	}

	if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
		ret = -EINVAL;
		goto out;
	}

	ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
	if (ret || !sdev) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * For responses from userspace, need to make sure that the
	 * pasid has been bound to its mm.
	 */
	if (svm->flags & SVM_FLAG_GUEST_MODE) {
		struct mm_struct *mm;

		mm = get_task_mm(current);
		if (!mm) {
			ret = -EINVAL;
			goto out;
		}

		if (mm != svm->mm) {
			ret = -ENODEV;
			mmput(mm);
			goto out;
		}

		mmput(mm);
	}

	/*
	 * Per VT-d spec. v3.0 ch7.7, system software must respond
	 * with page group response if private data is present (PDP)
	 * or last page in group (LPIG) bit is set. This is an
	 * additional VT-d requirement beyond PCI ATS spec.
	 */
	if (last_page || private_present) {
		struct qi_desc desc;

		desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
				QI_PGRP_PASID_P(pasid_present) |
				QI_PGRP_PDP(private_present) |
				QI_PGRP_RESP_CODE(msg->code) |
				QI_PGRP_RESP_TYPE;
		desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
		desc.qw2 = 0;
		desc.qw3 = 0;
		if (private_present)
			memcpy(&desc.qw2, prm->private_data,
			       sizeof(prm->private_data));

		qi_submit_sync(iommu, &desc, 1, 0);
	}
out:
	mutex_unlock(&pasid_mutex);
	return ret;
}