/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/kconfig.h>

#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)

#include <linux/printk.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/amd-iommu.h>
#include "kfd_priv.h"
#include "kfd_dbgmgr.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"

static const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
					AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
					AMD_IOMMU_DEVICE_FLAG_PASID_SUP;

/** kfd_iommu_check_device - Check whether IOMMU is available for device
 */
int kfd_iommu_check_device(struct kfd_dev *kfd)
{
	struct amd_iommu_device_info iommu_info;
	int err;

	if (!kfd->use_iommu_v2)
		return -ENODEV;

	iommu_info.flags = 0;
	err = amd_iommu_device_info(kfd->pdev, &iommu_info);
	if (err)
		return err;

	if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags)
		return -ENODEV;

	return 0;
}
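
/*
 * Illustrative caller sketch (hypothetical, not part of the original
 * file): early device setup could use kfd_iommu_check_device() as a
 * yes/no probe before committing to IOMMUv2 features. kfd_probe_one()
 * is an assumed name for such a path.
 *
 *	static int kfd_probe_one(struct kfd_dev *kfd)
 *	{
 *		if (kfd_iommu_check_device(kfd)) {
 *			// ATS/PRI/PASID not all supported: run the
 *			// device without IOMMUv2 address translation.
 *			kfd->use_iommu_v2 = false;
 *		}
 *		return 0;
 *	}
 */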

/** kfd_iommu_device_init - Initialize IOMMU for device
 */
int kfd_iommu_device_init(struct kfd_dev *kfd)
{
	struct amd_iommu_device_info iommu_info;
	unsigned int pasid_limit;
	int err;

	if (!kfd->use_iommu_v2)
		return 0;

	iommu_info.flags = 0;
	err = amd_iommu_device_info(kfd->pdev, &iommu_info);
	if (err < 0) {
		dev_err(kfd_device,
			"error getting iommu info. is the iommu enabled?\n");
		return -ENODEV;
	}

	if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
		dev_err(kfd_device,
			"error required iommu flags ats %i, pri %i, pasid %i\n",
		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
									!= 0);
		return -ENODEV;
	}

	pasid_limit = min_t(unsigned int,
			(unsigned int)(1 << kfd->device_info->max_pasid_bits),
			iommu_info.max_pasids);

	if (!kfd_set_pasid_limit(pasid_limit)) {
		dev_err(kfd_device, "error setting pasid limit\n");
		return -EBUSY;
	}

	return 0;
}
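
/*
 * Worked example for the pasid_limit computation above (values are
 * illustrative, not taken from any particular ASIC): with
 * max_pasid_bits == 16 the device supports 1 << 16 == 65536 PASIDs;
 * if the IOMMU reports max_pasids == 32768, then
 *
 *	pasid_limit = min(65536, 32768) = 32768
 *
 * i.e. the smaller of the device and IOMMU limits always wins.
 */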

/** kfd_iommu_bind_process_to_device - Have the IOMMU bind a process
 *
 * Binds the given process to the given device using its PASID. This
 * enables IOMMUv2 address translation for the process on the device.
 *
 * This function assumes that the process mutex is held.
 */
int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct kfd_process *p = pdd->process;
	int err;

	if (!dev->use_iommu_v2 || pdd->bound == PDD_BOUND)
		return 0;

	if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
		pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
		return -EINVAL;
	}

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (!err)
		pdd->bound = PDD_BOUND;

	return err;
}
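
/*
 * Illustrative caller sketch (hypothetical, not part of the original
 * file): because kfd_iommu_bind_process_to_device() assumes the
 * process mutex is held, a caller would look roughly like this.
 *
 *	mutex_lock(&p->mutex);
 *	err = kfd_iommu_bind_process_to_device(pdd);
 *	mutex_unlock(&p->mutex);
 *	if (err)
 *		pr_debug("pasid bind failed: %d\n", err);
 */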

/** kfd_iommu_unbind_process - Unbind process from all devices
 *
 * This removes all IOMMU device bindings of the process. To be used
 * before process termination.
 */
void kfd_iommu_unbind_process(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->bound == PDD_BOUND)
			amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
}

/* Callback for process shutdown invoked by the IOMMU driver */
static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid)
{
	struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	if (!dev)
		return;

	/*
	 * Look for the process that matches the pasid. If there is no such
	 * process, we either released it in amdkfd's own notifier, or there
	 * is a bug. Unfortunately, there is no way to tell...
	 */
	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return;

	pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);

	mutex_lock(kfd_get_dbgmgr_mutex());

	if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
		if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
			kfd_dbgmgr_destroy(dev->dbgmgr);
			dev->dbgmgr = NULL;
		}
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (pdd)
		/* For a GPU relying on the IOMMU, we need to dequeue
		 * here while the PASID is still bound.
		 */
		kfd_process_dequeue_from_device(pdd);

	mutex_unlock(&p->mutex);

	kfd_unref_process(p);
}

/* This function is called by the IOMMU driver on PPR failure */
static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid,
				unsigned long address, u16 flags)
{
	struct kfd_dev *dev;

	dev_warn_ratelimited(kfd_device,
			"Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
			pdev->bus->number,
			PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn),
			pasid,
			address,
			flags);

	dev = kfd_device_by_pci_dev(pdev);
	if (!WARN_ON(!dev))
		kfd_signal_iommu_event(dev, pasid, address,
			flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);

	return AMD_IOMMU_INV_PRI_RSP_INVALID;
}

/*
 * Bind processes to the device that have been temporarily unbound
 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
 */
static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;
	int err = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(kfd, p);

		if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) {
			mutex_unlock(&p->mutex);
			continue;
		}

		err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
				p->lead_thread);
		if (err < 0) {
			pr_err("Unexpected pasid 0x%x binding failure\n",
					p->pasid);
			mutex_unlock(&p->mutex);
			break;
		}

		pdd->bound = PDD_BOUND;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return err;
}

/*
 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
 * processes will be restored to PDD_BOUND state in
 * kfd_bind_processes_to_device.
 */
static void kfd_unbind_processes_from_device(struct kfd_dev *kfd)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(kfd, p);

		if (WARN_ON(!pdd)) {
			mutex_unlock(&p->mutex);
			continue;
		}

		if (pdd->bound == PDD_BOUND)
			pdd->bound = PDD_BOUND_SUSPENDED;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);
}

/** kfd_iommu_suspend - Prepare IOMMU for suspend
 *
 * This unbinds processes from the device and disables the IOMMU for
 * the device.
 */
void kfd_iommu_suspend(struct kfd_dev *kfd)
{
	if (!kfd->use_iommu_v2)
		return;

	kfd_unbind_processes_from_device(kfd);

	amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
	amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
	amd_iommu_free_device(kfd->pdev);
}

/** kfd_iommu_resume - Restore IOMMU after resume
 *
 * This reinitializes the IOMMU for the device and re-binds previously
 * suspended processes to the device.
 */
int kfd_iommu_resume(struct kfd_dev *kfd)
{
	unsigned int pasid_limit;
	int err;

	if (!kfd->use_iommu_v2)
		return 0;

	pasid_limit = kfd_get_pasid_limit();

	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
	if (err)
		return -ENXIO;

	amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
					iommu_pasid_shutdown_callback);
	amd_iommu_set_invalid_ppr_cb(kfd->pdev,
				     iommu_invalid_ppr_cb);

	err = kfd_bind_processes_to_device(kfd);
	if (err) {
		amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
		amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
		amd_iommu_free_device(kfd->pdev);
		return err;
	}

	return 0;
}
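
/*
 * Illustrative pairing sketch (hypothetical, not part of the original
 * file): a power-management path would call the two functions above
 * symmetrically, e.g.
 *
 *	kfd_iommu_suspend(kfd);		// unbind processes, drop
 *					// callbacks, free device state
 *	...
 *	err = kfd_iommu_resume(kfd);	// re-init device, re-register
 *					// callbacks, re-bind processes
 *
 * kfd_iommu_resume() undoes its own partial setup on failure, so the
 * caller only needs to check the return value.
 */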

extern bool amd_iommu_pc_supported(void);
extern u8 amd_iommu_pc_get_max_banks(u16 devid);
extern u8 amd_iommu_pc_get_max_counters(u16 devid);

/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology
 */
int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
{
	struct kfd_perf_properties *props;

	if (!(kdev->node_props.capability & HSA_CAP_ATS_PRESENT))
		return 0;

	if (!amd_iommu_pc_supported())
		return 0;

	props = kfd_alloc_struct(props);
	if (!props)
		return -ENOMEM;
	strcpy(props->block_name, "iommu");
	props->max_concurrent = amd_iommu_pc_get_max_banks(0) *
		amd_iommu_pc_get_max_counters(0); /* assume one iommu */
	list_add_tail(&props->list, &kdev->perf_props);

	return 0;
}
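
/*
 * Worked example for max_concurrent above (numbers are illustrative,
 * not from real hardware): if the IOMMU exposes 4 counter banks with
 * 4 counters each, then max_concurrent = 4 * 4 = 16, i.e. up to 16
 * IOMMU performance counters can be in use at once on the single
 * IOMMU (devid 0) that this code assumes.
 */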

#endif