xref: /OK3568_Linux_fs/kernel/drivers/vfio/pci/vfio_pci_nvlink2.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI NVIDIA Witherspoon GPU support a.k.a. NVLink2.
 *
 * Copyright (C) 2018 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * Register an on-GPU RAM region for cacheable access.
 *
 * Derived from original vfio_pci_igd.c:
 * Copyright (C) 2016 Red Hat, Inc.  All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/kvm_ppc.h>
#include "vfio_pci_private.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap);
EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_npu2_mmap);

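/* Per-device state backing the NVIDIA V100 on-GPU RAM region */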
struct vfio_pci_nvgpu_data {
	unsigned long gpu_hpa; /* GPU RAM physical address */
	unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
	unsigned long useraddr; /* GPU RAM userspace address */
	unsigned long size; /* Size of the GPU RAM window (usually 128GB) */
	struct mm_struct *mm;
	struct mm_iommu_table_group_mem_t *mem; /* Pre-registered RAM descr. */
	struct pci_dev *gpdev;
	struct notifier_block group_notifier;
};

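/*
 * Slow read/write handler for the GPU RAM region. Each access temporarily
 * maps only the pages covering the request (see the comment below for why
 * the RAM is not kept mapped for the lifetime of the guest).
 */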
static size_t vfio_pci_nvgpu_rw(struct vfio_pci_device *vdev,
		char __user *buf, size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct vfio_pci_nvgpu_data *data = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	loff_t posaligned = pos & PAGE_MASK, posoff = pos & ~PAGE_MASK;
	size_t sizealigned;
	void __iomem *ptr;

	if (pos >= vdev->region[i].size)
		return -EINVAL;

	count = min(count, (size_t)(vdev->region[i].size - pos));

	/*
	 * We map only a small piece of the GPU RAM for a short time instead
	 * of mapping it for the guest lifetime because:
	 *
	 * 1) we do not know the GPU RAM size, only the aperture, which is
	 *    4-8 times bigger than the actual RAM size (16/32GB RAM vs.
	 *    a 128GB aperture);
	 * 2) mapping GPU RAM allows the CPU to prefetch, and if this happens
	 *    before the NVLink bridge is reset (which fences GPU RAM),
	 *    hardware management interrupts (HMIs) might occur and freeze
	 *    the NVLink bridge.
	 *
	 * This is not a fast path anyway.
	 */
	sizealigned = ALIGN(posoff + count, PAGE_SIZE);
	ptr = ioremap_cache(data->gpu_hpa + posaligned, sizealigned);
	if (!ptr)
		return -EFAULT;

	if (iswrite) {
		if (copy_from_user(ptr + posoff, buf, count))
			count = -EFAULT;
		else
			*ppos += count;
	} else {
		if (copy_to_user(buf, ptr + posoff, count))
			count = -EFAULT;
		else
			*ppos += count;
	}

	iounmap(ptr);

	return count;
}

static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region)
{
	struct vfio_pci_nvgpu_data *data = region->data;
	long ret;

	/* If there were any mappings at all... */
	if (data->mm) {
		if (data->mem) {
			ret = mm_iommu_put(data->mm, data->mem);
			WARN_ON(ret);
		}

		mmdrop(data->mm);
	}

	vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
			&data->group_notifier);

	pnv_npu2_unmap_lpar_dev(data->gpdev);

	kfree(data);
}

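/*
 * Page fault handler for the mmap'ed GPU RAM region: converts the faulting
 * address into an offset within the region and inserts the matching GPU RAM
 * PFN, so the mapping is populated lazily, one page at a time.
 */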
static vm_fault_t vfio_pci_nvgpu_mmap_fault(struct vm_fault *vmf)
{
	vm_fault_t ret;
	struct vm_area_struct *vma = vmf->vma;
	struct vfio_pci_region *region = vma->vm_private_data;
	struct vfio_pci_nvgpu_data *data = region->data;
	unsigned long vmf_off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nv2pg = data->gpu_hpa >> PAGE_SHIFT;
	unsigned long vm_pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	unsigned long pfn = nv2pg + vm_pgoff + vmf_off;

	ret = vmf_insert_pfn(vma, vmf->address, pfn);
	trace_vfio_pci_nvgpu_mmap_fault(data->gpdev, pfn << PAGE_SHIFT,
			vmf->address, ret);

	return ret;
}

static const struct vm_operations_struct vfio_pci_nvgpu_mmap_vmops = {
	.fault = vfio_pci_nvgpu_mmap_fault,
};

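/*
 * mmap handler for the GPU RAM region. Only a single mapping, no bigger than
 * the region itself, is allowed. PTEs are inserted lazily by the fault
 * handler above; here the mapped range is preregistered with the mm so that
 * later users can find it via mm_iommu_find().
 */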
static int vfio_pci_nvgpu_mmap(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vm_area_struct *vma)
{
	int ret;
	struct vfio_pci_nvgpu_data *data = region->data;

	if (data->useraddr)
		return -EPERM;

	if (vma->vm_end - vma->vm_start > data->size)
		return -EINVAL;

	vma->vm_private_data = region;
	vma->vm_flags |= VM_PFNMAP;
	vma->vm_ops = &vfio_pci_nvgpu_mmap_vmops;

	/*
	 * mm_iommu_newdev() is called here only once, as the region is not
	 * registered yet so the proper initialization happens now.
	 * Other places use mm_iommu_find(), which returns the already
	 * registered @mem and does not call gup().
	 */
	data->useraddr = vma->vm_start;
	data->mm = current->mm;

	mmgrab(data->mm);
	ret = (int) mm_iommu_newdev(data->mm, data->useraddr,
			vma_pages(vma), data->gpu_hpa, &data->mem);

	trace_vfio_pci_nvgpu_mmap(vdev->pdev, data->gpu_hpa, data->useraddr,
			vma->vm_end - vma->vm_start, ret);

	return ret;
}

static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vfio_info_cap *caps)
{
	struct vfio_pci_nvgpu_data *data = region->data;
	struct vfio_region_info_cap_nvlink2_ssatgt cap = {
		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
		.header.version = 1,
		.tgt = data->gpu_tgt
	};

	return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
}

static const struct vfio_pci_regops vfio_pci_nvgpu_regops = {
	.rw = vfio_pci_nvgpu_rw,
	.release = vfio_pci_nvgpu_release,
	.mmap = vfio_pci_nvgpu_mmap,
	.add_capability = vfio_pci_nvgpu_add_capability,
};

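/*
 * Group notifier: once userspace associates a KVM instance with the IOMMU
 * group, map the GPU RAM into that LPAR via the NPU so the guest can access
 * it over NVLink2.
 */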
static int vfio_pci_nvgpu_group_notifier(struct notifier_block *nb,
		unsigned long action, void *opaque)
{
	struct kvm *kvm = opaque;
	struct vfio_pci_nvgpu_data *data = container_of(nb,
			struct vfio_pci_nvgpu_data,
			group_notifier);

	if (action == VFIO_GROUP_NOTIFY_SET_KVM && kvm &&
			pnv_npu2_map_lpar_dev(data->gpdev,
				kvm->arch.lpid, MSR_DR | MSR_PR))
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

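/*
 * Device-specific init for NVIDIA V100 GPUs sitting behind an NPU: reads the
 * GPU RAM window and TGT address from the device tree and exposes the RAM as
 * a mmap'able device-specific VFIO region.
 */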
int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
{
	int ret;
	u64 reg[2];
	u64 tgt = 0;
	struct device_node *npu_node, *mem_node;
	struct pci_dev *npu_dev;
	struct vfio_pci_nvgpu_data *data;
	uint32_t mem_phandle = 0;
	unsigned long events = VFIO_GROUP_NOTIFY_SET_KVM;

	/*
	 * PCI config space does not tell us about NVLink presence but
	 * the platform does, use this.
	 */
	npu_dev = pnv_pci_get_npu_dev(vdev->pdev, 0);
	if (!npu_dev)
		return -ENODEV;

	npu_node = pci_device_to_OF_node(npu_dev);
	if (!npu_node)
		return -EINVAL;

	if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
		return -ENODEV;

	mem_node = of_find_node_by_phandle(mem_phandle);
	if (!mem_node)
		return -EINVAL;

	if (of_property_read_variable_u64_array(mem_node, "reg", reg,
				ARRAY_SIZE(reg), ARRAY_SIZE(reg)) !=
			ARRAY_SIZE(reg))
		return -EINVAL;

	if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
		dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
		return -EFAULT;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->gpu_hpa = reg[0];
	data->gpu_tgt = tgt;
	data->size = reg[1];

	dev_dbg(&vdev->pdev->dev, "%lx..%lx\n", data->gpu_hpa,
			data->gpu_hpa + data->size - 1);

	data->gpdev = vdev->pdev;
	data->group_notifier.notifier_call = vfio_pci_nvgpu_group_notifier;

	ret = vfio_register_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
			&events, &data->group_notifier);
	if (ret)
		goto free_exit;

	/*
	 * We have just set KVM, we do not need the listener anymore.
	 * Also, keeping it registered means that if more than one GPU is
	 * assigned, we will get several similar notifications about the
	 * same device again, which does not help with anything.
	 */
	vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
			&data->group_notifier);

	ret = vfio_pci_register_dev_region(vdev,
			PCI_VENDOR_ID_NVIDIA | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
			&vfio_pci_nvgpu_regops,
			data->size,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_MMAP,
			data);
	if (ret)
		goto free_exit;

	return 0;
free_exit:
	kfree(data);

	return ret;
}

/*
 * IBM NPU2 bridge
 */
struct vfio_pci_npu2_data {
	void *base; /* ATSD register virtual address, for emulated access */
	unsigned long mmio_atsd; /* ATSD physical address */
	unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
	unsigned int link_speed; /* The link speed from DT's ibm,nvlink-speed */
};

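/*
 * Read/write handler for the ATSD region: accesses are serviced from the
 * write-through mapping of the ATSD register block set up at init time.
 */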
static size_t vfio_pci_npu2_rw(struct vfio_pci_device *vdev,
		char __user *buf, size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct vfio_pci_npu2_data *data = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vdev->region[i].size)
		return -EINVAL;

	count = min(count, (size_t)(vdev->region[i].size - pos));

	if (iswrite) {
		if (copy_from_user(data->base + pos, buf, count))
			return -EFAULT;
	} else {
		if (copy_to_user(buf, data->base + pos, count))
			return -EFAULT;
	}
	*ppos += count;

	return count;
}

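/*
 * mmap handler for the ATSD region: maps the single ATSD register page into
 * userspace as non-cached MMIO.
 */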
static int vfio_pci_npu2_mmap(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vm_area_struct *vma)
{
	int ret;
	struct vfio_pci_npu2_data *data = region->data;
	unsigned long req_len = vma->vm_end - vma->vm_start;

	if (req_len != PAGE_SIZE)
		return -EINVAL;

	vma->vm_flags |= VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	ret = remap_pfn_range(vma, vma->vm_start, data->mmio_atsd >> PAGE_SHIFT,
			req_len, vma->vm_page_prot);
	trace_vfio_pci_npu2_mmap(vdev->pdev, data->mmio_atsd, vma->vm_start,
			vma->vm_end - vma->vm_start, ret);

	return ret;
}

static void vfio_pci_npu2_release(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region)
{
	struct vfio_pci_npu2_data *data = region->data;

	memunmap(data->base);
	kfree(data);
}

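/*
 * Advertise the SSATGT (GPU RAM TGT address) and NVLink speed capabilities
 * on the ATSD region.
 */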
static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vfio_info_cap *caps)
{
	struct vfio_pci_npu2_data *data = region->data;
	struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
		.header.version = 1,
		.tgt = data->gpu_tgt
	};
	struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
		.header.version = 1,
		.link_speed = data->link_speed
	};
	int ret;

	ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
	if (ret)
		return ret;

	return vfio_info_add_capability(caps, &capspd.header, sizeof(capspd));
}

static const struct vfio_pci_regops vfio_pci_npu2_regops = {
	.rw = vfio_pci_npu2_rw,
	.mmap = vfio_pci_npu2_mmap,
	.release = vfio_pci_npu2_release,
	.add_capability = vfio_pci_npu2_add_capability,
};

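/*
 * Device-specific init for the IBM NPU2 NVLink bridges: picks an ATSD
 * (address translation shootdown) register for the link, reads the TGT
 * address and link speed from the device tree, and exposes the ATSD
 * register as a device-specific VFIO region.
 */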
int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
{
	int ret;
	struct vfio_pci_npu2_data *data;
	struct device_node *nvlink_dn;
	u32 nvlink_index = 0, mem_phandle = 0;
	struct pci_dev *npdev = vdev->pdev;
	struct device_node *npu_node = pci_device_to_OF_node(npdev);
	struct pci_controller *hose = pci_bus_to_host(npdev->bus);
	u64 mmio_atsd = 0;
	u64 tgt = 0;
	u32 link_speed = 0xff;

	/*
	 * PCI config space does not tell us about NVLink presence but
	 * the platform does, use this.
	 */
	if (!pnv_pci_get_gpu_dev(vdev->pdev))
		return -ENODEV;

	if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
		return -ENODEV;

	/*
	 * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links
	 * so we can allocate one register per link, using the nvlink index
	 * as a key.
	 * There is always at least one ATSD register so as long as at least
	 * NVLink bridge #0 is passed to the guest, ATSD will be available.
	 */
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
			&nvlink_index)))
		return -ENODEV;

	if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index,
			&mmio_atsd)) {
		if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", 0,
				&mmio_atsd)) {
			dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
			mmio_atsd = 0;
		} else {
			dev_warn(&vdev->pdev->dev,
				 "Using fallback ibm,mmio-atsd[0] for ATSD.\n");
		}
	}

	if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
		dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
		return -EFAULT;
	}

	if (of_property_read_u32(npu_node, "ibm,nvlink-speed", &link_speed)) {
		dev_warn(&vdev->pdev->dev, "No ibm,nvlink-speed found\n");
		return -EFAULT;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->mmio_atsd = mmio_atsd;
	data->gpu_tgt = tgt;
	data->link_speed = link_speed;
	if (data->mmio_atsd) {
		data->base = memremap(data->mmio_atsd, SZ_64K, MEMREMAP_WT);
		if (!data->base) {
			ret = -ENOMEM;
			goto free_exit;
		}
	}

	/*
	 * We want to expose the capability even if this specific NVLink
	 * did not get its own ATSD register because capabilities
	 * belong to VFIO regions and normally there will be an ATSD
	 * register assigned to the NVLink bridge.
	 */
	ret = vfio_pci_register_dev_region(vdev,
			PCI_VENDOR_ID_IBM |
			VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
			&vfio_pci_npu2_regops,
			data->mmio_atsd ? PAGE_SIZE : 0,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_MMAP,
			data);
	if (ret)
		goto free_exit;

	return 0;

free_exit:
	if (data->base)
		memunmap(data->base);
	kfree(data);

	return ret;
}