// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

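/*
 * Number of pages needed to store a TCE table with @iommu_pages entries,
 * one u64 per entry.
 */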
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

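/*
 * Total locked-memory footprint of a TCE table, in pages: the TCE pages
 * themselves plus the kvmppc_spapr_tce_table descriptor and its page
 * pointer array. Used for locked_vm accounting.
 */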
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

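/* RCU callback: drop the iommu_table reference and free the list entry */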
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

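/*
 * Called when the last KVM reference to a LIOBN<->iommu_table link is
 * dropped: unlinks the entry and defers freeing until after an RCU grace
 * period so lockless list walkers stay safe.
 */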
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

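/*
 * Detaches an IOMMU group from this VM: for every TCE table, drop the
 * reference taken on each hardware table that belongs to @grp.
 */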
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}

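/*
 * Attaches an IOMMU group's hardware table to the TCE table behind
 * @tablefd. The hardware table must be compatible with the guest view
 * (page size, window offset and size). If this LIOBN already references
 * the table, only the KVM reference counter is bumped.
 */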
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			rcu_read_unlock();
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

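/* RCU callback: free the backing pages and the table descriptor */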
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
}

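/*
 * Lazily allocates the page backing @sttpage of the TCE table; pages are
 * only allocated when first touched (by a TCE update or a userspace fault).
 */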
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
		unsigned long sttpage)
{
	struct page *page = stt->pages[sttpage];

	if (page)
		return page;

	mutex_lock(&stt->alloc_lock);
	page = stt->pages[sttpage];
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		WARN_ON_ONCE(!page);
		if (page)
			stt->pages[sttpage] = page;
	}
	mutex_unlock(&stt->alloc_lock);

	return page;
}

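/* mmap fault handler: hand out the (lazily allocated) TCE table page */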
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
	if (!page)
		return VM_FAULT_OOM;

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

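/*
 * Releases the TCE table fd: unlink the table from the VM, drop all
 * hardware table references, unaccount the locked memory and free the
 * table after an RCU grace period.
 */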
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	account_locked_vm(kvm->mm,
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

	kvm_put_kvm(stt->kvm);

	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap           = kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};

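/*
 * Handler for KVM_CREATE_SPAPR_TCE_64: validates the window geometry,
 * accounts the locked memory, allocates the table descriptor and returns
 * an anonymous fd through which userspace can mmap the table.
 */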
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	struct mm_struct *mm = kvm->mm;
	unsigned long npages, size = args->size;
	int ret;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	mutex_init(&stt->alloc_lock);
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm_no_destroy(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

	kfree(stt);
 fail_acct:
	account_locked_vm(mm, kvmppc_stt_pages(npages), false);
	return ret;
}

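/*
 * Translates the guest physical address encoded in a TCE into the
 * userspace address of the corresponding memslot page, preserving the
 * offset within the page while stripping the TCE permission bits.
 */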
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

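/*
 * Validates a guest TCE before it is stored: checks the guest physical
 * address against the window page size and makes sure every attached
 * hardware table can translate it via preregistered memory. Must succeed
 * before kvmppc_tce_put() as the latter cannot fail.
 */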
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
			rcu_read_unlock();
			return H_TOO_HARD;
		}
	}
	rcu_read_unlock();

	return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_tce_validate() must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;
	unsigned long sttpage;

	idx -= stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	page = stt->pages[sttpage];

	if (!page) {
		/* We allow any TCE, not just with read|write permissions */
		if (!tce)
			return;

		page = kvm_spapr_get_tce_page(stt, sttpage);
		if (!page)
			return;
	}
	tbl = page_to_virt(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

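/*
 * Clears all hardware TCEs covered by one guest entry: a guest page may
 * span several hardware (sub)pages when the window page size exceeds the
 * hardware IOMMU page size.
 */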
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
	}
}

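/*
 * Drops the "mapped" reference on the preregistered memory region that
 * backs @entry and clears the cached userspace address.
 */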
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

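/* Unmaps a single hardware TCE and releases the preregistered memory pin */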
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
					&dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

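/* Unmaps one guest TCE, iterating over all hardware subpages it covers */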
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

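/*
 * Maps a single hardware TCE: looks up the preregistered memory region
 * for @ua, takes a "mapped" reference on it and installs the host
 * physical address into the hardware table, remembering @ua for later
 * unmapping.
 */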
static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

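/* Maps one guest TCE, iterating over all hardware subpages it covers */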
static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

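/*
 * Virtual-mode handler for the H_PUT_TCE hypercall: validates the request,
 * updates every attached hardware table and finally stores the TCE in the
 * guest-visible table.
 */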
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

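/*
 * Virtual-mode handler for H_PUT_TCE_INDIRECT: reads up to 512 TCEs from
 * a guest page at @tce_list and applies each of them as H_PUT_TCE would.
 */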
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * SPAPR spec says that the maximum size of the list is 512 TCEs
	 * so the whole table fits in 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code.  If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
						 entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

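/*
 * Virtual-mode handler for H_STUFF_TCE: writes @tce_value into @npages
 * consecutive entries, clearing any hardware mappings first.
 */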
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace poison TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);