// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

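/*
 * WARN_ON_ONCE_RM() is a variant of WARN_ON_ONCE() meant to be usable in
 * real mode: it reports via pr_err() and dump_stack() directly, presumably
 * because the trap-based WARN machinery cannot be relied upon while
 * translation is off.
 */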
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(".data.unlikely") __warned;	\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
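/*
 * Converts a guest physical address carried in a TCE to a host userspace
 * address. Uses the raw (lockless) memslot array so that it can be used
 * in real mode.
 */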
static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
				unsigned long tce, unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address or
 * whether the page was actually allocated).
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}

/* Note on the use of page_address() in real mode:
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address(),
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is an arithmetic
 * operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently, but either
 * WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64, and
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_rm_tce_validate must be called before it.
 */
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	/*
	 * kvmppc_rm_ioba_validate() allows pages not to be allocated if a TCE
	 * is being cleared, otherwise it returns H_TOO_HARD and we skip this.
	 */
	if (!page) {
		WARN_ON_ONCE_RM(tce != 0);
		return;
	}
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

/*
 * TCE table pages are allocated on demand by the virtual mode handler
 * (kvmppc_tce_put()); kvmppc_rm_tce_put() cannot allocate them in real mode.
 * Check if kvmppc_rm_tce_put() can succeed in real mode, i.e. that each TCE
 * page is either allocated or not required (when clearing a TCE entry).
 */
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages, bool clearing)
{
	unsigned long i, idx, sttpage, sttpages;
	unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

	if (ret)
		return ret;
	/*
	 * clearing==true says kvmppc_rm_tce_put() won't need to allocate
	 * pages for empty TCEs.
	 */
	if (clearing)
		return H_SUCCESS;

	idx = (ioba >> stt->page_shift) - stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	sttpages = ALIGN(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
			TCES_PER_PAGE;
	for (i = sttpage; i < sttpage + sttpages; ++i)
		if (!stt->pages[i])
			return H_TOO_HARD;

	return H_SUCCESS;
}

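/*
 * Exchanges a TCE in the hardware table and, if the old entry allowed device
 * writes, marks the cached userspace address dirty for dirty-page tracking.
 * A real-mode counterpart of iommu_tce_xchg_no_kill().
 */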
static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
				(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
		 * calling this, so we still get a valid UA here.
		 */
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}

static void iommu_tce_kill_rm(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages, true);
}

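/*
 * Clears all host IOMMU entries backing one guest TCE entry; used to undo
 * partial updates when a map or unmap fails. Errors are ignored here as
 * this is already the error path.
 */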
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
	}
}

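/*
 * Drops the reference on preregistered memory that was taken when the entry
 * was mapped, and clears the cached userspace address for the entry.
 */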
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

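/*
 * Unmaps one host IOMMU page: clears the hardware entry and, if it was
 * mapped, drops the reference taken at map time.
 */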
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

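/*
 * Unmaps every host IOMMU page backing one guest TCE entry (a guest TCE
 * page may span several smaller host IOMMU pages).
 */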
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill_rm(tbl, io_entry, subpages);

	return ret;
}

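/*
 * Maps one host IOMMU page: translates the userspace address to a host
 * physical address via the preregistered memory list, takes a reference,
 * installs the new entry and caches the userspace address for later
 * dirty tracking.
 */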
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill_rm(tbl, io_entry, subpages);

	return ret;
}

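/*
 * Real mode handler for H_PUT_TCE: validates the request, updates any
 * hardware IOMMU tables attached to this LIOBN and then stores the TCE in
 * the guest-visible table.
 */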
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_rm_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
			return ret;
		}
	}

	kvmppc_rm_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
				unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and the current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to the primary thread will wait for the
	 * secondary to exit, which again ensures that the page table walk
	 * below has finished.
	 */
	/*
	 * An rmap lock won't make it safe, because it only ensures that hash
	 * page table entries are removed with the rmap lock held. After the
	 * mmu notifier returns, we go ahead and remove PTEs from the QEMU
	 * page table.
	 */
	ptep = find_kvm_host_pte(vcpu->kvm, mmu_seq, ua, &shift);
	if (!ptep)
		return -ENXIO;

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte))
		return -ENXIO;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

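/*
 * Real mode handler for H_PUT_TCE_INDIRECT: reads a list of up to 512 TCEs
 * from guest memory, validates them all first and only then updates the
 * hardware IOMMU tables and the guest-visible table.
 */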
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long mmu_seq;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	/*
	 * Used to check for invalidations in progress.
	 */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table addressed resides in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which is
		 * normally the VFIO case, and the gpa->hpa translation does
		 * not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so lock rmap and do __find_linux_pte_or_hugepte().
		 */
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

		arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
		if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_rm_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ua = 0;
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
						entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_rm_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (!prereg)
		arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
	return ret;
}

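/*
 * Real mode handler for H_STUFF_TCE: writes the same TCE value into npages
 * consecutive entries, unmapping them from any hardware IOMMU tables.
 */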
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
	if (ret != H_SUCCESS)
		return ret;

	/*
	 * Check permission bits only, to allow userspace to poison a TCE
	 * for debugging.
	 */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return ret;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		vcpu->arch.regs.gpr[4] = 0;
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */