// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 */

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>
#include <linux/mm_inline.h>

static DEFINE_MUTEX(mem_list_mutex);

#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY	0x1
#define MM_IOMMU_TABLE_GROUP_PAGE_MASK	~(SZ_4K - 1)
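
/*
 * hpas[] entries are page-aligned physical addresses, which leaves the
 * low bits free for flags: bit 0 (MM_IOMMU_TABLE_GROUP_PAGE_DIRTY) is
 * set from real mode and transferred to the struct page at unpin time.
 */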

struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	unsigned int pageshift;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas/hpages[] */
	/*
	 * During mm_iommu_do_alloc() we temporarily use this to store
	 * struct page addresses.
	 *
	 * We need to convert ua to hpa in real mode. Make that simpler
	 * by storing the physical address.
	 */
	union {
		struct page **hpages;	/* vmalloc'ed */
		phys_addr_t *hpas;
	};
#define MM_IOMMU_TABLE_INVALID_HPA	((uint64_t)-1)
	u64 dev_hpa;		/* Device memory base address */
};

bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
			      unsigned long entries, unsigned long dev_hpa,
			      struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem, *mem2;
	long i, ret, locked_entries = 0, pinned = 0;
	unsigned int pageshift;
	unsigned long entry, chunk;

	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
		ret = account_locked_vm(mm, entries, true);
		if (ret)
			return ret;

		locked_entries = entries;
	}

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) {
		mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
		mem->dev_hpa = dev_hpa;
		goto good_exit;
	}
	mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA;

	/*
	 * As a starting point for the maximum page size calculation, use
	 * the natural alignment of @ua and @entries; this allows IOMMU
	 * pages smaller than huge pages but still bigger than PAGE_SIZE.
	 */
	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
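	/*
	 * Worked example (hypothetical values, assuming 64K pages so
	 * PAGE_SHIFT = 16): ua = 0x20000000 (512MB-aligned) and
	 * entries = 4096 give ua | (entries << PAGE_SHIFT) = 0x30000000,
	 * so __ffs() returns 28 and IOMMU page sizes up to 256MB remain
	 * candidates until clamped by the actual backing pages below.
	 */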
	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	mmap_read_lock(mm);
	chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
			sizeof(struct vm_area_struct *);
	chunk = min(chunk, entries);
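	/*
	 * The chunk bounds how many pages one pin_user_pages() call
	 * covers, sized so that an array of per-page VMA pointers (which
	 * the longterm GUP path historically allocated) fits in a single
	 * MAX_ORDER allocation. Worked example with hypothetical values:
	 * 64K pages and MAX_ORDER = 11 give (1UL << 26) / 8 = 8M entries
	 * per chunk.
	 */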
	for (entry = 0; entry < entries; entry += chunk) {
		unsigned long n = min(entries - entry, chunk);

		ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n,
				     FOLL_WRITE | FOLL_LONGTERM,
				     mem->hpages + entry, NULL);
		if (ret == n) {
			pinned += n;
			continue;
		}
		if (ret > 0)
			pinned += ret;
		break;
	}
	mmap_read_unlock(mm);
	if (pinned != entries) {
		if (!ret)
			ret = -EFAULT;
		goto free_exit;
	}

good_exit:
	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
		/* Overlap? */
		if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
		    (ua < (mem2->ua + (mem2->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			mutex_unlock(&mem_list_mutex);
			goto free_exit;
		}
	}

	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
		/*
		 * Allow using IOMMU pages larger than 64k, but only if we
		 * are backed by hugetlb. Skip device memory as it is not
		 * backed by page structs.
		 */
		pageshift = PAGE_SHIFT;
		for (i = 0; i < entries; ++i) {
			struct page *page = mem->hpages[i];

			if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
				pageshift = page_shift(compound_head(page));
			mem->pageshift = min(mem->pageshift, pageshift);
			/*
			 * We don't need the struct page reference any more,
			 * switch to the physical address.
			 */
			mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
		}
	}

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

	mutex_unlock(&mem_list_mutex);

	*pmem = mem;

	return 0;

free_exit:
	/* free the references taken */
	unpin_user_pages(mem->hpages, pinned);

	vfree(mem->hpas);
	kfree(mem);

unlock_exit:
	account_locked_vm(mm, locked_entries, false);

	return ret;
}

long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA,
			pmem);
}
EXPORT_SYMBOL_GPL(mm_iommu_new);
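
/*
 * A minimal usage sketch (hypothetical caller, e.g. a VFIO backend;
 * error handling beyond the register step is elided):
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	long ret;
 *
 *	ret = mm_iommu_new(current->mm, ua, entries, &mem);
 *	if (ret)
 *		return ret;
 *	...translate with mm_iommu_ua_to_hpa(), then...
 *	mm_iommu_put(current->mm, mem);
 */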

long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem)
{
	return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem);
}
EXPORT_SYMBOL_GPL(mm_iommu_newdev);

static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	if (!mem->hpas)
		return;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
			SetPageDirty(page);

		unpin_user_page(page);

		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;
	unsigned long unlock_entries = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
		unlock_entries = mem->entries;

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	account_locked_vm(mm, unlock_entries, false);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
		    (ua + size <= mem->ua +
		     (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

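/*
 * Real-mode variant of mm_iommu_lookup(). This can run with the MMU off
 * (e.g. from KVM real-mode handlers), so it must not sleep or take
 * locks; the lockless list walk below is safe because entries are only
 * freed after an RCU grace period (see mm_iommu_release()).
 */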
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
		    (ua + size <= mem->ua +
		     (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}

struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			++mem->used;
			break;
		}
	}

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);
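
/*
 * A minimal sketch of the get/put pairing (hypothetical caller; the
 * region must have been preregistered with mm_iommu_new() first):
 *
 *	mem = mm_iommu_get(mm, ua, entries);
 *	if (mem) {
 *		...use the region...
 *		mm_iommu_put(mm, mem);
 *	}
 */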

long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	if (!mem->hpas) {
		*hpa = mem->dev_hpa + (ua - mem->ua);
		return 0;
	}

	va = &mem->hpas[entry];
	*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
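
/*
 * Worked example (hypothetical values, 64K pages): for mem->ua =
 * 0x10000000 and ua = 0x10013004, entry = (0x13004 >> 16) = 1, and the
 * result combines hpas[1] masked to a 4K boundary with the in-page
 * offset, i.e. *hpa = (hpas[1] & ~(SZ_4K - 1)) | 0x3004.
 */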

long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	if (!mem->hpas) {
		*hpa = mem->dev_hpa + (ua - mem->ua);
		return 0;
	}

	/* In real mode a physical address can be dereferenced directly */
	pa = (void *) vmalloc_to_phys(&mem->hpas[entry]);
	if (!pa)
		return -EFAULT;

	*hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}

void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
{
	struct mm_iommu_table_group_mem_t *mem;
	long entry;
	void *va;
	unsigned long *pa;

	mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
	if (!mem)
		return;

	if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
		return;

	entry = (ua - mem->ua) >> PAGE_SHIFT;
	va = &mem->hpas[entry];

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return;

	*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
}

bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	struct mm_iommu_table_group_mem_t *mem;
	unsigned long end;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
			continue;

		end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
		if ((mem->dev_hpa <= hpa) && (hpa < end)) {
			/*
			 * Since the IOMMU page size might be bigger than
			 * PAGE_SIZE, the amount of preregistered memory
			 * starting from @hpa might be smaller than 1<<pageshift
			 * and the caller needs to distinguish this situation.
			 */
			*size = min(1UL << pageshift, end - hpa);
			return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(mm_iommu_is_devmem);

long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* The last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
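
/*
 * A sketch of the @mapped lifecycle: it is initialised to 1 when the
 * region is registered, each active mapping raises it above 1, and the
 * final mm_iommu_put() drops it from 1 to 0 with a cmpxchg. Once it is
 * 0, mm_iommu_mapped_inc() fails, so no new mapping can outlive the
 * region. mm_iommu_mapped_dec() never drops the counter below 1 on its
 * own, which keeps the put path the only place that can reach 0.
 */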

void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}