// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 **************************************************************************/

#include <linux/highmem.h>

#include "mmu.h"
#include "psb_drv.h"
#include "psb_reg.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 */
/*
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock, and
 * no other functions that may be using the locks for other purposes may be
 * called from within the locked region.
 * Since the slots are per processor, this will guarantee that we are the only
 * user.
 */

/*
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault in
 * needed pages. For that, we need to make an atomic insert_pages function that
 * may fail.
 * If it fails, the caller needs to insert the page using a workqueue function,
 * but on average it should be fast.
 */
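
/*
 * Illustrative sketch only, not part of the driver: an atomic insert path
 * would have to avoid the sleeping rwsem and the GFP_KERNEL/GFP_DMA32
 * page-table allocations used further down in this file, e.g. by bailing
 * out when a page table is missing and letting the caller retry from a
 * workqueue:
 *
 *	if (!down_read_trylock(&pd->driver->sem))
 *		return -EBUSY;
 *	pt = psb_mmu_pt_map_lock(pd, addr);	(never allocates, unlike
 *						 psb_mmu_pt_alloc_map_lock())
 *	if (!pt) {
 *		up_read(&pd->driver->sem);
 *		return -ENOMEM;			(caller defers to a workqueue)
 *	}
 *
 * The trylock strategy and the -EBUSY/-ENOMEM return codes are assumptions
 * about how such a helper could look, not existing code.
 */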

static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}
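
/*
 * Worked example (assuming the usual 4 KiB pages, PSB_PTE_SHIFT == 12 and
 * PSB_PDE_SHIFT == 22, i.e. 1024 PDEs x 1024 PTEs): for the device virtual
 * address 0x00ABC123,
 *
 *	psb_mmu_pd_index(0x00ABC123) == 0x00ABC123 >> 22          == 2
 *	psb_mmu_pt_index(0x00ABC123) == (0x00ABC123 >> 12) & 0x3FF == 0x2BC
 *
 * so the mapping lives in PTE 0x2BC of the page table referenced by PDE 2.
 * The shift values are assumptions inferred from the 0x3FF mask above; the
 * authoritative definitions live in the driver headers.
 */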

#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}
#else

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
}

#endif

static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

		/* Make sure data cache is turned off before enabling it */
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}

#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
#endif

void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t val;

	down_write(&driver->sem);
	val = PSB_RSGX32(PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	else
		PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

	/* Make sure data cache is turned off and MMU is flushed before
	   restoring bank interface control register */
	wmb();
	PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		   PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	atomic_set(&driver->needs_tlbflush, 0);
	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);
	up_write(&driver->sem);
}

void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	struct drm_device *dev = pd->driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	down_write(&pd->driver->sem);
	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}

static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
	return (addr < end) ? addr : end;
}

static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
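
/*
 * Example (illustrative, assuming 4 KiB pages): mapping pfn 0x1234 as
 * cached, read-only memory yields
 *
 *	psb_mmu_mask_pte(0x1234, PSB_MMU_CACHED_MEMORY | PSB_MMU_RO_MEMORY)
 *		== (0x1234 << 12) | PSB_PTE_VALID | PSB_PTE_CACHED | PSB_PTE_RO
 *
 * i.e. the page frame address in the upper bits of the 32-bit PTE and the
 * attribute flags in the low bits.
 */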

struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

	if (!trap_pagefaults) {
		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
						   invalid_type);
		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
						   invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	v = kmap(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pte;

	kunmap(pd->dummy_pt);

	v = kmap(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pde;

	kunmap(pd->p);

	clear_page(kmap(pd->dummy_page));
	kunmap(pd->dummy_page);

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}
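
/*
 * Typical lifecycle (illustrative only; the error handling and the concrete
 * hw_context value are assumptions, not taken from this file):
 *
 *	struct psb_mmu_pd *pd;
 *
 *	pd = psb_mmu_alloc_pd(driver, 1, 0);	(trap faults, no invalid type)
 *	if (!pd)
 *		return -ENOMEM;
 *	psb_mmu_set_pd_context(pd, 0);		(point BIF dir-list base 0 at it)
 *	...map and unmap pages...
 *	psb_mmu_free_pagedir(pd);
 */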

static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}

void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}

static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

#if defined(CONFIG_X86)
	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
#endif
	kunmap_atomic(v);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}

struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
					     unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;
		kunmap_atomic((void *) v);

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	kunmap_atomic(pt->v);
	if (pt->count == 0) {
		v = kmap_atomic(pd->p);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic(v);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	down_read(&driver->sem);
	pd = driver->default_pd;
	up_read(&driver->sem);

	return pd;
}

/* Returns the physical address of the PD shared by sgx/msvdx */
uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	pd = psb_mmu_get_default_pd(driver);
	return page_to_pfn(pd->p) << PAGE_SHIFT;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}

struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
					   int trap_pagefaults,
					   int invalid_type,
					   atomic_t *msvdx_mmu_invaldc)
{
	struct psb_mmu_driver *driver;
	struct drm_psb_private *dev_priv = dev->dev_private;

	driver = kmalloc(sizeof(*driver), GFP_KERNEL);

	if (!driver)
		return NULL;

	driver->dev = dev;
	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;

	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	down_write(&driver->sem);
	atomic_set(&driver->needs_tlbflush, 1);
	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

	driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);

	driver->has_clflush = 0;

#if defined(CONFIG_X86)
	if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;

		/*
		 * clflush size is determined at kernel setup for x86_64 but not
		 * for i386. We have to do it here.
		 */

		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
			PAGE_SIZE * clflush_size / sizeof(uint32_t);
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}
#endif

	up_write(&driver->sem);
	return driver;

out_err1:
	kfree(driver);
	return NULL;
}

#if defined(CONFIG_X86)
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush)
		return;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}
#else
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	drm_ttm_cache_flush();
}
#endif

void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;

			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);
	ret = 0;

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
			 int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}

	ret = 0;
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}
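
/*
 * Illustrative usage sketch (the page array, offset and size here are
 * hypothetical, not taken from this file): mapping a linearly laid out
 * buffer into the default page directory and tearing it down again.
 *
 *	struct psb_mmu_pd *pd = psb_mmu_get_default_pd(driver);
 *	int ret;
 *
 *	ret = psb_mmu_insert_pages(pd, obj_pages, gpu_offset, npages,
 *				   0, 0, PSB_MMU_CACHED_MEMORY);
 *	if (ret)
 *		return ret;
 *	...
 *	psb_mmu_remove_pages(pd, gpu_offset, npages, 0, 0);
 *
 * With hw_tile_stride == 0 the stride arguments are ignored and the pages
 * are mapped as one contiguous run.
 */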

int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v);
		spin_unlock(lock);

		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}
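
/*
 * Debug-style lookup sketch (illustrative; "gpu_addr" is a hypothetical
 * device virtual address):
 *
 *	unsigned long pfn;
 *
 *	if (!psb_mmu_virtual_to_pfn(pd, gpu_addr, &pfn))
 *		pr_debug("gpu 0x%08x -> pfn %lx\n", gpu_addr, pfn);
 */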