/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2011  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <linux/sizes.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	raw_spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

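/*
 * Each hardware PMB entry is programmed through a pair of memory-mapped
 * array registers: an address array entry (VPN and valid bit) and a
 * data array entry (PPN, size and cache attributes). The helpers below
 * compute the register address for a given entry index.
 */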
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

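/*
 * Reserve the first free slot in the PMB map. The caller is expected
 * to hold pmb_rwlock for writing, as the bitmap update is non-atomic.
 */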
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

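/*
 * Allocate a PMB software entry, either at a specific slot or (with
 * PMB_NO_ENTRY) at the first free one, and initialize it with the
 * given translation. Returns an ERR_PTR() on failure.
 */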
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	raw_spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

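/*
 * Return an entry's slot to the free map and unlink it from any
 * compound mapping chain.
 */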
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry = PMB_NO_ENTRY;
	pmbe->link = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}

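/*
 * Invalidate a hardware entry by clearing the V bit in both its
 * address and data array registers.
 */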
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

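/*
 * Establish a bolted (permanent) PMB mapping for the given range.
 * The region is carved up greedily, largest entry size first, and
 * each resulting entry is linked to its predecessor so that compound
 * mappings can be torn down or coalesced as a unit.
 */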
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			raw_spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				raw_spin_lock_nested(&pmbp->lock,
						     SINGLE_DEPTH_NESTING);
				pmbp->link = pmbe;
				raw_spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}

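/*
 * Remap an arbitrary physical range through the PMB, rounding the
 * range out to the selected entry size. For example, phys 0x43001000
 * with a 16MB size selects 16MB entries, yielding offset 0x1000, a
 * base of 0x43000000, and an aligned span of 32MB.
 */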
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}

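/*
 * Tear down the mapping whose base virtual address matches @addr,
 * following the link chain for compound mappings.
 */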
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		raw_spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;
			raw_spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

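/*
 * Try to fold a chain of contiguous entries into a single larger one.
 * The chain is walked while accumulating the spanned size; the largest
 * accumulated span that is a valid PMB entry size wins. The head entry
 * is then rewritten to the new size and the now-redundant tail entries
 * are unmapped.
 */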
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!depth || !pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
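/*
 * Shrink a bootloader-provided uncached mapping down to the 16MB that
 * a kernel-constructed mapping would use.
 */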
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		raw_spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

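/*
 * Parse the "pmb=" kernel command line option; "pmb=iomap" enables
 * routing sufficiently large ioremap() requests through the PMB.
 */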
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

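/*
 * The SE bit in PASCR selects 32-bit address extended mode; when it
 * reads back clear, the CPU is operating in legacy 29-bit physical
 * address mode.
 */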
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	debugfs_create_file("pmb", S_IFREG | S_IRUGO, arch_debugfs_dir, NULL,
			    &pmb_debugfs_fops);
	return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
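/*
 * Re-establish all live hardware entries from their software copies on
 * resume, as the PMB contents may not survive suspend.
 */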
static void pmb_syscore_resume(void)
{
	struct pmb_entry *pmbe;
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			set_pmb_entry(pmbe);
		}
	}

	read_unlock(&pmb_rwlock);
}

static struct syscore_ops pmb_syscore_ops = {
	.resume = pmb_syscore_resume,
};

static int __init pmb_sysdev_init(void)
{
	register_syscore_ops(&pmb_syscore_ops);
	return 0;
}
subsys_initcall(pmb_sysdev_init);
#endif