/*
 * arch/sh/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005 - 2010  Paul Mundt
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/io_trapped.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include "ioremap.h"

/*
 * On 32-bit SH, we traditionally have the whole physical address space mapped
 * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
 * anything but place the address in the proper segment.  This is true for P1
 * and P2 addresses, as well as some P3 ones.  However, most of the P3 addresses
 * and newer cores using extended addressing need to map through page tables, so
 * the ioremap() implementation becomes a bit more complicated.
 */
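
/*
 * Illustrative sketch (assuming the conventional SH-4 segment bases,
 * which are not spelled out in this file): P1 begins at 0x80000000
 * (cached, untranslated) and P2 at 0xa0000000 (uncached, untranslated),
 * so the fast path below is pure address arithmetic rather than a page
 * table walk. Under that assumption:
 *
 *	phys_addr_t phys = 0x0c000000;
 *	void __iomem *cached   = (void __iomem *)(0x80000000UL | phys);
 *	void __iomem *uncached = (void __iomem *)(0xa0000000UL | phys);
 *
 * which is the kind of translation the P1SEGADDR()/P2SEGADDR() macros
 * used below perform.
 */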
#ifdef CONFIG_29BIT
static void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses is done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 above the store queues is always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);

	return NULL;
}
#else
#define __ioremap_29bit(offset, size, prot)		NULL
#endif /* CONFIG_29BIT */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ref
__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
		 pgprot_t pgprot, void *caller)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	void __iomem *mapped;

	mapped = __ioremap_trapped(phys_addr, size);
	if (mapped)
		return mapped;

	mapped = __ioremap_29bit(phys_addr, size, pgprot);
	if (mapped)
		return mapped;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * If we can't yet use the regular approach, go the fixmap route.
	 */
	if (!mem_init_done)
		return ioremap_fixed(phys_addr, size, pgprot);

	/*
	 * First try to remap through the PMB.
	 * PMB entries are all pre-faulted.
	 */
	mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
	if (mapped && !IS_ERR(mapped))
		return mapped;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
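
	/*
	 * Worked example of the fixup above (illustrative numbers, not
	 * taken from this file): with a 4 KiB PAGE_SIZE, a request for
	 * phys_addr = 0x1f000123 and size = 0x100 gives offset = 0x123,
	 * phys_addr = 0x1f000000, last_addr = 0x1f000222, and
	 * size = PAGE_ALIGN(0x1f000223) - 0x1f000000 = 0x1000, i.e. one
	 * full page. The returned pointer is rebased by 'offset' at the
	 * end of this function.
	 */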

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

	if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
		vunmap((void *)orig_addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap_caller);
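
/*
 * Typical use from driver code (a minimal sketch; the register base,
 * size and offsets below are made-up values, not taken from this file).
 * The generic ioremap() wrappers end up in __ioremap_caller() with an
 * appropriate pgprot and the call site recorded as 'caller':
 *
 *	void __iomem *regs = ioremap(0xfe200000, 0x100);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);
 *	val = readl(regs + 0x08);
 *	iounmap(regs);
 */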

/*
 * Simple checks for non-translatable mappings.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
	/*
	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
	 * parts of P3.
	 */
	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
		return 1;
#endif

	return 0;
}

void iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/*
	 * Nothing to do if there is no translatable mapping.
	 */
	if (iomapping_nontranslatable(vaddr))
		return;

	/*
	 * There's no VMA if it's from an early fixed mapping.
	 */
	if (iounmap_fixed(addr) == 0)
		return;

	/*
	 * If the PMB handled it, there's nothing else to do.
	 */
	if (pmb_unmap(addr) == 0)
		return;

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(iounmap);