/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <ioremap.h>

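/*
 * IS_LOW512() tests whether a physical address lies in the low 512MB that
 * the fixed KSEG1 window can reach; IS_KSEG1() tests whether a virtual
 * address was handed out from that window rather than from the vmalloc
 * area.
 */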
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

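/*
 * Callback for walk_system_ram_range(): report 1 as soon as any page in
 * the range is valid, non-reserved RAM, i.e. memory the page allocator
 * may own.
 */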
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;
	}

	return 0;
}

/*
 * ioremap_prot     -   map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 * @prot_val:	page protection flags; only the _CACHE_MASK bits are used,
 *		selecting the cache coherency attribute (CCA) of the mapping
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	unsigned long flags = prot_val & _CACHE_MASK;
	unsigned long offset, pfn, last_pfn;
	struct vm_struct *area;
	phys_addr_t last_addr;
	unsigned long vaddr;
	void __iomem *cpu_addr;

	/* Give the platform a chance to satisfy the request itself. */
	cpu_addr = plat_ioremap(phys_addr, size, flags);
	if (cpu_addr)
		return cpu_addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of the physical address
	 * space using KSEG1, otherwise map using page tables.  E.g. on a
	 * 32-bit kernel an uncached request for 0x1fc00000 simply returns
	 * the fixed KSEG1 address 0xbfc00000, with no page table set up.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap RAM that may be allocated by the page
	 * allocator, since that could lead to races & data clobbering.
	 */
	pfn = PFN_DOWN(phys_addr);
	last_pfn = PFN_DOWN(last_addr);
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			       __pgprot(flags))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);
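
/*
 * A minimal usage sketch, not part of this file: how a driver might pair
 * ioremap_prot() with iounmap().  The base address, size and register
 * offset below are made up for illustration; the write pokes a
 * hypothetical control register.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_prot(0x1e000000, 0x1000, _CACHE_UNCACHED);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	iounmap(regs);
 */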

/*
 * iounmap - tear down a mapping created with ioremap_prot()
 * @addr: virtual address returned by ioremap_prot()
 *
 * plat_iounmap() gets first refusal; addresses handed out from the fixed
 * KSEG1 window were never entered into the page tables, so only
 * page-table-backed mappings are vunmap()ed.
 */
void iounmap(const volatile void __iomem *addr)
{
	if (!plat_iounmap(addr) && !IS_KSEG1(addr))
		vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);