/*
 * This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
 *
 * Derived from arch/ppc/mm/pgtable.c:
 *   -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>

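/*
 * Before the vmalloc allocator is up, ioremap allocations are carved
 * out downward from ioremap_base; ioremap_bot tracks the lowest
 * address handed out so far (see __ioremap() below).
 */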
unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD. Note that the exclusion
	 * below is currently empty (both bounds are __bss_stop), so no
	 * RAM range is actually exempted yet.
	 */
	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %ps\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

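/*
 * Typical use, as a minimal sketch (the device base address and size
 * below are made up purely for illustration):
 *
 *	void __iomem *regs = ioremap(0x84000000, 0x1000);
 *	if (regs) {
 *		u32 val = readl(regs);
 *		iounmap(regs);
 *	}
 */

/*
 * Unmap a region mapped with ioremap(). Only mappings that live in
 * vmalloc space (above high_memory and below ioremap_bot) are freed;
 * early mappings set up before mem_init() are left in place.
 */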
void iounmap(volatile void __iomem *addr)
{
	if ((__force void *)addr > high_memory &&
			(unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);

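/*
 * Create a kernel mapping of a single page: va -> pa with protection
 * 'flags' in init_mm's page tables. Returns 0 on success, or -ENOMEM
 * if the PTE page could not be allocated.
 */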
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	p4d = p4d_offset(pgd_offset_k(va), va);
	pud = pud_offset(p4d, va);
	pd = pmd_offset(pud, va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	/* pg = pte_alloc_kernel(&init_mm, pd, va); */

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			_tlbie(va);
	}
	return err;
}

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/* On the MicroBlaze, no user access
			   forces R/W kernel access */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		p4d = p4d_offset(pgd, addr & PAGE_MASK);
		pud = pud_offset(p4d, addr & PAGE_MASK);
		pmd = pmd_offset(pud, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/* Find physical address for this virtual address. Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;

	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}

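/*
 * Allocate a zeroed page for PTEs. Before mem_init() the page
 * allocator is not available, so fall back to early_get_page(); the
 * __ref annotation permits this reference to init-time code.
 */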
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;
	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}

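/*
 * Install a fixmap entry: map the fixed virtual address slot for idx
 * to the given physical address with the given protection.
 */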
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses)
		BUG();

	map_page(address, phys, pgprot_val(flags));
}