xref: /OK3568_Linux_fs/kernel/arch/alpha/mm/init.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *  linux/arch/alpha/mm/init.c
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *  Copyright (C) 1995  Linus Torvalds
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun /* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/pagemap.h>
11*4882a593Smuzhiyun #include <linux/signal.h>
12*4882a593Smuzhiyun #include <linux/sched.h>
13*4882a593Smuzhiyun #include <linux/kernel.h>
14*4882a593Smuzhiyun #include <linux/errno.h>
15*4882a593Smuzhiyun #include <linux/string.h>
16*4882a593Smuzhiyun #include <linux/types.h>
17*4882a593Smuzhiyun #include <linux/ptrace.h>
18*4882a593Smuzhiyun #include <linux/mman.h>
19*4882a593Smuzhiyun #include <linux/mm.h>
20*4882a593Smuzhiyun #include <linux/swap.h>
21*4882a593Smuzhiyun #include <linux/init.h>
22*4882a593Smuzhiyun #include <linux/memblock.h> /* max_low_pfn */
23*4882a593Smuzhiyun #include <linux/vmalloc.h>
24*4882a593Smuzhiyun #include <linux/gfp.h>
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun #include <linux/uaccess.h>
27*4882a593Smuzhiyun #include <asm/pgalloc.h>
28*4882a593Smuzhiyun #include <asm/hwrpb.h>
29*4882a593Smuzhiyun #include <asm/dma.h>
30*4882a593Smuzhiyun #include <asm/mmu_context.h>
31*4882a593Smuzhiyun #include <asm/console.h>
32*4882a593Smuzhiyun #include <asm/tlb.h>
33*4882a593Smuzhiyun #include <asm/setup.h>
34*4882a593Smuzhiyun #include <asm/sections.h>
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun extern void die_if_kernel(char *,struct pt_regs *,long);
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun static struct pcb_struct original_pcb;
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun pgd_t *
pgd_alloc(struct mm_struct * mm)41*4882a593Smuzhiyun pgd_alloc(struct mm_struct *mm)
42*4882a593Smuzhiyun {
43*4882a593Smuzhiyun 	pgd_t *ret, *init;
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
46*4882a593Smuzhiyun 	init = pgd_offset(&init_mm, 0UL);
47*4882a593Smuzhiyun 	if (ret) {
48*4882a593Smuzhiyun #ifdef CONFIG_ALPHA_LARGE_VMALLOC
49*4882a593Smuzhiyun 		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
50*4882a593Smuzhiyun 			(PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
51*4882a593Smuzhiyun #else
52*4882a593Smuzhiyun 		pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
53*4882a593Smuzhiyun #endif
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 		/* The last PGD entry is the VPTB self-map.  */
56*4882a593Smuzhiyun 		pgd_val(ret[PTRS_PER_PGD-1])
57*4882a593Smuzhiyun 		  = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
58*4882a593Smuzhiyun 	}
59*4882a593Smuzhiyun 	return ret;
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun /*
64*4882a593Smuzhiyun  * BAD_PAGE is the page that is used for page faults when linux
65*4882a593Smuzhiyun  * is out-of-memory. Older versions of linux just did a
66*4882a593Smuzhiyun  * do_exit(), but using this instead means there is less risk
67*4882a593Smuzhiyun  * for a process dying in kernel mode, possibly leaving an inode
68*4882a593Smuzhiyun  * unused etc..
69*4882a593Smuzhiyun  *
70*4882a593Smuzhiyun  * BAD_PAGETABLE is the accompanying page-table: it is initialized
71*4882a593Smuzhiyun  * to point to BAD_PAGE entries.
72*4882a593Smuzhiyun  *
73*4882a593Smuzhiyun  * ZERO_PAGE is a special page that is used for zero-initialized
74*4882a593Smuzhiyun  * data and COW.
75*4882a593Smuzhiyun  */
76*4882a593Smuzhiyun pmd_t *
__bad_pagetable(void)77*4882a593Smuzhiyun __bad_pagetable(void)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
80*4882a593Smuzhiyun 	return (pmd_t *) EMPTY_PGT;
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun pte_t
__bad_page(void)84*4882a593Smuzhiyun __bad_page(void)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun 	memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
87*4882a593Smuzhiyun 	return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun 
/*
 * Install @pcb as the current hardware PCB.
 *
 * Records the live stack pointer into pcb->ksp first, so execution
 * continues on the current stack after the context switch, then swaps
 * the PCB via __reload_thread.  The return value is used by callers as
 * a pointer to the previous PCB (see switch_to_system_map, which
 * treats values below PAGE_OFFSET as physical addresses).
 */
static inline unsigned long
load_PCB(struct pcb_struct *pcb)
{
	/* $30 is the Alpha stack pointer register. */
	register unsigned long sp __asm__("$30");
	pcb->ksp = sp;
	return __reload_thread(pcb);
}
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun /* Set up initial PCB, VPTB, and other such nicities.  */
99*4882a593Smuzhiyun 
/*
 * Switch from the console's page tables to the kernel's own
 * (swapper_pg_dir): build the L1 table with its VPTB self-map, point
 * the hardware VPTB at the standard kernel location, install the
 * kernel PCB, and save the console's PCB so srm_paging_stop() can
 * restore it for a clean reboot.  The order here matters: the new
 * page tables must be complete before the PCB (and thus PTBR) swap,
 * and the TLB is flushed immediately after.
 */
static inline void
switch_to_system_map(void)
{
	unsigned long newptbr;
	unsigned long original_pcb_ptr;

	/* Initialize the kernel's page tables.  Linux puts the vptb in
	   the last slot of the L1 page table.  */
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
	/* Slot 1023 is the VPTB self-map: PFN in the high word, kernel
	   protection bits in the low word.  */
	pgd_val(swapper_pg_dir[1023]) =
		(newptbr << 32) | pgprot_val(PAGE_KERNEL);

	/* Set the vptb.  This is often done by the bootloader, but
	   shouldn't be required.  */
	if (hwrpb->vptb != 0xfffffffe00000000UL) {
		wrvptptr(0xfffffffe00000000UL);
		hwrpb->vptb = 0xfffffffe00000000UL;
		hwrpb_update_checksum(hwrpb);
	}

	/* Also set up the real kernel PCB while we're at it.  */
	init_thread_info.pcb.ptbr = newptbr;
	init_thread_info.pcb.flags = 1;	/* set FEN, clear everything else */
	original_pcb_ptr = load_PCB(&init_thread_info.pcb);
	/* Flush the entire TLB now that the page tables changed. */
	tbia();

	/* Save off the contents of the original PCB so that we can
	   restore the original console's page tables for a clean reboot.

	   Note that the PCB is supposed to be a physical address, but
	   since KSEG values also happen to work, folks get confused.
	   Check this here.  */

	if (original_pcb_ptr < PAGE_OFFSET) {
		original_pcb_ptr = (unsigned long)
			phys_to_virt(original_pcb_ptr);
	}
	original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun int callback_init_done;
142*4882a593Smuzhiyun 
/*
 * Early boot: switch to the kernel page tables and, when running under
 * the SRM console, remap the console's callback code/data into the
 * kernel's vmalloc region so console services remain usable.
 *
 * @kernel_end: first free address after the kernel image.  Early page
 * tables are carved from here (bump allocation); the advanced pointer
 * is returned so the caller knows where free memory now begins.
 */
void * __init
callback_init(void * kernel_end)
{
	struct crb_struct * crb;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	void *two_pages;

	/* Starting at the HWRPB, locate the CRB. */
	crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);

	if (alpha_using_srm) {
		/* Tell the console whither it is to be remapped. */
		if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
			__halt();		/* "We're boned."  --Bender */

		/* Edit the procedure descriptors for DISPATCH and FIXUP.
		   Their new virtual addresses are the old offsets from the
		   first map entry, rebased onto VMALLOC_START.  */
		crb->dispatch_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->dispatch_va
			 - crb->map[0].va);
		crb->fixup_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->fixup_va
			 - crb->map[0].va);
	}

	switch_to_system_map();

	/* Allocate one PGD and one PMD.  In the case of SRM, we'll need
	   these to actually remap the console.  There is an assumption
	   here that only one of each is needed, and this allows for 8MB.
	   On systems with larger consoles, additional pages will be
	   allocated as needed during the mapping process.

	   In the case of not SRM, but not CONFIG_ALPHA_LARGE_VMALLOC,
	   we need to allocate the PGD we use for vmalloc before we start
	   forking other tasks.  */

	/* Round kernel_end up to a page boundary and claim two pages. */
	two_pages = (void *)
	  (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
	kernel_end = two_pages + 2*PAGE_SIZE;
	memset(two_pages, 0, 2*PAGE_SIZE);

	/* Wire the two pages in as the PMD and PTE levels covering
	   VMALLOC_START.  */
	pgd = pgd_offset_k(VMALLOC_START);
	p4d = p4d_offset(pgd, VMALLOC_START);
	pud = pud_offset(p4d, VMALLOC_START);
	pud_set(pud, (pmd_t *)two_pages);
	pmd = pmd_offset(pud, VMALLOC_START);
	pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));

	if (alpha_using_srm) {
		static struct vm_struct console_remap_vm;
		unsigned long nr_pages = 0;
		unsigned long vaddr;
		unsigned long i, j;

		/* calculate needed size */
		for (i = 0; i < crb->map_entries; ++i)
			nr_pages += crb->map[i].count;

		/* register the vm area */
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.size = nr_pages << PAGE_SHIFT;
		vm_area_register_early(&console_remap_vm, PAGE_SIZE);

		vaddr = (unsigned long)console_remap_vm.addr;

		/* Set up the third level PTEs and update the virtual
		   addresses of the CRB entries.  */
		for (i = 0; i < crb->map_entries; ++i) {
			unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
			crb->map[i].va = vaddr;
			for (j = 0; j < crb->map[i].count; ++j) {
				/* Newer consoles (especially on larger
				   systems) may require more pages of
				   PTEs. Grab additional pages as needed. */
				if (pmd != pmd_offset(pud, vaddr)) {
					/* vaddr crossed into a new PMD:
					   bump-allocate a fresh PTE page
					   from kernel_end.  */
					memset(kernel_end, 0, PAGE_SIZE);
					pmd = pmd_offset(pud, vaddr);
					pmd_set(pmd, (pte_t *)kernel_end);
					kernel_end += PAGE_SIZE;
				}
				set_pte(pte_offset_kernel(pmd, vaddr),
					pfn_pte(pfn, PAGE_KERNEL));
				pfn++;
				vaddr += PAGE_SIZE;
			}
		}
	}

	callback_init_done = 1;
	return kernel_end;
}
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun #ifndef CONFIG_DISCONTIGMEM
240*4882a593Smuzhiyun /*
241*4882a593Smuzhiyun  * paging_init() sets up the memory map.
242*4882a593Smuzhiyun  */
paging_init(void)243*4882a593Smuzhiyun void __init paging_init(void)
244*4882a593Smuzhiyun {
245*4882a593Smuzhiyun 	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
246*4882a593Smuzhiyun 	unsigned long dma_pfn;
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
249*4882a593Smuzhiyun 	max_pfn = max_low_pfn;
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun 	max_zone_pfn[ZONE_DMA] = dma_pfn;
252*4882a593Smuzhiyun 	max_zone_pfn[ZONE_NORMAL] = max_pfn;
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun 	/* Initialize mem_map[].  */
255*4882a593Smuzhiyun 	free_area_init(max_zone_pfn);
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	/* Initialize the kernel's ZERO_PGE. */
258*4882a593Smuzhiyun 	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun #endif /* CONFIG_DISCONTIGMEM */
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
/*
 * Undo switch_to_system_map() before handing control back to the SRM
 * console (e.g. on reboot): put the VPTB back where SRM expects it and
 * reload the console's original PCB/page tables saved in original_pcb.
 * The sequence and the TLB flushes on either side are load-bearing.
 */
void
srm_paging_stop (void)
{
	/* Move the vptb back to where the SRM console expects it.  */
	swapper_pg_dir[1] = swapper_pg_dir[1023];
	tbia();
	wrvptptr(0x200000000UL);
	hwrpb->vptb = 0x200000000UL;
	hwrpb_update_checksum(hwrpb);

	/* Reload the page tables that the console had in use.  */
	load_PCB(&original_pcb);
	tbia();
}
277*4882a593Smuzhiyun #endif
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun void __init
mem_init(void)280*4882a593Smuzhiyun mem_init(void)
281*4882a593Smuzhiyun {
282*4882a593Smuzhiyun 	set_max_mapnr(max_low_pfn);
283*4882a593Smuzhiyun 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
284*4882a593Smuzhiyun 	memblock_free_all();
285*4882a593Smuzhiyun 	mem_init_print_info(NULL);
286*4882a593Smuzhiyun }
287