xref: /OK3568_Linux_fs/kernel/arch/m68k/sun3/mmu_emu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun ** Tablewalk MMU emulator
4*4882a593Smuzhiyun **
5*4882a593Smuzhiyun ** by Toshiyasu Morita
6*4882a593Smuzhiyun **
7*4882a593Smuzhiyun ** Started 1/16/98 @ 2:22 am
8*4882a593Smuzhiyun */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/init.h>
11*4882a593Smuzhiyun #include <linux/mman.h>
12*4882a593Smuzhiyun #include <linux/mm.h>
13*4882a593Smuzhiyun #include <linux/kernel.h>
14*4882a593Smuzhiyun #include <linux/ptrace.h>
15*4882a593Smuzhiyun #include <linux/delay.h>
16*4882a593Smuzhiyun #include <linux/memblock.h>
17*4882a593Smuzhiyun #include <linux/bitops.h>
18*4882a593Smuzhiyun #include <linux/module.h>
19*4882a593Smuzhiyun #include <linux/sched/mm.h>
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #include <asm/setup.h>
22*4882a593Smuzhiyun #include <asm/traps.h>
23*4882a593Smuzhiyun #include <linux/uaccess.h>
24*4882a593Smuzhiyun #include <asm/page.h>
25*4882a593Smuzhiyun #include <asm/sun3mmu.h>
26*4882a593Smuzhiyun #include <asm/segment.h>
27*4882a593Smuzhiyun #include <asm/oplib.h>
28*4882a593Smuzhiyun #include <asm/mmu_context.h>
29*4882a593Smuzhiyun #include <asm/dvma.h>
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #undef DEBUG_MMU_EMU
33*4882a593Smuzhiyun #define DEBUG_PROM_MAPS
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun /*
36*4882a593Smuzhiyun ** Defines
37*4882a593Smuzhiyun */
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun #define CONTEXTS_NUM		8
40*4882a593Smuzhiyun #define SEGMAPS_PER_CONTEXT_NUM 2048
41*4882a593Smuzhiyun #define PAGES_PER_SEGMENT	16
42*4882a593Smuzhiyun #define PMEGS_NUM		256
43*4882a593Smuzhiyun #define PMEG_MASK		0xFF
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun /*
46*4882a593Smuzhiyun ** Globals
47*4882a593Smuzhiyun */
48*4882a593Smuzhiyun 
/* First PROM-mapped virtual address found above the kernel by
   mmu_emu_init(); used as the upper bound of the vmalloc area. */
unsigned long m68k_vmalloc_end;
EXPORT_SYMBOL(m68k_vmalloc_end);

/* Per-PMEG bookkeeping, indexed by PMEG number:
   pmeg_vaddr - virtual address the PMEG currently maps
   pmeg_alloc - 0 = free,
                1 = allocated to a user context (reclaimable),
                2 = reserved for the kernel/hardware (never reclaimed)
   pmeg_ctx   - owning context for state-1 PMEGs (0 otherwise) */
unsigned long pmeg_vaddr[PMEGS_NUM];
unsigned char pmeg_alloc[PMEGS_NUM];
unsigned char pmeg_ctx[PMEGS_NUM];

/* pointers to the mm structs for each task in each
   context. 0xffffffff is a marker for kernel context */
static struct mm_struct *ctx_alloc[CONTEXTS_NUM] = {
    [0] = (struct mm_struct *)0xffffffff
};

/* count of hardware contexts still unallocated; context 0 is
   permanently the kernel's, hence CONTEXTS_NUM-1.  Decremented by
   get_free_context(), incremented by clear_context(). */
static unsigned char ctx_avail = CONTEXTS_NUM-1;

/* array of pages to be marked off for the rom when we do mem_init later */
/* 256 pages lets the rom take up to 2mb of physical ram..  I really
   hope it never wants more than that. */
unsigned long rom_pages[256];
70*4882a593Smuzhiyun /* Print a PTE value in symbolic form. For debugging. */
/* Print a PTE value in symbolic form. For debugging. */
void print_pte (pte_t pte)
{
	/* Terse format: one character per attribute bit, then the page
	   type, so the whole thing is likely to fit on a single line. */
	static const struct {
		unsigned long mask;
		char c;
	} attr[] = {
		{ SUN3_PAGE_VALID,     'v' },
		{ SUN3_PAGE_WRITEABLE, 'w' },
		{ SUN3_PAGE_SYSTEM,    's' },
		{ SUN3_PAGE_NOCACHE,   'x' },
		{ SUN3_PAGE_ACCESSED,  'a' },
		{ SUN3_PAGE_MODIFIED,  'm' },
	};
	unsigned long val = pte_val (pte);
	unsigned long ptype = val & SUN3_PAGE_TYPE_MASK;
	char flags[7];
	char *type;
	int n;

	for (n = 0; n < 6; n++)
		flags[n] = (val & attr[n].mask) ? attr[n].c : '-';
	flags[6] = '\0';

	if (ptype == SUN3_PAGE_TYPE_MEMORY)
		type = "memory";
	else if (ptype == SUN3_PAGE_TYPE_IO)
		type = "io";
	else if (ptype == SUN3_PAGE_TYPE_VME16)
		type = "vme16";
	else if (ptype == SUN3_PAGE_TYPE_VME32)
		type = "vme32";
	else
		type = "unknown?";

	pr_cont(" pte=%08lx [%07lx %s %s]\n",
		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT, flags, type);
}
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun /* Print the PTE value for a given virtual address. For debugging. */
/* Print the segmap entry and PTE for a virtual address. For debugging. */
void print_pte_vaddr (unsigned long vaddr)
{
	unsigned long segmap = sun3_get_segmap (vaddr);

	pr_cont(" vaddr=%lx [%02lx]", vaddr, segmap);
	print_pte (__pte (sun3_get_pte (vaddr)));
}
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun /*
125*4882a593Smuzhiyun  * Initialise the MMU emulator.
126*4882a593Smuzhiyun  */
void __init mmu_emu_init(unsigned long bootmem_end)
{
	unsigned long seg, num;
	int i,j;

	/* Reset all software PMEG/ROM bookkeeping before scanning
	   the mappings the PROM left behind. */
	memset(rom_pages, 0, sizeof(rom_pages));
	memset(pmeg_vaddr, 0, sizeof(pmeg_vaddr));
	memset(pmeg_alloc, 0, sizeof(pmeg_alloc));
	memset(pmeg_ctx, 0, sizeof(pmeg_ctx));

	/* pmeg align the end of bootmem, adding another pmeg,
	 * later bootmem allocations will likely need it */
	bootmem_end = (bootmem_end + (2 * SUN3_PMEG_SIZE)) & ~SUN3_PMEG_MASK;

	/* mark all of the pmegs used thus far as reserved
	   (state 2 = never handed out by mmu_emu_map_pmeg) */
	for (i=0; i < __pa(bootmem_end) / SUN3_PMEG_SIZE ; ++i)
		pmeg_alloc[i] = 2;


	/* I'm thinking that most of the top pmeg's are going to be
	   used for something, and we probably shouldn't risk it */
	for(num = 0xf0; num <= 0xff; num++)
		pmeg_alloc[num] = 2;

	/* liberate all existing mappings in the rest of kernel space:
	   any segmap entry whose PMEG we haven't reserved above gets
	   invalidated so the PMEG can be reused later */
	for(seg = bootmem_end; seg < 0x0f800000; seg += SUN3_PMEG_SIZE) {
		i = sun3_get_segmap(seg);

		if(!pmeg_alloc[i]) {
#ifdef DEBUG_MMU_EMU
			pr_info("freed:");
			print_pte_vaddr (seg);
#endif
			sun3_put_segmap(seg, SUN3_INVALID_PMEG);
		}
	}

	/* Scan 0x0F800000-0x10000000 for mappings still present (PROM
	   area): the lowest one bounds the vmalloc region, and every
	   PMEG in use there is reserved so we never reallocate it. */
	j = 0;
	for (num=0, seg=0x0F800000; seg<0x10000000; seg+=16*PAGE_SIZE) {
		if (sun3_get_segmap (seg) != SUN3_INVALID_PMEG) {
#ifdef DEBUG_PROM_MAPS
			/* note: break after the first page — only the
			   segment's first PTE is printed */
			for(i = 0; i < 16; i++) {
				pr_info("mapped:");
				print_pte_vaddr (seg + (i*PAGE_SIZE));
				break;
			}
#endif
			// the lowest mapping here is the end of our
			// vmalloc region
			if (!m68k_vmalloc_end)
				m68k_vmalloc_end = seg;

			// mark the segmap alloc'd, and reserve any
			// of the first 0xbff pages the hardware is
			// already using...  does any sun3 support > 24mb?
			pmeg_alloc[sun3_get_segmap(seg)] = 2;
		}
	}

	dvma_init();


	/* blank everything below the kernel, and we've got the base
	   mapping to start all the contexts off with... */
	for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
		sun3_put_segmap(seg, SUN3_INVALID_PMEG);

	/* Replicate context 0's segmap into every other hardware
	   context via the PROM's setctxt vector, so all contexts share
	   the base kernel mapping. */
	set_fs(MAKE_MM_SEG(3));
	for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
		i = sun3_get_segmap(seg);
		for(j = 1; j < CONTEXTS_NUM; j++)
			(*(romvec->pv_setctxt))(j, (void *)seg, i);
	}
	set_fs(KERNEL_DS);

}
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun /* erase the mappings for a dead context.  Uses the pg_dir for hints
205*4882a593Smuzhiyun    as the pmeg tables proved somewhat unreliable, and unmapping all of
206*4882a593Smuzhiyun    TASK_SIZE was much slower and no more stable. */
207*4882a593Smuzhiyun /* todo: find a better way to keep track of the pmegs used by a
208*4882a593Smuzhiyun    context for when they're cleared */
void clear_context(unsigned long context)
{
     unsigned char oldctx;
     unsigned long i;

     /* Context 0 belongs to the kernel and has no ctx_alloc slot to
	release; for any other context, detach the owning mm first. */
     if(context) {
	     if(!ctx_alloc[context])
		     panic("clear_context: context not allocated\n");

	     /* tell the mm it no longer owns a hardware context */
	     ctx_alloc[context]->context = SUN3_INVALID_CONTEXT;
	     ctx_alloc[context] = (struct mm_struct *)0;
	     ctx_avail++;
     }

     oldctx = sun3_get_context();

     /* switch the MMU to the dying context so the segmap writes
	below hit that context's segment map */
     sun3_put_context(context);

     /* drop every reclaimable PMEG owned by this context
	(alloc state 1); reserved PMEGs (state 2) are left alone */
     for(i = 0; i < SUN3_INVALID_PMEG; i++) {
	     if((pmeg_ctx[i] == context) && (pmeg_alloc[i] == 1)) {
		     sun3_put_segmap(pmeg_vaddr[i], SUN3_INVALID_PMEG);
		     pmeg_ctx[i] = 0;
		     pmeg_alloc[i] = 0;
		     pmeg_vaddr[i] = 0;
	     }
     }

     sun3_put_context(oldctx);
}
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun /* gets an empty context.  if full, kills the next context listed to
240*4882a593Smuzhiyun    die first */
241*4882a593Smuzhiyun /* This context invalidation scheme is, well, totally arbitrary, I'm
242*4882a593Smuzhiyun    sure it could be much more intelligent...  but it gets the job done
243*4882a593Smuzhiyun    for now without much overhead in making it's decision. */
244*4882a593Smuzhiyun /* todo: come up with optimized scheme for flushing contexts */
get_free_context(struct mm_struct * mm)245*4882a593Smuzhiyun unsigned long get_free_context(struct mm_struct *mm)
246*4882a593Smuzhiyun {
247*4882a593Smuzhiyun 	unsigned long new = 1;
248*4882a593Smuzhiyun 	static unsigned char next_to_die = 1;
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 	if(!ctx_avail) {
251*4882a593Smuzhiyun 		/* kill someone to get our context */
252*4882a593Smuzhiyun 		new = next_to_die;
253*4882a593Smuzhiyun 		clear_context(new);
254*4882a593Smuzhiyun 		next_to_die = (next_to_die + 1) & 0x7;
255*4882a593Smuzhiyun 		if(!next_to_die)
256*4882a593Smuzhiyun 			next_to_die++;
257*4882a593Smuzhiyun 	} else {
258*4882a593Smuzhiyun 		while(new < CONTEXTS_NUM) {
259*4882a593Smuzhiyun 			if(ctx_alloc[new])
260*4882a593Smuzhiyun 				new++;
261*4882a593Smuzhiyun 			else
262*4882a593Smuzhiyun 				break;
263*4882a593Smuzhiyun 		}
264*4882a593Smuzhiyun 		// check to make sure one was really free...
265*4882a593Smuzhiyun 		if(new == CONTEXTS_NUM)
266*4882a593Smuzhiyun 			panic("get_free_context: failed to find free context");
267*4882a593Smuzhiyun 	}
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	ctx_alloc[new] = mm;
270*4882a593Smuzhiyun 	ctx_avail--;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	return new;
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun /*
276*4882a593Smuzhiyun  * Dynamically select a `spare' PMEG and use it to map virtual `vaddr' in
277*4882a593Smuzhiyun  * `context'. Maintain internal PMEG management structures. This doesn't
278*4882a593Smuzhiyun  * actually map the physical address, but does clear the old mappings.
279*4882a593Smuzhiyun  */
280*4882a593Smuzhiyun //todo: better allocation scheme? but is extra complexity worthwhile?
281*4882a593Smuzhiyun //todo: only clear old entries if necessary? how to tell?
282*4882a593Smuzhiyun 
inline void mmu_emu_map_pmeg (int context, int vaddr)
{
	/* rotating allocation cursor; starts above the low PMEGs
	   reserved at init, and being unsigned char it wraps 255->0 */
	static unsigned char curr_pmeg = 128;
	int i;

	/* Round address to PMEG boundary. */
	vaddr &= ~SUN3_PMEG_MASK;

	/* Find a spare one (skip permanently-reserved state-2 PMEGs;
	   a state-1 PMEG may be stolen from its context). */
	while (pmeg_alloc[curr_pmeg] == 2)
		++curr_pmeg;


#ifdef DEBUG_MMU_EMU
	pr_info("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n",
		curr_pmeg, context, vaddr);
#endif

	/* Invalidate old mapping for the pmeg, if any: switch to the
	   previous owner's context to unhook its segmap entry, then
	   switch back. */
	if (pmeg_alloc[curr_pmeg] == 1) {
		sun3_put_context(pmeg_ctx[curr_pmeg]);
		sun3_put_segmap (pmeg_vaddr[curr_pmeg], SUN3_INVALID_PMEG);
		sun3_put_context(context);
	}

	/* Update PMEG management structures. */
	// don't take pmeg's away from the kernel...
	if(vaddr >= PAGE_OFFSET) {
		/* map kernel pmegs into all contexts */
		unsigned char i;	/* NOTE(review): shadows outer `int i` */

		for(i = 0; i < CONTEXTS_NUM; i++) {
			sun3_put_context(i);
			sun3_put_segmap (vaddr, curr_pmeg);
		}
		sun3_put_context(context);
		/* kernel PMEGs become reserved and context-less */
		pmeg_alloc[curr_pmeg] = 2;
		pmeg_ctx[curr_pmeg] = 0;

	}
	else {
		/* user PMEG: reclaimable, owned by `context' */
		pmeg_alloc[curr_pmeg] = 1;
		pmeg_ctx[curr_pmeg] = context;
		sun3_put_segmap (vaddr, curr_pmeg);

	}
	pmeg_vaddr[curr_pmeg] = vaddr;

	/* Set hardware mapping and clear the old PTE entries. */
	for (i=0; i<SUN3_PMEG_SIZE; i+=SUN3_PTE_SIZE)
		sun3_put_pte (vaddr + i, SUN3_PAGE_SYSTEM);

	/* Consider a different one next time. */
	++curr_pmeg;
}
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun /*
340*4882a593Smuzhiyun  * Handle a pagefault at virtual address `vaddr'; check if there should be a
341*4882a593Smuzhiyun  * page there (specifically, whether the software pagetables indicate that
342*4882a593Smuzhiyun  * there is). This is necessary due to the limited size of the second-level
343*4882a593Smuzhiyun  * Sun3 hardware pagetables (256 groups of 16 pages). If there should be a
344*4882a593Smuzhiyun  * mapping present, we select a `spare' PMEG and use it to create a mapping.
345*4882a593Smuzhiyun  * `read_flag' is nonzero for a read fault; zero for a write. Returns nonzero
346*4882a593Smuzhiyun  * if we successfully handled the fault.
347*4882a593Smuzhiyun  */
348*4882a593Smuzhiyun //todo: should we bump minor pagefault counter? if so, here or in caller?
349*4882a593Smuzhiyun //todo: possibly inline this into bus_error030 in <asm/buserror.h> ?
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun // kernel_fault is set when a kernel page couldn't be demand mapped,
352*4882a593Smuzhiyun // and forces another try using the kernel page table.  basically a
353*4882a593Smuzhiyun // hack so that vmalloc would work correctly.
354*4882a593Smuzhiyun 
int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
{
	unsigned long segment, offset;
	unsigned char context;
	pte_t *pte;
	pgd_t * crp;

	/* Choose the page-table root: no mm (kernel thread) or an
	   explicit kernel_fault means walk swapper_pg_dir, otherwise
	   walk the current task's tables in its own context. */
	if(current->mm == NULL) {
		crp = swapper_pg_dir;
		context = 0;
	} else {
		context = current->mm->context;
		if(kernel_fault)
			crp = swapper_pg_dir;
		else
			crp = current->mm->pgd;
	}

#ifdef DEBUG_MMU_EMU
	pr_info("mmu_emu_handle_fault: vaddr=%lx type=%s crp=%p\n",
		vaddr, read_flag ? "read" : "write", crp);
#endif

	/* split the address: 11-bit segment index into the pgd, 4-bit
	   page index within the 16-page segment */
	segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
	offset  = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;

#ifdef DEBUG_MMU_EMU
	pr_info("mmu_emu_handle_fault: segment=%lx offset=%lx\n", segment,
		offset);
#endif

	/* pgd entry holds the (physical, presumably — see __va below)
	   address of the pte table for this segment */
	pte = (pte_t *) pgd_val (*(crp + segment));

//todo: next line should check for valid pmd properly.
	if (!pte) {
//                pr_info("mmu_emu_handle_fault: invalid pmd\n");
                return 0;
        }

	pte = (pte_t *) __va ((unsigned long)(pte + offset));

	/* Make sure this is a valid page */
	if (!(pte_val (*pte) & SUN3_PAGE_VALID))
		return 0;

	/* Make sure there's a pmeg allocated for the page */
	if (sun3_get_segmap (vaddr&~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG)
		mmu_emu_map_pmeg (context, vaddr);

	/* Write the pte value to hardware MMU */
	sun3_put_pte (vaddr&PAGE_MASK, pte_val (*pte));

	/* Update software copy of the pte value */
// I'm not sure this is necessary. If this is required, we ought to simply
// copy this out when we reuse the PMEG or at some other convenient time.
// Doing it here is fairly meaningless, anyway, as we only know about the
// first access to a given page. --m
	if (!read_flag) {
		if (pte_val (*pte) & SUN3_PAGE_WRITEABLE)
			pte_val (*pte) |= (SUN3_PAGE_ACCESSED
					   | SUN3_PAGE_MODIFIED);
		else
			return 0;	/* Write-protect error. */
	} else
		pte_val (*pte) |= SUN3_PAGE_ACCESSED;

#ifdef DEBUG_MMU_EMU
	pr_info("seg:%ld crp:%p ->", get_fs().seg, crp);
	print_pte_vaddr (vaddr);
	pr_cont("\n");
#endif

	/* fault handled successfully */
	return 1;
}
429