/* arch/mips/mm/tlb-r4k.c (from OK3568_Linux_fs, revision 4882a59341e53eb6f0b4789bf948001014eff981) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2EF:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON64:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
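	/*
	 * Note: with Index pointing into the VTLB, tlbinvf invalidates the
	 * whole VTLB; with Index pointing at an FTLB entry it invalidates
	 * only that FTLB set, which is why the loop below has to step
	 * through the FTLB one set at a time.
	 */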
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
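			/*
			 * UNIQUE_ENTRYHI() generates a distinct VPN2 in the
			 * unmapped kernel segment for each index, so these
			 * dummy entries can never match a real lookup and
			 * never conflict with one another.
			 */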
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
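		/*
		 * Each TLB entry maps an even/odd pair of pages through
		 * EntryLo0/EntryLo1, so the range is counted in double-page
		 * units. Probing per page is only worthwhile for small
		 * ranges; past half the TLB (an eighth when an FTLB is
		 * present) it is cheaper to drop the whole context.
		 */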
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			unsigned long old_entryhi, old_mmid;
			int newpid = cpu_asid(cpu, mm);

			old_entryhi = read_c0_entryhi();
			if (cpu_has_mmid) {
				old_mmid = read_c0_memorymapid();
				write_c0_memorymapid(newpid);
			}
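			/*
			 * With MMID the address space ID lives in the
			 * MemoryMapID register rather than in EntryHi, so
			 * EntryHi carries only the virtual address during
			 * the probes below.
			 */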

			htw_stop();
			while (start < end) {
				int idx;

				if (cpu_has_mmid)
					write_c0_entryhi(start);
				else
					write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(old_entryhi);
			if (cpu_has_mmid)
				write_c0_memorymapid(old_mmid);
			htw_start();
		} else {
			drop_mmu_context(mm);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
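	/* size is now in double-page units, one per even/odd TLB entry pair. */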
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_micro_tlb();
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long old_mmid;
		unsigned long flags, old_entryhi;
		int idx;

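		/* Align to the even page of the EntryLo0/EntryLo1 pair. */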
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		old_entryhi = read_c0_entryhi();
		htw_stop();
		if (cpu_has_mmid) {
			old_mmid = read_c0_memorymapid();
			write_c0_entryhi(page);
			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
		} else {
			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
		}
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(old_entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set, so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	address &= (PAGE_MASK << 1);
	if (cpu_has_mmid) {
		write_c0_entryhi(address);
	} else {
		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
		write_c0_entryhi(address | pid);
	}
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
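		/*
		 * One TLB entry maps the whole huge page: EntryLo0 covers
		 * the first half and EntryLo1 the second. The PFN field of
		 * EntryLo encodes the physical address shifted right by 6,
		 * hence the (HPAGE_SIZE >> 7) offset for the second half.
		 */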
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

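		/*
		 * With 64-bit physical addresses on a 32-bit CPU the PTE is
		 * split across pte_high/pte_low; under XPA the extended PFN
		 * bits are written through the writex_c0_entrylo* accessors.
		 */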
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb_vm(vma);
	local_irq_restore(flags);
}

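/*
 * Wired entries occupy TLB indices below the CP0 Wired register and are
 * never selected for replacement by tlbwr, so a mapping installed here
 * stays resident until it is explicitly overwritten.
 */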
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned int old_mmid;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

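/*
 * Probe for huge page support by writing the huge PageMask value and
 * reading it back: a CPU that cannot map pages of that size will not
 * retain the value in c0_pagemask.
 */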
int has_transparent_hugepage(void)
{
	static unsigned int mask = -1;

	if (mask == -1) {	/* first call comes during __init */
		unsigned long flags;

		local_irq_save(flags);
		write_c0_pagemask(PM_HUGE_MASK);
		back_to_back_c0_hazard();
		mask = read_c0_pagemask();
		write_c0_pagemask(PM_DEFAULT_MASK);
		local_irq_restore(flags);
	}
	return mask == PM_HUGE_MASK;
}
EXPORT_SYMBOL(has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

int temp_tlb_entry;
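/* Allocated downward from the top of the TLB; reset in r4k_tlb_configure(). */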

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}

static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
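/*
 * "ntlb=" restricts how many TLB entries the kernel uses (a debugging
 * aid); tlb_init() below applies it by wiring down the remainder.
 */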

/*
 * Configure the TLB (at init time or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no-read, no-exec bits, and enable large
		 * physical addresses.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

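/*
 * TLB contents and the CP0 state set up above are lost when a CPU is
 * power gated, so reprogram them whenever a CPU leaves (or fails to
 * enter) a low-power state.
 */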
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);