// SPDX-License-Identifier: GPL-2.0-only
/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *		Modified RID allocation for SMP
 *          Goutham Rao <goutham.rao@intel.com>
 *              IPI based ptc implementation and A-step IPI implementation.
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
 * Copyright (C) 2007 Intel Corp
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/tlb.h>

static struct {
	u64 mask;		/* mask of supported purge page-sizes */
	unsigned long max_bits;	/* log2 of largest supported purge page-size */
} purge;

struct ia64_ctx ia64_ctx = {
	.lock =	__SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
	.next =	1,
	.max_ctx = ~0U
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* Number of TR slots in current processor */
DEFINE_PER_CPU(u8, ia64_tr_used); /* Max slot number used by kernel */

struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has set up ia64_ctx.max_ctx based on
 * the maximum RID that is supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
	ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					 SMP_CACHE_BYTES);
	if (!ia64_ctx.bitmap)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      (ia64_ctx.max_ctx + 1) >> 3);
	ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					   SMP_CACHE_BYTES);
	if (!ia64_ctx.flushmap)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      (ia64_ctx.max_ctx + 1) >> 3);
}
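
/*
 * Example (illustrative values): with max_ctx = 0x3ffff, i.e. 18-bit
 * region IDs, each of the two bitmaps above needs (0x3ffff + 1) >> 3 =
 * 32KB, one bit per context number.
 */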

/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

	for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}

	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);

	/*
	 * can't call flush_tlb_all() here because of race condition
	 * with O(1) scheduler [EF]
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}
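
/*
 * Example (illustrative): when context 512 is retired on another CPU,
 * its bit is parked in flushmap; the xchg/XOR above atomically drains
 * flushmap and clears bit 512 in bitmap, so that context number
 * becomes allocatable again once each remote CPU honors its
 * ia64_need_tlb_flush flag.
 */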

/*
 * Implement "spinaphores" ... like counting semaphores, but they
 * spin instead of sleeping.  If there are ever any other users for
 * this primitive it can be moved up to a spinaphore.h header.
 */
struct spinaphore {
	unsigned long	ticket;
	unsigned long	serve;
};

static inline void spinaphore_init(struct spinaphore *ss, int val)
{
	ss->ticket = 0;
	ss->serve = val;
}

static inline void down_spin(struct spinaphore *ss)
{
	unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;

	if (time_before(t, ss->serve))
		return;

	ia64_invala();

	for (;;) {
		asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
		if (time_before(t, serve))
			return;
		cpu_relax();
	}
}

static inline void up_spin(struct spinaphore *ss)
{
	ia64_fetchadd(1, &ss->serve, rel);
}
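
/*
 * Usage sketch (illustrative only): a spinaphore initialized to N
 * admits at most N concurrent holders; the (N+1)th caller busy-waits
 * on ss->serve until a holder releases:
 *
 *	static struct spinaphore sem;
 *
 *	spinaphore_init(&sem, 2);	// admit two CPUs at a time
 *	down_spin(&sem);		// spins once two tickets are out
 *	// ...issue ptc.g/ptc.ga purges...
 *	up_spin(&sem);			// advance serve, admit next ticket
 */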

static struct spinaphore ptcg_sem;
static u16 nptcg = 1;
static int need_ptcg_sem = 1;
static int toolatetochangeptcgsem = 0;

/*
 * The kernel parameter "nptcg=" overrides the maximum number of
 * concurrent global TLB purges, which is otherwise reported by either
 * PAL or the SAL PALO table.
 *
 * There is no sanity checking of the nptcg value; it is the user's
 * responsibility to supply a value that is valid for the platform.
 * Otherwise, the kernel may hang in some cases.
 */
static int __init
set_nptcg(char *str)
{
	int value = 0;

	get_option(&str, &value);
	setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);

	return 1;
}

__setup("nptcg=", set_nptcg);
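
/*
 * Example (hypothetical command line): booting with "nptcg=2" pins
 * nptcg to 2 and re-initializes ptcg_sem, overriding whatever
 * PAL_VM_SUMMARY or the PALO table would later report.
 */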

/*
 * The maximum number of simultaneous ptc.g purges in the system can
 * be defined by PAL_VM_SUMMARY (in which case we should take the
 * smallest value for any cpu in the system) or by the PAL override
 * table (in which case we should ignore the value from PAL_VM_SUMMARY).
 *
 * The kernel parameter "nptcg=" overrides the maximum number of
 * simultaneous ptc.g purges defined in either PAL_VM_SUMMARY or the
 * PAL override table. In that case, we should ignore the value from
 * both sources.
 *
 * Complicating the logic here is the fact that num_possible_cpus()
 * isn't fully set up until we start bringing cpus online.
 */
void
setup_ptcg_sem(int max_purges, int nptcg_from)
{
	static int kp_override;
	static int palo_override;
	static int firstcpu = 1;

	if (toolatetochangeptcgsem) {
		if (nptcg_from == NPTCG_FROM_PAL && max_purges == 0)
			BUG_ON(1 < nptcg);
		else
			BUG_ON(max_purges < nptcg);
		return;
	}

	if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
		kp_override = 1;
		nptcg = max_purges;
		goto resetsema;
	}
	if (kp_override) {
		need_ptcg_sem = num_possible_cpus() > nptcg;
		return;
	}

	if (nptcg_from == NPTCG_FROM_PALO) {
		palo_override = 1;

		/* In PALO max_purges == 0 really means it! */
		if (max_purges == 0)
			panic("Whoa! Platform does not support global TLB purges.\n");
		nptcg = max_purges;
		if (nptcg == PALO_MAX_TLB_PURGES) {
			need_ptcg_sem = 0;
			return;
		}
		goto resetsema;
	}
	if (palo_override) {
		if (nptcg != PALO_MAX_TLB_PURGES)
			need_ptcg_sem = (num_possible_cpus() > nptcg);
		return;
	}

	/* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
	if (max_purges == 0) max_purges = 1;

	if (firstcpu) {
		nptcg = max_purges;
		firstcpu = 0;
	}
	if (max_purges < nptcg)
		nptcg = max_purges;
	if (nptcg == PAL_MAX_PURGES) {
		need_ptcg_sem = 0;
		return;
	} else
		need_ptcg_sem = (num_possible_cpus() > nptcg);

resetsema:
	spinaphore_init(&ptcg_sem, max_purges);
}
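
/*
 * Worked example (illustrative): on a 4-CPU system where every CPU's
 * PAL_VM_SUMMARY reports max_purges = 2, the boot CPU sets nptcg = 2
 * and later CPUs can only lower it.  Since 4 possible CPUs > 2
 * purgers, need_ptcg_sem stays 1 and ptcg_sem admits two concurrent
 * ptc.g issuers.
 */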

#ifdef CONFIG_SMP
static void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	struct mm_struct *active_mm = current->active_mm;

	toolatetochangeptcgsem = 1;

	if (mm != active_mm) {
		/* Restore region IDs for mm */
		if (mm && active_mm) {
			activate_context(mm);
		} else {
			flush_tlb_all();
			return;
		}
	}

	if (need_ptcg_sem)
		down_spin(&ptcg_sem);

	do {
		/*
		 * Flush ALAT entries also.
		 */
		ia64_ptcga(start, (nbits << 2));
		ia64_srlz_i();
		start += (1UL << nbits);
	} while (start < end);

	if (need_ptcg_sem)
		up_spin(&ptcg_sem);

	if (mm != active_mm) {
		activate_context(active_mm);
	}
}
#endif /* CONFIG_SMP */

void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
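
/*
 * Example (hypothetical PAL_PTCE_INFO values): with count = {4, 8} and
 * stride = {0x2000, 0x1000}, the nested loops above issue 4 * 8 = 32
 * ptc.e instructions, stepping addr by 0x1000 within a row and by a
 * further 0x2000 between rows.
 */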

static void
__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif

	nbits = ia64_fls(size + 0xfff);
	while (unlikely(((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);

	preempt_disable();
#ifdef CONFIG_SMP
	if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
		ia64_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif
	do {
		ia64_ptcl(start, (nbits << 2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
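
/*
 * Example (illustrative): for a 20KB range, ia64_fls(size + 0xfff)
 * yields nbits = 14 (16KB).  If the 16KB purge size is absent from
 * purge.mask, nbits is bumped to the next supported size, start is
 * aligned down to that boundary, and the ptc.l loop walks the range
 * in (1UL << nbits) strides.
 */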

void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	if (unlikely(end - start >= 1024*1024*1024*1024UL
			|| REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
		/*
		 * If we flush more than a terabyte or across regions, we're
		 * probably better off just flushing the entire TLB(s).  This
		 * should be very rare and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/* flush the address range from the tlb */
		__flush_tlb_range(vma, start, end);
		/* flush the virt. page-table area mapping the addr range */
		__flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
	}
}
EXPORT_SYMBOL(flush_tlb_range);

void ia64_tlb_init(void)
{
	ia64_ptce_info_t ptce_info;
	u64 tr_pgbits;
	long status;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	int cpu = smp_processor_id();

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke leftovers from bootstrapping... */
	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

	if (status) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		per_cpu(ia64_tr_num, cpu) = 8;
		return;
	}
	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry + 1;
	if (per_cpu(ia64_tr_num, cpu) >
				(vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1))
		per_cpu(ia64_tr_num, cpu) =
				vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1;
	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
		static int justonce = 1;

		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
		if (justonce) {
			justonce = 0;
			printk(KERN_DEBUG "TR register number exceeds "
			       "IA64_TR_ALLOC_MAX!\n");
		}
	}
}

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
	u64 tr_log_size;
	u64 tr_end;
	u64 va_rr = ia64_get_rr(va);
	u64 va_rid = RR_TO_RID(va_rr);
	u64 va_end = va + (1 << log_size) - 1;

	if (va_rid != RR_TO_RID(p->rr))
		return 0;
	tr_log_size = (p->itir & 0xff) >> 2;
	tr_end = p->ifa + (1 << tr_log_size) - 1;

	if (va > tr_end || p->ifa > va_end)
		return 0;
	return 1;
}
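
/*
 * Worked example (illustrative values): an inserted TR with
 * ifa = 0x4000 and itir log size 14 covers [0x4000, 0x7fff].  A query
 * for va = 0x6000, log_size = 12 covers [0x6000, 0x6fff]; the RIDs
 * match and neither range ends before the other begins, so the
 * function reports an overlap.
 */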

/*
 * ia64_insert_tr in virtual mode. Allocate a TR slot
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 *
 * va	: virtual address.
 * pte	: pte entries inserted.
 * log_size: range to be covered.
 *
 * Return value:  < 0  : error number
 *		  >= 0 : slot number allocated for TR.
 *
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
	int i, r;
	unsigned long psr;
	struct ia64_tr_entry *p;
	int cpu = smp_processor_id();

	if (!ia64_idtrs[cpu]) {
		ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX,
						sizeof(struct ia64_tr_entry),
						GFP_KERNEL);
		if (!ia64_idtrs[cpu])
			return -ENOMEM;
	}
	r = -EINVAL;
	/* Check overlap with existing TR entries */
	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped entry "
						"inserted for TR register!!\n");
					goto out;
				}
		}
	}
	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped entry "
						"inserted for TR register!!\n");
					goto out;
				}
		}
	}

	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
		switch (target_mask & 0x3) {
		case 1:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
				goto found;
			continue;
		case 2:
			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		case 3:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		default:
			r = -EINVAL;
			goto out;
		}
	}
found:
	if (i >= per_cpu(ia64_tr_num, cpu))
		return -EBUSY;

	/* Record TR info for MCA handler use! */
	if (i > per_cpu(ia64_tr_used, cpu))
		per_cpu(ia64_tr_used, cpu) = i;

	psr = ia64_clear_ic();
	if (target_mask & 0x1) {
		ia64_itr(0x1, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	if (target_mask & 0x2) {
		ia64_itr(0x2, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	ia64_set_psr(psr);
	r = i;
out:
	return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);
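
/*
 * Usage sketch (illustrative, error handling elided): pin a 16MB
 * mapping in both the instruction and data TRs, then release it:
 *
 *	int slot;
 *
 *	preempt_disable();
 *	slot = ia64_itr_entry(0x3, va, pte, 24);  // 0x3 = itr+dtr, 2^24
 *	if (slot >= 0)
 *		ia64_ptr_entry(0x3, slot);
 *	preempt_enable();
 */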

/*
 * ia64_purge_tr
 *
 * target_mask: 0x1: purge itr, 0x2: purge dtr, 0x3: purge idtr.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
	int cpu = smp_processor_id();
	int i;
	struct ia64_tr_entry *p;

	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
		return;

	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu] + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x1, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x2, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
			break;
	}
	per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);
592