xref: /OK3568_Linux_fs/kernel/arch/parisc/lib/delay.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *	Precise Delay Loops for parisc
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *	based on code by:
6*4882a593Smuzhiyun  *	Copyright (C) 1993 Linus Torvalds
7*4882a593Smuzhiyun  *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
8*4882a593Smuzhiyun  *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  *	parisc implementation:
11*4882a593Smuzhiyun  *	Copyright (C) 2013 Helge Deller <deller@gmx.de>
12*4882a593Smuzhiyun  */
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include <linux/module.h>
16*4882a593Smuzhiyun #include <linux/preempt.h>
17*4882a593Smuzhiyun #include <linux/init.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <asm/delay.h>
20*4882a593Smuzhiyun #include <asm/special_insns.h>    /* for mfctl() */
21*4882a593Smuzhiyun #include <asm/processor.h> /* for boot_cpu_data */
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun /* CR16 based delay: */
/*
 * Busy-wait until the per-CPU CR16 interval timer has advanced by
 * __loops ticks.  The loop briefly re-enables preemption on every
 * iteration so higher-priority (RT) tasks can run; if that causes a
 * migration to another CPU, the elapsed-tick accounting is restarted
 * against the new CPU's CR16 (CR16 counters are per-CPU and not
 * synchronized across CPUs).
 *
 * Guarantees a delay of *at least* the requested number of ticks; a
 * migration can only make the wait longer, never shorter.
 */
static void __cr16_delay(unsigned long __loops)
{
	/*
	 * Note: Due to unsigned math, cr16 rollovers shouldn't be
	 * a problem here. However, on 32 bit, we need to make sure
	 * we don't pass in too big a value. The current default
	 * value of MAX_UDELAY_MS should help prevent this.
	 */
	u32 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	bclock = mfctl(16);	/* CR16: per-CPU interval timer */
	for (;;) {
		now = mfctl(16);
		/* Unsigned subtraction handles CR16 wraparound correctly. */
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		asm volatile("	nop\n");
		barrier();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since CR16's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			/* Credit the ticks already waited on the old CPU. */
			loops -= (now - bclock);
			cpu = smp_processor_id();
			bclock = mfctl(16);
		}
	}
	preempt_enable();
}
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 
__udelay(unsigned long usecs)68*4882a593Smuzhiyun void __udelay(unsigned long usecs)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun 	__cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun EXPORT_SYMBOL(__udelay);
73