// SPDX-License-Identifier: GPL-2.0
/*
 *  Precise Delay Loops for S390
 *
 *  Copyright IBM Corp. 1999, 2008
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	       Heiko Carstens <heiko.carstens@de.ibm.com>,
 */

#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/export.h>
#include <linux/irqflags.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/vtimer.h>
#include <asm/div64.h>
#include <asm/idle.h>

void __delay(unsigned long loops)
{
	/*
	 * To end the bloody stupid and useless discussion about the
	 * BogoMips number I took the liberty to define the __delay
	 * function in a way that the resulting BogoMips number will
	 * yield the megahertz number of the cpu. The important function
	 * is udelay and that is done using the tod clock. -- martin.
	 */
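	/* brct decrements the count and branches while it is non-zero, so
	 * this single-instruction loop runs (loops/2) + 1 times. */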
	asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
}
EXPORT_SYMBOL(__delay);

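/*
 * Delay with external interrupts disabled: program the clock comparator
 * to the end of the delay, allow only the clock comparator subclass in
 * control register 0 and enter an enabled-wait PSW via psw_idle().
 * CIF_IGNORE_IRQ is set around the wait so that the wakeup interrupt is
 * not passed on to the regular interrupt handlers.
 */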
static void __udelay_disabled(unsigned long long usecs)
{
	unsigned long cr0, cr0_new, psw_mask;
	struct s390_idle_data idle;
	u64 end;

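	/*
	 * One TOD clock unit is 2^-12 microseconds (bit 51 of the TOD
	 * clock ticks once per microsecond), so usecs << 12 converts the
	 * requested delay to TOD units, e.g. 1000 us -> 4096000 units.
	 */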
	end = get_tod_clock() + (usecs << 12);
	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_new |= (1UL << (63 - 52)); /* enable clock comparator irq */
	__ctl_load(cr0_new, 0, 0);
	psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
	set_clock_comparator(end);
	set_cpu_flag(CIF_IGNORE_IRQ);
	psw_idle(&idle, psw_mask);
	trace_hardirqs_off();
	clear_cpu_flag(CIF_IGNORE_IRQ);
	set_clock_comparator(S390_lowcore.clock_comparator);
	__ctl_load(cr0, 0, 0);
}

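/*
 * Delay with external interrupts enabled: if the next programmed clock
 * comparator event lies beyond the end of the delay, temporarily pull
 * the comparator in, then wait for any external interrupt and loop
 * until the TOD clock has passed the end of the delay.
 */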
static void __udelay_enabled(unsigned long long usecs)
{
	u64 clock_saved, end;

	end = get_tod_clock_fast() + (usecs << 12);
	do {
		clock_saved = 0;
		if (tod_after(S390_lowcore.clock_comparator, end)) {
			clock_saved = local_tick_disable();
			set_clock_comparator(end);
		}
		enabled_wait();
		if (clock_saved)
			local_tick_enable(clock_saved);
	} while (get_tod_clock_fast() < end);
}

/*
 * Waits for 'usecs' microseconds using the TOD clock comparator.
 */
void __udelay(unsigned long long usecs)
{
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);
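	/*
	 * Pick the wait variant based on the calling context: in hard
	 * interrupt context, or whenever interrupts were already
	 * disabled, the clock comparator wakeup has to be taken with
	 * the disabled variant; otherwise the cpu can simply wait for
	 * the next interrupt with interrupts enabled.
	 */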
	if (in_irq()) {
		__udelay_disabled(usecs);
		goto out;
	}
	if (in_softirq()) {
		if (raw_irqs_disabled_flags(flags))
			__udelay_disabled(usecs);
		else
			__udelay_enabled(usecs);
		goto out;
	}
	if (raw_irqs_disabled_flags(flags)) {
		local_bh_disable();
		__udelay_disabled(usecs);
		_local_bh_enable();
		goto out;
	}
	__udelay_enabled(usecs);
out:
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(__udelay);
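
/*
 * A minimal usage sketch (hypothetical caller; assumes the generic
 * udelay() wrapper from <linux/delay.h> resolves to __udelay() on s390):
 *
 *	udelay(100);	/- busy-wait for at least 100 microseconds -/
 *
 * Because the wait is bounded by the TOD clock, the delay is never
 * shorter than requested, only slightly longer.
 */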

/*
 * Simple udelay variant. To be used on startup and reboot
 * when the interrupt handler isn't working.
 */
void udelay_simple(unsigned long long usecs)
{
	u64 end;

	end = get_tod_clock_fast() + (usecs << 12);
	while (get_tod_clock_fast() < end)
		cpu_relax();
}

void __ndelay(unsigned long long nsecs)
{
	u64 end;

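	/*
	 * Convert nanoseconds to TOD clock units:
	 * nsecs * 512 / 125 == nsecs * 4096 / 1000.
	 */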
	nsecs <<= 9;
	do_div(nsecs, 125);
	end = get_tod_clock_fast() + nsecs;
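	/*
	 * Hand anything of a microsecond or more (bits above the low
	 * twelve TOD bits) to __udelay and busy-wait only for the
	 * sub-microsecond remainder.
	 */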
	if (nsecs & ~0xfffUL)
		__udelay(nsecs >> 12);
	while (get_tod_clock_fast() < end)
		barrier();
}
EXPORT_SYMBOL(__ndelay);