Lines matching: int, clock, stable, broken

1 // SPDX-License-Identifier: GPL-2.0-only
18 * clock with bounded drift between CPUs. The value of cpu_clock(i)
29 * cpu_clock(i) -- can be used from any context, including NMI.
30 * local_clock() -- is cpu_clock() on the current CPU.
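As a quick illustration of that API, here is a minimal module-style sketch; the module name "clkdemo" and its messages are made up, and it assumes an ordinary kernel-module build environment:

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sched/clock.h>

static int __init clkdemo_init(void)
{
	u64 t0, t1;

	t0 = local_clock();	/* ns on this CPU; usable from any context, including NMI */
	udelay(10);		/* something to time */
	t1 = local_clock();

	pr_info("clkdemo: section took %llu ns\n", t1 - t0);
	pr_info("clkdemo: cpu_clock(0) = %llu ns\n", cpu_clock(0));
	return 0;
}

static void __exit clkdemo_exit(void)
{
}

module_init(clkdemo_init);
module_exit(clkdemo_exit);
MODULE_LICENSE("GPL");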
41 * Otherwise it tries to create a semi-stable clock from a mixture of other
44 * - GTOD (clock monotonic)
45 * - sched_clock()
46 * - explicit idle events
60 * Scheduler clock - returns current time in nanosecond units.
62 * Architectures and sub-architectures can override this.
66 return (unsigned long long)(jiffies - INITIAL_JIFFIES) in sched_clock()
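The matching line above cuts off mid-expression; in current kernels the fallback scales the elapsed ticks to nanoseconds, roughly as in the following sketch:

#include <linux/jiffies.h>
#include <linux/ktime.h>

/* Jiffies-based fallback: ticks since boot, scaled to nanoseconds.
 * NSEC_PER_SEC / HZ is the length of one tick in ns. */
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}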
75 * We must start with !__sched_clock_stable because the unstable -> stable
76 * transition is accurate, while the stable -> unstable transition is not.
79 * Similarly, we start with __sched_clock_stable_early set, assuming we will become stable, such that there's only a single 1 -> 0 transition.
82 static int __sched_clock_stable_early = 1;
93 u64 clock; member
103 static inline struct sched_clock_data *cpu_sdc(int cpu) in cpu_sdc()
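cpu_sdc() hands back per-CPU state; a sketch of what that state plausibly looks like, with the three fields inferred from the accesses elsewhere in this listing (tick_raw, tick_gtod, clock):

#include <linux/types.h>
#include <linux/percpu.h>

struct sched_clock_data {
	u64	tick_raw;	/* sched_clock() sampled at the last tick  */
	u64	tick_gtod;	/* ktime_get_ns() sampled at the last tick */
	u64	clock;		/* last value this CPU handed out          */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}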
108 int sched_clock_stable(void) in sched_clock_stable()
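The single 1 -> 0 transition described above is a one-way latch built on a static key; a hedged sketch of that pattern, using the stock <linux/jump_label.h> API with the surrounding bookkeeping elided:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);

int sched_clock_stable(void)
{
	/* Patched to a plain branch, so it is cheap on hot paths. */
	return static_branch_likely(&__sched_clock_stable);
}

static void __set_sched_clock_stable(void)
{
	/* 0 -> 1: taken at most once, early, if the clock looks good. */
	static_branch_enable(&__sched_clock_stable);
}

static void __clear_sched_clock_stable(void)
{
	/* 1 -> 0: the single, final transition; never re-enabled. */
	static_branch_disable(&__sched_clock_stable);
}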
115 scd->tick_gtod = ktime_get_ns(); in __scd_stamp()
116 scd->tick_raw = sched_clock(); in __scd_stamp()
125 * to disable IRQs in order to get a consistent scd->tick* reading. in __set_sched_clock_stable()
130 * Attempt to make the (initial) unstable->stable transition continuous. in __set_sched_clock_stable()
132 __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw); in __set_sched_clock_stable()
135 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n", in __set_sched_clock_stable()
136 scd->tick_gtod, __gtod_offset, in __set_sched_clock_stable()
137 scd->tick_raw, __sched_clock_offset); in __set_sched_clock_stable()
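The offset computed in __set_sched_clock_stable() is what makes the switch seamless: it is chosen so that the raw-clock path reports the same value the GTOD-based path was reporting at the moment of the transition. A small user-space demonstration of that arithmetic, with made-up numbers (plain C, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up sample values taken "at the tick". */
	uint64_t tick_raw    = 5000000000ULL;	/* sched_clock()  */
	uint64_t tick_gtod   = 7000000000ULL;	/* ktime_get_ns() */
	uint64_t gtod_offset = 0;

	/* What the unstable (GTOD-based) path reports right now. */
	uint64_t before = tick_gtod + gtod_offset;

	/* Offset picked at the unstable -> stable switch. */
	uint64_t clock_offset = (tick_gtod + gtod_offset) - tick_raw;

	/* What the stable (raw-based) path reports for the same instant. */
	uint64_t after = tick_raw + clock_offset;

	printf("before=%llu after=%llu -> %s\n",
	       (unsigned long long)before, (unsigned long long)after,
	       before == after ? "continuous" : "jump");
	return 0;
}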
144 * If we ever get here, we're screwed, because we found out -- typically after
145 * the fact -- that TSC wasn't good. This means all our clocksources (including ktime) can no longer be trusted.
151 * The only way to fully avoid random clock jumps is to boot with: "tsc=unstable".
157 int cpu; in __sched_clock_work()
163 scd->clock = scd->tick_gtod + __gtod_offset; in __sched_clock_work()
170 printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'.\n"); in __sched_clock_work()
171 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n", in __sched_clock_work()
172 scd->tick_gtod, __gtod_offset, in __sched_clock_work()
173 scd->tick_raw, __sched_clock_offset); in __sched_clock_work()
204 __gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod; in __sched_clock_gtod_offset()
223 * We run this as late_initcall() such that it runs after all built-in drivers, notably acpi_processor and intel_idle, which can mark the TSC as unstable.
226 static int __init sched_clock_init_late(void) in sched_clock_init_late()
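The registration of that initializer is not among the matching lines; presumably it uses the standard initcall macro, along these lines:

#include <linux/init.h>

static int __init sched_clock_init_late(void)
{
	/* Runs after every built-in driver initcall, so anything that
	 * could have marked the TSC unstable has already had its say.
	 * Body elided in this sketch. */
	return 0;
}
late_initcall(sched_clock_init_late);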
251 return (s64)(x - y) < 0 ? x : y; in wrap_min()
256 return (s64)(x - y) > 0 ? x : y; in wrap_max()
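wrap_min()/wrap_max() compare via a signed difference, so the comparison stays correct even if the u64 values wrap around. A stand-alone user-space demo of why a plain '<' would get this wrong (plain C, not kernel code):

#include <stdio.h>
#include <stdint.h>

static uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

static uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}

int main(void)
{
	uint64_t just_before_wrap = UINT64_MAX - 10;
	uint64_t just_after_wrap  = 5;	/* logically later than the value above */

	/* A plain '<' would call 5 the smaller timestamp; the signed-difference
	 * form recognizes it as the later one across the wrap. */
	printf("wrap_max = %llu\n",
	       (unsigned long long)wrap_max(just_before_wrap, just_after_wrap));
	printf("wrap_min = %llu\n",
	       (unsigned long long)wrap_min(just_before_wrap, just_after_wrap));
	return 0;
}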
262 * - filter out backward motion
263 * - use the GTOD tick value to create a window to filter crazy TSC values
267 u64 now, clock, old_clock, min_clock, max_clock, gtod; in sched_clock_local() local
272 delta = now - scd->tick_raw; in sched_clock_local()
276 old_clock = scd->clock; in sched_clock_local()
279 * scd->clock = clamp(scd->tick_gtod + delta, in sched_clock_local()
280 * max(scd->tick_gtod, scd->clock), in sched_clock_local()
281 * scd->tick_gtod + TICK_NSEC); in sched_clock_local()
284 gtod = scd->tick_gtod + __gtod_offset; in sched_clock_local()
285 clock = gtod + delta; in sched_clock_local()
289 clock = wrap_max(clock, min_clock); in sched_clock_local()
290 clock = wrap_min(clock, max_clock); in sched_clock_local()
292 if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock) in sched_clock_local()
295 return clock; in sched_clock_local()
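Putting those helpers together, the clamp spelled out in the comment above keeps the reported clock inside the window [max(gtod, old_clock), gtod + TICK_NSEC]. A user-space walk-through with made-up numbers; TICK_NSEC here assumes HZ=1000, and wraparound handling is omitted for brevity (plain C, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 1000000ULL	/* one tick in ns, assuming HZ=1000 */

int main(void)
{
	uint64_t gtod      = 1000000000ULL;	/* scd->tick_gtod + __gtod_offset */
	uint64_t old_clock = 1000200000ULL;	/* last value handed out          */
	uint64_t delta     = 500000000ULL;	/* bogus 500 ms raw-clock delta   */

	uint64_t clock     = gtod + delta;
	/* Window per the comment above; the kernel uses wrap_min()/wrap_max()
	 * so the same logic survives u64 wraparound. */
	uint64_t min_clock = old_clock > gtod ? old_clock : gtod;  /* never backwards    */
	uint64_t max_clock = gtod + TICK_NSEC;                     /* one tick ahead max */

	if (clock < min_clock)
		clock = min_clock;
	if (clock > max_clock)
		clock = max_clock;

	printf("raw delta suggested %llu, clamped to %llu\n",
	       (unsigned long long)(gtod + delta), (unsigned long long)clock);
	return 0;
}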
307 * Careful here: The local and the remote clock values need to be read out atomically, since we compare them and then update one side. in sched_clock_remote()
313 * On 32-bit kernels an NMI could use sched_clock_local() via the tracer and hit between the readout of in sched_clock_remote()
315 * the low 32-bit and the high 32-bit portion. in sched_clock_remote()
319 * We must enforce atomic readout on 32-bit, otherwise the update on the remote CPU can hit between the readout of in sched_clock_remote()
321 * the low 32-bit and the high 32-bit portion. in sched_clock_remote()
323 remote_clock = cmpxchg64(&scd->clock, 0, 0); in sched_clock_remote()
326 * On 64-bit kernels the read of [my]scd->clock is atomic versus the in sched_clock_remote()
327 * update, so we can avoid the above 32-bit dance. in sched_clock_remote()
331 this_clock = my_scd->clock; in sched_clock_remote()
332 remote_clock = scd->clock; in sched_clock_remote()
341 if (likely((s64)(remote_clock - this_clock) < 0)) { in sched_clock_remote()
342 ptr = &scd->clock; in sched_clock_remote()
349 ptr = &my_scd->clock; in sched_clock_remote()
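The cmpxchg64(&scd->clock, 0, 0) readout deserves a note: a compare-and-swap with expected == new == 0 normally changes nothing, but it always yields one consistent 64-bit snapshot, which a pair of separate 32-bit loads cannot guarantee. A user-space sketch of the same idea using C11 atomics (not kernel code):

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

static _Atomic uint64_t remote_clock = 123456789ULL;

int main(void)
{
	/* expected == desired == 0: the stored value is left untouched
	 * (unless it really was 0), but 'expected' comes back holding a
	 * single consistent 64-bit snapshot of remote_clock. */
	uint64_t expected = 0;

	atomic_compare_exchange_strong(&remote_clock, &expected, 0);
	printf("snapshot = %llu\n", (unsigned long long)expected);
	return 0;
}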
365 u64 sched_clock_cpu(int cpu) in sched_clock_cpu()
368 u64 clock; in sched_clock_cpu() local
380 clock = sched_clock_remote(scd); in sched_clock_cpu()
382 clock = sched_clock_local(scd); in sched_clock_cpu()
385 return clock; in sched_clock_cpu()
414 * The watchdog just found this TSC to (still) be stable, so now is a good moment to update our __gtod_offset. in sched_clock_tick_stable()
424 * We are going deep-idle (irqs are disabled):
461 u64 sched_clock_cpu(int cpu) in sched_clock_cpu()
472 * Running clock - returns the time that has elapsed while a guest has been running.
477 * Architectures and sub-architectures can override this.