// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SGI RTC clock/timer routines.
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved.
 * Copyright (c) Dimitri Sivanich
 */
#include <linux/clockchips.h>
#include <linux/slab.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/cpu.h>

#define RTC_NAME		"sgi_rtc"

static u64 uv_read_rtc(struct clocksource *cs);
static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
static int uv_rtc_shutdown(struct clock_event_device *evt);

static struct clocksource clocksource_uv = {
	.name		= RTC_NAME,
	.rating		= 299,
	.read		= uv_read_rtc,
	.mask		= (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct clock_event_device clock_event_device_uv = {
	.name			= RTC_NAME,
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.shift			= 20,
	.rating			= 400,
	.irq			= -1,
	.set_next_event		= uv_rtc_next_event,
	.set_state_shutdown	= uv_rtc_shutdown,
	.event_handler		= NULL,
};

static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);

/* There is one of these allocated per node */
struct uv_rtc_timer_head {
	spinlock_t	lock;
	/* next cpu waiting for timer, local node relative: */
	int		next_cpu;
	/* number of cpus on this node: */
	int		ncpus;
	struct {
		int	lcpu;		/* systemwide logical cpu number */
		u64	expires;	/* next timer expiration for this cpu */
	} cpu[];
};

/*
 * Access to uv_rtc_timer_head via blade id.
 */
static struct uv_rtc_timer_head		**blade_info __read_mostly;

static int				uv_rtc_evt_enable;

/*
 * Hardware interface routines
 */

/* Send IPIs to another node */
static void uv_rtc_send_IPI(int cpu)
{
	unsigned long apicid, val;
	int pnode;

	apicid = cpu_physical_id(cpu);
	pnode = uv_apicid_to_pnode(apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);

	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
	return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED2) &
		UVH_EVENT_OCCURRED2_RTC_1_MASK;
}

/* Setup interrupt and return non-zero if early expiration occurred. */
static int uv_setup_intr(int cpu, u64 expires)
{
	u64 val;
	unsigned long apicid = cpu_physical_id(cpu);
	int pnode = uv_cpu_to_pnode(cpu);

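	/*
	 * Mask the RTC1 interrupt and park the comparator at its maximum
	 * value while the new expiration time is being programmed.
	 */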
	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
			      UVH_RTC1_INT_CONFIG_M_MASK);
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);

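	/* Clear any RTC1 event that is already latched for this node. */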
	uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED2_ALIAS,
			      UVH_EVENT_OCCURRED2_RTC_1_MASK);

	val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
		((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);

	/* Set configuration */
	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
	/* Initialize comparator value */
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);

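	/*
	 * If the comparator was programmed before the RTC passed it, the
	 * interrupt will fire normally.  Otherwise report early expiration,
	 * unless the interrupt already managed to latch.
	 */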
	if (uv_read_rtc(NULL) <= expires)
		return 0;

	return !uv_intr_pending(pnode);
}

/*
 * Per-cpu timer tracking routines
 */

static __init void uv_rtc_deallocate_timers(void)
{
	int bid;

	for_each_possible_blade(bid) {
		kfree(blade_info[bid]);
	}
	kfree(blade_info);
}

/* Allocate per-node list of cpu timer expiration times. */
static __init int uv_rtc_allocate_timers(void)
{
	int cpu;

	blade_info = kcalloc(uv_possible_blades, sizeof(void *), GFP_KERNEL);
	if (!blade_info)
		return -ENOMEM;

	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);
		int bid = uv_cpu_to_blade_id(cpu);
		int bcpu = uv_cpu_blade_processor_id(cpu);
		struct uv_rtc_timer_head *head = blade_info[bid];

		if (!head) {
			head = kmalloc_node(struct_size(head, cpu,
				uv_blade_nr_possible_cpus(bid)),
				GFP_KERNEL, nid);
			if (!head) {
				uv_rtc_deallocate_timers();
				return -ENOMEM;
			}
			spin_lock_init(&head->lock);
			head->ncpus = uv_blade_nr_possible_cpus(bid);
			head->next_cpu = -1;
			blade_info[bid] = head;
		}

		head->cpu[bcpu].lcpu = cpu;
		head->cpu[bcpu].expires = ULLONG_MAX;
	}

	return 0;
}

/* Find and set the next expiring timer. */
static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
{
	u64 lowest = ULLONG_MAX;
	int c, bcpu = -1;

	head->next_cpu = -1;
	for (c = 0; c < head->ncpus; c++) {
		u64 exp = head->cpu[c].expires;
		if (exp < lowest) {
			bcpu = c;
			lowest = exp;
		}
	}
	if (bcpu >= 0) {
		head->next_cpu = bcpu;
		c = head->cpu[bcpu].lcpu;
		if (uv_setup_intr(c, lowest))
			/* If we didn't set it up in time, trigger */
			uv_rtc_send_IPI(c);
	} else {
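		/* No timers are armed on this blade: mask the RTC1 interrupt. */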
		uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
				      UVH_RTC1_INT_CONFIG_M_MASK);
	}
}

/*
 * Set expiration time for current cpu.
 *
 * Returns -ETIME if we missed the expiration time.
 */
static int uv_rtc_set_timer(int cpu, u64 expires)
{
	int pnode = uv_cpu_to_pnode(cpu);
	int bid = uv_cpu_to_blade_id(cpu);
	struct uv_rtc_timer_head *head = blade_info[bid];
	int bcpu = uv_cpu_blade_processor_id(cpu);
	u64 *t = &head->cpu[bcpu].expires;
	unsigned long flags;
	int next_cpu;

	spin_lock_irqsave(&head->lock, flags);

	next_cpu = head->next_cpu;
	*t = expires;

	/* Will this one be next to go off? */
	if (next_cpu < 0 || bcpu == next_cpu ||
			expires < head->cpu[next_cpu].expires) {
		head->next_cpu = bcpu;
		if (uv_setup_intr(cpu, expires)) {
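			/*
			 * The requested time already expired while the
			 * comparator was being programmed: drop this entry,
			 * re-arm the blade for the next waiter and report
			 * the miss to the caller.
			 */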
			*t = ULLONG_MAX;
			uv_rtc_find_next_timer(head, pnode);
			spin_unlock_irqrestore(&head->lock, flags);
			return -ETIME;
		}
	}

	spin_unlock_irqrestore(&head->lock, flags);
	return 0;
}

/*
 * Unset expiration time for current cpu.
 *
 * Returns 1 if this timer was pending.
 */
static int uv_rtc_unset_timer(int cpu, int force)
{
	int pnode = uv_cpu_to_pnode(cpu);
	int bid = uv_cpu_to_blade_id(cpu);
	struct uv_rtc_timer_head *head = blade_info[bid];
	int bcpu = uv_cpu_blade_processor_id(cpu);
	u64 *t = &head->cpu[bcpu].expires;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&head->lock, flags);

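	/*
	 * The timer counts as pending if clearing was forced, or if this
	 * cpu owns the armed comparator and its expiration time has
	 * already passed.
	 */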
	if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
		rc = 1;

	if (rc) {
		*t = ULLONG_MAX;
		/* Was the hardware setup for this timer? */
		if (head->next_cpu == bcpu)
			uv_rtc_find_next_timer(head, pnode);
	}

	spin_unlock_irqrestore(&head->lock, flags);

	return rc;
}


/*
 * Kernel interface routines.
 */

/*
 * Read the RTC.
 *
 * Starting with HUB rev 2.0, the UV RTC register is replicated across all
 * cachelines of its own page.  This allows faster simultaneous reads
 * from a given socket.
 */
static u64 uv_read_rtc(struct clocksource *cs)
{
	unsigned long offset;

	if (uv_get_min_hub_revision_id() == 1)
		offset = 0;
	else
		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;

	return (u64)uv_read_local_mmr(UVH_RTC | offset);
}

/*
 * Program the next event, relative to now
 */
static int uv_rtc_next_event(unsigned long delta,
			     struct clock_event_device *ced)
{
	int ced_cpu = cpumask_first(ced->cpumask);

	return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
}

/*
 * Shutdown the RTC timer
 */
static int uv_rtc_shutdown(struct clock_event_device *evt)
{
	int ced_cpu = cpumask_first(evt->cpumask);

	uv_rtc_unset_timer(ced_cpu, 1);
	return 0;
}

static void uv_rtc_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);

	if (!ced || !ced->event_handler)
		return;

	if (uv_rtc_unset_timer(cpu, 0) != 1)
		return;

	ced->event_handler(ced);
}

static int __init uv_enable_evt_rtc(char *str)
{
	uv_rtc_evt_enable = 1;

	return 1;
}
__setup("uvrtcevt", uv_enable_evt_rtc);

static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
{
	struct clock_event_device *ced = this_cpu_ptr(&cpu_ced);

	*ced = clock_event_device_uv;
	ced->cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(ced);
}

static __init int uv_rtc_setup_clock(void)
{
	int rc;

	if (!is_uv_system())
		return -ENODEV;

	rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second);
	if (rc)
		printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
	else
		printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n",
			sn_rtc_cycles_per_second/(unsigned long)1E6);

	if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback)
		return rc;

	/* Setup and register clockevents */
	rc = uv_rtc_allocate_timers();
	if (rc)
		goto error;

	x86_platform_ipi_callback = uv_rtc_interrupt;

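	/*
	 * Convert the RTC frequency into the fixed-point mult factor that
	 * pairs with the pre-set shift of 20, and derive the minimum and
	 * maximum programmable deltas from the same frequency.
	 */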
	clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
				NSEC_PER_SEC, clock_event_device_uv.shift);

	clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
						sn_rtc_cycles_per_second;
	clock_event_device_uv.min_delta_ticks = 1;

	clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
				(NSEC_PER_SEC / sn_rtc_cycles_per_second);
	clock_event_device_uv.max_delta_ticks = clocksource_uv.mask;

	rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
	if (rc) {
		x86_platform_ipi_callback = NULL;
		uv_rtc_deallocate_timers();
		goto error;
	}

	printk(KERN_INFO "UV RTC clockevents registered\n");

	return 0;

error:
	clocksource_unregister(&clocksource_uv);
	printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc);

	return rc;
}
arch_initcall(uv_rtc_setup_clock);