1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 * Copyright (C) 2005-2006 Thomas Gleixner
5 *
6 * This file contains driver APIs to the irq subsystem.
7 */
8
9 #define pr_fmt(fmt) "genirq: " fmt
10
11 #include <linux/irq.h>
12 #include <linux/kthread.h>
13 #include <linux/module.h>
14 #include <linux/random.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/isolation.h>
22 #include <uapi/linux/sched/types.h>
23 #include <linux/task_work.h>
24
25 #include "internals.h"
26
27 #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
28 __read_mostly bool force_irqthreads;
29 EXPORT_SYMBOL_GPL(force_irqthreads);
30
31 static int __init setup_forced_irqthreads(char *arg)
32 {
33 force_irqthreads = true;
34 return 0;
35 }
36 early_param("threadirqs", setup_forced_irqthreads);
37 #endif
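
/*
 * Illustrative note (not part of the original source): when
 * CONFIG_IRQ_FORCED_THREADING=y, forced threading of interrupt handlers
 * is requested with the existing "threadirqs" parameter on the kernel
 * command line, for example:
 *
 *	linux ... threadirqs
 *
 * On PREEMPT_RT kernels forced threading is in effect unconditionally,
 * which is why this knob is compiled out there.
 */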
38
39 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
40 {
41 struct irq_data *irqd = irq_desc_get_irq_data(desc);
42 bool inprogress;
43
44 do {
45 unsigned long flags;
46
47 /*
48 * Wait until we're out of the critical section. This might
49 * give the wrong answer due to the lack of memory barriers.
50 */
51 while (irqd_irq_inprogress(&desc->irq_data))
52 cpu_relax();
53
54 /* Ok, that indicated we're done: double-check carefully. */
55 raw_spin_lock_irqsave(&desc->lock, flags);
56 inprogress = irqd_irq_inprogress(&desc->irq_data);
57
58 /*
59 * If requested and supported, check at the chip whether it
60 * is in flight at the hardware level, i.e. already pending
61 * in a CPU and waiting for service and acknowledge.
62 */
63 if (!inprogress && sync_chip) {
64 /*
65 * Ignore the return code. inprogress is only updated
66 * when the chip supports it.
67 */
68 __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
69 &inprogress);
70 }
71 raw_spin_unlock_irqrestore(&desc->lock, flags);
72
73 /* Oops, that failed? */
74 } while (inprogress);
75 }
76
77 /**
78 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
79 * @irq: interrupt number to wait for
80 *
81 * This function waits for any pending hard IRQ handlers for this
82 * interrupt to complete before returning. If you use this
83 * function while holding a resource the IRQ handler may need you
84 * will deadlock. It does not take associated threaded handlers
85 * into account.
86 *
87 * Do not use this for shutdown scenarios where you must be sure
88 * that all parts (hardirq and threaded handler) have completed.
89 *
90 * Returns: false if a threaded handler is active.
91 *
92 * This function may be called - with care - from IRQ context.
93 *
94 * It does not check whether there is an interrupt in flight at the
95 * hardware level, but not serviced yet, as this might deadlock when
96 * called with interrupts disabled and the target CPU of the interrupt
97 * is the current CPU.
98 */
99 bool synchronize_hardirq(unsigned int irq)
100 {
101 struct irq_desc *desc = irq_to_desc(irq);
102
103 if (desc) {
104 __synchronize_hardirq(desc, false);
105 return !atomic_read(&desc->threads_active);
106 }
107
108 return true;
109 }
110 EXPORT_SYMBOL(synchronize_hardirq);
111
112 /**
113 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
114 * @irq: interrupt number to wait for
115 *
116 * This function waits for any pending IRQ handlers for this interrupt
117 * to complete before returning. If you use this function while
118 * holding a resource the IRQ handler may need you will deadlock.
119 *
120 * Can only be called from preemptible code as it might sleep when
121 * an interrupt thread is associated to @irq.
122 *
123 * It optionally makes sure (when the irq chip supports that method)
124 * that the interrupt is not pending in any CPU and waiting for
125 * service.
126 */
127 void synchronize_irq(unsigned int irq)
128 {
129 struct irq_desc *desc = irq_to_desc(irq);
130
131 if (desc) {
132 __synchronize_hardirq(desc, true);
133 /*
134 * We made sure that no hardirq handler is
135 * running. Now verify that no threaded handlers are
136 * active.
137 */
138 wait_event(desc->wait_for_threads,
139 !atomic_read(&desc->threads_active));
140 }
141 }
142 EXPORT_SYMBOL(synchronize_irq);
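
/*
 * Illustrative sketch (not part of the original source): a common driver
 * teardown pattern built on synchronize_irq(). Everything named foo_* is
 * hypothetical; only the irq APIs are real.
 *
 *	static void foo_stop(struct foo_dev *fd)
 *	{
 *		foo_mask_device_irqs(fd);	// stop new interrupts at the device
 *		synchronize_irq(fd->irq);	// wait for hardirq + threaded handlers
 *		foo_free_rings(fd);		// now safe to free what handlers used
 *	}
 *
 * synchronize_irq() may sleep, so it must not be called from the handler
 * itself or while holding a lock which that handler takes.
 */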
143
144 #ifdef CONFIG_SMP
145 cpumask_var_t irq_default_affinity;
146
147 static bool __irq_can_set_affinity(struct irq_desc *desc)
148 {
149 if (!desc || !irqd_can_balance(&desc->irq_data) ||
150 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
151 return false;
152 return true;
153 }
154
155 /**
156 * irq_can_set_affinity - Check if the affinity of a given irq can be set
157 * @irq: Interrupt to check
158 *
159 */
160 int irq_can_set_affinity(unsigned int irq)
161 {
162 return __irq_can_set_affinity(irq_to_desc(irq));
163 }
164
165 /**
166 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
167 * @irq: Interrupt to check
168 *
169 * Like irq_can_set_affinity() above, but additionally checks for the
170 * AFFINITY_MANAGED flag.
171 */
172 bool irq_can_set_affinity_usr(unsigned int irq)
173 {
174 struct irq_desc *desc = irq_to_desc(irq);
175
176 return __irq_can_set_affinity(desc) &&
177 !irqd_affinity_is_managed(&desc->irq_data);
178 }
179
180 /**
181 * irq_set_thread_affinity - Notify irq threads to adjust affinity
182 * @desc: irq descriptor which has affinity changed
183 *
184 * We just set IRQTF_AFFINITY and delegate the affinity setting
185 * to the interrupt thread itself. We can not call
186 * set_cpus_allowed_ptr() here as we hold desc->lock and this
187 * code can be called from hard interrupt context.
188 */
189 void irq_set_thread_affinity(struct irq_desc *desc)
190 {
191 struct irqaction *action;
192
193 for_each_action_of_desc(desc, action)
194 if (action->thread)
195 set_bit(IRQTF_AFFINITY, &action->thread_flags);
196 }
197
198 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
199 static void irq_validate_effective_affinity(struct irq_data *data)
200 {
201 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
202 struct irq_chip *chip = irq_data_get_irq_chip(data);
203
204 if (!cpumask_empty(m))
205 return;
206 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
207 chip->name, data->irq);
208 }
209
210 static inline void irq_init_effective_affinity(struct irq_data *data,
211 const struct cpumask *mask)
212 {
213 cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
214 }
215 #else
216 static inline void irq_validate_effective_affinity(struct irq_data *data) { }
217 static inline void irq_init_effective_affinity(struct irq_data *data,
218 const struct cpumask *mask) { }
219 #endif
220
221 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
222 bool force)
223 {
224 struct irq_desc *desc = irq_data_to_desc(data);
225 struct irq_chip *chip = irq_data_get_irq_chip(data);
226 const struct cpumask *prog_mask;
227 int ret;
228
229 static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
230 static struct cpumask tmp_mask;
231
232 if (!chip || !chip->irq_set_affinity)
233 return -EINVAL;
234
235 raw_spin_lock(&tmp_mask_lock);
236 /*
237 * If this is a managed interrupt and housekeeping is enabled on
238 * it check whether the requested affinity mask intersects with
239 * a housekeeping CPU. If so, then remove the isolated CPUs from
240 * the mask and just keep the housekeeping CPU(s). This prevents
241 * the affinity setter from routing the interrupt to an isolated
242 * CPU, so that I/O submitted from a housekeeping CPU does not cause
243 * interrupts on an isolated one.
244 *
245 * If the masks do not intersect or include online CPU(s) then
246 * keep the requested mask. The isolated target CPUs are only
247 * receiving interrupts when the I/O operation was submitted
248 * directly from them.
249 *
250 * If all housekeeping CPUs in the affinity mask are offline, the
251 * interrupt will be migrated by the CPU hotplug code once a
252 * housekeeping CPU which belongs to the affinity mask comes
253 * online.
254 */
255 if (irqd_affinity_is_managed(data) &&
256 housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
257 const struct cpumask *hk_mask;
258
259 hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
260
261 cpumask_and(&tmp_mask, mask, hk_mask);
262 if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
263 prog_mask = mask;
264 else
265 prog_mask = &tmp_mask;
266 } else {
267 prog_mask = mask;
268 }
269
270 /*
271 * Make sure we only provide online CPUs to the irqchip,
272 * unless we are being asked to force the affinity (in which
273 * case we do as we are told).
274 */
275 cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
276 if (!force && !cpumask_empty(&tmp_mask))
277 ret = chip->irq_set_affinity(data, &tmp_mask, force);
278 else if (force)
279 ret = chip->irq_set_affinity(data, mask, force);
280 else
281 ret = -EINVAL;
282
283 raw_spin_unlock(&tmp_mask_lock);
284
285 switch (ret) {
286 case IRQ_SET_MASK_OK:
287 case IRQ_SET_MASK_OK_DONE:
288 cpumask_copy(desc->irq_common_data.affinity, mask);
289 fallthrough;
290 case IRQ_SET_MASK_OK_NOCOPY:
291 irq_validate_effective_affinity(data);
292 irq_set_thread_affinity(desc);
293 ret = 0;
294 }
295
296 return ret;
297 }
298 EXPORT_SYMBOL_GPL(irq_do_set_affinity);
299
300 #ifdef CONFIG_GENERIC_PENDING_IRQ
301 static inline int irq_set_affinity_pending(struct irq_data *data,
302 const struct cpumask *dest)
303 {
304 struct irq_desc *desc = irq_data_to_desc(data);
305
306 irqd_set_move_pending(data);
307 irq_copy_pending(desc, dest);
308 return 0;
309 }
310 #else
311 static inline int irq_set_affinity_pending(struct irq_data *data,
312 const struct cpumask *dest)
313 {
314 return -EBUSY;
315 }
316 #endif
317
318 static int irq_try_set_affinity(struct irq_data *data,
319 const struct cpumask *dest, bool force)
320 {
321 int ret = irq_do_set_affinity(data, dest, force);
322
323 /*
324 * In case that the underlying vector management is busy and the
325 * architecture supports the generic pending mechanism then utilize
326 * this to avoid returning an error to user space.
327 */
328 if (ret == -EBUSY && !force)
329 ret = irq_set_affinity_pending(data, dest);
330 return ret;
331 }
332
333 static bool irq_set_affinity_deactivated(struct irq_data *data,
334 const struct cpumask *mask, bool force)
335 {
336 struct irq_desc *desc = irq_data_to_desc(data);
337
338 /*
339 * Handle irq chips which can handle affinity only in activated
340 * state correctly
341 *
342 * If the interrupt is not yet activated, just store the affinity
343 * mask and do not call the chip driver at all. On activation the
344 * driver has to make sure anyway that the interrupt is in a
345 * usable state so startup works.
346 */
347 if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
348 irqd_is_activated(data) || !irqd_affinity_on_activate(data))
349 return false;
350
351 cpumask_copy(desc->irq_common_data.affinity, mask);
352 irq_init_effective_affinity(data, mask);
353 irqd_set(data, IRQD_AFFINITY_SET);
354 return true;
355 }
356
357 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
358 bool force)
359 {
360 struct irq_chip *chip = irq_data_get_irq_chip(data);
361 struct irq_desc *desc = irq_data_to_desc(data);
362 int ret = 0;
363
364 if (!chip || !chip->irq_set_affinity)
365 return -EINVAL;
366
367 if (irq_set_affinity_deactivated(data, mask, force))
368 return 0;
369
370 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
371 ret = irq_try_set_affinity(data, mask, force);
372 } else {
373 irqd_set_move_pending(data);
374 irq_copy_pending(desc, mask);
375 }
376
377 if (desc->affinity_notify) {
378 kref_get(&desc->affinity_notify->kref);
379 if (!schedule_work(&desc->affinity_notify->work)) {
380 /* Work was already scheduled, drop our extra ref */
381 kref_put(&desc->affinity_notify->kref,
382 desc->affinity_notify->release);
383 }
384 }
385 irqd_set(data, IRQD_AFFINITY_SET);
386
387 return ret;
388 }
389
390 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
391 {
392 struct irq_desc *desc = irq_to_desc(irq);
393 unsigned long flags;
394 int ret;
395
396 if (!desc)
397 return -EINVAL;
398
399 raw_spin_lock_irqsave(&desc->lock, flags);
400 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
401 raw_spin_unlock_irqrestore(&desc->lock, flags);
402 return ret;
403 }
404
405 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
406 {
407 unsigned long flags;
408 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
409
410 if (!desc)
411 return -EINVAL;
412 desc->affinity_hint = m;
413 irq_put_desc_unlock(desc, flags);
414 /* set the initial affinity to prevent every interrupt being on CPU0 */
415 if (m)
416 __irq_set_affinity(irq, m, false);
417 return 0;
418 }
419 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
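
/*
 * Illustrative sketch (not part of the original source): a multi-queue
 * driver spreading its vectors by hinting one CPU per queue. "nic" and
 * its members are hypothetical.
 *
 *	for (i = 0; i < nic->num_queues; i++)
 *		irq_set_affinity_hint(nic->queue_irq[i],
 *				      cpumask_of(i % num_online_cpus()));
 *
 * The hint must be cleared with irq_set_affinity_hint(irq, NULL) before
 * the interrupt is freed; __free_irq() below warns if a stale
 * affinity_hint is left behind.
 */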
420
421 static void irq_affinity_notify(struct work_struct *work)
422 {
423 struct irq_affinity_notify *notify =
424 container_of(work, struct irq_affinity_notify, work);
425 struct irq_desc *desc = irq_to_desc(notify->irq);
426 cpumask_var_t cpumask;
427 unsigned long flags;
428
429 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
430 goto out;
431
432 raw_spin_lock_irqsave(&desc->lock, flags);
433 if (irq_move_pending(&desc->irq_data))
434 irq_get_pending(cpumask, desc);
435 else
436 cpumask_copy(cpumask, desc->irq_common_data.affinity);
437 raw_spin_unlock_irqrestore(&desc->lock, flags);
438
439 notify->notify(notify, cpumask);
440
441 free_cpumask_var(cpumask);
442 out:
443 kref_put(&notify->kref, notify->release);
444 }
445
446 /**
447 * irq_set_affinity_notifier - control notification of IRQ affinity changes
448 * @irq: Interrupt for which to enable/disable notification
449 * @notify: Context for notification, or %NULL to disable
450 * notification. Function pointers must be initialised;
451 * the other fields will be initialised by this function.
452 *
453 * Must be called in process context. Notification may only be enabled
454 * after the IRQ is allocated and must be disabled before the IRQ is
455 * freed using free_irq().
456 */
457 int
458 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
459 {
460 struct irq_desc *desc = irq_to_desc(irq);
461 struct irq_affinity_notify *old_notify;
462 unsigned long flags;
463
464 /* The release function is promised process context */
465 might_sleep();
466
467 if (!desc || desc->istate & IRQS_NMI)
468 return -EINVAL;
469
470 /* Complete initialisation of *notify */
471 if (notify) {
472 notify->irq = irq;
473 kref_init(&notify->kref);
474 INIT_WORK(&notify->work, irq_affinity_notify);
475 }
476
477 raw_spin_lock_irqsave(&desc->lock, flags);
478 old_notify = desc->affinity_notify;
479 desc->affinity_notify = notify;
480 raw_spin_unlock_irqrestore(&desc->lock, flags);
481
482 if (old_notify) {
483 if (cancel_work_sync(&old_notify->work)) {
484 /* Pending work had a ref, put that one too */
485 kref_put(&old_notify->kref, old_notify->release);
486 }
487 kref_put(&old_notify->kref, old_notify->release);
488 }
489
490 return 0;
491 }
492 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
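
/*
 * Illustrative sketch (not part of the original source): a driver
 * registering for affinity change notifications. Only ->notify and
 * ->release are filled in by the caller; irq, kref and work are
 * initialised by irq_set_affinity_notifier(). foo_* names are
 * hypothetical.
 *
 *	fd->affinity_notify.notify  = foo_irq_affinity_changed;
 *	fd->affinity_notify.release = foo_irq_affinity_release;
 *	irq_set_affinity_notifier(fd->irq, &fd->affinity_notify);
 *
 *	// and before free_irq():
 *	irq_set_affinity_notifier(fd->irq, NULL);
 */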
493
494 #ifndef CONFIG_AUTO_IRQ_AFFINITY
495 /*
496 * Generic version of the affinity autoselector.
497 */
498 int irq_setup_affinity(struct irq_desc *desc)
499 {
500 struct cpumask *set = irq_default_affinity;
501 int ret, node = irq_desc_get_node(desc);
502 static DEFINE_RAW_SPINLOCK(mask_lock);
503 static struct cpumask mask;
504
505 /* Excludes PER_CPU and NO_BALANCE interrupts */
506 if (!__irq_can_set_affinity(desc))
507 return 0;
508
509 raw_spin_lock(&mask_lock);
510 /*
511 * Preserve the managed affinity setting and a userspace affinity
512 * setup, but make sure that one of the targets is online.
513 */
514 if (irqd_affinity_is_managed(&desc->irq_data) ||
515 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
516 if (cpumask_intersects(desc->irq_common_data.affinity,
517 cpu_online_mask))
518 set = desc->irq_common_data.affinity;
519 else
520 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
521 }
522
523 cpumask_and(&mask, cpu_online_mask, set);
524 if (cpumask_empty(&mask))
525 cpumask_copy(&mask, cpu_online_mask);
526
527 if (node != NUMA_NO_NODE) {
528 const struct cpumask *nodemask = cpumask_of_node(node);
529
530 /* make sure at least one of the cpus in nodemask is online */
531 if (cpumask_intersects(&mask, nodemask))
532 cpumask_and(&mask, &mask, nodemask);
533 }
534 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
535 raw_spin_unlock(&mask_lock);
536 return ret;
537 }
538 #else
539 /* Wrapper for ALPHA specific affinity selector magic */
540 int irq_setup_affinity(struct irq_desc *desc)
541 {
542 return irq_select_affinity(irq_desc_get_irq(desc));
543 }
544 #endif /* CONFIG_AUTO_IRQ_AFFINITY */
545 #endif /* CONFIG_SMP */
546
547
548 /**
549 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
550 * @irq: interrupt number to set affinity
551 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
552 * specific data for percpu_devid interrupts
553 *
554 * This function uses the vCPU specific data to set the vCPU
555 * affinity for an irq. The vCPU specific data is passed from
556 * outside, such as KVM. One example code path is as below:
557 * KVM -> IOMMU -> irq_set_vcpu_affinity().
558 */
559 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
560 {
561 unsigned long flags;
562 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
563 struct irq_data *data;
564 struct irq_chip *chip;
565 int ret = -ENOSYS;
566
567 if (!desc)
568 return -EINVAL;
569
570 data = irq_desc_get_irq_data(desc);
571 do {
572 chip = irq_data_get_irq_chip(data);
573 if (chip && chip->irq_set_vcpu_affinity)
574 break;
575 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
576 data = data->parent_data;
577 #else
578 data = NULL;
579 #endif
580 } while (data);
581
582 if (data)
583 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
584 irq_put_desc_unlock(desc, flags);
585
586 return ret;
587 }
588 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
589
590 void __disable_irq(struct irq_desc *desc)
591 {
592 if (!desc->depth++)
593 irq_disable(desc);
594 }
595
596 static int __disable_irq_nosync(unsigned int irq)
597 {
598 unsigned long flags;
599 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
600
601 if (!desc)
602 return -EINVAL;
603 __disable_irq(desc);
604 irq_put_desc_busunlock(desc, flags);
605 return 0;
606 }
607
608 /**
609 * disable_irq_nosync - disable an irq without waiting
610 * @irq: Interrupt to disable
611 *
612 * Disable the selected interrupt line. Disables and Enables are
613 * nested.
614 * Unlike disable_irq(), this function does not ensure existing
615 * instances of the IRQ handler have completed before returning.
616 *
617 * This function may be called from IRQ context.
618 */
619 void disable_irq_nosync(unsigned int irq)
620 {
621 __disable_irq_nosync(irq);
622 }
623 EXPORT_SYMBOL(disable_irq_nosync);
624
625 /**
626 * disable_irq - disable an irq and wait for completion
627 * @irq: Interrupt to disable
628 *
629 * Disable the selected interrupt line. Enables and Disables are
630 * nested.
631 * This function waits for any pending IRQ handlers for this interrupt
632 * to complete before returning. If you use this function while
633 * holding a resource the IRQ handler may need you will deadlock.
634 *
635 * This function may be called - with care - from IRQ context.
636 */
637 void disable_irq(unsigned int irq)
638 {
639 if (!__disable_irq_nosync(irq))
640 synchronize_irq(irq);
641 }
642 EXPORT_SYMBOL(disable_irq);
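
/*
 * Illustrative sketch (not part of the original source): disable_irq()
 * and enable_irq() nest, so a driver can bracket a slow reconfiguration:
 *
 *	disable_irq(fd->irq);		// returns after running handlers finish
 *	foo_reprogram_hw(fd);		// hypothetical helper
 *	enable_irq(fd->irq);
 *
 * Since disable_irq() ends up in synchronize_irq() it may sleep; from
 * atomic context use disable_irq_nosync() or disable_hardirq() instead.
 */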
643
644 /**
645 * disable_hardirq - disables an irq and waits for hardirq completion
646 * @irq: Interrupt to disable
647 *
648 * Disable the selected interrupt line. Enables and Disables are
649 * nested.
650 * This function waits for any pending hard IRQ handlers for this
651 * interrupt to complete before returning. If you use this function while
652 * holding a resource the hard IRQ handler may need you will deadlock.
653 *
654 * When used to optimistically disable an interrupt from atomic context
655 * the return value must be checked.
656 *
657 * Returns: false if a threaded handler is active.
658 *
659 * This function may be called - with care - from IRQ context.
660 */
661 bool disable_hardirq(unsigned int irq)
662 {
663 if (!__disable_irq_nosync(irq))
664 return synchronize_hardirq(irq);
665
666 return false;
667 }
668 EXPORT_SYMBOL_GPL(disable_hardirq);
669
670 /**
671 * disable_nmi_nosync - disable an nmi without waiting
672 * @irq: Interrupt to disable
673 *
674 * Disable the selected interrupt line. Disables and enables are
675 * nested.
676 * The interrupt to disable must have been requested through request_nmi.
677 * Unlike disable_nmi(), this function does not ensure existing
678 * instances of the IRQ handler have completed before returning.
679 */
680 void disable_nmi_nosync(unsigned int irq)
681 {
682 disable_irq_nosync(irq);
683 }
684
685 void __enable_irq(struct irq_desc *desc)
686 {
687 switch (desc->depth) {
688 case 0:
689 err_out:
690 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
691 irq_desc_get_irq(desc));
692 break;
693 case 1: {
694 if (desc->istate & IRQS_SUSPENDED)
695 goto err_out;
696 /* Prevent probing on this irq: */
697 irq_settings_set_noprobe(desc);
698 /*
699 * Call irq_startup() not irq_enable() here because the
700 * interrupt might be marked NOAUTOEN. So irq_startup()
701 * needs to be invoked when it gets enabled the first
702 * time. If it was already started up, then irq_startup()
703 * will invoke irq_enable() under the hood.
704 */
705 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
706 break;
707 }
708 default:
709 desc->depth--;
710 }
711 }
712
713 /**
714 * enable_irq - enable handling of an irq
715 * @irq: Interrupt to enable
716 *
717 * Undoes the effect of one call to disable_irq(). If this
718 * matches the last disable, processing of interrupts on this
719 * IRQ line is re-enabled.
720 *
721 * This function may be called from IRQ context only when
722 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
723 */
724 void enable_irq(unsigned int irq)
725 {
726 unsigned long flags;
727 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
728
729 if (!desc)
730 return;
731 if (WARN(!desc->irq_data.chip,
732 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
733 goto out;
734
735 __enable_irq(desc);
736 out:
737 irq_put_desc_busunlock(desc, flags);
738 }
739 EXPORT_SYMBOL(enable_irq);
740
741 /**
742 * enable_nmi - enable handling of an nmi
743 * @irq: Interrupt to enable
744 *
745 * The interrupt to enable must have been requested through request_nmi.
746 * Undoes the effect of one call to disable_nmi(). If this
747 * matches the last disable, processing of interrupts on this
748 * IRQ line is re-enabled.
749 */
750 void enable_nmi(unsigned int irq)
751 {
752 enable_irq(irq);
753 }
754
755 static int set_irq_wake_real(unsigned int irq, unsigned int on)
756 {
757 struct irq_desc *desc = irq_to_desc(irq);
758 int ret = -ENXIO;
759
760 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
761 return 0;
762
763 if (desc->irq_data.chip->irq_set_wake)
764 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
765
766 return ret;
767 }
768
769 /**
770 * irq_set_irq_wake - control irq power management wakeup
771 * @irq: interrupt to control
772 * @on: enable/disable power management wakeup
773 *
774 * Enable/disable power management wakeup mode, which is
775 * disabled by default. Enables and disables must match,
776 * just as they match for non-wakeup mode support.
777 *
778 * Wakeup mode lets this IRQ wake the system from sleep
779 * states like "suspend to RAM".
780 *
781 * Note: irq enable/disable state is completely orthogonal
782 * to the enable/disable state of irq wake. An irq can be
783 * disabled with disable_irq() and still wake the system as
784 * long as the irq has wake enabled. If this does not hold,
785 * then the underlying irq chip and the related driver need
786 * to be investigated.
787 */
788 int irq_set_irq_wake(unsigned int irq, unsigned int on)
789 {
790 unsigned long flags;
791 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
792 int ret = 0;
793
794 if (!desc)
795 return -EINVAL;
796
797 /* Don't use NMIs as wake up interrupts please */
798 if (desc->istate & IRQS_NMI) {
799 ret = -EINVAL;
800 goto out_unlock;
801 }
802
803 /* wakeup-capable irqs can be shared between drivers that
804 * don't need to have the same sleep mode behaviors.
805 */
806 if (on) {
807 if (desc->wake_depth++ == 0) {
808 ret = set_irq_wake_real(irq, on);
809 if (ret)
810 desc->wake_depth = 0;
811 else
812 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
813 }
814 } else {
815 if (desc->wake_depth == 0) {
816 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
817 } else if (--desc->wake_depth == 0) {
818 ret = set_irq_wake_real(irq, on);
819 if (ret)
820 desc->wake_depth = 1;
821 else
822 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
823 }
824 }
825
826 out_unlock:
827 irq_put_desc_busunlock(desc, flags);
828 return ret;
829 }
830 EXPORT_SYMBOL(irq_set_irq_wake);
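
/*
 * Illustrative sketch (not part of the original source): a driver making
 * its interrupt a wakeup source across suspend. Drivers normally use the
 * enable_irq_wake()/disable_irq_wake() wrappers around this function.
 * foo_* names are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(fd->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(fd->irq);
 *		return 0;
 *	}
 */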
831
832 /*
833 * Internal function that tells the architecture code whether a
834 * particular irq has been exclusively allocated or is available
835 * for driver use.
836 */
837 int can_request_irq(unsigned int irq, unsigned long irqflags)
838 {
839 unsigned long flags;
840 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
841 int canrequest = 0;
842
843 if (!desc)
844 return 0;
845
846 if (irq_settings_can_request(desc)) {
847 if (!desc->action ||
848 irqflags & desc->action->flags & IRQF_SHARED)
849 canrequest = 1;
850 }
851 irq_put_desc_unlock(desc, flags);
852 return canrequest;
853 }
854
855 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
856 {
857 struct irq_chip *chip = desc->irq_data.chip;
858 int ret, unmask = 0;
859
860 if (!chip || !chip->irq_set_type) {
861 /*
862 * IRQF_TRIGGER_* but the PIC does not support multiple
863 * flow-types?
864 */
865 pr_debug("No set_type function for IRQ %d (%s)\n",
866 irq_desc_get_irq(desc),
867 chip ? (chip->name ? : "unknown") : "unknown");
868 return 0;
869 }
870
871 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
872 if (!irqd_irq_masked(&desc->irq_data))
873 mask_irq(desc);
874 if (!irqd_irq_disabled(&desc->irq_data))
875 unmask = 1;
876 }
877
878 /* Mask all flags except trigger mode */
879 flags &= IRQ_TYPE_SENSE_MASK;
880 ret = chip->irq_set_type(&desc->irq_data, flags);
881
882 switch (ret) {
883 case IRQ_SET_MASK_OK:
884 case IRQ_SET_MASK_OK_DONE:
885 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
886 irqd_set(&desc->irq_data, flags);
887 fallthrough;
888
889 case IRQ_SET_MASK_OK_NOCOPY:
890 flags = irqd_get_trigger_type(&desc->irq_data);
891 irq_settings_set_trigger_mask(desc, flags);
892 irqd_clear(&desc->irq_data, IRQD_LEVEL);
893 irq_settings_clr_level(desc);
894 if (flags & IRQ_TYPE_LEVEL_MASK) {
895 irq_settings_set_level(desc);
896 irqd_set(&desc->irq_data, IRQD_LEVEL);
897 }
898
899 ret = 0;
900 break;
901 default:
902 pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
903 flags, irq_desc_get_irq(desc), chip->irq_set_type);
904 }
905 if (unmask)
906 unmask_irq(desc);
907 return ret;
908 }
909
910 #ifdef CONFIG_HARDIRQS_SW_RESEND
911 int irq_set_parent(int irq, int parent_irq)
912 {
913 unsigned long flags;
914 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
915
916 if (!desc)
917 return -EINVAL;
918
919 desc->parent_irq = parent_irq;
920
921 irq_put_desc_unlock(desc, flags);
922 return 0;
923 }
924 EXPORT_SYMBOL_GPL(irq_set_parent);
925 #endif
926
927 /*
928 * Default primary interrupt handler for threaded interrupts. Is
929 * assigned as primary handler when request_threaded_irq is called
930 * with handler == NULL. Useful for oneshot interrupts.
931 */
932 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
933 {
934 return IRQ_WAKE_THREAD;
935 }
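
/*
 * Illustrative sketch (not part of the original source): this default
 * primary handler is what a driver implicitly gets when it requests a
 * purely threaded interrupt, e.g.:
 *
 *	ret = request_threaded_irq(fd->irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", fd);
 *
 * With handler == NULL, __setup_irq() below insists on IRQF_ONESHOT
 * unless the irq chip is flagged IRQCHIP_ONESHOT_SAFE, so the line stays
 * masked until the hypothetical foo_thread_fn() has completed.
 */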
936
937 /*
938 * Primary handler for nested threaded interrupts. Should never be
939 * called.
940 */
941 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
942 {
943 WARN(1, "Primary handler called for nested irq %d\n", irq);
944 return IRQ_NONE;
945 }
946
947 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
948 {
949 WARN(1, "Secondary action handler called for irq %d\n", irq);
950 return IRQ_NONE;
951 }
952
953 static int irq_wait_for_interrupt(struct irqaction *action)
954 {
955 for (;;) {
956 set_current_state(TASK_INTERRUPTIBLE);
957
958 if (kthread_should_stop()) {
959 /* may need to run one last time */
960 if (test_and_clear_bit(IRQTF_RUNTHREAD,
961 &action->thread_flags)) {
962 __set_current_state(TASK_RUNNING);
963 return 0;
964 }
965 __set_current_state(TASK_RUNNING);
966 return -1;
967 }
968
969 if (test_and_clear_bit(IRQTF_RUNTHREAD,
970 &action->thread_flags)) {
971 __set_current_state(TASK_RUNNING);
972 return 0;
973 }
974 schedule();
975 }
976 }
977
978 /*
979 * Oneshot interrupts keep the irq line masked until the threaded
980 * handler finished. unmask if the interrupt has not been disabled and
981 * is marked MASKED.
982 */
983 static void irq_finalize_oneshot(struct irq_desc *desc,
984 struct irqaction *action)
985 {
986 if (!(desc->istate & IRQS_ONESHOT) ||
987 action->handler == irq_forced_secondary_handler)
988 return;
989 again:
990 chip_bus_lock(desc);
991 raw_spin_lock_irq(&desc->lock);
992
993 /*
994 * Implausible though it may be, we need to protect ourselves against
995 * the following scenario:
996 *
997 * The thread is faster done than the hard interrupt handler
998 * on the other CPU. If we unmask the irq line then the
999 * interrupt can come in again and masks the line, leaves due
1000 * to IRQS_INPROGRESS and the irq line is masked forever.
1001 *
1002 * This also serializes the state of shared oneshot handlers
1003 * versus "desc->threads_oneshot |= action->thread_mask;" in
1004 * irq_wake_thread(). See the comment there which explains the
1005 * serialization.
1006 */
1007 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
1008 raw_spin_unlock_irq(&desc->lock);
1009 chip_bus_sync_unlock(desc);
1010 cpu_relax();
1011 goto again;
1012 }
1013
1014 /*
1015 * Now check again, whether the thread should run. Otherwise
1016 * we would clear the threads_oneshot bit of this thread which
1017 * was just set.
1018 */
1019 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1020 goto out_unlock;
1021
1022 desc->threads_oneshot &= ~action->thread_mask;
1023
1024 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1025 irqd_irq_masked(&desc->irq_data))
1026 unmask_threaded_irq(desc);
1027
1028 out_unlock:
1029 raw_spin_unlock_irq(&desc->lock);
1030 chip_bus_sync_unlock(desc);
1031 }
1032
1033 #ifdef CONFIG_SMP
1034 /*
1035 * Check whether we need to change the affinity of the interrupt thread.
1036 */
1037 static void
1038 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1039 {
1040 cpumask_var_t mask;
1041 bool valid = true;
1042
1043 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1044 return;
1045
1046 /*
1047 * In case we are out of memory we set IRQTF_AFFINITY again and
1048 * try again next time
1049 */
1050 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1051 set_bit(IRQTF_AFFINITY, &action->thread_flags);
1052 return;
1053 }
1054
1055 raw_spin_lock_irq(&desc->lock);
1056 /*
1057 * This code is triggered unconditionally. Check the affinity
1058 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
1059 */
1060 if (cpumask_available(desc->irq_common_data.affinity)) {
1061 const struct cpumask *m;
1062
1063 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1064 cpumask_copy(mask, m);
1065 } else {
1066 valid = false;
1067 }
1068 raw_spin_unlock_irq(&desc->lock);
1069
1070 if (valid)
1071 set_cpus_allowed_ptr(current, mask);
1072 free_cpumask_var(mask);
1073 }
1074 #else
1075 static inline void
1076 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1077 #endif
1078
1079 /*
1080 * Interrupts which are not explicitly requested as threaded
1081 * interrupts rely on the implicit bh/preempt disable of the hard irq
1082 * context. So we need to disable bh here to avoid deadlocks and other
1083 * side effects.
1084 */
1085 static irqreturn_t
1086 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1087 {
1088 irqreturn_t ret;
1089
1090 local_bh_disable();
1091 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1092 local_irq_disable();
1093 ret = action->thread_fn(action->irq, action->dev_id);
1094 if (ret == IRQ_HANDLED)
1095 atomic_inc(&desc->threads_handled);
1096
1097 irq_finalize_oneshot(desc, action);
1098 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1099 local_irq_enable();
1100 local_bh_enable();
1101 return ret;
1102 }
1103
1104 /*
1105 * Interrupts explicitly requested as threaded interrupts want to be
1106 * preemptible - many of them need to sleep and wait for slow buses to
1107 * complete.
1108 */
1109 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1110 struct irqaction *action)
1111 {
1112 irqreturn_t ret;
1113
1114 ret = action->thread_fn(action->irq, action->dev_id);
1115 if (ret == IRQ_HANDLED)
1116 atomic_inc(&desc->threads_handled);
1117
1118 irq_finalize_oneshot(desc, action);
1119 return ret;
1120 }
1121
1122 static void wake_threads_waitq(struct irq_desc *desc)
1123 {
1124 if (atomic_dec_and_test(&desc->threads_active))
1125 wake_up(&desc->wait_for_threads);
1126 }
1127
1128 static void irq_thread_dtor(struct callback_head *unused)
1129 {
1130 struct task_struct *tsk = current;
1131 struct irq_desc *desc;
1132 struct irqaction *action;
1133
1134 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1135 return;
1136
1137 action = kthread_data(tsk);
1138
1139 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1140 tsk->comm, tsk->pid, action->irq);
1141
1142
1143 desc = irq_to_desc(action->irq);
1144 /*
1145 * If IRQTF_RUNTHREAD is set, we need to decrement
1146 * desc->threads_active and wake possible waiters.
1147 */
1148 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1149 wake_threads_waitq(desc);
1150
1151 /* Prevent a stale desc->threads_oneshot */
1152 irq_finalize_oneshot(desc, action);
1153 }
1154
1155 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1156 {
1157 struct irqaction *secondary = action->secondary;
1158
1159 if (WARN_ON_ONCE(!secondary))
1160 return;
1161
1162 raw_spin_lock_irq(&desc->lock);
1163 __irq_wake_thread(desc, secondary);
1164 raw_spin_unlock_irq(&desc->lock);
1165 }
1166
1167 /*
1168 * Internal function to notify that an interrupt thread is ready.
1169 */
1170 static void irq_thread_set_ready(struct irq_desc *desc,
1171 struct irqaction *action)
1172 {
1173 set_bit(IRQTF_READY, &action->thread_flags);
1174 wake_up(&desc->wait_for_threads);
1175 }
1176
1177 /*
1178 * Internal function to wake up an interrupt thread and wait until it is
1179 * ready.
1180 */
1181 static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
1182 struct irqaction *action)
1183 {
1184 if (!action || !action->thread)
1185 return;
1186
1187 wake_up_process(action->thread);
1188 wait_event(desc->wait_for_threads,
1189 test_bit(IRQTF_READY, &action->thread_flags));
1190 }
1191
1192 /*
1193 * Interrupt handler thread
1194 */
1195 static int irq_thread(void *data)
1196 {
1197 struct callback_head on_exit_work;
1198 struct irqaction *action = data;
1199 struct irq_desc *desc = irq_to_desc(action->irq);
1200 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1201 struct irqaction *action);
1202
1203 irq_thread_set_ready(desc, action);
1204
1205 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1206 &action->thread_flags))
1207 handler_fn = irq_forced_thread_fn;
1208 else
1209 handler_fn = irq_thread_fn;
1210
1211 init_task_work(&on_exit_work, irq_thread_dtor);
1212 task_work_add(current, &on_exit_work, TWA_NONE);
1213
1214 irq_thread_check_affinity(desc, action);
1215
1216 while (!irq_wait_for_interrupt(action)) {
1217 irqreturn_t action_ret;
1218
1219 irq_thread_check_affinity(desc, action);
1220
1221 action_ret = handler_fn(desc, action);
1222 if (action_ret == IRQ_WAKE_THREAD)
1223 irq_wake_secondary(desc, action);
1224
1225 wake_threads_waitq(desc);
1226 }
1227
1228 /*
1229 * This is the regular exit path. __free_irq() is stopping the
1230 * thread via kthread_stop() after calling
1231 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1232 * oneshot mask bit can be set.
1233 */
1234 task_work_cancel(current, irq_thread_dtor);
1235 return 0;
1236 }
1237
1238 /**
1239 * irq_wake_thread - wake the irq thread for the action identified by dev_id
1240 * @irq: Interrupt line
1241 * @dev_id: Device identity for which the thread should be woken
1242 *
1243 */
1244 void irq_wake_thread(unsigned int irq, void *dev_id)
1245 {
1246 struct irq_desc *desc = irq_to_desc(irq);
1247 struct irqaction *action;
1248 unsigned long flags;
1249
1250 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1251 return;
1252
1253 raw_spin_lock_irqsave(&desc->lock, flags);
1254 for_each_action_of_desc(desc, action) {
1255 if (action->dev_id == dev_id) {
1256 if (action->thread)
1257 __irq_wake_thread(desc, action);
1258 break;
1259 }
1260 }
1261 raw_spin_unlock_irqrestore(&desc->lock, flags);
1262 }
1263 EXPORT_SYMBOL_GPL(irq_wake_thread);
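
/*
 * Illustrative sketch (not part of the original source): irq_wake_thread()
 * lets driver code kick the irq thread for a given dev_id explicitly,
 * outside of the primary handler returning IRQ_WAKE_THREAD:
 *
 *	irq_wake_thread(fd->irq, fd);	// fd is the dev_id used at request time
 *
 * The effect matches the primary handler of that action returning
 * IRQ_WAKE_THREAD.
 */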
1264
1265 static int irq_setup_forced_threading(struct irqaction *new)
1266 {
1267 if (!force_irqthreads)
1268 return 0;
1269 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1270 return 0;
1271
1272 /*
1273 * No further action required for interrupts which are requested as
1274 * threaded interrupts already
1275 */
1276 if (new->handler == irq_default_primary_handler)
1277 return 0;
1278
1279 new->flags |= IRQF_ONESHOT;
1280
1281 /*
1282 * Handle the case where we have a real primary handler and a
1283 * thread handler. We force thread them as well by creating a
1284 * secondary action.
1285 */
1286 if (new->handler && new->thread_fn) {
1287 /* Allocate the secondary action */
1288 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1289 if (!new->secondary)
1290 return -ENOMEM;
1291 new->secondary->handler = irq_forced_secondary_handler;
1292 new->secondary->thread_fn = new->thread_fn;
1293 new->secondary->dev_id = new->dev_id;
1294 new->secondary->irq = new->irq;
1295 new->secondary->name = new->name;
1296 }
1297 /* Deal with the primary handler */
1298 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1299 new->thread_fn = new->handler;
1300 new->handler = irq_default_primary_handler;
1301 return 0;
1302 }
1303
1304 static int irq_request_resources(struct irq_desc *desc)
1305 {
1306 struct irq_data *d = &desc->irq_data;
1307 struct irq_chip *c = d->chip;
1308
1309 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1310 }
1311
1312 static void irq_release_resources(struct irq_desc *desc)
1313 {
1314 struct irq_data *d = &desc->irq_data;
1315 struct irq_chip *c = d->chip;
1316
1317 if (c->irq_release_resources)
1318 c->irq_release_resources(d);
1319 }
1320
1321 static bool irq_supports_nmi(struct irq_desc *desc)
1322 {
1323 struct irq_data *d = irq_desc_get_irq_data(desc);
1324
1325 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1326 /* Only IRQs directly managed by the root irqchip can be set as NMI */
1327 if (d->parent_data)
1328 return false;
1329 #endif
1330 /* Don't support NMIs for chips behind a slow bus */
1331 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1332 return false;
1333
1334 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1335 }
1336
1337 static int irq_nmi_setup(struct irq_desc *desc)
1338 {
1339 struct irq_data *d = irq_desc_get_irq_data(desc);
1340 struct irq_chip *c = d->chip;
1341
1342 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1343 }
1344
1345 static void irq_nmi_teardown(struct irq_desc *desc)
1346 {
1347 struct irq_data *d = irq_desc_get_irq_data(desc);
1348 struct irq_chip *c = d->chip;
1349
1350 if (c->irq_nmi_teardown)
1351 c->irq_nmi_teardown(d);
1352 }
1353
1354 static int
1355 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1356 {
1357 struct task_struct *t;
1358
1359 if (!secondary) {
1360 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1361 new->name);
1362 } else {
1363 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1364 new->name);
1365 }
1366
1367 if (IS_ERR(t))
1368 return PTR_ERR(t);
1369
1370 sched_set_fifo(t);
1371
1372 /*
1373 * We keep the reference to the task struct even if
1374 * the thread dies to avoid that the interrupt code
1375 * references an already freed task_struct.
1376 */
1377 new->thread = get_task_struct(t);
1378 /*
1379 * Tell the thread to set its affinity. This is
1380 * important for shared interrupt handlers as we do
1381 * not invoke setup_affinity() for the secondary
1382 * handlers as everything is already set up. Even for
1383 * interrupts marked with IRQF_NO_BALANCE this is
1384 * correct as we want the thread to move to the cpu(s)
1385 * on which the requesting code placed the interrupt.
1386 */
1387 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1388 return 0;
1389 }
1390
1391 /*
1392 * Internal function to register an irqaction - typically used to
1393 * allocate special interrupts that are part of the architecture.
1394 *
1395 * Locking rules:
1396 *
1397 * desc->request_mutex Provides serialization against a concurrent free_irq()
1398 * chip_bus_lock Provides serialization for slow bus operations
1399 * desc->lock Provides serialization against hard interrupts
1400 *
1401 * chip_bus_lock and desc->lock are sufficient for all other management and
1402 * interrupt related functions. desc->request_mutex solely serializes
1403 * request/free_irq().
1404 */
1405 static int
1406 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1407 {
1408 struct irqaction *old, **old_ptr;
1409 unsigned long flags, thread_mask = 0;
1410 int ret, nested, shared = 0;
1411
1412 if (!desc)
1413 return -EINVAL;
1414
1415 if (desc->irq_data.chip == &no_irq_chip)
1416 return -ENOSYS;
1417 if (!try_module_get(desc->owner))
1418 return -ENODEV;
1419
1420 new->irq = irq;
1421
1422 /*
1423 * If the trigger type is not specified by the caller,
1424 * then use the default for this interrupt.
1425 */
1426 if (!(new->flags & IRQF_TRIGGER_MASK))
1427 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1428
1429 /*
1430 * Check whether the interrupt nests into another interrupt
1431 * thread.
1432 */
1433 nested = irq_settings_is_nested_thread(desc);
1434 if (nested) {
1435 if (!new->thread_fn) {
1436 ret = -EINVAL;
1437 goto out_mput;
1438 }
1439 /*
1440 * Replace the primary handler which was provided from
1441 * the driver for non nested interrupt handling by the
1442 * dummy function which warns when called.
1443 */
1444 new->handler = irq_nested_primary_handler;
1445 } else {
1446 if (irq_settings_can_thread(desc)) {
1447 ret = irq_setup_forced_threading(new);
1448 if (ret)
1449 goto out_mput;
1450 }
1451 }
1452
1453 /*
1454 * Create a handler thread when a thread function is supplied
1455 * and the interrupt does not nest into another interrupt
1456 * thread.
1457 */
1458 if (new->thread_fn && !nested) {
1459 ret = setup_irq_thread(new, irq, false);
1460 if (ret)
1461 goto out_mput;
1462 if (new->secondary) {
1463 ret = setup_irq_thread(new->secondary, irq, true);
1464 if (ret)
1465 goto out_thread;
1466 }
1467 }
1468
1469 /*
1470 * Drivers are often written to work w/o knowledge about the
1471 * underlying irq chip implementation, so a request for a
1472 * threaded irq without a primary hard irq context handler
1473 * requires the ONESHOT flag to be set. Some irq chips like
1474 * MSI based interrupts are per se one shot safe. Check the
1475 * chip flags, so we can avoid the unmask dance at the end of
1476 * the threaded handler for those.
1477 */
1478 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1479 new->flags &= ~IRQF_ONESHOT;
1480
1481 /*
1482 * Protects against a concurrent __free_irq() call which might wait
1483 * for synchronize_hardirq() to complete without holding the optional
1484 * chip bus lock and desc->lock. Also protects against handing out
1485 * a recycled oneshot thread_mask bit while it's still in use by
1486 * its previous owner.
1487 */
1488 mutex_lock(&desc->request_mutex);
1489
1490 /*
1491 * Acquire bus lock as the irq_request_resources() callback below
1492 * might rely on the serialization or the magic power management
1493 * functions which are abusing the irq_bus_lock() callback,
1494 */
1495 chip_bus_lock(desc);
1496
1497 /* First installed action requests resources. */
1498 if (!desc->action) {
1499 ret = irq_request_resources(desc);
1500 if (ret) {
1501 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1502 new->name, irq, desc->irq_data.chip->name);
1503 goto out_bus_unlock;
1504 }
1505 }
1506
1507 /*
1508 * The following block of code has to be executed atomically
1509 * protected against a concurrent interrupt and any of the other
1510 * management calls which are not serialized via
1511 * desc->request_mutex or the optional bus lock.
1512 */
1513 raw_spin_lock_irqsave(&desc->lock, flags);
1514 old_ptr = &desc->action;
1515 old = *old_ptr;
1516 if (old) {
1517 /*
1518 * Can't share interrupts unless both agree to and are
1519 * the same type (level, edge, polarity). So both flag
1520 * fields must have IRQF_SHARED set and the bits which
1521 * set the trigger type must match. Also all must
1522 * agree on ONESHOT.
1523 * Interrupt lines used for NMIs cannot be shared.
1524 */
1525 unsigned int oldtype;
1526
1527 if (desc->istate & IRQS_NMI) {
1528 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1529 new->name, irq, desc->irq_data.chip->name);
1530 ret = -EINVAL;
1531 goto out_unlock;
1532 }
1533
1534 /*
1535 * If nobody did set the configuration before, inherit
1536 * the one provided by the requester.
1537 */
1538 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1539 oldtype = irqd_get_trigger_type(&desc->irq_data);
1540 } else {
1541 oldtype = new->flags & IRQF_TRIGGER_MASK;
1542 irqd_set_trigger_type(&desc->irq_data, oldtype);
1543 }
1544
1545 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1546 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1547 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1548 goto mismatch;
1549
1550 /* All handlers must agree on per-cpuness */
1551 if ((old->flags & IRQF_PERCPU) !=
1552 (new->flags & IRQF_PERCPU))
1553 goto mismatch;
1554
1555 /* add new interrupt at end of irq queue */
1556 do {
1557 /*
1558 * Or all existing action->thread_mask bits,
1559 * so we can find the next zero bit for this
1560 * new action.
1561 */
1562 thread_mask |= old->thread_mask;
1563 old_ptr = &old->next;
1564 old = *old_ptr;
1565 } while (old);
1566 shared = 1;
1567 }
1568
1569 /*
1570 * Setup the thread mask for this irqaction for ONESHOT. For
1571 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1572 * conditional in irq_wake_thread().
1573 */
1574 if (new->flags & IRQF_ONESHOT) {
1575 /*
1576 * Unlikely to have 32 or 64 irqs (one per thread_mask bit)
1577 * sharing one line, but who knows.
1578 */
1579 if (thread_mask == ~0UL) {
1580 ret = -EBUSY;
1581 goto out_unlock;
1582 }
1583 /*
1584 * The thread_mask for the action is or'ed to
1585 * desc->threads_active to indicate that the
1586 * IRQF_ONESHOT thread handler has been woken, but not
1587 * yet finished. The bit is cleared when a thread
1588 * completes. When all threads of a shared interrupt
1589 * line have completed desc->threads_active becomes
1590 * zero and the interrupt line is unmasked. See
1591 * handle.c:irq_wake_thread() for further information.
1592 *
1593 * If no thread is woken by primary (hard irq context)
1594 * interrupt handlers, then desc->threads_active is
1595 * also checked for zero to unmask the irq line in the
1596 * affected hard irq flow handlers
1597 * (handle_[fasteoi|level]_irq).
1598 *
1599 * The new action gets the first zero bit of
1600 * thread_mask assigned. See the loop above which or's
1601 * all existing action->thread_mask bits.
1602 */
1603 new->thread_mask = 1UL << ffz(thread_mask);
1604
1605 } else if (new->handler == irq_default_primary_handler &&
1606 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1607 /*
1608 * The interrupt was requested with handler = NULL, so
1609 * we use the default primary handler for it. But it
1610 * does not have the oneshot flag set. In combination
1611 * with level interrupts this is deadly, because the
1612 * default primary handler just wakes the thread, then
1613 * the irq line is re-enabled, but the device still
1614 * has the level irq asserted. Rinse and repeat....
1615 *
1616 * While this works for edge type interrupts, we play
1617 * it safe and reject unconditionally because we can't
1618 * say for sure which type this interrupt really
1619 * has. The type flags are unreliable as the
1620 * underlying chip implementation can override them.
1621 */
1622 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1623 new->name, irq);
1624 ret = -EINVAL;
1625 goto out_unlock;
1626 }
1627
1628 if (!shared) {
1629 /* Setup the type (level, edge polarity) if configured: */
1630 if (new->flags & IRQF_TRIGGER_MASK) {
1631 ret = __irq_set_trigger(desc,
1632 new->flags & IRQF_TRIGGER_MASK);
1633
1634 if (ret)
1635 goto out_unlock;
1636 }
1637
1638 /*
1639 * Activate the interrupt. That activation must happen
1640 * independently of IRQ_NOAUTOEN. request_irq() can fail
1641 * and the callers are supposed to handle
1642 * that. enable_irq() of an interrupt requested with
1643 * IRQ_NOAUTOEN is not supposed to fail. The activation
1644 * keeps it in shutdown mode, it merely associates
1645 * resources if necessary and if that's not possible it
1646 * fails. Interrupts which are in managed shutdown mode
1647 * will simply ignore that activation request.
1648 */
1649 ret = irq_activate(desc);
1650 if (ret)
1651 goto out_unlock;
1652
1653 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1654 IRQS_ONESHOT | IRQS_WAITING);
1655 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1656
1657 if (new->flags & IRQF_PERCPU) {
1658 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1659 irq_settings_set_per_cpu(desc);
1660 }
1661
1662 if (new->flags & IRQF_ONESHOT)
1663 desc->istate |= IRQS_ONESHOT;
1664
1665 /* Exclude IRQ from balancing if requested */
1666 if (new->flags & IRQF_NOBALANCING) {
1667 irq_settings_set_no_balancing(desc);
1668 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1669 }
1670
1671 if (irq_settings_can_autoenable(desc)) {
1672 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1673 } else {
1674 /*
1675 * Shared interrupts do not go well with disabling
1676 * auto enable. The sharing interrupt might request
1677 * it while it's still disabled and then wait for
1678 * interrupts forever.
1679 */
1680 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1681 /* Undo nested disables: */
1682 desc->depth = 1;
1683 }
1684
1685 } else if (new->flags & IRQF_TRIGGER_MASK) {
1686 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1687 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1688
1689 if (nmsk != omsk)
1690 /* hope the handler works with current trigger mode */
1691 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1692 irq, omsk, nmsk);
1693 }
1694
1695 *old_ptr = new;
1696
1697 irq_pm_install_action(desc, new);
1698
1699 /* Reset broken irq detection when installing new handler */
1700 desc->irq_count = 0;
1701 desc->irqs_unhandled = 0;
1702
1703 /*
1704 * Check whether we disabled the irq via the spurious handler
1705 * before. Reenable it and give it another chance.
1706 */
1707 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1708 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1709 __enable_irq(desc);
1710 }
1711
1712 raw_spin_unlock_irqrestore(&desc->lock, flags);
1713 chip_bus_sync_unlock(desc);
1714 mutex_unlock(&desc->request_mutex);
1715
1716 irq_setup_timings(desc, new);
1717
1718 wake_up_and_wait_for_irq_thread_ready(desc, new);
1719 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
1720
1721 register_irq_proc(irq, desc);
1722 new->dir = NULL;
1723 register_handler_proc(irq, new);
1724 return 0;
1725
1726 mismatch:
1727 if (!(new->flags & IRQF_PROBE_SHARED)) {
1728 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1729 irq, new->flags, new->name, old->flags, old->name);
1730 #ifdef CONFIG_DEBUG_SHIRQ
1731 dump_stack();
1732 #endif
1733 }
1734 ret = -EBUSY;
1735
1736 out_unlock:
1737 raw_spin_unlock_irqrestore(&desc->lock, flags);
1738
1739 if (!desc->action)
1740 irq_release_resources(desc);
1741 out_bus_unlock:
1742 chip_bus_sync_unlock(desc);
1743 mutex_unlock(&desc->request_mutex);
1744
1745 out_thread:
1746 if (new->thread) {
1747 struct task_struct *t = new->thread;
1748
1749 new->thread = NULL;
1750 kthread_stop(t);
1751 put_task_struct(t);
1752 }
1753 if (new->secondary && new->secondary->thread) {
1754 struct task_struct *t = new->secondary->thread;
1755
1756 new->secondary->thread = NULL;
1757 kthread_stop(t);
1758 put_task_struct(t);
1759 }
1760 out_mput:
1761 module_put(desc->owner);
1762 return ret;
1763 }
1764
1765 /*
1766 * Internal function to unregister an irqaction - used to free
1767 * regular and special interrupts that are part of the architecture.
1768 */
1769 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1770 {
1771 unsigned irq = desc->irq_data.irq;
1772 struct irqaction *action, **action_ptr;
1773 unsigned long flags;
1774
1775 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1776
1777 mutex_lock(&desc->request_mutex);
1778 chip_bus_lock(desc);
1779 raw_spin_lock_irqsave(&desc->lock, flags);
1780
1781 /*
1782 * There can be multiple actions per IRQ descriptor, find the right
1783 * one based on the dev_id:
1784 */
1785 action_ptr = &desc->action;
1786 for (;;) {
1787 action = *action_ptr;
1788
1789 if (!action) {
1790 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1791 raw_spin_unlock_irqrestore(&desc->lock, flags);
1792 chip_bus_sync_unlock(desc);
1793 mutex_unlock(&desc->request_mutex);
1794 return NULL;
1795 }
1796
1797 if (action->dev_id == dev_id)
1798 break;
1799 action_ptr = &action->next;
1800 }
1801
1802 /* Found it - now remove it from the list of entries: */
1803 *action_ptr = action->next;
1804
1805 irq_pm_remove_action(desc, action);
1806
1807 /* If this was the last handler, shut down the IRQ line: */
1808 if (!desc->action) {
1809 irq_settings_clr_disable_unlazy(desc);
1810 /* Only shutdown. Deactivate after synchronize_hardirq() */
1811 irq_shutdown(desc);
1812 }
1813
1814 #ifdef CONFIG_SMP
1815 /* make sure affinity_hint is cleaned up */
1816 if (WARN_ON_ONCE(desc->affinity_hint))
1817 desc->affinity_hint = NULL;
1818 #endif
1819
1820 raw_spin_unlock_irqrestore(&desc->lock, flags);
1821 /*
1822 * Drop bus_lock here so the changes which were done in the chip
1823 * callbacks above are synced out to the irq chips which hang
1824 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1825 *
1826 * Aside of that the bus_lock can also be taken from the threaded
1827 * handler in irq_finalize_oneshot() which results in a deadlock
1828 * because kthread_stop() would wait forever for the thread to
1829 * complete, which is blocked on the bus lock.
1830 *
1831 * The still held desc->request_mutex protects against a
1832 * concurrent request_irq() of this irq so the release of resources
1833 * and timing data is properly serialized.
1834 */
1835 chip_bus_sync_unlock(desc);
1836
1837 unregister_handler_proc(irq, action);
1838
1839 /*
1840 * Make sure it's not being used on another CPU and if the chip
1841 * supports it also make sure that there is no (not yet serviced)
1842 * interrupt in flight at the hardware level.
1843 */
1844 __synchronize_hardirq(desc, true);
1845
1846 #ifdef CONFIG_DEBUG_SHIRQ
1847 /*
1848 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1849 * event to happen even now that it is being freed, so let's make
1850 * sure that is so by doing an extra call to the handler ....
1851 *
1852 * ( We do this after actually deregistering it, to make sure that a
1853 * 'real' IRQ doesn't run in parallel with our fake. )
1854 */
1855 if (action->flags & IRQF_SHARED) {
1856 local_irq_save(flags);
1857 action->handler(irq, dev_id);
1858 local_irq_restore(flags);
1859 }
1860 #endif
1861
1862 /*
1863 * The action has already been removed above, but the thread writes
1864 * its oneshot mask bit when it completes. However, request_mutex is
1865 * held across this, which prevents __setup_irq() from handing out
1866 * the same bit to a newly requested action.
1867 */
1868 if (action->thread) {
1869 kthread_stop(action->thread);
1870 put_task_struct(action->thread);
1871 if (action->secondary && action->secondary->thread) {
1872 kthread_stop(action->secondary->thread);
1873 put_task_struct(action->secondary->thread);
1874 }
1875 }
1876
1877 /* Last action releases resources */
1878 if (!desc->action) {
1879 /*
1880 * Reacquire bus lock as irq_release_resources() might
1881 * require it to deallocate resources over the slow bus.
1882 */
1883 chip_bus_lock(desc);
1884 /*
1885 * There is no interrupt on the fly anymore. Deactivate it
1886 * completely.
1887 */
1888 raw_spin_lock_irqsave(&desc->lock, flags);
1889 irq_domain_deactivate_irq(&desc->irq_data);
1890 raw_spin_unlock_irqrestore(&desc->lock, flags);
1891
1892 irq_release_resources(desc);
1893 chip_bus_sync_unlock(desc);
1894 irq_remove_timings(desc);
1895 }
1896
1897 mutex_unlock(&desc->request_mutex);
1898
1899 irq_chip_pm_put(&desc->irq_data);
1900 module_put(desc->owner);
1901 kfree(action->secondary);
1902 return action;
1903 }
1904
1905 /**
1906 * free_irq - free an interrupt allocated with request_irq
1907 * @irq: Interrupt line to free
1908 * @dev_id: Device identity to free
1909 *
1910 * Remove an interrupt handler. The handler is removed and if the
1911 * interrupt line is no longer in use by any driver it is disabled.
1912 * On a shared IRQ the caller must ensure the interrupt is disabled
1913 * on the card it drives before calling this function. The function
1914 * does not return until any executing interrupts for this IRQ
1915 * have completed.
1916 *
1917 * This function must not be called from interrupt context.
1918 *
1919 * Returns the devname argument passed to request_irq.
1920 */
1921 const void *free_irq(unsigned int irq, void *dev_id)
1922 {
1923 struct irq_desc *desc = irq_to_desc(irq);
1924 struct irqaction *action;
1925 const char *devname;
1926
1927 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1928 return NULL;
1929
1930 #ifdef CONFIG_SMP
1931 if (WARN_ON(desc->affinity_notify))
1932 desc->affinity_notify = NULL;
1933 #endif
1934
1935 action = __free_irq(desc, dev_id);
1936
1937 if (!action)
1938 return NULL;
1939
1940 devname = action->name;
1941 kfree(action);
1942 return devname;
1943 }
1944 EXPORT_SYMBOL(free_irq);
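
/*
 * Illustrative sketch, not part of this file: a driver pairing
 * request_irq() with free_irq() and using the address of its private
 * data as the dev_id cookie, so the matching action can be located on
 * a shared line.  "struct foo_dev", the foo_* functions and the
 * FOO_STATUS offset are hypothetical stand-ins and assume <linux/io.h>
 * for readl()/writel().
 */
#define FOO_STATUS	0x00	/* hypothetical status register */

struct foo_dev {
	void __iomem *regs;
	int irq;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;
	u32 status = readl(foo->regs + FOO_STATUS);

	if (!status)
		return IRQ_NONE;	/* not ours on a shared line */

	writel(status, foo->regs + FOO_STATUS);	/* ack the device */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
	/* dev_id must be unique; the private data address usually is. */
	return request_irq(foo->irq, foo_interrupt, IRQF_SHARED, "foo", foo);
}

static void foo_teardown_irq(struct foo_dev *foo)
{
	/* Quiesce the device first, then remove only this action. */
	free_irq(foo->irq, foo);
}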
1945
1946 /* This function must be called with desc->lock held */
1947 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1948 {
1949 const char *devname = NULL;
1950
1951 desc->istate &= ~IRQS_NMI;
1952
1953 if (!WARN_ON(desc->action == NULL)) {
1954 irq_pm_remove_action(desc, desc->action);
1955 devname = desc->action->name;
1956 unregister_handler_proc(irq, desc->action);
1957
1958 kfree(desc->action);
1959 desc->action = NULL;
1960 }
1961
1962 irq_settings_clr_disable_unlazy(desc);
1963 irq_shutdown_and_deactivate(desc);
1964
1965 irq_release_resources(desc);
1966
1967 irq_chip_pm_put(&desc->irq_data);
1968 module_put(desc->owner);
1969
1970 return devname;
1971 }
1972
1973 const void *free_nmi(unsigned int irq, void *dev_id)
1974 {
1975 struct irq_desc *desc = irq_to_desc(irq);
1976 unsigned long flags;
1977 const void *devname;
1978
1979 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1980 return NULL;
1981
1982 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1983 return NULL;
1984
1985 /* NMI still enabled */
1986 if (WARN_ON(desc->depth == 0))
1987 disable_nmi_nosync(irq);
1988
1989 raw_spin_lock_irqsave(&desc->lock, flags);
1990
1991 irq_nmi_teardown(desc);
1992 devname = __cleanup_nmi(irq, desc);
1993
1994 raw_spin_unlock_irqrestore(&desc->lock, flags);
1995
1996 return devname;
1997 }
1998
1999 /**
2000 * request_threaded_irq - allocate an interrupt line
2001 * @irq: Interrupt line to allocate
2002 * @handler: Function to be called when the IRQ occurs.
2003 * Primary handler for threaded interrupts
2004 * If NULL and thread_fn != NULL the default
2005 * primary handler is installed
2006 * @thread_fn: Function called from the irq handler thread
2007 * If NULL, no irq thread is created
2008 * @irqflags: Interrupt type flags
2009 * @devname: An ascii name for the claiming device
2010 * @dev_id: A cookie passed back to the handler function
2011 *
2012 * This call allocates interrupt resources and enables the
2013 * interrupt line and IRQ handling. From the point this
2014 * call is made your handler function may be invoked. Since
2015 * your handler function must clear any interrupt the board
2016 * raises, you must take care both to initialise your hardware
2017 * and to set up the interrupt handler in the right order.
2018 *
2019 * If you want to set up a threaded irq handler for your device
2020 * then you need to supply @handler and @thread_fn. @handler is
2021 * still called in hard interrupt context and has to check
2022 * whether the interrupt originates from the device. If yes it
2023 * needs to disable the interrupt on the device and return
2024 * IRQ_WAKE_THREAD which will wake up the handler thread and run
2025 * @thread_fn. This split handler design is necessary to support
2026 * shared interrupts.
2027 *
2028 * Dev_id must be globally unique. Normally the address of the
2029 * device data structure is used as the cookie. Since the handler
2030 * receives this value it makes sense to use it.
2031 *
2032 * If your interrupt is shared you must pass a non NULL dev_id
2033 * as this is required when freeing the interrupt.
2034 *
2035 * Flags:
2036 *
2037 * IRQF_SHARED Interrupt is shared
2038 * IRQF_TRIGGER_* Specify active edge(s) or level
2039 *
2040 */
2041 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2042 irq_handler_t thread_fn, unsigned long irqflags,
2043 const char *devname, void *dev_id)
2044 {
2045 struct irqaction *action;
2046 struct irq_desc *desc;
2047 int retval;
2048
2049 if (irq == IRQ_NOTCONNECTED)
2050 return -ENOTCONN;
2051
2052 /*
2053 * Sanity-check: shared interrupts must pass in a real dev-ID,
2054 * otherwise we'll have trouble later trying to figure out
2055 * which interrupt is which (messes up the interrupt freeing
2056 * logic etc).
2057 *
2058 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
2059 * it cannot be set along with IRQF_NO_SUSPEND.
2060 */
2061 if (((irqflags & IRQF_SHARED) && !dev_id) ||
2062 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2063 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2064 return -EINVAL;
2065
2066 desc = irq_to_desc(irq);
2067 if (!desc)
2068 return -EINVAL;
2069
2070 if (!irq_settings_can_request(desc) ||
2071 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2072 return -EINVAL;
2073
2074 if (!handler) {
2075 if (!thread_fn)
2076 return -EINVAL;
2077 handler = irq_default_primary_handler;
2078 }
2079
2080 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2081 if (!action)
2082 return -ENOMEM;
2083
2084 action->handler = handler;
2085 action->thread_fn = thread_fn;
2086 action->flags = irqflags;
2087 action->name = devname;
2088 action->dev_id = dev_id;
2089
2090 retval = irq_chip_pm_get(&desc->irq_data);
2091 if (retval < 0) {
2092 kfree(action);
2093 return retval;
2094 }
2095
2096 retval = __setup_irq(irq, desc, action);
2097
2098 if (retval) {
2099 irq_chip_pm_put(&desc->irq_data);
2100 kfree(action->secondary);
2101 kfree(action);
2102 }
2103
2104 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
2105 if (!retval && (irqflags & IRQF_SHARED)) {
2106 /*
2107 * It's a shared IRQ -- the driver ought to be prepared for it
2108 * to happen immediately, so let's make sure....
2109 * We disable the irq to make sure that a 'real' IRQ doesn't
2110 * run in parallel with our fake.
2111 */
2112 unsigned long flags;
2113
2114 disable_irq(irq);
2115 local_irq_save(flags);
2116
2117 handler(irq, dev_id);
2118
2119 local_irq_restore(flags);
2120 enable_irq(irq);
2121 }
2122 #endif
2123 return retval;
2124 }
2125 EXPORT_SYMBOL(request_threaded_irq);
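
/*
 * Illustrative sketch, not part of this file: the split handler scheme
 * described above.  The hard handler only checks and masks the device,
 * then returns IRQ_WAKE_THREAD; the slow work runs in bar_thread_fn().
 * "struct bar_dev", the bar_* names and BAR_* offsets are hypothetical
 * and assume <linux/io.h> for readl()/writel().
 */
#define BAR_STAT	0x00	/* hypothetical status register */
#define BAR_MASK	0x04	/* hypothetical interrupt mask register */

struct bar_dev {
	void __iomem *regs;
};

static irqreturn_t bar_hardirq(int irq, void *dev_id)
{
	struct bar_dev *bar = dev_id;

	if (!readl(bar->regs + BAR_STAT))
		return IRQ_NONE;		/* not our device */

	/* Silence the device until the thread has dealt with it. */
	writel(0, bar->regs + BAR_MASK);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t bar_thread_fn(int irq, void *dev_id)
{
	struct bar_dev *bar = dev_id;

	/* Sleeping work (I2C/SPI transfers, mutexes, ...) is fine here. */
	writel(~0, bar->regs + BAR_MASK);	/* unmask again */
	return IRQ_HANDLED;
}

static int bar_setup_irq(struct bar_dev *bar, unsigned int irq)
{
	return request_threaded_irq(irq, bar_hardirq, bar_thread_fn,
				    IRQF_SHARED, "bar", bar);
}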
2126
2127 /**
2128 * request_any_context_irq - allocate an interrupt line
2129 * @irq: Interrupt line to allocate
2130 * @handler: Function to be called when the IRQ occurs.
2131 * Threaded handler for threaded interrupts.
2132 * @flags: Interrupt type flags
2133 * @name: An ascii name for the claiming device
2134 * @dev_id: A cookie passed back to the handler function
2135 *
2136 * This call allocates interrupt resources and enables the
2137 * interrupt line and IRQ handling. It selects either a
2138 * hardirq or threaded handling method depending on the
2139 * context.
2140 *
2141 * On failure, it returns a negative value. On success,
2142 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2143 */
2144 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2145 unsigned long flags, const char *name, void *dev_id)
2146 {
2147 struct irq_desc *desc;
2148 int ret;
2149
2150 if (irq == IRQ_NOTCONNECTED)
2151 return -ENOTCONN;
2152
2153 desc = irq_to_desc(irq);
2154 if (!desc)
2155 return -EINVAL;
2156
2157 if (irq_settings_is_nested_thread(desc)) {
2158 ret = request_threaded_irq(irq, NULL, handler,
2159 flags, name, dev_id);
2160 return !ret ? IRQC_IS_NESTED : ret;
2161 }
2162
2163 ret = request_irq(irq, handler, flags, name, dev_id);
2164 return !ret ? IRQC_IS_HARDIRQ : ret;
2165 }
2166 EXPORT_SYMBOL_GPL(request_any_context_irq);
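
/*
 * Illustrative sketch, not part of this file: a caller that does not
 * know whether its interrupt sits behind a nested/threaded controller
 * (e.g. a GPIO expander on a slow bus) can use the return value to tell
 * the two success cases apart.  The baz_* names are hypothetical.
 */
static irqreturn_t baz_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int baz_setup_irq(unsigned int irq, void *cookie)
{
	int ret = request_any_context_irq(irq, baz_handler, 0, "baz", cookie);

	if (ret < 0)
		return ret;

	/* On success ret is either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. */
	pr_debug("baz: irq %u handled %s\n", irq,
		 ret == IRQC_IS_NESTED ? "in a nested thread" : "in hardirq context");
	return 0;
}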
2167
2168 /**
2169 * request_nmi - allocate an interrupt line for NMI delivery
2170 * @irq: Interrupt line to allocate
2171 * @handler: Function to be called when the IRQ occurs.
2172 * Threaded handler for threaded interrupts.
2173 * @irqflags: Interrupt type flags
2174 * @name: An ascii name for the claiming device
2175 * @dev_id: A cookie passed back to the handler function
2176 *
2177 * This call allocates interrupt resources and enables the
2178 * interrupt line and IRQ handling. It sets up the IRQ line
2179 * to be handled as an NMI.
2180 *
2181 * An interrupt line delivering NMIs cannot be shared and IRQ handling
2182 * cannot be threaded.
2183 *
2184 * Interrupt lines requested for NMI delivery must produce per-CPU
2185 * interrupts and must have the auto-enable setting disabled.
2186 *
2187 * Dev_id must be globally unique. Normally the address of the
2188 * device data structure is used as the cookie. Since the handler
2189 * receives this value it makes sense to use it.
2190 *
2191 * If the interrupt line cannot be used to deliver NMIs, the function
2192 * will fail and return a negative value.
2193 */
2194 int request_nmi(unsigned int irq, irq_handler_t handler,
2195 unsigned long irqflags, const char *name, void *dev_id)
2196 {
2197 struct irqaction *action;
2198 struct irq_desc *desc;
2199 unsigned long flags;
2200 int retval;
2201
2202 if (irq == IRQ_NOTCONNECTED)
2203 return -ENOTCONN;
2204
2205 /* NMIs cannot be shared and cannot be used for polling */
2206 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2207 return -EINVAL;
2208
2209 if (!(irqflags & IRQF_PERCPU))
2210 return -EINVAL;
2211
2212 if (!handler)
2213 return -EINVAL;
2214
2215 desc = irq_to_desc(irq);
2216
2217 if (!desc || irq_settings_can_autoenable(desc) ||
2218 !irq_settings_can_request(desc) ||
2219 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2220 !irq_supports_nmi(desc))
2221 return -EINVAL;
2222
2223 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2224 if (!action)
2225 return -ENOMEM;
2226
2227 action->handler = handler;
2228 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2229 action->name = name;
2230 action->dev_id = dev_id;
2231
2232 retval = irq_chip_pm_get(&desc->irq_data);
2233 if (retval < 0)
2234 goto err_out;
2235
2236 retval = __setup_irq(irq, desc, action);
2237 if (retval)
2238 goto err_irq_setup;
2239
2240 raw_spin_lock_irqsave(&desc->lock, flags);
2241
2242 /* Setup NMI state */
2243 desc->istate |= IRQS_NMI;
2244 retval = irq_nmi_setup(desc);
2245 if (retval) {
2246 __cleanup_nmi(irq, desc);
2247 raw_spin_unlock_irqrestore(&desc->lock, flags);
2248 return -EINVAL;
2249 }
2250
2251 raw_spin_unlock_irqrestore(&desc->lock, flags);
2252
2253 return 0;
2254
2255 err_irq_setup:
2256 irq_chip_pm_put(&desc->irq_data);
2257 err_out:
2258 kfree(action);
2259
2260 return retval;
2261 }
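
/*
 * Illustrative sketch, not part of this file: requesting NMI delivery on
 * a non-shared line that was set up with IRQ_NOAUTOEN, then enabling it
 * explicitly with enable_nmi().  As checked above, the flags must include
 * IRQF_PERCPU.  The qux_* names are hypothetical.
 */
static irqreturn_t qux_nmi_handler(int irq, void *dev_id)
{
	/* Runs in NMI context: no locks, no sleeping, keep it minimal. */
	return IRQ_HANDLED;
}

static int qux_setup_nmi(unsigned int irq, void *cookie)
{
	int ret = request_nmi(irq, qux_nmi_handler, IRQF_PERCPU, "qux-nmi",
			      cookie);

	if (ret)
		return ret;

	enable_nmi(irq);
	return 0;
}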
2262
2263 void enable_percpu_irq(unsigned int irq, unsigned int type)
2264 {
2265 unsigned int cpu = smp_processor_id();
2266 unsigned long flags;
2267 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2268
2269 if (!desc)
2270 return;
2271
2272 /*
2273 * If the trigger type is not specified by the caller, then
2274 * use the default for this interrupt.
2275 */
2276 type &= IRQ_TYPE_SENSE_MASK;
2277 if (type == IRQ_TYPE_NONE)
2278 type = irqd_get_trigger_type(&desc->irq_data);
2279
2280 if (type != IRQ_TYPE_NONE) {
2281 int ret;
2282
2283 ret = __irq_set_trigger(desc, type);
2284
2285 if (ret) {
2286 WARN(1, "failed to set type for IRQ%d\n", irq);
2287 goto out;
2288 }
2289 }
2290
2291 irq_percpu_enable(desc, cpu);
2292 out:
2293 irq_put_desc_unlock(desc, flags);
2294 }
2295 EXPORT_SYMBOL_GPL(enable_percpu_irq);
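
/*
 * Illustrative sketch, not part of this file: enable_percpu_irq() only
 * acts on the calling CPU, so enabling a per-CPU interrupt everywhere is
 * typically done from code that runs on each CPU, for instance via
 * on_each_cpu() from <linux/smp.h>.  The quux_* names are hypothetical.
 */
static void quux_enable_on_this_cpu(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	/* IRQ_TYPE_NONE keeps the trigger type already configured. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void quux_enable_everywhere(unsigned int irq)
{
	on_each_cpu(quux_enable_on_this_cpu, &irq, 1);
}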
2296
2297 void enable_percpu_nmi(unsigned int irq, unsigned int type)
2298 {
2299 enable_percpu_irq(irq, type);
2300 }
2301
2302 /**
2303 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2304 * @irq: Linux irq number to check for
2305 *
2306 * Must be called from a non-migratable context. Returns the enable
2307 * state of a per cpu interrupt on the current cpu.
2308 */
2309 bool irq_percpu_is_enabled(unsigned int irq)
2310 {
2311 unsigned int cpu = smp_processor_id();
2312 struct irq_desc *desc;
2313 unsigned long flags;
2314 bool is_enabled;
2315
2316 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2317 if (!desc)
2318 return false;
2319
2320 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2321 irq_put_desc_unlock(desc, flags);
2322
2323 return is_enabled;
2324 }
2325 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2326
2327 void disable_percpu_irq(unsigned int irq)
2328 {
2329 unsigned int cpu = smp_processor_id();
2330 unsigned long flags;
2331 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2332
2333 if (!desc)
2334 return;
2335
2336 irq_percpu_disable(desc, cpu);
2337 irq_put_desc_unlock(desc, flags);
2338 }
2339 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2340
2341 void disable_percpu_nmi(unsigned int irq)
2342 {
2343 disable_percpu_irq(irq);
2344 }
2345
2346 /*
2347 * Internal function to unregister a percpu irqaction.
2348 */
2349 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2350 {
2351 struct irq_desc *desc = irq_to_desc(irq);
2352 struct irqaction *action;
2353 unsigned long flags;
2354
2355 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2356
2357 if (!desc)
2358 return NULL;
2359
2360 raw_spin_lock_irqsave(&desc->lock, flags);
2361
2362 action = desc->action;
2363 if (!action || action->percpu_dev_id != dev_id) {
2364 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2365 goto bad;
2366 }
2367
2368 if (!cpumask_empty(desc->percpu_enabled)) {
2369 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2370 irq, cpumask_first(desc->percpu_enabled));
2371 goto bad;
2372 }
2373
2374 /* Found it - now remove it from the list of entries: */
2375 desc->action = NULL;
2376
2377 desc->istate &= ~IRQS_NMI;
2378
2379 raw_spin_unlock_irqrestore(&desc->lock, flags);
2380
2381 unregister_handler_proc(irq, action);
2382
2383 irq_chip_pm_put(&desc->irq_data);
2384 module_put(desc->owner);
2385 return action;
2386
2387 bad:
2388 raw_spin_unlock_irqrestore(&desc->lock, flags);
2389 return NULL;
2390 }
2391
2392 /**
2393 * remove_percpu_irq - free a per-cpu interrupt
2394 * @irq: Interrupt line to free
2395 * @act: irqaction for the interrupt
2396 *
2397 * Used to remove interrupts statically setup by the early boot process.
2398 */
2399 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2400 {
2401 struct irq_desc *desc = irq_to_desc(irq);
2402
2403 if (desc && irq_settings_is_per_cpu_devid(desc))
2404 __free_percpu_irq(irq, act->percpu_dev_id);
2405 }
2406
2407 /**
2408 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2409 * @irq: Interrupt line to free
2410 * @dev_id: Device identity to free
2411 *
2412 * Remove a percpu interrupt handler. The handler is removed, but
2413 * the interrupt line is not disabled. This must be done on each
2414 * CPU before calling this function. The function does not return
2415 * until any executing interrupts for this IRQ have completed.
2416 *
2417 * This function must not be called from interrupt context.
2418 */
2419 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2420 {
2421 struct irq_desc *desc = irq_to_desc(irq);
2422
2423 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2424 return;
2425
2426 chip_bus_lock(desc);
2427 kfree(__free_percpu_irq(irq, dev_id));
2428 chip_bus_sync_unlock(desc);
2429 }
2430 EXPORT_SYMBOL_GPL(free_percpu_irq);
2431
2432 void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2433 {
2434 struct irq_desc *desc = irq_to_desc(irq);
2435
2436 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2437 return;
2438
2439 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2440 return;
2441
2442 kfree(__free_percpu_irq(irq, dev_id));
2443 }
2444
2445 /**
2446 * setup_percpu_irq - setup a per-cpu interrupt
2447 * @irq: Interrupt line to setup
2448 * @act: irqaction for the interrupt
2449 *
2450 * Used to statically setup per-cpu interrupts in the early boot process.
2451 */
2452 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2453 {
2454 struct irq_desc *desc = irq_to_desc(irq);
2455 int retval;
2456
2457 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2458 return -EINVAL;
2459
2460 retval = irq_chip_pm_get(&desc->irq_data);
2461 if (retval < 0)
2462 return retval;
2463
2464 retval = __setup_irq(irq, desc, act);
2465
2466 if (retval)
2467 irq_chip_pm_put(&desc->irq_data);
2468
2469 return retval;
2470 }
2471
2472 /**
2473 * __request_percpu_irq - allocate a percpu interrupt line
2474 * @irq: Interrupt line to allocate
2475 * @handler: Function to be called when the IRQ occurs.
2476 * @flags: Interrupt type flags (IRQF_TIMER only)
2477 * @devname: An ascii name for the claiming device
2478 * @dev_id: A percpu cookie passed back to the handler function
2479 *
2480 * This call allocates interrupt resources and enables the
2481 * interrupt on the local CPU. If the interrupt is supposed to be
2482 * enabled on other CPUs, it has to be done on each CPU using
2483 * enable_percpu_irq().
2484 *
2485 * Dev_id must be globally unique. It is a per-cpu variable, and
2486 * the handler gets called with the interrupted CPU's instance of
2487 * that variable.
2488 */
2489 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2490 unsigned long flags, const char *devname,
2491 void __percpu *dev_id)
2492 {
2493 struct irqaction *action;
2494 struct irq_desc *desc;
2495 int retval;
2496
2497 if (!dev_id)
2498 return -EINVAL;
2499
2500 desc = irq_to_desc(irq);
2501 if (!desc || !irq_settings_can_request(desc) ||
2502 !irq_settings_is_per_cpu_devid(desc))
2503 return -EINVAL;
2504
2505 if (flags && flags != IRQF_TIMER)
2506 return -EINVAL;
2507
2508 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2509 if (!action)
2510 return -ENOMEM;
2511
2512 action->handler = handler;
2513 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2514 action->name = devname;
2515 action->percpu_dev_id = dev_id;
2516
2517 retval = irq_chip_pm_get(&desc->irq_data);
2518 if (retval < 0) {
2519 kfree(action);
2520 return retval;
2521 }
2522
2523 retval = __setup_irq(irq, desc, action);
2524
2525 if (retval) {
2526 irq_chip_pm_put(&desc->irq_data);
2527 kfree(action);
2528 }
2529
2530 return retval;
2531 }
2532 EXPORT_SYMBOL_GPL(__request_percpu_irq);
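
/*
 * Illustrative sketch, not part of this file: the dev_id of a per-CPU
 * interrupt is a percpu pointer, and each invocation of the handler
 * receives the instance belonging to the CPU that took the interrupt.
 * request_percpu_irq() is the flag-less wrapper around
 * __request_percpu_irq() from <linux/interrupt.h>; the corge_* names are
 * hypothetical and assume <linux/percpu.h> for alloc_percpu().
 */
struct corge_pcpu {
	unsigned long count;
};

static struct corge_pcpu __percpu *corge_data;

static irqreturn_t corge_percpu_handler(int irq, void *dev_id)
{
	struct corge_pcpu *pc = dev_id;	/* this CPU's instance */

	pc->count++;
	return IRQ_HANDLED;
}

static int corge_setup_irq(unsigned int irq)
{
	int ret;

	corge_data = alloc_percpu(struct corge_pcpu);
	if (!corge_data)
		return -ENOMEM;

	ret = request_percpu_irq(irq, corge_percpu_handler, "corge",
				 corge_data);
	if (ret) {
		free_percpu(corge_data);
		return ret;
	}

	/* The line only fires on CPUs which call enable_percpu_irq(). */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}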
2533
2534 /**
2535 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2536 * @irq: Interrupt line to allocate
2537 * @handler: Function to be called when the IRQ occurs.
2538 * @name: An ascii name for the claiming device
2539 * @dev_id: A percpu cookie passed back to the handler function
2540 *
2541 * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
2542 * have to be setup on each CPU by calling prepare_percpu_nmi() before
2543 * being enabled on the same CPU by using enable_percpu_nmi().
2544 *
2545 * Dev_id must be globally unique. It is a per-cpu variable, and
2546 * the handler gets called with the interrupted CPU's instance of
2547 * that variable.
2548 *
2549 * Interrupt lines requested for NMI delivery should have the
2550 * auto-enable setting disabled.
2551 *
2552 * If the interrupt line cannot be used to deliver NMIs, the function
2553 * will fail, returning a negative value.
2554 */
2555 int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2556 const char *name, void __percpu *dev_id)
2557 {
2558 struct irqaction *action;
2559 struct irq_desc *desc;
2560 unsigned long flags;
2561 int retval;
2562
2563 if (!handler)
2564 return -EINVAL;
2565
2566 desc = irq_to_desc(irq);
2567
2568 if (!desc || !irq_settings_can_request(desc) ||
2569 !irq_settings_is_per_cpu_devid(desc) ||
2570 irq_settings_can_autoenable(desc) ||
2571 !irq_supports_nmi(desc))
2572 return -EINVAL;
2573
2574 /* The line cannot already be NMI */
2575 if (desc->istate & IRQS_NMI)
2576 return -EINVAL;
2577
2578 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2579 if (!action)
2580 return -ENOMEM;
2581
2582 action->handler = handler;
2583 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2584 | IRQF_NOBALANCING;
2585 action->name = name;
2586 action->percpu_dev_id = dev_id;
2587
2588 retval = irq_chip_pm_get(&desc->irq_data);
2589 if (retval < 0)
2590 goto err_out;
2591
2592 retval = __setup_irq(irq, desc, action);
2593 if (retval)
2594 goto err_irq_setup;
2595
2596 raw_spin_lock_irqsave(&desc->lock, flags);
2597 desc->istate |= IRQS_NMI;
2598 raw_spin_unlock_irqrestore(&desc->lock, flags);
2599
2600 return 0;
2601
2602 err_irq_setup:
2603 irq_chip_pm_put(&desc->irq_data);
2604 err_out:
2605 kfree(action);
2606
2607 return retval;
2608 }
2609
2610 /**
2611 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
2612 * @irq: Interrupt line to prepare for NMI delivery
2613 *
2614 * This call prepares an interrupt line to deliver NMI on the current CPU,
2615 * before that interrupt line gets enabled with enable_percpu_nmi().
2616 *
2617 * As a CPU local operation, this should be called from non-preemptible
2618 * context.
2619 *
2620 * If the interrupt line cannot be used to deliver NMIs, the function
2621 * will fail, returning a negative value.
2622 */
2623 int prepare_percpu_nmi(unsigned int irq)
2624 {
2625 unsigned long flags;
2626 struct irq_desc *desc;
2627 int ret = 0;
2628
2629 WARN_ON(preemptible());
2630
2631 desc = irq_get_desc_lock(irq, &flags,
2632 IRQ_GET_DESC_CHECK_PERCPU);
2633 if (!desc)
2634 return -EINVAL;
2635
2636 if (WARN(!(desc->istate & IRQS_NMI),
2637 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2638 irq)) {
2639 ret = -EINVAL;
2640 goto out;
2641 }
2642
2643 ret = irq_nmi_setup(desc);
2644 if (ret) {
2645 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2646 goto out;
2647 }
2648
2649 out:
2650 irq_put_desc_unlock(desc, flags);
2651 return ret;
2652 }
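
/*
 * Illustrative sketch, not part of this file: the per-CPU NMI lifecycle.
 * request_percpu_nmi() is done once; prepare_percpu_nmi() and
 * enable_percpu_nmi() are CPU-local and must run with preemption disabled
 * on every CPU that should receive the NMI, e.g. from CPU hotplug
 * "starting"/"dying" callbacks.  The grault_* names are hypothetical.
 */
static int grault_cpu_starting(unsigned int irq)
{
	int ret;

	/* Runs on the CPU being brought up, with preemption disabled. */
	ret = prepare_percpu_nmi(irq);
	if (ret)
		return ret;

	enable_percpu_nmi(irq, IRQ_TYPE_NONE);
	return 0;
}

static int grault_cpu_dying(unsigned int irq)
{
	/* Reverse order on the CPU going down. */
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
	return 0;
}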
2653
2654 /**
2655 * teardown_percpu_nmi - undoes NMI setup of IRQ line
2656 * @irq: Interrupt line from which CPU local NMI configuration should be
2657 * removed
2658 *
2659 * This call undoes the setup done by prepare_percpu_nmi().
2660 *
2661 * IRQ line should not be enabled for the current CPU.
2662 *
2663 * As a CPU local operation, this should be called from non-preemptible
2664 * context.
2665 */
2666 void teardown_percpu_nmi(unsigned int irq)
2667 {
2668 unsigned long flags;
2669 struct irq_desc *desc;
2670
2671 WARN_ON(preemptible());
2672
2673 desc = irq_get_desc_lock(irq, &flags,
2674 IRQ_GET_DESC_CHECK_PERCPU);
2675 if (!desc)
2676 return;
2677
2678 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2679 goto out;
2680
2681 irq_nmi_teardown(desc);
2682 out:
2683 irq_put_desc_unlock(desc, flags);
2684 }
2685
2686 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2687 bool *state)
2688 {
2689 struct irq_chip *chip;
2690 int err = -EINVAL;
2691
2692 do {
2693 chip = irq_data_get_irq_chip(data);
2694 if (WARN_ON_ONCE(!chip))
2695 return -ENODEV;
2696 if (chip->irq_get_irqchip_state)
2697 break;
2698 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2699 data = data->parent_data;
2700 #else
2701 data = NULL;
2702 #endif
2703 } while (data);
2704
2705 if (data)
2706 err = chip->irq_get_irqchip_state(data, which, state);
2707 return err;
2708 }
2709
2710 /**
2711 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2712 * @irq: Interrupt line that is forwarded to a VM
2713 * @which: One of IRQCHIP_STATE_* the caller wants to know about
2714 * @state: a pointer to a boolean where the state is to be stored
2715 *
2716 * This call snapshots the internal irqchip state of an
2717 * interrupt, returning into @state the bit corresponding to the
2718 * state selected by @which.
2719 *
2720 * This function should be called with preemption disabled if the
2721 * interrupt controller has per-cpu registers.
2722 */
2723 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2724 bool *state)
2725 {
2726 struct irq_desc *desc;
2727 struct irq_data *data;
2728 unsigned long flags;
2729 int err = -EINVAL;
2730
2731 desc = irq_get_desc_buslock(irq, &flags, 0);
2732 if (!desc)
2733 return err;
2734
2735 data = irq_desc_get_irq_data(desc);
2736
2737 err = __irq_get_irqchip_state(data, which, state);
2738
2739 irq_put_desc_busunlock(desc, flags);
2740 return err;
2741 }
2742 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
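
/*
 * Illustrative sketch, not part of this file: polling whether a forwarded
 * interrupt is still pending at the irqchip, as a hypervisor-side user of
 * this API might.  The garply_* name is hypothetical.
 */
static bool garply_irq_pending(unsigned int irq)
{
	bool pending = false;

	/* With per-CPU chip registers this needs preemption disabled. */
	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
		return false;	/* no chip support: report not pending */

	return pending;
}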
2743
2744 /**
2745 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2746 * @irq: Interrupt line that is forwarded to a VM
2747 * @which: State to be restored (one of IRQCHIP_STATE_*)
2748 * @val: Value corresponding to @which
2749 *
2750 * This call sets the internal irqchip state of an interrupt,
2751 * depending on the value of @which.
2752 *
2753 * This function should be called with preemption disabled if the
2754 * interrupt controller has per-cpu registers.
2755 */
2756 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2757 bool val)
2758 {
2759 struct irq_desc *desc;
2760 struct irq_data *data;
2761 struct irq_chip *chip;
2762 unsigned long flags;
2763 int err = -EINVAL;
2764
2765 desc = irq_get_desc_buslock(irq, &flags, 0);
2766 if (!desc)
2767 return err;
2768
2769 data = irq_desc_get_irq_data(desc);
2770
2771 do {
2772 chip = irq_data_get_irq_chip(data);
2773 if (WARN_ON_ONCE(!chip)) {
2774 err = -ENODEV;
2775 goto out_unlock;
2776 }
2777 if (chip->irq_set_irqchip_state)
2778 break;
2779 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2780 data = data->parent_data;
2781 #else
2782 data = NULL;
2783 #endif
2784 } while (data);
2785
2786 if (data)
2787 err = chip->irq_set_irqchip_state(data, which, val);
2788
2789 out_unlock:
2790 irq_put_desc_busunlock(desc, flags);
2791 return err;
2792 }
2793 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
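
/*
 * Illustrative sketch, not part of this file: restoring the pending bit
 * of a forwarded interrupt, e.g. when re-injecting saved VM interrupt
 * state.  The waldo_* name is hypothetical.
 */
static int waldo_reinject_irq(unsigned int irq)
{
	/* Mark the interrupt pending again at the hardware level. */
	return irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
}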
2794