// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Imagination Technologies Ltd
 * Author: Qais Yousef <qais.yousef@imgtec.com>
 *
 * This file contains driver APIs to the IPI subsystem.
 */

#define pr_fmt(fmt) "genirq/ipi: " fmt

#include <linux/irqdomain.h>
#include <linux/irq.h>
/**
 * irq_reserve_ipi() - Setup an IPI to destination cpumask
 * @domain: IPI domain
 * @dest: cpumask of CPUs which can receive the IPI
 *
 * Allocate a virq that can be used to send IPI to any CPU in dest mask.
 *
 * Returns the Linux IRQ number on success and a negative error code on
 * failure.
 */
int irq_reserve_ipi(struct irq_domain *domain,
		    const struct cpumask *dest)
{
	unsigned int nr_irqs, offset;
	struct irq_data *data;
	int virq, i;

	if (!domain || !irq_domain_is_ipi(domain)) {
		pr_warn("Reservation on a non IPI domain\n");
		return -EINVAL;
	}

	if (!cpumask_subset(dest, cpu_possible_mask)) {
		pr_warn("Reservation is not in possible_cpu_mask\n");
		return -EINVAL;
	}

	nr_irqs = cpumask_weight(dest);
	if (!nr_irqs) {
		pr_warn("Reservation for empty destination mask\n");
		return -EINVAL;
	}

	if (irq_domain_is_ipi_single(domain)) {
		/*
		 * If the underlying implementation uses a single HW irq on
		 * all cpus then we only need a single Linux irq number for
		 * it. We have no restrictions vs. the destination mask. The
		 * underlying implementation can deal with holes nicely.
		 */
		nr_irqs = 1;
		offset = 0;
	} else {
		unsigned int next;

		/*
		 * The IPI requires a separate HW irq on each CPU. We require
		 * that the destination mask is consecutive. If an
		 * implementation needs to support holes, it can reserve
		 * several IPI ranges.
		 */
		offset = cpumask_first(dest);
		/*
		 * Find a hole and if found look for another set bit after the
		 * hole. For now we don't support this scenario.
		 */
		next = cpumask_next_zero(offset, dest);
		if (next < nr_cpu_ids)
			next = cpumask_next(next, dest);
		if (next < nr_cpu_ids) {
			pr_warn("Destination mask has holes\n");
			return -EINVAL;
		}
	}

	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc descs\n");
		return -ENOMEM;
	}

	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
				       (void *) dest, true, NULL);

	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
		goto free_descs;
	}

	for (i = 0; i < nr_irqs; i++) {
		data = irq_get_irq_data(virq + i);
		cpumask_copy(data->common->affinity, dest);
		data->common->ipi_offset = offset;
		irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
	}
	return virq;

free_descs:
	irq_free_descs(virq, nr_irqs);
	return -EBUSY;
}
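
/*
 * Example: a minimal sketch of reserving a per-cpu IPI range from an
 * irqchip driver. "ipi_domain" and example_reserve() are illustrative
 * names, not part of this file or any in-tree driver.
 *
 *	static int example_reserve(struct irq_domain *ipi_domain)
 *	{
 *		struct cpumask dest;
 *		int virq;
 *
 *		cpumask_clear(&dest);
 *		cpumask_set_cpu(0, &dest);
 *		cpumask_set_cpu(1, &dest);
 *
 *		// For a per-cpu IPI domain this allocates two consecutive
 *		// Linux IRQs (virq for CPU0, virq + 1 for CPU1) and records
 *		// ipi_offset = 0 in each irq_data.
 *		virq = irq_reserve_ipi(ipi_domain, &dest);
 *		if (virq <= 0)
 *			return -ENODEV;
 *
 *		return virq;
 *	}
 */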

/**
 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
 * @irq: Linux IRQ number to be destroyed
 * @dest: cpumask of CPUs which should have the IPI removed
 *
 * The IPIs allocated with irq_reserve_ipi() are returned to the system,
 * destroying all virqs associated with them.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
	struct irq_domain *domain;
	unsigned int nr_irqs;

	if (!irq || !data || !ipimask)
		return -EINVAL;

	domain = data->domain;
	if (WARN_ON(domain == NULL))
		return -EINVAL;

	if (!irq_domain_is_ipi(domain)) {
		pr_warn("Trying to destroy a non IPI domain!\n");
		return -EINVAL;
	}

	if (WARN_ON(!cpumask_subset(dest, ipimask)))
		/*
		 * Must be destroying a subset of CPUs to which this IPI
		 * was set up to target.
		 */
		return -EINVAL;

	if (irq_domain_is_ipi_per_cpu(domain)) {
		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
		nr_irqs = cpumask_weight(dest);
	} else {
		nr_irqs = 1;
	}

	irq_domain_free_irqs(irq, nr_irqs);
	return 0;
}
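
/*
 * Example: a minimal teardown sketch matching a prior reservation.
 * example_destroy(), "virq" and "dest" stand in for the value returned
 * by and the mask passed to irq_reserve_ipi(); the names are
 * illustrative only.
 *
 *	static void example_destroy(unsigned int virq,
 *				    const struct cpumask *dest)
 *	{
 *		// dest must be a subset of the reservation mask; passing
 *		// the full mask releases every virq in the range.
 *		if (irq_destroy_ipi(virq, dest))
 *			pr_warn("failed to destroy example IPI\n");
 *	}
 */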

/**
 * ipi_get_hwirq - Get the hwirq associated with an IPI to a CPU
 * @irq: Linux IRQ number
 * @cpu: the target CPU
 *
 * When dealing with coprocessor IPIs, we need to inform the coprocessor of
 * the hwirq it needs to use to receive and send IPIs.
 *
 * Returns hwirq value on success and INVALID_HWIRQ on failure.
 */
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;

	if (!data || !ipimask || cpu >= nr_cpu_ids)
		return INVALID_HWIRQ;

	if (!cpumask_test_cpu(cpu, ipimask))
		return INVALID_HWIRQ;

	/*
	 * Get the real hardware irq number if the underlying implementation
	 * uses a separate irq per cpu. If the underlying implementation uses
	 * a single hardware irq for all cpus then the IPI send mechanism
	 * needs to take care of the cpu destinations.
	 */
	if (irq_domain_is_ipi_per_cpu(data->domain))
		data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);

	return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
}
EXPORT_SYMBOL_GPL(ipi_get_hwirq);
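
/*
 * Example: a hedged sketch of exporting the raw hwirq to firmware on a
 * coprocessor. example_tell_coproc() is a made-up stand-in for whatever
 * mailbox or shared-memory channel the platform actually provides.
 *
 *	static int example_export_hwirq(unsigned int virq, unsigned int cpu)
 *	{
 *		irq_hw_number_t hwirq = ipi_get_hwirq(virq, cpu);
 *
 *		if (hwirq == INVALID_HWIRQ)
 *			return -EINVAL;
 *
 *		// Tell the coprocessor which HW irq line to raise/expect.
 *		return example_tell_coproc(cpu, hwirq);
 *	}
 */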

static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
			   const struct cpumask *dest, unsigned int cpu)
{
	struct cpumask *ipimask = irq_data_get_affinity_mask(data);

	if (!chip || !ipimask)
		return -EINVAL;

	if (!chip->ipi_send_single && !chip->ipi_send_mask)
		return -EINVAL;

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (dest) {
		if (!cpumask_subset(dest, ipimask))
			return -EINVAL;
	} else {
		if (!cpumask_test_cpu(cpu, ipimask))
			return -EINVAL;
	}
	return 0;
}

/**
 * __ipi_send_single - send an IPI to a target Linux SMP CPU
 * @desc: pointer to irq_desc of the IRQ
 * @cpu: destination CPU, must be in the destination mask passed to
 *	 irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Returns zero on success and negative error number on failure.
 */
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;
#endif
	if (!chip->ipi_send_single) {
		chip->ipi_send_mask(data, cpumask_of(cpu));
		return 0;
	}

	/* FIXME: Store this information in irqdata flags */
	if (irq_domain_is_ipi_per_cpu(data->domain) &&
	    cpu != data->common->ipi_offset) {
		/* use the correct data for that cpu */
		unsigned int irq = data->irq + cpu - data->common->ipi_offset;

		data = irq_get_irq_data(irq);
	}
	chip->ipi_send_single(data, cpu);
	return 0;
}
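
/*
 * Worked example of the per-cpu virq arithmetic above: for an IPI
 * reserved on CPUs 4-7, ipi_offset == 4 and the reservation owns virqs
 * [base .. base + 3]. Sending to CPU 6 thus resolves to
 * irq = base + 6 - 4 = base + 2, whose irq_data carries the hwirq wired
 * to CPU 6.
 */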

/**
 * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
 * @desc: pointer to irq_desc of the IRQ
 * @dest: destination CPU(s), must be a subset of the mask passed to
 *	  irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Returns zero on success and negative error number on failure.
 */
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned int cpu;

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;
#endif
	if (chip->ipi_send_mask) {
		chip->ipi_send_mask(data, dest);
		return 0;
	}

	if (irq_domain_is_ipi_per_cpu(data->domain)) {
		unsigned int base = data->irq;

		for_each_cpu(cpu, dest) {
			unsigned int irq = base + cpu - data->common->ipi_offset;

			data = irq_get_irq_data(irq);
			chip->ipi_send_single(data, cpu);
		}
	} else {
		for_each_cpu(cpu, dest)
			chip->ipi_send_single(data, cpu);
	}
	return 0;
}

/**
 * ipi_send_single - Send an IPI to a single CPU
 * @virq: Linux IRQ number from irq_reserve_ipi()
 * @cpu: destination CPU, must be in the destination mask passed to
 *	 irq_reserve_ipi()
 *
 * Returns zero on success and negative error number on failure.
 */
int ipi_send_single(unsigned int virq, unsigned int cpu)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;

	return __ipi_send_single(desc, cpu);
}
EXPORT_SYMBOL_GPL(ipi_send_single);

/**
 * ipi_send_mask - Send an IPI to target CPU(s)
 * @virq: Linux IRQ number from irq_reserve_ipi()
 * @dest: destination CPU(s), must be a subset of the mask passed to
 *	  irq_reserve_ipi()
 *
 * Returns zero on success and negative error number on failure.
 */
int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;

	return __ipi_send_mask(desc, dest);
}
EXPORT_SYMBOL_GPL(ipi_send_mask);
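
/*
 * Example: a minimal sketch of driving the exported senders, assuming
 * "virq" came from irq_reserve_ipi() and "reserved" is (a subset of)
 * the mask used there. example_kick() is an illustrative name only.
 *
 *	static void example_kick(unsigned int virq,
 *				 const struct cpumask *reserved)
 *	{
 *		// Kick a single CPU from the reserved mask...
 *		if (ipi_send_single(virq, cpumask_first(reserved)))
 *			pr_warn("single IPI failed\n");
 *
 *		// ...or all CPUs in the reserved mask at once.
 *		if (ipi_send_mask(virq, reserved))
 *			pr_warn("mask IPI failed\n");
 *	}
 */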