xref: /OK3568_Linux_fs/kernel/drivers/irqchip/irq-bcm7038-l1.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Broadcom BCM7038 style Level 1 interrupt controller driver
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2014 Broadcom Corporation
6*4882a593Smuzhiyun  * Author: Kevin Cernekee
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #define pr_fmt(fmt)	KBUILD_MODNAME	": " fmt
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <linux/bitops.h>
12*4882a593Smuzhiyun #include <linux/kernel.h>
13*4882a593Smuzhiyun #include <linux/init.h>
14*4882a593Smuzhiyun #include <linux/interrupt.h>
15*4882a593Smuzhiyun #include <linux/io.h>
16*4882a593Smuzhiyun #include <linux/ioport.h>
17*4882a593Smuzhiyun #include <linux/irq.h>
18*4882a593Smuzhiyun #include <linux/irqdomain.h>
19*4882a593Smuzhiyun #include <linux/module.h>
20*4882a593Smuzhiyun #include <linux/of.h>
21*4882a593Smuzhiyun #include <linux/of_irq.h>
22*4882a593Smuzhiyun #include <linux/of_address.h>
23*4882a593Smuzhiyun #include <linux/of_platform.h>
24*4882a593Smuzhiyun #include <linux/platform_device.h>
25*4882a593Smuzhiyun #include <linux/slab.h>
26*4882a593Smuzhiyun #include <linux/smp.h>
27*4882a593Smuzhiyun #include <linux/types.h>
28*4882a593Smuzhiyun #include <linux/irqchip.h>
29*4882a593Smuzhiyun #include <linux/irqchip/chained_irq.h>
30*4882a593Smuzhiyun #include <linux/syscore_ops.h>
31*4882a593Smuzhiyun #ifdef CONFIG_ARM
32*4882a593Smuzhiyun #include <asm/smp_plat.h>
33*4882a593Smuzhiyun #endif
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun #define IRQS_PER_WORD		32
36*4882a593Smuzhiyun #define REG_BYTES_PER_IRQ_WORD	(sizeof(u32) * 4)
37*4882a593Smuzhiyun #define MAX_WORDS		8
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun struct bcm7038_l1_cpu;
40*4882a593Smuzhiyun 
/* Per-controller state, shared by all CPUs' register banks. */
struct bcm7038_l1_chip {
	raw_spinlock_t		lock;		/* protects mask_cache/affinity/wake_mask */
	unsigned int		n_words;	/* number of 32-bit IRQ words per bank */
	struct irq_domain	*domain;
	struct bcm7038_l1_cpu	*cpus[NR_CPUS];	/* per-CPU register mapping + cache */
#ifdef CONFIG_PM_SLEEP
	struct list_head	list;		/* link in bcm7038_l1_intcs_list */
	u32			wake_mask[MAX_WORDS];	/* IRQs armed as wakeup sources */
#endif
	u32			irq_fwd_mask[MAX_WORDS];	/* bits forwarded to another controller */
	u8			affinity[MAX_WORDS * IRQS_PER_WORD];	/* owning CPU per hwirq */
};
53*4882a593Smuzhiyun 
/* Per-CPU view of the controller: its register window and mask shadow. */
struct bcm7038_l1_cpu {
	void __iomem		*map_base;	/* ioremapped register base for this CPU */
	u32			mask_cache[];	/* shadow of the hardware mask, n_words entries */
};
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun /*
60*4882a593Smuzhiyun  * STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR are packed one right after another:
61*4882a593Smuzhiyun  *
62*4882a593Smuzhiyun  * 7038:
63*4882a593Smuzhiyun  *   0x1000_1400: W0_STATUS
64*4882a593Smuzhiyun  *   0x1000_1404: W1_STATUS
65*4882a593Smuzhiyun  *   0x1000_1408: W0_MASK_STATUS
66*4882a593Smuzhiyun  *   0x1000_140c: W1_MASK_STATUS
67*4882a593Smuzhiyun  *   0x1000_1410: W0_MASK_SET
68*4882a593Smuzhiyun  *   0x1000_1414: W1_MASK_SET
69*4882a593Smuzhiyun  *   0x1000_1418: W0_MASK_CLEAR
70*4882a593Smuzhiyun  *   0x1000_141c: W1_MASK_CLEAR
71*4882a593Smuzhiyun  *
72*4882a593Smuzhiyun  * 7445:
73*4882a593Smuzhiyun  *   0xf03e_1500: W0_STATUS
74*4882a593Smuzhiyun  *   0xf03e_1504: W1_STATUS
75*4882a593Smuzhiyun  *   0xf03e_1508: W2_STATUS
76*4882a593Smuzhiyun  *   0xf03e_150c: W3_STATUS
77*4882a593Smuzhiyun  *   0xf03e_1510: W4_STATUS
78*4882a593Smuzhiyun  *   0xf03e_1514: W0_MASK_STATUS
79*4882a593Smuzhiyun  *   0xf03e_1518: W1_MASK_STATUS
80*4882a593Smuzhiyun  *   [...]
81*4882a593Smuzhiyun  */
82*4882a593Smuzhiyun 
reg_status(struct bcm7038_l1_chip * intc,unsigned int word)83*4882a593Smuzhiyun static inline unsigned int reg_status(struct bcm7038_l1_chip *intc,
84*4882a593Smuzhiyun 				      unsigned int word)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun 	return (0 * intc->n_words + word) * sizeof(u32);
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun 
reg_mask_status(struct bcm7038_l1_chip * intc,unsigned int word)89*4882a593Smuzhiyun static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc,
90*4882a593Smuzhiyun 					   unsigned int word)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun 	return (1 * intc->n_words + word) * sizeof(u32);
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun 
reg_mask_set(struct bcm7038_l1_chip * intc,unsigned int word)95*4882a593Smuzhiyun static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc,
96*4882a593Smuzhiyun 					unsigned int word)
97*4882a593Smuzhiyun {
98*4882a593Smuzhiyun 	return (2 * intc->n_words + word) * sizeof(u32);
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun 
reg_mask_clr(struct bcm7038_l1_chip * intc,unsigned int word)101*4882a593Smuzhiyun static inline unsigned int reg_mask_clr(struct bcm7038_l1_chip *intc,
102*4882a593Smuzhiyun 					unsigned int word)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun 	return (3 * intc->n_words + word) * sizeof(u32);
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun 
l1_readl(void __iomem * reg)107*4882a593Smuzhiyun static inline u32 l1_readl(void __iomem *reg)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
110*4882a593Smuzhiyun 		return ioread32be(reg);
111*4882a593Smuzhiyun 	else
112*4882a593Smuzhiyun 		return readl(reg);
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
/* Register write honoring the native endianness of MIPS BE parts. */
static inline void l1_writel(u32 val, void __iomem *reg)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		iowrite32be(val, reg);
		return;
	}

	writel(val, reg);
}
122*4882a593Smuzhiyun 
/*
 * Chained handler for the parent interrupt: scan each word's STATUS
 * register on the local CPU's bank and dispatch every pending, unmasked
 * L1 interrupt into our irq_domain.
 */
static void bcm7038_l1_irq_handle(struct irq_desc *desc)
{
	struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc);
	struct bcm7038_l1_cpu *cpu;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int idx;

#ifdef CONFIG_SMP
	/* each CPU has its own register bank; pick ours by physical id */
	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
	cpu = intc->cpus[0];
#endif

	chained_irq_enter(chip, desc);

	for (idx = 0; idx < intc->n_words; idx++) {
		int base = idx * IRQS_PER_WORD;
		unsigned long pending, flags;
		int hwirq;

		/* snapshot STATUS & ~mask atomically w.r.t. mask updates */
		raw_spin_lock_irqsave(&intc->lock, flags);
		pending = l1_readl(cpu->map_base + reg_status(intc, idx)) &
			  ~cpu->mask_cache[idx];
		raw_spin_unlock_irqrestore(&intc->lock, flags);

		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
			generic_handle_irq(irq_find_mapping(intc->domain,
							    base + hwirq));
		}
	}

	chained_irq_exit(chip, desc);
}
156*4882a593Smuzhiyun 
__bcm7038_l1_unmask(struct irq_data * d,unsigned int cpu_idx)157*4882a593Smuzhiyun static void __bcm7038_l1_unmask(struct irq_data *d, unsigned int cpu_idx)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun 	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
160*4882a593Smuzhiyun 	u32 word = d->hwirq / IRQS_PER_WORD;
161*4882a593Smuzhiyun 	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	intc->cpus[cpu_idx]->mask_cache[word] &= ~mask;
164*4882a593Smuzhiyun 	l1_writel(mask, intc->cpus[cpu_idx]->map_base +
165*4882a593Smuzhiyun 			reg_mask_clr(intc, word));
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun 
__bcm7038_l1_mask(struct irq_data * d,unsigned int cpu_idx)168*4882a593Smuzhiyun static void __bcm7038_l1_mask(struct irq_data *d, unsigned int cpu_idx)
169*4882a593Smuzhiyun {
170*4882a593Smuzhiyun 	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
171*4882a593Smuzhiyun 	u32 word = d->hwirq / IRQS_PER_WORD;
172*4882a593Smuzhiyun 	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	intc->cpus[cpu_idx]->mask_cache[word] |= mask;
175*4882a593Smuzhiyun 	l1_writel(mask, intc->cpus[cpu_idx]->map_base +
176*4882a593Smuzhiyun 			reg_mask_set(intc, word));
177*4882a593Smuzhiyun }
178*4882a593Smuzhiyun 
bcm7038_l1_unmask(struct irq_data * d)179*4882a593Smuzhiyun static void bcm7038_l1_unmask(struct irq_data *d)
180*4882a593Smuzhiyun {
181*4882a593Smuzhiyun 	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
182*4882a593Smuzhiyun 	unsigned long flags;
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 	raw_spin_lock_irqsave(&intc->lock, flags);
185*4882a593Smuzhiyun 	__bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);
186*4882a593Smuzhiyun 	raw_spin_unlock_irqrestore(&intc->lock, flags);
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun 
bcm7038_l1_mask(struct irq_data * d)189*4882a593Smuzhiyun static void bcm7038_l1_mask(struct irq_data *d)
190*4882a593Smuzhiyun {
191*4882a593Smuzhiyun 	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
192*4882a593Smuzhiyun 	unsigned long flags;
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	raw_spin_lock_irqsave(&intc->lock, flags);
195*4882a593Smuzhiyun 	__bcm7038_l1_mask(d, intc->affinity[d->hwirq]);
196*4882a593Smuzhiyun 	raw_spin_unlock_irqrestore(&intc->lock, flags);
197*4882a593Smuzhiyun }
198*4882a593Smuzhiyun 
/*
 * Move an interrupt to a different CPU's register bank.  The IRQ is
 * masked on the old CPU, the affinity bookkeeping is updated, and it is
 * unmasked on the new CPU only if it was enabled before the move.
 */
static int bcm7038_l1_set_affinity(struct irq_data *d,
				   const struct cpumask *dest,
				   bool force)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;
	irq_hw_number_t hw = d->hwirq;
	u32 word = hw / IRQS_PER_WORD;
	u32 mask = BIT(hw % IRQS_PER_WORD);
	unsigned int first_cpu = cpumask_any_and(dest, cpu_online_mask);
	bool was_disabled;

	raw_spin_lock_irqsave(&intc->lock, flags);

	/* a set bit in mask_cache means the IRQ is currently masked */
	was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &
			  mask);
	__bcm7038_l1_mask(d, intc->affinity[hw]);
	intc->affinity[hw] = first_cpu;
	if (!was_disabled)
		__bcm7038_l1_unmask(d, first_cpu);

	raw_spin_unlock_irqrestore(&intc->lock, flags);
	irq_data_update_effective_affinity(d, cpumask_of(first_cpu));

	return 0;
}
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun #ifdef CONFIG_SMP
/*
 * CPU hotplug callback: if the CPU going offline is in this IRQ's
 * affinity mask, migrate the IRQ away from it.
 */
static void bcm7038_l1_cpu_offline(struct irq_data *d)
{
	struct cpumask *mask = irq_data_get_affinity_mask(d);
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	/* This CPU was not on the affinity mask */
	if (!cpumask_test_cpu(cpu, mask))
		return;

	if (cpumask_weight(mask) > 1) {
		/*
		 * Multiple CPU affinity, remove this CPU from the affinity
		 * mask
		 */
		cpumask_copy(&new_affinity, mask);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Only CPU, put on the lowest online CPU */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(d, &new_affinity, false);
}
251*4882a593Smuzhiyun #endif
252*4882a593Smuzhiyun 
/*
 * Map and initialize one CPU's register bank (DT "reg" entry @idx) and
 * hook the matching parent interrupt to the chained handler.
 *
 * On failure the per-CPU structure may already be stored in
 * intc->cpus[idx]; the caller is responsible for cleanup.
 */
static int __init bcm7038_l1_init_one(struct device_node *dn,
				      unsigned int idx,
				      struct bcm7038_l1_chip *intc)
{
	struct resource res;
	resource_size_t sz;
	struct bcm7038_l1_cpu *cpu;
	unsigned int i, n_words, parent_irq;
	int ret;

	if (of_address_to_resource(dn, idx, &res))
		return -EINVAL;
	sz = resource_size(&res);
	/* each IRQ word occupies 4 packed registers (see layout above) */
	n_words = sz / REG_BYTES_PER_IRQ_WORD;

	/* all CPU banks must expose the same number of words */
	if (n_words > MAX_WORDS)
		return -EINVAL;
	else if (!intc->n_words)
		intc->n_words = n_words;
	else if (intc->n_words != n_words)
		return -EINVAL;

	/* optional property; -EINVAL from the read means "not present" */
	ret = of_property_read_u32_array(dn , "brcm,int-fwd-mask",
					 intc->irq_fwd_mask, n_words);
	if (ret != 0 && ret != -EINVAL) {
		/* property exists but has the wrong number of words */
		pr_err("invalid brcm,int-fwd-mask property\n");
		return -EINVAL;
	}

	/* trailing flexible array holds one mask_cache word per IRQ word */
	cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
					GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	cpu->map_base = ioremap(res.start, sz);
	if (!cpu->map_base)
		return -ENOMEM;

	/*
	 * Start with everything masked except the forwarded interrupts,
	 * which must stay permanently unmasked for the other controller.
	 */
	for (i = 0; i < n_words; i++) {
		l1_writel(~intc->irq_fwd_mask[i],
			  cpu->map_base + reg_mask_set(intc, i));
		l1_writel(intc->irq_fwd_mask[i],
			  cpu->map_base + reg_mask_clr(intc, i));
		cpu->mask_cache[i] = ~intc->irq_fwd_mask[i];
	}

	parent_irq = irq_of_parse_and_map(dn, idx);
	if (!parent_irq) {
		pr_err("failed to map parent interrupt %d\n", parent_irq);
		return -EINVAL;
	}

	if (of_property_read_bool(dn, "brcm,irq-can-wake"))
		enable_irq_wake(parent_irq);

	irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
					 intc);

	return 0;
}
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
316*4882a593Smuzhiyun /*
317*4882a593Smuzhiyun  * We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is
318*4882a593Smuzhiyun  * used because the struct chip_type suspend/resume hooks are not called
319*4882a593Smuzhiyun  * unless chip_type is hooked onto a generic_chip. Since this driver does
320*4882a593Smuzhiyun  * not use generic_chip, we need to manually hook our resume/suspend to
321*4882a593Smuzhiyun  * syscore_ops.
322*4882a593Smuzhiyun  */
323*4882a593Smuzhiyun static LIST_HEAD(bcm7038_l1_intcs_list);
324*4882a593Smuzhiyun static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock);
325*4882a593Smuzhiyun 
/*
 * Syscore suspend: on the boot CPU's bank, unmask only the interrupts
 * armed for wakeup (plus the always-forwarded ones) and mask the rest.
 * Runs with interrupts disabled on a single CPU, so the intc list is
 * traversed without taking bcm7038_l1_intcs_lock.
 */
static int bcm7038_l1_suspend(void)
{
	struct bcm7038_l1_chip *intc;
	int boot_cpu, word;
	u32 val;

	/* Wakeup interrupt should only come from the boot cpu */
#ifdef CONFIG_SMP
	boot_cpu = cpu_logical_map(0);
#else
	boot_cpu = 0;
#endif

	list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
		for (word = 0; word < intc->n_words; word++) {
			val = intc->wake_mask[word] | intc->irq_fwd_mask[word];
			l1_writel(~val,
				intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
			l1_writel(val,
				intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
		}
	}

	return 0;
}
351*4882a593Smuzhiyun 
/*
 * Syscore resume: restore the boot CPU's hardware mask registers from
 * the mask_cache shadow that suspend left untouched.
 */
static void bcm7038_l1_resume(void)
{
	struct bcm7038_l1_chip *intc;
	int boot_cpu, word;

#ifdef CONFIG_SMP
	boot_cpu = cpu_logical_map(0);
#else
	boot_cpu = 0;
#endif

	list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
		for (word = 0; word < intc->n_words; word++) {
			l1_writel(intc->cpus[boot_cpu]->mask_cache[word],
				intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
			l1_writel(~intc->cpus[boot_cpu]->mask_cache[word],
				intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
		}
	}
}
372*4882a593Smuzhiyun 
/* Registered once, when the first intc is added to the list. */
static struct syscore_ops bcm7038_l1_syscore_ops = {
	.suspend	= bcm7038_l1_suspend,
	.resume		= bcm7038_l1_resume,
};
377*4882a593Smuzhiyun 
bcm7038_l1_set_wake(struct irq_data * d,unsigned int on)378*4882a593Smuzhiyun static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on)
379*4882a593Smuzhiyun {
380*4882a593Smuzhiyun 	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
381*4882a593Smuzhiyun 	unsigned long flags;
382*4882a593Smuzhiyun 	u32 word = d->hwirq / IRQS_PER_WORD;
383*4882a593Smuzhiyun 	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 	raw_spin_lock_irqsave(&intc->lock, flags);
386*4882a593Smuzhiyun 	if (on)
387*4882a593Smuzhiyun 		intc->wake_mask[word] |= mask;
388*4882a593Smuzhiyun 	else
389*4882a593Smuzhiyun 		intc->wake_mask[word] &= ~mask;
390*4882a593Smuzhiyun 	raw_spin_unlock_irqrestore(&intc->lock, flags);
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	return 0;
393*4882a593Smuzhiyun }
394*4882a593Smuzhiyun #endif
395*4882a593Smuzhiyun 
/* irq_chip callbacks for every IRQ mapped through this controller. */
static struct irq_chip bcm7038_l1_irq_chip = {
	.name			= "bcm7038-l1",
	.irq_mask		= bcm7038_l1_mask,
	.irq_unmask		= bcm7038_l1_unmask,
	.irq_set_affinity	= bcm7038_l1_set_affinity,
#ifdef CONFIG_SMP
	.irq_cpu_offline	= bcm7038_l1_cpu_offline,
#endif
#ifdef CONFIG_PM_SLEEP
	.irq_set_wake		= bcm7038_l1_set_wake,
#endif
};
408*4882a593Smuzhiyun 
/*
 * irq_domain .map: wire up a newly mapped hwirq, rejecting interrupts
 * that are permanently forwarded to another controller.
 */
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hw_irq)
{
	struct bcm7038_l1_chip *intc = d->host_data;
	u32 word = hw_irq / IRQS_PER_WORD;
	u32 bit = BIT(hw_irq % IRQS_PER_WORD);

	if (intc->irq_fwd_mask[word] & bit)
		return -EPERM;

	irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, intc);
	/* each IRQ targets exactly one CPU at a time (see set_affinity) */
	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
	return 0;
}
424*4882a593Smuzhiyun 
/* One-cell DT interrupt specifier: cell 0 is the hwirq number. */
static const struct irq_domain_ops bcm7038_l1_domain_ops = {
	.xlate			= irq_domain_xlate_onecell,
	.map			= bcm7038_l1_map,
};
429*4882a593Smuzhiyun 
bcm7038_l1_of_init(struct device_node * dn,struct device_node * parent)430*4882a593Smuzhiyun static int __init bcm7038_l1_of_init(struct device_node *dn,
431*4882a593Smuzhiyun 			      struct device_node *parent)
432*4882a593Smuzhiyun {
433*4882a593Smuzhiyun 	struct bcm7038_l1_chip *intc;
434*4882a593Smuzhiyun 	int idx, ret;
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 	intc = kzalloc(sizeof(*intc), GFP_KERNEL);
437*4882a593Smuzhiyun 	if (!intc)
438*4882a593Smuzhiyun 		return -ENOMEM;
439*4882a593Smuzhiyun 
440*4882a593Smuzhiyun 	raw_spin_lock_init(&intc->lock);
441*4882a593Smuzhiyun 	for_each_possible_cpu(idx) {
442*4882a593Smuzhiyun 		ret = bcm7038_l1_init_one(dn, idx, intc);
443*4882a593Smuzhiyun 		if (ret < 0) {
444*4882a593Smuzhiyun 			if (idx)
445*4882a593Smuzhiyun 				break;
446*4882a593Smuzhiyun 			pr_err("failed to remap intc L1 registers\n");
447*4882a593Smuzhiyun 			goto out_free;
448*4882a593Smuzhiyun 		}
449*4882a593Smuzhiyun 	}
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 	intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
452*4882a593Smuzhiyun 					     &bcm7038_l1_domain_ops,
453*4882a593Smuzhiyun 					     intc);
454*4882a593Smuzhiyun 	if (!intc->domain) {
455*4882a593Smuzhiyun 		ret = -ENOMEM;
456*4882a593Smuzhiyun 		goto out_unmap;
457*4882a593Smuzhiyun 	}
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
460*4882a593Smuzhiyun 	/* Add bcm7038_l1_chip into a list */
461*4882a593Smuzhiyun 	raw_spin_lock(&bcm7038_l1_intcs_lock);
462*4882a593Smuzhiyun 	list_add_tail(&intc->list, &bcm7038_l1_intcs_list);
463*4882a593Smuzhiyun 	raw_spin_unlock(&bcm7038_l1_intcs_lock);
464*4882a593Smuzhiyun 
465*4882a593Smuzhiyun 	if (list_is_singular(&bcm7038_l1_intcs_list))
466*4882a593Smuzhiyun 		register_syscore_ops(&bcm7038_l1_syscore_ops);
467*4882a593Smuzhiyun #endif
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun 	pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
470*4882a593Smuzhiyun 		dn, IRQS_PER_WORD * intc->n_words);
471*4882a593Smuzhiyun 
472*4882a593Smuzhiyun 	return 0;
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun out_unmap:
475*4882a593Smuzhiyun 	for_each_possible_cpu(idx) {
476*4882a593Smuzhiyun 		struct bcm7038_l1_cpu *cpu = intc->cpus[idx];
477*4882a593Smuzhiyun 
478*4882a593Smuzhiyun 		if (cpu) {
479*4882a593Smuzhiyun 			if (cpu->map_base)
480*4882a593Smuzhiyun 				iounmap(cpu->map_base);
481*4882a593Smuzhiyun 			kfree(cpu);
482*4882a593Smuzhiyun 		}
483*4882a593Smuzhiyun 	}
484*4882a593Smuzhiyun out_free:
485*4882a593Smuzhiyun 	kfree(intc);
486*4882a593Smuzhiyun 	return ret;
487*4882a593Smuzhiyun }
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init);
490