// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 */

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/qmgr.h>

static struct qmgr_regs __iomem *qmgr_regs;
static int qmgr_irq_1;
static int qmgr_irq_2;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];

#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
#endif

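/**
 * qmgr_put_entry() - adds an entry to a hardware queue
 * @queue: queue number
 * @val: value to add to the queue
 */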
void qmgr_put_entry(unsigned int queue, u32 val)
{
#if DEBUG_QMGR
	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */

	printk(KERN_DEBUG "Queue %s(%i) put %X\n",
	       qmgr_queue_descs[queue], queue, val);
#endif
	__raw_writel(val, &qmgr_regs->acc[queue][0]);
}

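/**
 * qmgr_get_entry() - removes an entry from a hardware queue
 * @queue: queue number
 *
 * Returns the oldest queue entry, or 0 if the queue is empty.
 */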
u32 qmgr_get_entry(unsigned int queue)
{
	u32 val;
	val = __raw_readl(&qmgr_regs->acc[queue][0]);
#if DEBUG_QMGR
	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */

	printk(KERN_DEBUG "Queue %s(%i) get %X\n",
	       qmgr_queue_descs[queue], queue, val);
#endif
	return val;
}

static int __qmgr_get_stat1(unsigned int queue)
{
	return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
		>> ((queue & 7) << 2)) & 0xF;
}

static int __qmgr_get_stat2(unsigned int queue)
{
	BUG_ON(queue >= HALF_QUEUES);
	return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
		>> ((queue & 0xF) << 1)) & 0x3;
}

/**
 * qmgr_stat_empty() - checks if a hardware queue is empty
 * @queue: queue number
 *
 * Returns non-zero value if the queue is empty.
 */
int qmgr_stat_empty(unsigned int queue)
{
	BUG_ON(queue >= HALF_QUEUES);
	return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
}

/**
 * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
 * @queue: queue number
 *
 * Returns non-zero value if the queue is below low watermark.
 */
int qmgr_stat_below_low_watermark(unsigned int queue)
{
	if (queue >= HALF_QUEUES)
		return (__raw_readl(&qmgr_regs->statne_h) >>
			(queue - HALF_QUEUES)) & 0x01;
	return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
}

/**
 * qmgr_stat_full() - checks if a hardware queue is full
 * @queue: queue number
 *
 * Returns non-zero value if the queue is full.
 */
int qmgr_stat_full(unsigned int queue)
{
	if (queue >= HALF_QUEUES)
		return (__raw_readl(&qmgr_regs->statf_h) >>
			(queue - HALF_QUEUES)) & 0x01;
	return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
}

/**
 * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
 * @queue: queue number
 *
 * Returns non-zero value if the queue experienced overflow.
 */
int qmgr_stat_overflow(unsigned int queue)
{
	return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
}

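/**
 * qmgr_set_irq() - installs an interrupt handler for a hardware queue
 * @queue: queue number
 * @src: interrupt source (QUEUE_IRQ_SRC_*); for queues 32-63 the source
 *	 is fixed to "not nearly empty"
 * @handler: callback invoked from the queue manager interrupt handlers
 * @pdev: opaque pointer passed back to @handler
 */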
void qmgr_set_irq(unsigned int queue, int src,
		  void (*handler)(void *pdev), void *pdev)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	if (queue < HALF_QUEUES) {
		u32 __iomem *reg;
		int bit;
		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
		__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
			     reg);
	} else
		/* IRQ source for queues 32-63 is fixed */
		BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);

	irq_handlers[queue] = handler;
	irq_pdevs[queue] = pdev;
	spin_unlock_irqrestore(&qmgr_lock, flags);
}


static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 en_bitmap, src, stat;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);

	en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
	while (en_bitmap) {
		i = __fls(en_bitmap); /* number of the last "low" queue */
		en_bitmap &= ~BIT(i);
		src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
		stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
		if (src & 4) /* the IRQ condition is inverted */
			stat = ~stat;
		if (stat & BIT(src & 3)) {
			irq_handlers[i](irq_pdevs[i]);
			ret = IRQ_HANDLED;
		}
	}
	return ret;
}


static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 req_bitmap;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);

	req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
		     __raw_readl(&qmgr_regs->statne_h);
	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last "high" queue */
		req_bitmap &= ~BIT(i);
		irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
		ret = IRQ_HANDLED;
	}
	return ret;
}


static irqreturn_t qmgr_irq(int irq, void *pdev)
{
	int i, half = (irq == qmgr_irq_1 ? 0 : 1);
	u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);

	if (!req_bitmap)
		return 0;
	__raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */

	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last queue */
		req_bitmap &= ~BIT(i);
		i += half * HALF_QUEUES;
		irq_handlers[i](irq_pdevs[i]);
	}
	return IRQ_HANDLED;
}


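/**
 * qmgr_enable_irq() - enables interrupts generated by a hardware queue
 * @queue: queue number
 */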
void qmgr_enable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
		     &qmgr_regs->irqen[half]);
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

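/**
 * qmgr_disable_irq() - disables interrupts generated by a hardware queue
 * @queue: queue number
 */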
void qmgr_disable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
		     &qmgr_regs->irqen[half]);
	__raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

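/* Shift the 128-bit SRAM allocation mask left by one 16-dword page */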
static inline void shift_mask(u32 *mask)
{
	mask[3] = mask[3] << 1 | mask[2] >> 31;
	mask[2] = mask[2] << 1 | mask[1] >> 31;
	mask[1] = mask[1] << 1 | mask[0] >> 31;
	mask[0] <<= 1;
}

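/*
 * Allocate a contiguous block of queue SRAM (len / 16 pages) for the queue
 * and program its configuration word: length code, SRAM address and the
 * nearly-empty/nearly-full watermarks.
 */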
#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark,
		       const char *desc_format, const char* name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
			 unsigned int nearly_empty_watermark,
			 unsigned int nearly_full_watermark)
#endif
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	BUG_ON(queue >= QUEUES);

	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

	switch (len) {
	case 16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case 32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case 64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		err = -EBUSY;
		goto err;
	}

	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for"
			       " queue %i\n", queue);
			err = -ENOMEM;
			goto err;
		}
	}

	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
		 desc_format, name);
	printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
	       qmgr_queue_descs[queue], queue, addr);
#endif
	spin_unlock_irq(&qmgr_lock);
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}

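/**
 * qmgr_release_queue() - releases a hardware queue
 * @queue: queue number
 *
 * Drains any remaining entries (complaining if the queue was not empty),
 * clears the queue's configuration word and frees its SRAM pages.
 */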
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF;

	BUG_ON(!addr); /* not requested */

	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	while (addr--)
		shift_mask(mask);

#if DEBUG_QMGR
	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
	       qmgr_queue_descs[queue], queue);
	qmgr_queue_descs[queue][0] = '\x0';
#endif

	while ((addr = qmgr_get_entry(queue)))
		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
		       queue, addr);

	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);
}

static int ixp4xx_qmgr_probe(struct platform_device *pdev)
{
	int i, err;
	irq_handler_t handler1, handler2;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int irq1, irq2;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	qmgr_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(qmgr_regs))
		return PTR_ERR(qmgr_regs);

	irq1 = platform_get_irq(pdev, 0);
	if (irq1 <= 0)
		return irq1 ? irq1 : -EINVAL;
	qmgr_irq_1 = irq1;
	irq2 = platform_get_irq(pdev, 1);
	if (irq2 <= 0)
		return irq2 ? irq2 : -EINVAL;
	qmgr_irq_2 = irq2;

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}

	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
	__raw_writel(0, &qmgr_regs->statf_h);

	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	if (cpu_is_ixp42x_rev_a0()) {
		handler1 = qmgr_irq1_a0;
		handler2 = qmgr_irq2_a0;
	} else
		handler1 = handler2 = qmgr_irq;

	err = devm_request_irq(dev, irq1, handler1, 0, "IXP4xx Queue Manager",
			       NULL);
	if (err) {
		dev_err(dev, "failed to request IRQ%i (%i)\n",
			irq1, err);
		return err;
	}

	err = devm_request_irq(dev, irq2, handler2, 0, "IXP4xx Queue Manager",
			       NULL);
	if (err) {
		dev_err(dev, "failed to request IRQ%i (%i)\n",
			irq2, err);
		return err;
	}

	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
	spin_lock_init(&qmgr_lock);

	dev_info(dev, "IXP4xx Queue Manager initialized.\n");
	return 0;
}

static int ixp4xx_qmgr_remove(struct platform_device *pdev)
{
	synchronize_irq(qmgr_irq_1);
	synchronize_irq(qmgr_irq_2);
	return 0;
}

static const struct of_device_id ixp4xx_qmgr_of_match[] = {
	{
		.compatible = "intel,ixp4xx-ahb-queue-manager",
	},
	{},
};

static struct platform_driver ixp4xx_qmgr_driver = {
	.driver = {
		.name = "ixp4xx-qmgr",
		.of_match_table = of_match_ptr(ixp4xx_qmgr_of_match),
	},
	.probe = ixp4xx_qmgr_probe,
	.remove = ixp4xx_qmgr_remove,
};
module_platform_driver(ixp4xx_qmgr_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

EXPORT_SYMBOL(qmgr_put_entry);
EXPORT_SYMBOL(qmgr_get_entry);
EXPORT_SYMBOL(qmgr_stat_empty);
EXPORT_SYMBOL(qmgr_stat_below_low_watermark);
EXPORT_SYMBOL(qmgr_stat_full);
EXPORT_SYMBOL(qmgr_stat_overflow);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);