// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of_irq.h>

#include "core.h"
#include <asm/dcr-regs.h>

static int mal_count;

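/*
 * Each EMAC wired to this MAL registers a struct mal_commac describing
 * the TX/RX channels it owns; overlapping channel claims are rejected
 * with -EBUSY.  Rough usage sketch (my_commac_ops and the surrounding
 * driver context are assumed, not defined here):
 *
 *	struct mal_commac commac = {
 *		.ops          = &my_commac_ops,
 *		.dev          = dev,
 *		.tx_chan_mask = MAL_CHAN_MASK(tx_chan),
 *		.rx_chan_mask = MAL_CHAN_MASK(rx_chan),
 *	};
 *	if (mal_register_commac(mal, &commac))
 *		goto err;
 */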
int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "reg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		spin_unlock_irqrestore(&mal->lock, flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->index);
		return -EBUSY;
	}

	if (list_empty(&mal->list))
		napi_enable(&mal->napi);
	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	spin_unlock_irqrestore(&mal->lock, flags);

	return 0;
}

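/*
 * Reverse of mal_register_commac(): release the channel masks and drop
 * the commac from the MAL's list.  When the last commac goes away, the
 * shared NAPI context is disabled again.
 */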
void mal_unregister_commac(struct mal_instance *mal,
			   struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);
	if (list_empty(&mal->list))
		napi_disable(&mal->napi);

	spin_unlock_irqrestore(&mal->lock, flags);
}

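/*
 * Program the receive channel buffer size.  MAL_RCBS appears to take
 * the size in 16-byte units (hence the size >> 4 write), which is why
 * sizes that are not 16-byte aligned are rejected with -EINVAL.
 */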
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for the channel %d\n",
		       mal->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}

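/*
 * All buffer descriptor rings live in a single DMA-coherent block: the
 * TX rings of every channel come first (NUM_TX_BUFF descriptors each),
 * followed by the RX rings (NUM_RX_BUFF descriptors each).  These two
 * helpers return a ring's starting descriptor index inside that block.
 */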
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

	return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
	return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

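/*
 * Channel enable is a read-modify-write of the "set" register
 * (MAL_TXCASR) under the instance lock.  Disable instead writes the
 * channel bit to the "reset" register (MAL_TXCARR); since that is a
 * plain store with no read-modify-write, no locking is needed there.
 */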
void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_tx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	/*
	 * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
	 * multiple of 8, but the bitmask written to MAL_RXCASR needs the
	 * divided-by-8 value.
	 */
	if (!(channel % 8))
		channel >>= 3;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	/*
	 * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
	 * multiple of 8, but the bitmask written to MAL_RXCARR needs the
	 * divided-by-8 value.
	 */
	if (!(channel % 8))
		channel >>= 3;

	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_add(%p)" NL, commac);

	/* starts disabled */
	set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	list_add_tail(&commac->poll_list, &mal->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_del(%p)" NL, commac);

	list_del(&commac->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}

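/*
 * System error interrupt.  MAL_ESR appears to be write-one-to-clear,
 * hence writing back the value just read.  Descriptor errors are
 * deliberately ignored here because the dedicated TXDE/RXDE interrupts
 * fire for them anyway; only PLB and OPB bus errors are reported.
 */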
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore Descriptor error,
			 * TXDE or RXDE interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, it's probably buggy hardware or
			 * incorrect physical address in BD (i.e. bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, it's probably buggy hardware or incorrect
		 * EBC setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}

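/*
 * Kick the shared NAPI context.  EOB interrupts stay masked while the
 * poller is scheduled and are re-enabled from mal_poll() once it has
 * drained its budget: the usual NAPI interrupt mitigation pattern.
 */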
static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		spin_lock(&mal->lock);
		mal_disable_eob_irq(mal);
		spin_unlock(&mal->lock);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}

static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
		       (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
		       (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

	return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	struct list_head *l;

	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG(mal, "rxde %08x" NL, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}

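/*
 * Combined error handler, used when the MAL has one shared error
 * interrupt (MAL_FTR_COMMON_ERR_INT, e.g. on 405EZ).  MAL_ESR_CIDT
 * seemingly tells RX descriptor errors apart from TX ones; any other
 * event with EVB set is treated as a system error.
 */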
static irqreturn_t mal_int(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	if (esr & MAL_ESR_EVB) {
		/* descriptor error */
		if (esr & MAL_ESR_DE) {
			if (esr & MAL_ESR_CIDT)
				return mal_rxde(irq, dev_instance);
			else
				return mal_txde(irq, dev_instance);
		} else { /* SERR */
			return mal_serr(irq, dev_instance);
		}
	}
	return IRQ_HANDLED;
}

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller may disable poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller */
	napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	/* Feels better to trigger a poll here to catch up with events that
	 * may have happened on this channel while disabled. It will most
	 * probably be delayed until the next interrupt but that's mostly a
	 * non-issue in the context where this is called.
	 */
	napi_schedule(&mal->napi);
}

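/*
 * NAPI poll callback shared by all commacs on this MAL.  TX completion
 * is always serviced; the RX budget is spread across the commacs on
 * the poll list.  After napi_complete_done(), the "rotting packet"
 * pass below re-checks for RX work that raced with interrupt
 * re-enabling and, if some is found, reschedules the poller with EOB
 * interrupts masked again.
 */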
static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);

	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something more smart here to enforce polling
	 * fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget - received);
		if (n) {
			received += n;
			if (received >= budget)
				return budget;
		}
	}

	if (napi_complete_done(napi, received)) {
		/* We need to disable IRQs to protect from RXDE IRQ here */
		spin_lock_irqsave(&mal->lock, flags);
		mal_enable_eob_irq(mal);
		spin_unlock_irqrestore(&mal->lock, flags);
	}

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		if (unlikely(mc->ops->peek_rx(mc->dev) ||
			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
			MAL_DBG2(mal, "rotting packet" NL);
			if (!napi_reschedule(napi))
				goto more_work;

			spin_lock_irqsave(&mal->lock, flags);
			mal_disable_eob_irq(mal);
			spin_unlock_irqrestore(&mal->lock, flags);
		}
		mc->ops->poll_tx(mc->dev);
	}

 more_work:
	MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
	return received;
}

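/*
 * Soft-reset the MAL: set MAL_CFG_SR and spin until the bit
 * self-clears.  Per the comment below, reset takes a single system
 * clock, so ten polls should be generous.
 */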
static void mal_reset(struct mal_instance *mal)
{
	int n = 10;

	MAL_DBG(mal, "reset" NL);

	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

	/* Wait for reset to complete (1 system clock) */
	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
		sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}

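/*
 * Probe flow: read the channel counts from the device tree, map the
 * DCRs, map the five MAL interrupts (or share one error IRQ when
 * MAL_FTR_COMMON_ERR_INT is set), reset the hardware, allocate one
 * coherent block for all BD rings, point each channel's CTPR at its
 * ring, then hook up the handlers and enable EOB interrupts.  Error
 * paths unwind in reverse order through the fail* labels.
 */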
static int mal_probe(struct platform_device *ofdev)
{
	struct mal_instance *mal;
	int err = 0, i, bd_size;
	int index = mal_count++;
	unsigned int dcr_base;
	const u32 *prop;
	u32 cfg;
	unsigned long irqflags;
	irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

	mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
	if (!mal)
		return -ENOMEM;

	mal->index = index;
	mal->ofdev = ofdev;
	mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

	MAL_DBG(mal, "probe" NL);

	prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-tx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_tx_chans = prop[0];

	prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-rx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_rx_chans = prop[0];

	dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
	if (dcr_base == 0) {
		printk(KERN_ERR
		       "mal%d: can't find DCR resource!\n", index);
		err = -ENODEV;
		goto fail;
	}
	mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
	if (!DCR_MAP_OK(mal->dcr_host)) {
		printk(KERN_ERR
		       "mal%d: failed to map DCRs !\n", index);
		err = -ENODEV;
		goto fail;
	}

	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
		defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
		mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
				MAL_FTR_COMMON_ERR_INT);
#else
		printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
				ofdev->dev.of_node);
		err = -ENODEV;
		goto fail;
#endif
	}

	mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
	mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		mal->txde_irq = mal->rxde_irq = mal->serr_irq;
	} else {
		mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
		mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
	}

	if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
	    !mal->txde_irq || !mal->rxde_irq) {
		printk(KERN_ERR
		       "mal%d: failed to map interrupts !\n", index);
		err = -ENODEV;
		goto fail_unmap;
	}

	INIT_LIST_HEAD(&mal->poll_list);
	INIT_LIST_HEAD(&mal->list);
	spin_lock_init(&mal->lock);

	init_dummy_netdev(&mal->dummy_dev);

	netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
		       CONFIG_IBM_EMAC_POLL_WEIGHT);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
	cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

	/* Current Axon is not happy with priority being non-0, it can
	 * deadlock, fix it up here
	 */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
		cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

	/* Apply configuration */
	set_mal_dcrn(mal, MAL_CFG, cfg);

	/* Allocate space for BD rings */
	BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
	BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

	bd_size = sizeof(struct mal_descriptor) *
		(NUM_TX_BUFF * mal->num_tx_chans +
		 NUM_RX_BUFF * mal->num_rx_chans);
	mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
					  GFP_KERNEL);
	if (mal->bd_virt == NULL) {
		err = -ENOMEM;
		goto fail_unmap;
	}

	for (i = 0; i < mal->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < mal->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		irqflags = IRQF_SHARED;
		hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
	} else {
		irqflags = 0;
		hdlr_serr = mal_serr;
		hdlr_txde = mal_txde;
		hdlr_rxde = mal_rxde;
	}

	err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);

	/* Enable EOB interrupt */
	mal_enable_eob_irq(mal);

	printk(KERN_INFO
	       "MAL v%d %pOF, %d TX channels, %d RX channels\n",
	       mal->version, ofdev->dev.of_node,
	       mal->num_tx_chans, mal->num_rx_chans);

	/* Advertise this instance to the rest of the world */
	wmb();
	platform_set_drvdata(ofdev, mal);

	return 0;

 fail6:
	free_irq(mal->rxde_irq, mal);
 fail5:
	free_irq(mal->txeob_irq, mal);
 fail4:
	free_irq(mal->txde_irq, mal);
 fail3:
	free_irq(mal->serr_irq, mal);
 fail2:
	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
	dcr_unmap(mal->dcr_host, 0x100);
 fail:
	kfree(mal);

	return err;
}

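/*
 * Tear-down mirrors mal_probe(): quiesce NAPI, free the IRQs, reset
 * the hardware, and release the BD ring block.  A non-empty commac
 * list here means an EMAC was left registered, hence the loud WARN.
 */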
static int mal_remove(struct platform_device *ofdev)
{
	struct mal_instance *mal = platform_get_drvdata(ofdev);

	MAL_DBG(mal, "remove" NL);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list))
		/* This is *very* bad */
		WARN(1, KERN_EMERG
		     "mal%d: commac list is not empty on remove!\n",
		     mal->index);

	free_irq(mal->serr_irq, mal);
	free_irq(mal->txde_irq, mal);
	free_irq(mal->txeob_irq, mal);
	free_irq(mal->rxde_irq, mal);
	free_irq(mal->rxeob_irq, mal);

	mal_reset(mal);

	dma_free_coherent(&ofdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * mal->num_tx_chans +
			   NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);
	kfree(mal);

	return 0;
}

static const struct of_device_id mal_platform_match[] =
{
	{
		.compatible = "ibm,mcmal",
	},
	{
		.compatible = "ibm,mcmal2",
	},
	/* Backward compat */
	{
		.type = "mcmal-dma",
		.compatible = "ibm,mcmal",
	},
	{
		.type = "mcmal-dma",
		.compatible = "ibm,mcmal2",
	},
	{},
};

static struct platform_driver mal_of_driver = {
	.driver = {
		.name = "mcmal",
		.of_match_table = mal_platform_match,
	},
	.probe = mal_probe,
	.remove = mal_remove,
};

int __init mal_init(void)
{
	return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
	platform_driver_unregister(&mal_of_driver);
}