// SPDX-License-Identifier: GPL-2.0-or-later
/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
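/* RX_BD_ERRORS collects every per-BD receive error bit reported by the QE:
 * carrier detect lost (R_CD_S), overrun (R_OV_S), CRC error (R_CR_S), abort
 * sequence (R_AB_S), non-octet aligned frame (R_NO_S) and frame length
 * violation (R_LG_S). hdlc_rx_done() below maps each bit onto the matching
 * field of struct net_device_stats.
 */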

static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		.urfs = 256,
		.utfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfet = 128,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};

static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
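/* utdm_info[] holds one working copy of utdm_primary_info per UCC;
 * ucc_hdlc_probe() duplicates the template into the slot for the probed UCC
 * before patching in the per-device clock and register settings.
 */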

static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	s32 riptr;
	s32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
		uf_info->cds = 1;
		uf_info->ctss = 1;
	} else {
		uf_info->cds = 0;
		uf_info->ctsp = 0;
		uf_info->ctss = 0;
	}

	/* This sets the HPM field in the CMXUCR register, which configures
	 * an open-drain connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf\n");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (this still needs a proper fix) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
				UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);

	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);

	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);

	if (priv->ucc_pram_offset < 0) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
					qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (riptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (tiptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}
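	/* riptr and tiptr land in 16-bit parameter RAM fields below, so the
	 * MURAM offsets returned by qe_muram_alloc() must fit in 16 bits.
	 */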
	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);

	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

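	/* Ring setup below: every BD gets the interrupt (I) bit, RX BDs are
	 * additionally marked empty (E) so the QE may fill them, and the
	 * last BD of each ring carries the wrap (W) bit so the controller
	 * cycles back to the ring base after it.
	 */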
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}

static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			return NETDEV_TX_OK;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return NETDEV_TX_OK;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_ETHER:
		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
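	/* netdev_sent_queue() opens a byte-queue-limits window for this
	 * frame; hdlc_tx_done() closes it via netdev_completed_queue() once
	 * the QE reports the BD transmitted.
	 */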
	netdev_sent_queue(dev, skb->len);
	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}

static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
{
	u32 cecr_subblock;

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
	return 0;
}

static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	unsigned int bytes_sent = 0;
	int howmany = 0;
	struct qe_bd *bd;	/* BD pointer */
	u16 bd_status;
	int tx_restart = 0;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		if (bd_status & T_UN_S) { /* Underrun */
			dev->stats.tx_fifo_errors++;
			tx_restart = 1;
		}
		if (bd_status & T_CT_S) { /* Carrier lost */
			dev->stats.tx_carrier_errors++;
			tx_restart = 1;
		}

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		howmany++;
		bytes_sent += skb->len;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_consume_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	if (tx_restart)
		hdlc_tx_restart(priv);

	netdev_completed_queue(dev, howmany, bytes_sent);
	return 0;
}

static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

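	/* The empty (E) bit marks hardware ownership: the QE clears it when
	 * it has filled a BD, and the recycle path below sets it again once
	 * the CPU is done, so the loop stops at the first BD still owned by
	 * hardware or when the NAPI work limit is used up.
	 */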
	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & (RX_BD_ERRORS)) {
			dev->stats.rx_errors++;

			if (bd_status & R_CD_S)
				dev->stats.collisions++;
			if (bd_status & R_OV_S)
				dev->stats.rx_fifo_errors++;
			if (bd_status & R_CR_S)
				dev->stats.rx_crc_errors++;
			if (bd_status & R_AB_S)
				dev->stats.rx_over_errors++;
			if (bd_status & R_NO_S)
				dev->stats.rx_frame_errors++;
			if (bd_status & R_LG_S)
				dev->stats.rx_length_errors++;

			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
		case ARPHRD_ETHER:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}

static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

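	/* NAPI contract: only when less than the full budget was consumed do
	 * we complete polling and re-enable the RX/TX event interrupts that
	 * ucc_hdlc_irq_handler() masked off when it scheduled us.
	 */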
	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits_be32(priv->uccf->p_uccm,
				(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}

static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	u32 ucce;
	u32 uccm;

	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

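	/* RX/TX events are deferred to NAPI: mask them in UCCM so the line
	 * stays quiet until ucc_hdlc_poll() unmasks them again, then hand
	 * the remaining work to softirq context.
	 */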
	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_missed_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}

static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

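	/* hdlc_busy doubles as an "already open" guard: it keeps a second
	 * open from re-requesting the IRQ and re-issuing QE_INIT_TX_RX, and
	 * uhdlc_resume() reads it to decide whether to re-enable the fast
	 * controller after a sleep state.
	 */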
	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netdev_reset_queue(dev);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}

static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
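	/* Teardown mirror of uhdlc_init(): the riptr/tiptr MURAM offsets are
	 * read back out of parameter RAM, where only their 16-bit values
	 * were stored, then every resource is released in reverse order of
	 * allocation.
	 */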
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	netdev_reset_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}

#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	uf_regs = priv->uf_regs;

	/* backup gumr and guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr and guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(QE_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	netdev_err(ndev, "%s\n", __func__);
}

static const struct net_device_ops uhdlc_ops = {
	.ndo_open       = uhdlc_open,
	.ndo_stop       = uhdlc_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = uhdlc_ioctl,
	.ndo_tx_timeout = uhdlc_tx_timeout,
};

static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct resource *res;
	static int siram_init_flag;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, name);
	if (!np)
		return -EINVAL;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		pr_err("%pOFn: failed to lookup pdev\n", np);
		of_node_put(np);
		return -EINVAL;
	}

	of_node_put(np);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -EINVAL;
		goto error_put_device;
	}
	*ptr = ioremap(res->start, resource_size(res));
	if (!*ptr) {
		ret = -ENOMEM;
		goto error_put_device;
	}

	/* We've remapped the addresses, and we don't need the device any
	 * more, so we should release it.
	 */
	put_device(&pdev->dev);

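	/* siram_init_flag is function-static on purpose: it makes sure the
	 * SIRAM is zeroed at most once per boot, no matter how many devices
	 * end up mapping it through this helper.
	 */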
	if (init_flag && siram_init_flag == 0) {
		memset_io(*ptr, 0, resource_size(res));
		siram_init_flag = 1;
	}
	return 0;

error_put_device:
	put_device(&pdev->dev);

	return ret;
}

static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

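	/* The device tree's cell-index counts UCCs from 1, while driver
	 * tables such as utdm_info[] are indexed from 0, hence the
	 * subtraction and the range check below.
	 */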
	ucc_num = val - 1;
	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
		dev_err(&pdev->dev, "Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
				     (void __iomem **)&utdm->si_regs);
		if (ret)
			goto free_utdm;
		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
				     (void __iomem **)&utdm->siram);
		if (ret)
			goto unmap_si_regs;
	}

	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto undo_uhdlc_init;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	dev->watchdog_timeo = 2 * HZ;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
	/* utdm is only set up when fsl,tdm-interface is present */
	if (utdm)
		iounmap(utdm->siram);
unmap_si_regs:
	if (utdm)
		iounmap(utdm->si_regs);
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm && priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm && priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}
	kfree(priv);

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);