xref: /OK3568_Linux_fs/kernel/drivers/soc/fsl/qe/ucc_fast.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Authors: 	Shlomi Gridish <gridish@freescale.com>
6*4882a593Smuzhiyun  * 		Li Yang <leoli@freescale.com>
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Description:
9*4882a593Smuzhiyun  * QE UCC Fast API Set - UCC Fast specific routines implementations.
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun #include <linux/kernel.h>
12*4882a593Smuzhiyun #include <linux/errno.h>
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include <linux/stddef.h>
15*4882a593Smuzhiyun #include <linux/interrupt.h>
16*4882a593Smuzhiyun #include <linux/err.h>
17*4882a593Smuzhiyun #include <linux/export.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <asm/io.h>
20*4882a593Smuzhiyun #include <soc/fsl/qe/immap_qe.h>
21*4882a593Smuzhiyun #include <soc/fsl/qe/qe.h>
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #include <soc/fsl/qe/ucc.h>
24*4882a593Smuzhiyun #include <soc/fsl/qe/ucc_fast.h>
25*4882a593Smuzhiyun 
ucc_fast_dump_regs(struct ucc_fast_private * uccf)26*4882a593Smuzhiyun void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
27*4882a593Smuzhiyun {
28*4882a593Smuzhiyun 	printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
29*4882a593Smuzhiyun 	printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun 	printk(KERN_INFO "gumr  : addr=0x%p, val=0x%08x\n",
32*4882a593Smuzhiyun 		  &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr));
33*4882a593Smuzhiyun 	printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
34*4882a593Smuzhiyun 		  &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr));
35*4882a593Smuzhiyun 	printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
36*4882a593Smuzhiyun 		  &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr));
37*4882a593Smuzhiyun 	printk(KERN_INFO "udsr  : addr=0x%p, val=0x%04x\n",
38*4882a593Smuzhiyun 		  &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr));
39*4882a593Smuzhiyun 	printk(KERN_INFO "ucce  : addr=0x%p, val=0x%08x\n",
40*4882a593Smuzhiyun 		  &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce));
41*4882a593Smuzhiyun 	printk(KERN_INFO "uccm  : addr=0x%p, val=0x%08x\n",
42*4882a593Smuzhiyun 		  &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm));
43*4882a593Smuzhiyun 	printk(KERN_INFO "uccs  : addr=0x%p, val=0x%02x\n",
44*4882a593Smuzhiyun 		  &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs));
45*4882a593Smuzhiyun 	printk(KERN_INFO "urfb  : addr=0x%p, val=0x%08x\n",
46*4882a593Smuzhiyun 		  &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb));
47*4882a593Smuzhiyun 	printk(KERN_INFO "urfs  : addr=0x%p, val=0x%04x\n",
48*4882a593Smuzhiyun 		  &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs));
49*4882a593Smuzhiyun 	printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
50*4882a593Smuzhiyun 		  &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet));
51*4882a593Smuzhiyun 	printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
52*4882a593Smuzhiyun 		  &uccf->uf_regs->urfset,
53*4882a593Smuzhiyun 		  qe_ioread16be(&uccf->uf_regs->urfset));
54*4882a593Smuzhiyun 	printk(KERN_INFO "utfb  : addr=0x%p, val=0x%08x\n",
55*4882a593Smuzhiyun 		  &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb));
56*4882a593Smuzhiyun 	printk(KERN_INFO "utfs  : addr=0x%p, val=0x%04x\n",
57*4882a593Smuzhiyun 		  &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs));
58*4882a593Smuzhiyun 	printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
59*4882a593Smuzhiyun 		  &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet));
60*4882a593Smuzhiyun 	printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
61*4882a593Smuzhiyun 		  &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt));
62*4882a593Smuzhiyun 	printk(KERN_INFO "utpt  : addr=0x%p, val=0x%04x\n",
63*4882a593Smuzhiyun 		  &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt));
64*4882a593Smuzhiyun 	printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
65*4882a593Smuzhiyun 		  &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry));
66*4882a593Smuzhiyun 	printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
67*4882a593Smuzhiyun 		  &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr));
68*4882a593Smuzhiyun }
69*4882a593Smuzhiyun EXPORT_SYMBOL(ucc_fast_dump_regs);
70*4882a593Smuzhiyun 
ucc_fast_get_qe_cr_subblock(int uccf_num)71*4882a593Smuzhiyun u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
72*4882a593Smuzhiyun {
73*4882a593Smuzhiyun 	switch (uccf_num) {
74*4882a593Smuzhiyun 	case 0: return QE_CR_SUBBLOCK_UCCFAST1;
75*4882a593Smuzhiyun 	case 1: return QE_CR_SUBBLOCK_UCCFAST2;
76*4882a593Smuzhiyun 	case 2: return QE_CR_SUBBLOCK_UCCFAST3;
77*4882a593Smuzhiyun 	case 3: return QE_CR_SUBBLOCK_UCCFAST4;
78*4882a593Smuzhiyun 	case 4: return QE_CR_SUBBLOCK_UCCFAST5;
79*4882a593Smuzhiyun 	case 5: return QE_CR_SUBBLOCK_UCCFAST6;
80*4882a593Smuzhiyun 	case 6: return QE_CR_SUBBLOCK_UCCFAST7;
81*4882a593Smuzhiyun 	case 7: return QE_CR_SUBBLOCK_UCCFAST8;
82*4882a593Smuzhiyun 	default: return QE_CR_SUBBLOCK_INVALID;
83*4882a593Smuzhiyun 	}
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
86*4882a593Smuzhiyun 
/*
 * Kick off transmission immediately by writing the Transmit On Demand
 * (TOD) bit to the UCC's UTODR register, rather than waiting for the
 * controller's normal transmit scheduling.
 */
void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
{
	qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
}
EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
92*4882a593Smuzhiyun 
ucc_fast_enable(struct ucc_fast_private * uccf,enum comm_dir mode)93*4882a593Smuzhiyun void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun 	struct ucc_fast __iomem *uf_regs;
96*4882a593Smuzhiyun 	u32 gumr;
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	uf_regs = uccf->uf_regs;
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	/* Enable reception and/or transmission on this UCC. */
101*4882a593Smuzhiyun 	gumr = qe_ioread32be(&uf_regs->gumr);
102*4882a593Smuzhiyun 	if (mode & COMM_DIR_TX) {
103*4882a593Smuzhiyun 		gumr |= UCC_FAST_GUMR_ENT;
104*4882a593Smuzhiyun 		uccf->enabled_tx = 1;
105*4882a593Smuzhiyun 	}
106*4882a593Smuzhiyun 	if (mode & COMM_DIR_RX) {
107*4882a593Smuzhiyun 		gumr |= UCC_FAST_GUMR_ENR;
108*4882a593Smuzhiyun 		uccf->enabled_rx = 1;
109*4882a593Smuzhiyun 	}
110*4882a593Smuzhiyun 	qe_iowrite32be(gumr, &uf_regs->gumr);
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun EXPORT_SYMBOL(ucc_fast_enable);
113*4882a593Smuzhiyun 
ucc_fast_disable(struct ucc_fast_private * uccf,enum comm_dir mode)114*4882a593Smuzhiyun void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
115*4882a593Smuzhiyun {
116*4882a593Smuzhiyun 	struct ucc_fast __iomem *uf_regs;
117*4882a593Smuzhiyun 	u32 gumr;
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 	uf_regs = uccf->uf_regs;
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun 	/* Disable reception and/or transmission on this UCC. */
122*4882a593Smuzhiyun 	gumr = qe_ioread32be(&uf_regs->gumr);
123*4882a593Smuzhiyun 	if (mode & COMM_DIR_TX) {
124*4882a593Smuzhiyun 		gumr &= ~UCC_FAST_GUMR_ENT;
125*4882a593Smuzhiyun 		uccf->enabled_tx = 0;
126*4882a593Smuzhiyun 	}
127*4882a593Smuzhiyun 	if (mode & COMM_DIR_RX) {
128*4882a593Smuzhiyun 		gumr &= ~UCC_FAST_GUMR_ENR;
129*4882a593Smuzhiyun 		uccf->enabled_rx = 0;
130*4882a593Smuzhiyun 	}
131*4882a593Smuzhiyun 	qe_iowrite32be(gumr, &uf_regs->gumr);
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun EXPORT_SYMBOL(ucc_fast_disable);
134*4882a593Smuzhiyun 
ucc_fast_init(struct ucc_fast_info * uf_info,struct ucc_fast_private ** uccf_ret)135*4882a593Smuzhiyun int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
136*4882a593Smuzhiyun {
137*4882a593Smuzhiyun 	struct ucc_fast_private *uccf;
138*4882a593Smuzhiyun 	struct ucc_fast __iomem *uf_regs;
139*4882a593Smuzhiyun 	u32 gumr;
140*4882a593Smuzhiyun 	int ret;
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 	if (!uf_info)
143*4882a593Smuzhiyun 		return -EINVAL;
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun 	/* check if the UCC port number is in range. */
146*4882a593Smuzhiyun 	if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
147*4882a593Smuzhiyun 		printk(KERN_ERR "%s: illegal UCC number\n", __func__);
148*4882a593Smuzhiyun 		return -EINVAL;
149*4882a593Smuzhiyun 	}
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 	/* Check that 'max_rx_buf_length' is properly aligned (4). */
152*4882a593Smuzhiyun 	if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
153*4882a593Smuzhiyun 		printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
154*4882a593Smuzhiyun 			__func__);
155*4882a593Smuzhiyun 		return -EINVAL;
156*4882a593Smuzhiyun 	}
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 	/* Validate Virtual Fifo register values */
159*4882a593Smuzhiyun 	if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
160*4882a593Smuzhiyun 		printk(KERN_ERR "%s: urfs is too small\n", __func__);
161*4882a593Smuzhiyun 		return -EINVAL;
162*4882a593Smuzhiyun 	}
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
165*4882a593Smuzhiyun 		printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
166*4882a593Smuzhiyun 		return -EINVAL;
167*4882a593Smuzhiyun 	}
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
170*4882a593Smuzhiyun 		printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
171*4882a593Smuzhiyun 		return -EINVAL;
172*4882a593Smuzhiyun 	}
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
175*4882a593Smuzhiyun 		printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
176*4882a593Smuzhiyun 		return -EINVAL;
177*4882a593Smuzhiyun 	}
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
180*4882a593Smuzhiyun 		printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
181*4882a593Smuzhiyun 		return -EINVAL;
182*4882a593Smuzhiyun 	}
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 	if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
185*4882a593Smuzhiyun 		printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
186*4882a593Smuzhiyun 		return -EINVAL;
187*4882a593Smuzhiyun 	}
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 	if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
190*4882a593Smuzhiyun 		printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
191*4882a593Smuzhiyun 		return -EINVAL;
192*4882a593Smuzhiyun 	}
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
195*4882a593Smuzhiyun 	if (!uccf) {
196*4882a593Smuzhiyun 		printk(KERN_ERR "%s: Cannot allocate private data\n",
197*4882a593Smuzhiyun 			__func__);
198*4882a593Smuzhiyun 		return -ENOMEM;
199*4882a593Smuzhiyun 	}
200*4882a593Smuzhiyun 	uccf->ucc_fast_tx_virtual_fifo_base_offset = -1;
201*4882a593Smuzhiyun 	uccf->ucc_fast_rx_virtual_fifo_base_offset = -1;
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 	/* Fill fast UCC structure */
204*4882a593Smuzhiyun 	uccf->uf_info = uf_info;
205*4882a593Smuzhiyun 	/* Set the PHY base address */
206*4882a593Smuzhiyun 	uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
207*4882a593Smuzhiyun 	if (uccf->uf_regs == NULL) {
208*4882a593Smuzhiyun 		printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
209*4882a593Smuzhiyun 		kfree(uccf);
210*4882a593Smuzhiyun 		return -ENOMEM;
211*4882a593Smuzhiyun 	}
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 	uccf->enabled_tx = 0;
214*4882a593Smuzhiyun 	uccf->enabled_rx = 0;
215*4882a593Smuzhiyun 	uccf->stopped_tx = 0;
216*4882a593Smuzhiyun 	uccf->stopped_rx = 0;
217*4882a593Smuzhiyun 	uf_regs = uccf->uf_regs;
218*4882a593Smuzhiyun 	uccf->p_ucce = &uf_regs->ucce;
219*4882a593Smuzhiyun 	uccf->p_uccm = &uf_regs->uccm;
220*4882a593Smuzhiyun #ifdef CONFIG_UGETH_TX_ON_DEMAND
221*4882a593Smuzhiyun 	uccf->p_utodr = &uf_regs->utodr;
222*4882a593Smuzhiyun #endif
223*4882a593Smuzhiyun #ifdef STATISTICS
224*4882a593Smuzhiyun 	uccf->tx_frames = 0;
225*4882a593Smuzhiyun 	uccf->rx_frames = 0;
226*4882a593Smuzhiyun 	uccf->rx_discarded = 0;
227*4882a593Smuzhiyun #endif				/* STATISTICS */
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	/* Set UCC to fast type */
230*4882a593Smuzhiyun 	ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
231*4882a593Smuzhiyun 	if (ret) {
232*4882a593Smuzhiyun 		printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
233*4882a593Smuzhiyun 		ucc_fast_free(uccf);
234*4882a593Smuzhiyun 		return ret;
235*4882a593Smuzhiyun 	}
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun 	uccf->mrblr = uf_info->max_rx_buf_length;
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun 	/* Set GUMR */
240*4882a593Smuzhiyun 	/* For more details see the hardware spec. */
241*4882a593Smuzhiyun 	gumr = uf_info->ttx_trx;
242*4882a593Smuzhiyun 	if (uf_info->tci)
243*4882a593Smuzhiyun 		gumr |= UCC_FAST_GUMR_TCI;
244*4882a593Smuzhiyun 	if (uf_info->cdp)
245*4882a593Smuzhiyun 		gumr |= UCC_FAST_GUMR_CDP;
246*4882a593Smuzhiyun 	if (uf_info->ctsp)
247*4882a593Smuzhiyun 		gumr |= UCC_FAST_GUMR_CTSP;
248*4882a593Smuzhiyun 	if (uf_info->cds)
249*4882a593Smuzhiyun 		gumr |= UCC_FAST_GUMR_CDS;
250*4882a593Smuzhiyun 	if (uf_info->ctss)
251*4882a593Smuzhiyun 		gumr |= UCC_FAST_GUMR_CTSS;
252*4882a593Smuzhiyun 	if (uf_info->txsy)
253*4882a593Smuzhiyun 		gumr |= UCC_FAST_GUMR_TXSY;
254*4882a593Smuzhiyun 	if (uf_info->rsyn)
255*4882a593Smuzhiyun 		gumr |= UCC_FAST_GUMR_RSYN;
256*4882a593Smuzhiyun 	gumr |= uf_info->synl;
257*4882a593Smuzhiyun 	if (uf_info->rtsm)
258*4882a593Smuzhiyun 		gumr |= UCC_FAST_GUMR_RTSM;
259*4882a593Smuzhiyun 	gumr |= uf_info->renc;
260*4882a593Smuzhiyun 	if (uf_info->revd)
261*4882a593Smuzhiyun 		gumr |= UCC_FAST_GUMR_REVD;
262*4882a593Smuzhiyun 	gumr |= uf_info->tenc;
263*4882a593Smuzhiyun 	gumr |= uf_info->tcrc;
264*4882a593Smuzhiyun 	gumr |= uf_info->mode;
265*4882a593Smuzhiyun 	qe_iowrite32be(gumr, &uf_regs->gumr);
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun 	/* Allocate memory for Tx Virtual Fifo */
268*4882a593Smuzhiyun 	uccf->ucc_fast_tx_virtual_fifo_base_offset =
269*4882a593Smuzhiyun 	    qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
270*4882a593Smuzhiyun 	if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) {
271*4882a593Smuzhiyun 		printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
272*4882a593Smuzhiyun 			__func__);
273*4882a593Smuzhiyun 		ucc_fast_free(uccf);
274*4882a593Smuzhiyun 		return -ENOMEM;
275*4882a593Smuzhiyun 	}
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	/* Allocate memory for Rx Virtual Fifo */
278*4882a593Smuzhiyun 	uccf->ucc_fast_rx_virtual_fifo_base_offset =
279*4882a593Smuzhiyun 		qe_muram_alloc(uf_info->urfs +
280*4882a593Smuzhiyun 			   UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
281*4882a593Smuzhiyun 			   UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
282*4882a593Smuzhiyun 	if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) {
283*4882a593Smuzhiyun 		printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
284*4882a593Smuzhiyun 			__func__);
285*4882a593Smuzhiyun 		ucc_fast_free(uccf);
286*4882a593Smuzhiyun 		return -ENOMEM;
287*4882a593Smuzhiyun 	}
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	/* Set Virtual Fifo registers */
290*4882a593Smuzhiyun 	qe_iowrite16be(uf_info->urfs, &uf_regs->urfs);
291*4882a593Smuzhiyun 	qe_iowrite16be(uf_info->urfet, &uf_regs->urfet);
292*4882a593Smuzhiyun 	qe_iowrite16be(uf_info->urfset, &uf_regs->urfset);
293*4882a593Smuzhiyun 	qe_iowrite16be(uf_info->utfs, &uf_regs->utfs);
294*4882a593Smuzhiyun 	qe_iowrite16be(uf_info->utfet, &uf_regs->utfet);
295*4882a593Smuzhiyun 	qe_iowrite16be(uf_info->utftt, &uf_regs->utftt);
296*4882a593Smuzhiyun 	/* utfb, urfb are offsets from MURAM base */
297*4882a593Smuzhiyun 	qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
298*4882a593Smuzhiyun 		       &uf_regs->utfb);
299*4882a593Smuzhiyun 	qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
300*4882a593Smuzhiyun 		       &uf_regs->urfb);
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 	/* Mux clocking */
303*4882a593Smuzhiyun 	/* Grant Support */
304*4882a593Smuzhiyun 	ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
305*4882a593Smuzhiyun 	/* Breakpoint Support */
306*4882a593Smuzhiyun 	ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
307*4882a593Smuzhiyun 	/* Set Tsa or NMSI mode. */
308*4882a593Smuzhiyun 	ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
309*4882a593Smuzhiyun 	/* If NMSI (not Tsa), set Tx and Rx clock. */
310*4882a593Smuzhiyun 	if (!uf_info->tsa) {
311*4882a593Smuzhiyun 		/* Rx clock routing */
312*4882a593Smuzhiyun 		if ((uf_info->rx_clock != QE_CLK_NONE) &&
313*4882a593Smuzhiyun 		    ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
314*4882a593Smuzhiyun 					COMM_DIR_RX)) {
315*4882a593Smuzhiyun 			printk(KERN_ERR "%s: illegal value for RX clock\n",
316*4882a593Smuzhiyun 			       __func__);
317*4882a593Smuzhiyun 			ucc_fast_free(uccf);
318*4882a593Smuzhiyun 			return -EINVAL;
319*4882a593Smuzhiyun 		}
320*4882a593Smuzhiyun 		/* Tx clock routing */
321*4882a593Smuzhiyun 		if ((uf_info->tx_clock != QE_CLK_NONE) &&
322*4882a593Smuzhiyun 		    ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
323*4882a593Smuzhiyun 					COMM_DIR_TX)) {
324*4882a593Smuzhiyun 			printk(KERN_ERR "%s: illegal value for TX clock\n",
325*4882a593Smuzhiyun 			       __func__);
326*4882a593Smuzhiyun 			ucc_fast_free(uccf);
327*4882a593Smuzhiyun 			return -EINVAL;
328*4882a593Smuzhiyun 		}
329*4882a593Smuzhiyun 	} else {
330*4882a593Smuzhiyun 		/* tdm Rx clock routing */
331*4882a593Smuzhiyun 		if ((uf_info->rx_clock != QE_CLK_NONE) &&
332*4882a593Smuzhiyun 		    ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->rx_clock,
333*4882a593Smuzhiyun 					 COMM_DIR_RX)) {
334*4882a593Smuzhiyun 			pr_err("%s: illegal value for RX clock", __func__);
335*4882a593Smuzhiyun 			ucc_fast_free(uccf);
336*4882a593Smuzhiyun 			return -EINVAL;
337*4882a593Smuzhiyun 		}
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 		/* tdm Tx clock routing */
340*4882a593Smuzhiyun 		if ((uf_info->tx_clock != QE_CLK_NONE) &&
341*4882a593Smuzhiyun 		    ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->tx_clock,
342*4882a593Smuzhiyun 					 COMM_DIR_TX)) {
343*4882a593Smuzhiyun 			pr_err("%s: illegal value for TX clock", __func__);
344*4882a593Smuzhiyun 			ucc_fast_free(uccf);
345*4882a593Smuzhiyun 			return -EINVAL;
346*4882a593Smuzhiyun 		}
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 		/* tdm Rx sync clock routing */
349*4882a593Smuzhiyun 		if ((uf_info->rx_sync != QE_CLK_NONE) &&
350*4882a593Smuzhiyun 		    ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->rx_sync,
351*4882a593Smuzhiyun 					  COMM_DIR_RX)) {
352*4882a593Smuzhiyun 			pr_err("%s: illegal value for RX clock", __func__);
353*4882a593Smuzhiyun 			ucc_fast_free(uccf);
354*4882a593Smuzhiyun 			return -EINVAL;
355*4882a593Smuzhiyun 		}
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 		/* tdm Tx sync clock routing */
358*4882a593Smuzhiyun 		if ((uf_info->tx_sync != QE_CLK_NONE) &&
359*4882a593Smuzhiyun 		    ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->tx_sync,
360*4882a593Smuzhiyun 					  COMM_DIR_TX)) {
361*4882a593Smuzhiyun 			pr_err("%s: illegal value for TX clock", __func__);
362*4882a593Smuzhiyun 			ucc_fast_free(uccf);
363*4882a593Smuzhiyun 			return -EINVAL;
364*4882a593Smuzhiyun 		}
365*4882a593Smuzhiyun 	}
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	/* Set interrupt mask register at UCC level. */
368*4882a593Smuzhiyun 	qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun 	/* First, clear anything pending at UCC level,
371*4882a593Smuzhiyun 	 * otherwise, old garbage may come through
372*4882a593Smuzhiyun 	 * as soon as the dam is opened. */
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 	/* Writing '1' clears */
375*4882a593Smuzhiyun 	qe_iowrite32be(0xffffffff, &uf_regs->ucce);
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun 	*uccf_ret = uccf;
378*4882a593Smuzhiyun 	return 0;
379*4882a593Smuzhiyun }
380*4882a593Smuzhiyun EXPORT_SYMBOL(ucc_fast_init);
381*4882a593Smuzhiyun 
/*
 * Release everything ucc_fast_init() acquired: the Tx/Rx virtual FIFO
 * MURAM regions, the register mapping, and the private context itself.
 * Safe to call with a NULL pointer or a partially initialized context
 * (the FIFO offsets start at -1 before allocation).
 */
void ucc_fast_free(struct ucc_fast_private * uccf)
{
	if (!uccf)
		return;

	/* qe_muram_free() is passed the stored offsets unconditionally,
	 * including the -1 "never allocated" sentinel set by init. */
	qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
	qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);

	if (uccf->uf_regs)
		iounmap(uccf->uf_regs);

	kfree(uccf);
}
EXPORT_SYMBOL(ucc_fast_free);
396