xref: /rk3399_rockchip-uboot/drivers/dma/keystone_nav.c (revision ef4547176d0fa4d43d060a58c0c902add7fe0aed)
1*ef454717SKhoronzhuk, Ivan /*
2*ef454717SKhoronzhuk, Ivan  * Multicore Navigator driver for TI Keystone 2 devices.
3*ef454717SKhoronzhuk, Ivan  *
4*ef454717SKhoronzhuk, Ivan  * (C) Copyright 2012-2014
5*ef454717SKhoronzhuk, Ivan  *     Texas Instruments Incorporated, <www.ti.com>
6*ef454717SKhoronzhuk, Ivan  *
7*ef454717SKhoronzhuk, Ivan  * SPDX-License-Identifier:     GPL-2.0+
8*ef454717SKhoronzhuk, Ivan  */
9*ef454717SKhoronzhuk, Ivan #include <common.h>
10*ef454717SKhoronzhuk, Ivan #include <asm/io.h>
11*ef454717SKhoronzhuk, Ivan #include <asm/ti-common/keystone_nav.h>
12*ef454717SKhoronzhuk, Ivan 
/*
 * Default hardware Queue Manager register/memory map, taken from the
 * board's CONFIG_KSNAV_* defines. Only region_num and qpool_num are
 * plain counts; the rest are register block addresses.
 */
struct qm_config qm_memmap = {
	.stat_cfg	= CONFIG_KSNAV_QM_QUEUE_STATUS_BASE,
	.queue		= (void *)CONFIG_KSNAV_QM_MANAGER_QUEUES_BASE,
	.mngr_vbusm	= CONFIG_KSNAV_QM_BASE_ADDRESS,
	.i_lram		= CONFIG_KSNAV_QM_LINK_RAM_BASE,
	.proxy		= (void *)CONFIG_KSNAV_QM_MANAGER_Q_PROXY_BASE,
	.status_ram	= CONFIG_KSNAV_QM_STATUS_RAM_BASE,
	.mngr_cfg	= (void *)CONFIG_KSNAV_QM_CONF_BASE,
	.intd_cfg	= CONFIG_KSNAV_QM_INTD_CONF_BASE,
	.desc_mem	= (void *)CONFIG_KSNAV_QM_DESC_SETUP_BASE,
	.region_num	= CONFIG_KSNAV_QM_REGION_NUM,
	.pdsp_cmd	= CONFIG_KSNAV_QM_PDSP1_CMD_BASE,
	.pdsp_ctl	= CONFIG_KSNAV_QM_PDSP1_CTRL_BASE,
	.pdsp_iram	= CONFIG_KSNAV_QM_PDSP1_IRAM_BASE,
	.qpool_num	= CONFIG_KSNAV_QM_QPOOL_NUM,
};
29*ef454717SKhoronzhuk, Ivan 
/*
 * We are going to use only one type of descriptors - host packet
 * descriptors. We statically allocate memory for them here.
 * The pool is aligned to the descriptor size because qm_push()/qm_pop()
 * encode the size code in the low 4 bits of the descriptor address.
 */
struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));

/* Active queue manager configuration; NULL until _qm_init() has run */
static struct qm_config *qm_cfg;
37*ef454717SKhoronzhuk, Ivan 
/*
 * Encode a descriptor count into the register code expected by the
 * descriptor-region setup: code j covers up to 32 * 2^j descriptors
 * (0 -> 32, 1 -> 64, ... 14 -> 524288), saturating at 15.
 *
 * Note: 'static' is required here. A bare 'inline' at file scope is a
 * C99/C11 inline definition with no external definition, which fails
 * to link whenever the compiler emits an out-of-line call (e.g. -O0).
 * The function is only used within this file.
 */
static inline int num_of_desc_to_reg(int num_descr)
{
	int j, num;

	for (j = 0, num = 32; j < 15; j++, num *= 2) {
		if (num_descr <= num)
			return j;
	}

	return 15;
}
49*ef454717SKhoronzhuk, Ivan 
/*
 * Initialize the queue manager from @cfg.
 *
 * Programs link RAM 0 to cover HDESC_NUM descriptors, registers the
 * static desc_pool array as descriptor memory region 0, then pushes
 * every (zeroed) descriptor onto the free-descriptor queue qpool_num.
 * The configuration pointer is cached in qm_cfg for the other queue
 * operations. Always returns QM_OK.
 */
int _qm_init(struct qm_config *cfg)
{
	u32 j;

	qm_cfg = cfg;

	/* Only link RAM 0 is used; link RAMs 1 and 2 are disabled */
	qm_cfg->mngr_cfg->link_ram_base0	= qm_cfg->i_lram;
	qm_cfg->mngr_cfg->link_ram_size0	= HDESC_NUM * 8;
	qm_cfg->mngr_cfg->link_ram_base1	= 0;
	qm_cfg->mngr_cfg->link_ram_size1	= 0;
	qm_cfg->mngr_cfg->link_ram_base2	= 0;

	/*
	 * Region 0 covers the whole static pool. desc_reg_size packs
	 * (descriptor size in 16-byte units - 1) into bits 16+ and the
	 * encoded descriptor count (see num_of_desc_to_reg) into the
	 * low bits.
	 */
	qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
	qm_cfg->desc_mem[0].start_idx = 0;
	qm_cfg->desc_mem[0].desc_reg_size =
		(((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
		num_of_desc_to_reg(HDESC_NUM);

	/* Hand every descriptor to the free pool */
	memset(desc_pool, 0, sizeof(desc_pool));
	for (j = 0; j < HDESC_NUM; j++)
		qm_push(&desc_pool[j], qm_cfg->qpool_num);

	return QM_OK;
}
74*ef454717SKhoronzhuk, Ivan 
75*ef454717SKhoronzhuk, Ivan int qm_init(void)
76*ef454717SKhoronzhuk, Ivan {
77*ef454717SKhoronzhuk, Ivan 	return _qm_init(&qm_memmap);
78*ef454717SKhoronzhuk, Ivan }
79*ef454717SKhoronzhuk, Ivan 
/*
 * Shut down the queue manager: drain the free-descriptor queue and
 * clear the link RAM and descriptor-region registers programmed by
 * _qm_init(). A no-op when the queue manager was never initialized.
 */
void qm_close(void)
{
	u32	j;

	if (qm_cfg == NULL)
		return;

	/* Empty the free-descriptor pool first */
	queue_close(qm_cfg->qpool_num);

	qm_cfg->mngr_cfg->link_ram_base0	= 0;
	qm_cfg->mngr_cfg->link_ram_size0	= 0;
	qm_cfg->mngr_cfg->link_ram_base1	= 0;
	qm_cfg->mngr_cfg->link_ram_size1	= 0;
	qm_cfg->mngr_cfg->link_ram_base2	= 0;

	/* Unregister every descriptor memory region */
	for (j = 0; j < qm_cfg->region_num; j++) {
		qm_cfg->desc_mem[j].base_addr = 0;
		qm_cfg->desc_mem[j].start_idx = 0;
		qm_cfg->desc_mem[j].desc_reg_size = 0;
	}

	qm_cfg = NULL;
}
103*ef454717SKhoronzhuk, Ivan 
/*
 * Push host descriptor @hd onto queue @qnum.
 *
 * The word written to the queue register carries the descriptor
 * address with the descriptor size code (size in 16-byte units minus
 * one) in its low 4 bits. cpu_to_bus() is presumably converting the
 * descriptor words to the DMA engine's byte order -- the helper is
 * defined elsewhere. Silently does nothing before qm_init().
 */
void qm_push(struct qm_host_desc *hd, u32 qnum)
{
	u32 regd;

	if (!qm_cfg)
		return;

	cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc)/4);
	regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
	writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
}
115*ef454717SKhoronzhuk, Ivan 
116*ef454717SKhoronzhuk, Ivan void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
117*ef454717SKhoronzhuk, Ivan 		    void *buff_ptr, u32 buff_len)
118*ef454717SKhoronzhuk, Ivan {
119*ef454717SKhoronzhuk, Ivan 	hd->orig_buff_len = buff_len;
120*ef454717SKhoronzhuk, Ivan 	hd->buff_len = buff_len;
121*ef454717SKhoronzhuk, Ivan 	hd->orig_buff_ptr = (u32)buff_ptr;
122*ef454717SKhoronzhuk, Ivan 	hd->buff_ptr = (u32)buff_ptr;
123*ef454717SKhoronzhuk, Ivan 	qm_push(hd, qnum);
124*ef454717SKhoronzhuk, Ivan }
125*ef454717SKhoronzhuk, Ivan 
/*
 * Pop one host descriptor from queue @qnum.
 *
 * Returns NULL when the queue manager is not initialized or the queue
 * is empty. The low 4 bits of the queue register hold the descriptor
 * size code, so they are masked off to recover the pointer; the
 * descriptor words are then converted back with cpu_to_bus().
 */
struct qm_host_desc *qm_pop(u32 qnum)
{
	u32 uhd;

	if (!qm_cfg)
		return NULL;

	uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
	if (uhd)
		cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc)/4);

	return (struct qm_host_desc *)uhd;
}
139*ef454717SKhoronzhuk, Ivan 
140*ef454717SKhoronzhuk, Ivan struct qm_host_desc *qm_pop_from_free_pool(void)
141*ef454717SKhoronzhuk, Ivan {
142*ef454717SKhoronzhuk, Ivan 	if (!qm_cfg)
143*ef454717SKhoronzhuk, Ivan 		return NULL;
144*ef454717SKhoronzhuk, Ivan 
145*ef454717SKhoronzhuk, Ivan 	return qm_pop(qm_cfg->qpool_num);
146*ef454717SKhoronzhuk, Ivan }
147*ef454717SKhoronzhuk, Ivan 
148*ef454717SKhoronzhuk, Ivan void queue_close(u32 qnum)
149*ef454717SKhoronzhuk, Ivan {
150*ef454717SKhoronzhuk, Ivan 	struct qm_host_desc *hd;
151*ef454717SKhoronzhuk, Ivan 
152*ef454717SKhoronzhuk, Ivan 	while ((hd = qm_pop(qnum)))
153*ef454717SKhoronzhuk, Ivan 		;
154*ef454717SKhoronzhuk, Ivan }
155*ef454717SKhoronzhuk, Ivan 
/**
 * DMA API
 */

/*
 * Default NETCP packet-DMA register map and queue numbers, taken from
 * the board's CONFIG_KSNAV_NETCP_* defines.
 */
struct pktdma_cfg netcp_pktdma = {
	.global		= (void *)CONFIG_KSNAV_NETCP_PDMA_CTRL_BASE,
	.tx_ch		= (void *)CONFIG_KSNAV_NETCP_PDMA_TX_BASE,
	.tx_ch_num	= CONFIG_KSNAV_NETCP_PDMA_TX_CH_NUM,
	.rx_ch		= (void *)CONFIG_KSNAV_NETCP_PDMA_RX_BASE,
	.rx_ch_num	= CONFIG_KSNAV_NETCP_PDMA_RX_CH_NUM,
	.tx_sched	= (u32 *)CONFIG_KSNAV_NETCP_PDMA_SCHED_BASE,
	.rx_flows	= (void *)CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_BASE,
	.rx_flow_num	= CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_NUM,
	.rx_free_q	= CONFIG_KSNAV_NETCP_PDMA_RX_FREE_QUEUE,
	.rx_rcv_q	= CONFIG_KSNAV_NETCP_PDMA_RX_RCV_QUEUE,
	.tx_snd_q	= CONFIG_KSNAV_NETCP_PDMA_TX_SND_QUEUE,
};

/* Active packet-DMA configuration; NULL until _netcp_init() has run */
struct pktdma_cfg *netcp;
174*ef454717SKhoronzhuk, Ivan 
175*ef454717SKhoronzhuk, Ivan static int netcp_rx_disable(void)
176*ef454717SKhoronzhuk, Ivan {
177*ef454717SKhoronzhuk, Ivan 	u32 j, v, k;
178*ef454717SKhoronzhuk, Ivan 
179*ef454717SKhoronzhuk, Ivan 	for (j = 0; j < netcp->rx_ch_num; j++) {
180*ef454717SKhoronzhuk, Ivan 		v = readl(&netcp->rx_ch[j].cfg_a);
181*ef454717SKhoronzhuk, Ivan 		if (!(v & CPDMA_CHAN_A_ENABLE))
182*ef454717SKhoronzhuk, Ivan 			continue;
183*ef454717SKhoronzhuk, Ivan 
184*ef454717SKhoronzhuk, Ivan 		writel(v | CPDMA_CHAN_A_TDOWN, &netcp->rx_ch[j].cfg_a);
185*ef454717SKhoronzhuk, Ivan 		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
186*ef454717SKhoronzhuk, Ivan 			udelay(100);
187*ef454717SKhoronzhuk, Ivan 			v = readl(&netcp->rx_ch[j].cfg_a);
188*ef454717SKhoronzhuk, Ivan 			if (!(v & CPDMA_CHAN_A_ENABLE))
189*ef454717SKhoronzhuk, Ivan 				continue;
190*ef454717SKhoronzhuk, Ivan 		}
191*ef454717SKhoronzhuk, Ivan 		/* TODO: teardown error on if TDOWN_TIMEOUT_COUNT is reached */
192*ef454717SKhoronzhuk, Ivan 	}
193*ef454717SKhoronzhuk, Ivan 
194*ef454717SKhoronzhuk, Ivan 	/* Clear all of the flow registers */
195*ef454717SKhoronzhuk, Ivan 	for (j = 0; j < netcp->rx_flow_num; j++) {
196*ef454717SKhoronzhuk, Ivan 		writel(0, &netcp->rx_flows[j].control);
197*ef454717SKhoronzhuk, Ivan 		writel(0, &netcp->rx_flows[j].tags);
198*ef454717SKhoronzhuk, Ivan 		writel(0, &netcp->rx_flows[j].tag_sel);
199*ef454717SKhoronzhuk, Ivan 		writel(0, &netcp->rx_flows[j].fdq_sel[0]);
200*ef454717SKhoronzhuk, Ivan 		writel(0, &netcp->rx_flows[j].fdq_sel[1]);
201*ef454717SKhoronzhuk, Ivan 		writel(0, &netcp->rx_flows[j].thresh[0]);
202*ef454717SKhoronzhuk, Ivan 		writel(0, &netcp->rx_flows[j].thresh[1]);
203*ef454717SKhoronzhuk, Ivan 		writel(0, &netcp->rx_flows[j].thresh[2]);
204*ef454717SKhoronzhuk, Ivan 	}
205*ef454717SKhoronzhuk, Ivan 
206*ef454717SKhoronzhuk, Ivan 	return QM_OK;
207*ef454717SKhoronzhuk, Ivan }
208*ef454717SKhoronzhuk, Ivan 
209*ef454717SKhoronzhuk, Ivan static int netcp_tx_disable(void)
210*ef454717SKhoronzhuk, Ivan {
211*ef454717SKhoronzhuk, Ivan 	u32 j, v, k;
212*ef454717SKhoronzhuk, Ivan 
213*ef454717SKhoronzhuk, Ivan 	for (j = 0; j < netcp->tx_ch_num; j++) {
214*ef454717SKhoronzhuk, Ivan 		v = readl(&netcp->tx_ch[j].cfg_a);
215*ef454717SKhoronzhuk, Ivan 		if (!(v & CPDMA_CHAN_A_ENABLE))
216*ef454717SKhoronzhuk, Ivan 			continue;
217*ef454717SKhoronzhuk, Ivan 
218*ef454717SKhoronzhuk, Ivan 		writel(v | CPDMA_CHAN_A_TDOWN, &netcp->tx_ch[j].cfg_a);
219*ef454717SKhoronzhuk, Ivan 		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
220*ef454717SKhoronzhuk, Ivan 			udelay(100);
221*ef454717SKhoronzhuk, Ivan 			v = readl(&netcp->tx_ch[j].cfg_a);
222*ef454717SKhoronzhuk, Ivan 			if (!(v & CPDMA_CHAN_A_ENABLE))
223*ef454717SKhoronzhuk, Ivan 				continue;
224*ef454717SKhoronzhuk, Ivan 		}
225*ef454717SKhoronzhuk, Ivan 		/* TODO: teardown error on if TDOWN_TIMEOUT_COUNT is reached */
226*ef454717SKhoronzhuk, Ivan 	}
227*ef454717SKhoronzhuk, Ivan 
228*ef454717SKhoronzhuk, Ivan 	return QM_OK;
229*ef454717SKhoronzhuk, Ivan }
230*ef454717SKhoronzhuk, Ivan 
/*
 * Bring up the NETCP packet DMA.
 *
 * Splits the caller's buffer area into num_buffs chunks of buff_len
 * bytes, attaches each chunk to a free host descriptor and queues it
 * on the RX free queue. Then programs RX flow rx_flow to deliver
 * packets to rx_rcv_q (refilling descriptors from rx_free_q) and
 * enables every RX and TX channel.
 *
 * Returns QM_ERR on bad arguments, an exhausted descriptor pool, or
 * when the queue manager is not initialized; QM_OK otherwise.
 */
static int _netcp_init(struct pktdma_cfg *netcp_cfg,
		       struct rx_buff_desc *rx_buffers)
{
	u32 j, v;
	struct qm_host_desc *hd;
	u8 *rx_ptr;

	if (netcp_cfg == NULL || rx_buffers == NULL ||
	    rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
		return QM_ERR;

	netcp = netcp_cfg;
	netcp->rx_flow = rx_buffers->rx_flow;

	/* init rx queue */
	rx_ptr = rx_buffers->buff_ptr;

	/* Populate the RX free queue with one descriptor per buffer chunk */
	for (j = 0; j < rx_buffers->num_buffs; j++) {
		hd = qm_pop(qm_cfg->qpool_num);
		if (hd == NULL)
			return QM_ERR;

		qm_buff_push(hd, netcp->rx_free_q,
			     rx_ptr, rx_buffers->buff_len);

		rx_ptr += rx_buffers->buff_len;
	}

	/* Quiesce RX before reprogramming the flow */
	netcp_rx_disable();

	/* configure rx channels */
	v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, netcp->rx_rcv_q);
	writel(v, &netcp->rx_flows[netcp->rx_flow].control);
	writel(0, &netcp->rx_flows[netcp->rx_flow].tags);
	writel(0, &netcp->rx_flows[netcp->rx_flow].tag_sel);

	/* Use the RX free queue for all free-descriptor selections */
	v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, netcp->rx_free_q, 0,
					 netcp->rx_free_q);

	writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[0]);
	writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[1]);
	writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[0]);
	writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[1]);
	writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[2]);

	for (j = 0; j < netcp->rx_ch_num; j++)
		writel(CPDMA_CHAN_A_ENABLE, &netcp->rx_ch[j].cfg_a);

	/* configure tx channels */
	/* Disable loopback in the tx direction */
	writel(0, &netcp->global->emulation_control);

	/* Set QM base address, only for K2x devices */
	writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &netcp->global->qm_base_addr[0]);

	/* Enable all channels. The current state isn't important */
	for (j = 0; j < netcp->tx_ch_num; j++)  {
		writel(0, &netcp->tx_ch[j].cfg_b);
		writel(CPDMA_CHAN_A_ENABLE, &netcp->tx_ch[j].cfg_a);
	}

	return QM_OK;
}
294*ef454717SKhoronzhuk, Ivan 
295*ef454717SKhoronzhuk, Ivan int netcp_init(struct rx_buff_desc *rx_buffers)
296*ef454717SKhoronzhuk, Ivan {
297*ef454717SKhoronzhuk, Ivan 	return _netcp_init(&netcp_pktdma, rx_buffers);
298*ef454717SKhoronzhuk, Ivan }
299*ef454717SKhoronzhuk, Ivan 
/*
 * Tear down the packet DMA: disable the TX then RX channels, then
 * drain the free, receive and send queues. Returns QM_ERR when
 * netcp_init() was never called, QM_OK otherwise.
 */
int netcp_close(void)
{
	if (!netcp)
		return QM_ERR;

	/* Stop the DMA engines before draining the queues */
	netcp_tx_disable();
	netcp_rx_disable();

	queue_close(netcp->rx_free_q);
	queue_close(netcp->rx_rcv_q);
	queue_close(netcp->tx_snd_q);

	return QM_OK;
}
314*ef454717SKhoronzhuk, Ivan 
315*ef454717SKhoronzhuk, Ivan int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2)
316*ef454717SKhoronzhuk, Ivan {
317*ef454717SKhoronzhuk, Ivan 	struct qm_host_desc *hd;
318*ef454717SKhoronzhuk, Ivan 
319*ef454717SKhoronzhuk, Ivan 	hd = qm_pop(qm_cfg->qpool_num);
320*ef454717SKhoronzhuk, Ivan 	if (hd == NULL)
321*ef454717SKhoronzhuk, Ivan 		return QM_ERR;
322*ef454717SKhoronzhuk, Ivan 
323*ef454717SKhoronzhuk, Ivan 	hd->desc_info	= num_bytes;
324*ef454717SKhoronzhuk, Ivan 	hd->swinfo[2]	= swinfo2;
325*ef454717SKhoronzhuk, Ivan 	hd->packet_info = qm_cfg->qpool_num;
326*ef454717SKhoronzhuk, Ivan 
327*ef454717SKhoronzhuk, Ivan 	qm_buff_push(hd, netcp->tx_snd_q, pkt, num_bytes);
328*ef454717SKhoronzhuk, Ivan 
329*ef454717SKhoronzhuk, Ivan 	return QM_OK;
330*ef454717SKhoronzhuk, Ivan }
331*ef454717SKhoronzhuk, Ivan 
332*ef454717SKhoronzhuk, Ivan void *netcp_recv(u32 **pkt, int *num_bytes)
333*ef454717SKhoronzhuk, Ivan {
334*ef454717SKhoronzhuk, Ivan 	struct qm_host_desc *hd;
335*ef454717SKhoronzhuk, Ivan 
336*ef454717SKhoronzhuk, Ivan 	hd = qm_pop(netcp->rx_rcv_q);
337*ef454717SKhoronzhuk, Ivan 	if (!hd)
338*ef454717SKhoronzhuk, Ivan 		return NULL;
339*ef454717SKhoronzhuk, Ivan 
340*ef454717SKhoronzhuk, Ivan 	*pkt = (u32 *)hd->buff_ptr;
341*ef454717SKhoronzhuk, Ivan 	*num_bytes = hd->desc_info & 0x3fffff;
342*ef454717SKhoronzhuk, Ivan 
343*ef454717SKhoronzhuk, Ivan 	return hd;
344*ef454717SKhoronzhuk, Ivan }
345*ef454717SKhoronzhuk, Ivan 
346*ef454717SKhoronzhuk, Ivan void netcp_release_rxhd(void *hd)
347*ef454717SKhoronzhuk, Ivan {
348*ef454717SKhoronzhuk, Ivan 	struct qm_host_desc *_hd = (struct qm_host_desc *)hd;
349*ef454717SKhoronzhuk, Ivan 
350*ef454717SKhoronzhuk, Ivan 	_hd->buff_len = _hd->orig_buff_len;
351*ef454717SKhoronzhuk, Ivan 	_hd->buff_ptr = _hd->orig_buff_ptr;
352*ef454717SKhoronzhuk, Ivan 
353*ef454717SKhoronzhuk, Ivan 	qm_push(_hd, netcp->rx_free_q);
354*ef454717SKhoronzhuk, Ivan }
355