xref: /rk3399_rockchip-uboot/drivers/dma/keystone_nav.c (revision 9ea9021ac466f5ccc8b6238cbce37428bb58f887)
/*
 * Multicore Navigator driver for TI Keystone 2 devices.
 *
 * (C) Copyright 2012-2014
 *     Texas Instruments Incorporated, <www.ti.com>
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <asm/ti-common/keystone_nav.h>

struct qm_config qm_memmap = {
	.stat_cfg	= CONFIG_KSNAV_QM_QUEUE_STATUS_BASE,
	.queue		= (void *)CONFIG_KSNAV_QM_MANAGER_QUEUES_BASE,
	.mngr_vbusm	= CONFIG_KSNAV_QM_BASE_ADDRESS,
	.i_lram		= CONFIG_KSNAV_QM_LINK_RAM_BASE,
	.proxy		= (void *)CONFIG_KSNAV_QM_MANAGER_Q_PROXY_BASE,
	.status_ram	= CONFIG_KSNAV_QM_STATUS_RAM_BASE,
	.mngr_cfg	= (void *)CONFIG_KSNAV_QM_CONF_BASE,
	.intd_cfg	= CONFIG_KSNAV_QM_INTD_CONF_BASE,
	.desc_mem	= (void *)CONFIG_KSNAV_QM_DESC_SETUP_BASE,
	.region_num	= CONFIG_KSNAV_QM_REGION_NUM,
	.pdsp_cmd	= CONFIG_KSNAV_QM_PDSP1_CMD_BASE,
	.pdsp_ctl	= CONFIG_KSNAV_QM_PDSP1_CTRL_BASE,
	.pdsp_iram	= CONFIG_KSNAV_QM_PDSP1_IRAM_BASE,
	.qpool_num	= CONFIG_KSNAV_QM_QPOOL_NUM,
};

/*
 * Only one type of descriptor is used - host packet descriptors.
 * Memory for them is statically allocated here.
 */
struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));

static struct qm_config *qm_cfg;

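/**
 * num_of_desc_to_reg() - encode a descriptor count as a region size code
 *
 * Returns the 4-bit size code programmed into desc_reg_size: code j means
 * the region holds up to 32 * 2^j descriptors, capped at 15.
 */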
inline int num_of_desc_to_reg(int num_descr)
{
	int j, num;

	for (j = 0, num = 32; j < 15; j++, num *= 2) {
		if (num_descr <= num)
			return j;
	}

	return 15;
}

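/**
 * _qm_init() - initialize the queue manager
 * @cfg: memory map of the queue manager to use
 *
 * Programs internal link RAM 0 with room for HDESC_NUM descriptors, sets up
 * descriptor memory region 0 to cover the statically allocated desc_pool,
 * then pushes every descriptor onto the free-descriptor queue
 * (cfg->qpool_num).
 */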
int _qm_init(struct qm_config *cfg)
{
	u32 j;

	qm_cfg = cfg;

	qm_cfg->mngr_cfg->link_ram_base0	= qm_cfg->i_lram;
	qm_cfg->mngr_cfg->link_ram_size0	= HDESC_NUM * 8;
	qm_cfg->mngr_cfg->link_ram_base1	= 0;
	qm_cfg->mngr_cfg->link_ram_size1	= 0;
	qm_cfg->mngr_cfg->link_ram_base2	= 0;

	qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
	qm_cfg->desc_mem[0].start_idx = 0;
	qm_cfg->desc_mem[0].desc_reg_size =
		(((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
		num_of_desc_to_reg(HDESC_NUM);

	memset(desc_pool, 0, sizeof(desc_pool));
	for (j = 0; j < HDESC_NUM; j++)
		qm_push(&desc_pool[j], qm_cfg->qpool_num);

	return QM_OK;
}

int qm_init(void)
{
	return _qm_init(&qm_memmap);
}

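/**
 * qm_close() - shut down the queue manager
 *
 * Drains the free-descriptor queue, clears the link RAM and descriptor
 * region setup and marks the queue manager as uninitialized, so subsequent
 * push/pop calls become no-ops until qm_init() is called again.
 */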
void qm_close(void)
{
	u32	j;

	if (qm_cfg == NULL)
		return;

	queue_close(qm_cfg->qpool_num);

	qm_cfg->mngr_cfg->link_ram_base0	= 0;
	qm_cfg->mngr_cfg->link_ram_size0	= 0;
	qm_cfg->mngr_cfg->link_ram_base1	= 0;
	qm_cfg->mngr_cfg->link_ram_size1	= 0;
	qm_cfg->mngr_cfg->link_ram_base2	= 0;

	for (j = 0; j < qm_cfg->region_num; j++) {
		qm_cfg->desc_mem[j].base_addr = 0;
		qm_cfg->desc_mem[j].start_idx = 0;
		qm_cfg->desc_mem[j].desc_reg_size = 0;
	}

	qm_cfg = NULL;
}

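/**
 * qm_push() - push a descriptor onto a queue
 * @hd:   host descriptor to push
 * @qnum: destination queue number
 *
 * The descriptor is converted to bus byte order and its address, with the
 * descriptor size in 16-byte units minus one encoded in the low 4 bits, is
 * written to the queue's push register.
 */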
void qm_push(struct qm_host_desc *hd, u32 qnum)
{
	u32 regd;

	if (!qm_cfg)
		return;

	cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc)/4);
	regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
	writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
}

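/**
 * qm_buff_push() - attach a buffer to a descriptor and push it
 * @hd:       host descriptor to use
 * @qnum:     destination queue number
 * @buff_ptr: buffer to attach
 * @buff_len: buffer length in bytes
 */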
void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
		    void *buff_ptr, u32 buff_len)
{
	hd->orig_buff_len = buff_len;
	hd->buff_len = buff_len;
	hd->orig_buff_ptr = (u32)buff_ptr;
	hd->buff_ptr = (u32)buff_ptr;
	qm_push(hd, qnum);
}

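/**
 * qm_pop() - pop a descriptor from a queue
 * @qnum: queue number to pop from
 *
 * The low 4 bits of the pop register carry the descriptor size hint and are
 * masked off. Returns the descriptor converted back to CPU byte order, or
 * NULL if the queue is empty.
 */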
struct qm_host_desc *qm_pop(u32 qnum)
{
	u32 uhd;

	if (!qm_cfg)
		return NULL;

	uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
	if (uhd)
		cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc)/4);

	return (struct qm_host_desc *)uhd;
}

struct qm_host_desc *qm_pop_from_free_pool(void)
{
	if (!qm_cfg)
		return NULL;

	return qm_pop(qm_cfg->qpool_num);
}

void queue_close(u32 qnum)
{
	struct qm_host_desc *hd;

	while ((hd = qm_pop(qnum)))
		;
}

/**
 * DMA API
 */

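/*
 * Typical use from a network driver is sketched below. The pktdma instance,
 * buffer area, sizes and flow id are placeholders - the real values are
 * board- and driver-specific:
 *
 *	struct rx_buff_desc bufs = {
 *		.buff_ptr  = rx_buffer_area,
 *		.num_buffs = RX_BUF_NUM,
 *		.buff_len  = RX_BUF_LEN,
 *		.rx_flow   = RX_FLOW_ID,
 *	};
 *
 *	qm_init();
 *	ksnav_init(&my_pktdma, &bufs);
 *	ksnav_send(&my_pktdma, (u32 *)pkt, len, swinfo2);
 *	...
 *	ksnav_close(&my_pktdma);
 *	qm_close();
 */

/**
 * ksnav_rx_disable() - tear down all active rx channels
 *
 * Requests teardown of every enabled rx channel, waits up to
 * TDOWN_TIMEOUT_COUNT * 100 us for each to complete and then clears all rx
 * flow registers.
 */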
static int ksnav_rx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->rx_ch_num; j++) {
		v = readl(&pktdma->rx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->rx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->rx_ch[j].cfg_a);
			/* stop waiting once the channel reports disabled */
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	/* Clear all of the flow registers */
	for (j = 0; j < pktdma->rx_flow_num; j++) {
		writel(0, &pktdma->rx_flows[j].control);
		writel(0, &pktdma->rx_flows[j].tags);
		writel(0, &pktdma->rx_flows[j].tag_sel);
		writel(0, &pktdma->rx_flows[j].fdq_sel[0]);
		writel(0, &pktdma->rx_flows[j].fdq_sel[1]);
		writel(0, &pktdma->rx_flows[j].thresh[0]);
		writel(0, &pktdma->rx_flows[j].thresh[1]);
		writel(0, &pktdma->rx_flows[j].thresh[2]);
	}

	return QM_OK;
}

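/**
 * ksnav_tx_disable() - tear down all active tx channels
 *
 * Requests teardown of every enabled tx channel and waits up to
 * TDOWN_TIMEOUT_COUNT * 100 us for each to complete.
 */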
static int ksnav_tx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->tx_ch_num; j++) {
		v = readl(&pktdma->tx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->tx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->tx_ch[j].cfg_a);
			/* stop waiting once the channel reports disabled */
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	return QM_OK;
}

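/**
 * ksnav_init() - initialize a PKTDMA instance
 * @pktdma:     PKTDMA register map and queue configuration
 * @rx_buffers: rx buffer area to carve receive buffers from
 *
 * Fills the rx free-descriptor queue with num_buffs buffers taken from
 * rx_buffers->buff_ptr, configures the rx flow to take free descriptors
 * from rx_free_q and deliver received packets to rx_rcv_q, and enables all
 * rx and tx channels. The queue manager must be initialized first.
 */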
int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers)
{
	u32 j, v;
	struct qm_host_desc *hd;
	u8 *rx_ptr;

	if (pktdma == NULL || rx_buffers == NULL ||
	    rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
		return QM_ERR;

	pktdma->rx_flow = rx_buffers->rx_flow;

	/* init rx queue */
	rx_ptr = rx_buffers->buff_ptr;

	for (j = 0; j < rx_buffers->num_buffs; j++) {
		hd = qm_pop(qm_cfg->qpool_num);
		if (hd == NULL)
			return QM_ERR;

		qm_buff_push(hd, pktdma->rx_free_q,
			     rx_ptr, rx_buffers->buff_len);

		rx_ptr += rx_buffers->buff_len;
	}

	ksnav_rx_disable(pktdma);

	/* configure rx channels */
	v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, pktdma->rx_rcv_q);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].control);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tags);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tag_sel);

	v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, pktdma->rx_free_q, 0,
					 pktdma->rx_free_q);

	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[0]);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[0]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[2]);

	for (j = 0; j < pktdma->rx_ch_num; j++)
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->rx_ch[j].cfg_a);

	/* configure tx channels */
	/* Disable loopback in the tx direction */
	writel(0, &pktdma->global->emulation_control);

	/* Set QM base address, only for K2x devices */
	writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &pktdma->global->qm_base_addr[0]);

	/* Enable all channels. The current state isn't important */
	for (j = 0; j < pktdma->tx_ch_num; j++) {
		writel(0, &pktdma->tx_ch[j].cfg_b);
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->tx_ch[j].cfg_a);
	}

	return QM_OK;
}

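/**
 * ksnav_close() - shut down a PKTDMA instance
 *
 * Disables all tx and rx channels and drains the rx free, rx receive and
 * tx send queues.
 */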
int ksnav_close(struct pktdma_cfg *pktdma)
{
	if (!pktdma)
		return QM_ERR;

	ksnav_tx_disable(pktdma);
	ksnav_rx_disable(pktdma);

	queue_close(pktdma->rx_free_q);
	queue_close(pktdma->rx_rcv_q);
	queue_close(pktdma->tx_snd_q);

	return QM_OK;
}

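/**
 * ksnav_send() - transmit one packet
 * @pktdma:    PKTDMA instance to send on
 * @pkt:       packet data
 * @num_bytes: packet length in bytes
 * @swinfo2:   value stored in the descriptor's swinfo[2] word
 *
 * Pops a free descriptor, records the packet length and the free pool as
 * the return queue, and pushes the descriptor onto the tx send queue.
 */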
int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2)
{
	struct qm_host_desc *hd;

	if (!qm_cfg)
		return QM_ERR;

	hd = qm_pop(qm_cfg->qpool_num);
	if (hd == NULL)
		return QM_ERR;

	hd->desc_info	= num_bytes;
	hd->swinfo[2]	= swinfo2;
	hd->packet_info = qm_cfg->qpool_num;

	qm_buff_push(hd, pktdma->tx_snd_q, pkt, num_bytes);

	return QM_OK;
}

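/**
 * ksnav_recv() - receive one packet, if available
 * @pktdma:    PKTDMA instance to receive from
 * @pkt:       filled with the buffer pointer of the received packet
 * @num_bytes: filled with the packet length in bytes
 *
 * Returns the descriptor handle, or NULL if no packet is pending. The
 * handle should be given back with ksnav_release_rxhd() once the packet
 * has been processed.
 */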
void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes)
{
	struct qm_host_desc *hd;

	hd = qm_pop(pktdma->rx_rcv_q);
	if (!hd)
		return NULL;

	*pkt = (u32 *)hd->buff_ptr;
	*num_bytes = hd->desc_info & 0x3fffff;

	return hd;
}

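/**
 * ksnav_release_rxhd() - return a receive descriptor to the free queue
 * @pktdma: PKTDMA instance the descriptor came from
 * @hd:     descriptor handle returned by ksnav_recv()
 *
 * Restores the original buffer pointer and length and pushes the descriptor
 * back onto the rx free-descriptor queue.
 */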
void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd)
{
	struct qm_host_desc *_hd = (struct qm_host_desc *)hd;

	_hd->buff_len = _hd->orig_buff_len;
	_hd->buff_ptr = _hd->orig_buff_ptr;

	qm_push(_hd, pktdma->rx_free_q);
}