xref: /OK3568_Linux_fs/u-boot/drivers/net/xilinx_ll_temac_sdma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Xilinx xps_ll_temac ethernet driver for u-boot
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * SDMA sub-controller
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Copyright (C) 2011 - 2012 Stephan Linz <linz@li-pro.net>
7*4882a593Smuzhiyun  * Copyright (C) 2008 - 2011 Michal Simek <monstr@monstr.eu>
8*4882a593Smuzhiyun  * Copyright (C) 2008 - 2011 PetaLogix
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * Based on Yoshio Kashiwagi kashiwagi@co-nss.co.jp driver
11*4882a593Smuzhiyun  * Copyright (C) 2008 Nissin Systems Co.,Ltd.
12*4882a593Smuzhiyun  * March 2008 created
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * CREDITS: tsec driver
15*4882a593Smuzhiyun  *
16*4882a593Smuzhiyun  * SPDX-License-Identifier:	GPL-2.0+
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  * [0]: http://www.xilinx.com/support/documentation
19*4882a593Smuzhiyun  *
20*4882a593Smuzhiyun  * [M]:	[0]/ip_documentation/mpmc.pdf
21*4882a593Smuzhiyun  * [S]:	[0]/ip_documentation/xps_ll_temac.pdf
22*4882a593Smuzhiyun  * [A]:	[0]/application_notes/xapp1041.pdf
23*4882a593Smuzhiyun  */
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #include <config.h>
26*4882a593Smuzhiyun #include <common.h>
27*4882a593Smuzhiyun #include <net.h>
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun #include <asm/types.h>
30*4882a593Smuzhiyun #include <asm/io.h>
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #include "xilinx_ll_temac.h"
33*4882a593Smuzhiyun #include "xilinx_ll_temac_sdma.h"
34*4882a593Smuzhiyun 
/* Number of TX buffer descriptors in the (tiny) transmit ring */
#define TX_BUF_CNT		2

static unsigned int rx_idx;	/* index of the current RX buffer */
static unsigned int tx_idx;	/* index of the current TX buffer */

/*
 * One contiguous pool holding both descriptor rings: PKTBUFSRX receive
 * descriptors followed by TX_BUF_CNT transmit descriptors.
 */
struct rtx_cdmac_bd {
	struct cdmac_bd rx[PKTBUFSRX];
	struct cdmac_bd tx[TX_BUF_CNT];
};

/*
 * DMA Buffer Descriptor alignment
 *
 * If the address contained in the Next Descriptor Pointer register is not
 * 8-word aligned or reaches beyond the range of available memory, the SDMA
 * halts processing and sets the CDMAC_BD_STCTRL_ERROR bit in the respective
 * status register (tx_chnl_sts or rx_chnl_sts).
 *
 * 8 words x 4 bytes = 32 bytes, hence the __aligned(32) below.
 *
 * [1]: [0]/ip_documentation/mpmc.pdf
 *      page 161, Next Descriptor Pointer
 */
static struct rtx_cdmac_bd cdmac_bd __aligned(32);
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun /* Xilinx Processor Local Bus (PLB) in/out accessors */
inline unsigned ll_temac_xlplb_in32(phys_addr_t addr)
{
	/* 32-bit big-endian read from a memory-mapped PLB register */
	void *reg = (void *)addr;

	return in_be32(reg);
}
inline void ll_temac_xlplb_out32(phys_addr_t addr, unsigned value)
{
	/* 32-bit big-endian write to a memory-mapped PLB register */
	void *reg = (void *)addr;

	out_be32(reg, value);
}
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun /* collect all register addresses for Xilinx PLB in/out accessors */
ll_temac_collect_xlplb_sdma_reg_addr(struct eth_device * dev)69*4882a593Smuzhiyun void ll_temac_collect_xlplb_sdma_reg_addr(struct eth_device *dev)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun 	struct ll_temac *ll_temac = dev->priv;
72*4882a593Smuzhiyun 	struct sdma_ctrl *sdma_ctrl = (void *)ll_temac->ctrladdr;
73*4882a593Smuzhiyun 	phys_addr_t *ra = ll_temac->sdma_reg_addr;
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 	ra[TX_NXTDESC_PTR]   = (phys_addr_t)&sdma_ctrl->tx_nxtdesc_ptr;
76*4882a593Smuzhiyun 	ra[TX_CURBUF_ADDR]   = (phys_addr_t)&sdma_ctrl->tx_curbuf_addr;
77*4882a593Smuzhiyun 	ra[TX_CURBUF_LENGTH] = (phys_addr_t)&sdma_ctrl->tx_curbuf_length;
78*4882a593Smuzhiyun 	ra[TX_CURDESC_PTR]   = (phys_addr_t)&sdma_ctrl->tx_curdesc_ptr;
79*4882a593Smuzhiyun 	ra[TX_TAILDESC_PTR]  = (phys_addr_t)&sdma_ctrl->tx_taildesc_ptr;
80*4882a593Smuzhiyun 	ra[TX_CHNL_CTRL]     = (phys_addr_t)&sdma_ctrl->tx_chnl_ctrl;
81*4882a593Smuzhiyun 	ra[TX_IRQ_REG]       = (phys_addr_t)&sdma_ctrl->tx_irq_reg;
82*4882a593Smuzhiyun 	ra[TX_CHNL_STS]      = (phys_addr_t)&sdma_ctrl->tx_chnl_sts;
83*4882a593Smuzhiyun 	ra[RX_NXTDESC_PTR]   = (phys_addr_t)&sdma_ctrl->rx_nxtdesc_ptr;
84*4882a593Smuzhiyun 	ra[RX_CURBUF_ADDR]   = (phys_addr_t)&sdma_ctrl->rx_curbuf_addr;
85*4882a593Smuzhiyun 	ra[RX_CURBUF_LENGTH] = (phys_addr_t)&sdma_ctrl->rx_curbuf_length;
86*4882a593Smuzhiyun 	ra[RX_CURDESC_PTR]   = (phys_addr_t)&sdma_ctrl->rx_curdesc_ptr;
87*4882a593Smuzhiyun 	ra[RX_TAILDESC_PTR]  = (phys_addr_t)&sdma_ctrl->rx_taildesc_ptr;
88*4882a593Smuzhiyun 	ra[RX_CHNL_CTRL]     = (phys_addr_t)&sdma_ctrl->rx_chnl_ctrl;
89*4882a593Smuzhiyun 	ra[RX_IRQ_REG]       = (phys_addr_t)&sdma_ctrl->rx_irq_reg;
90*4882a593Smuzhiyun 	ra[RX_CHNL_STS]      = (phys_addr_t)&sdma_ctrl->rx_chnl_sts;
91*4882a593Smuzhiyun 	ra[DMA_CONTROL_REG]  = (phys_addr_t)&sdma_ctrl->dma_control_reg;
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun /* Check for TX and RX channel errors. */
ll_temac_sdma_error(struct eth_device * dev)95*4882a593Smuzhiyun static inline int ll_temac_sdma_error(struct eth_device *dev)
96*4882a593Smuzhiyun {
97*4882a593Smuzhiyun 	int err;
98*4882a593Smuzhiyun 	struct ll_temac *ll_temac = dev->priv;
99*4882a593Smuzhiyun 	phys_addr_t *ra = ll_temac->sdma_reg_addr;
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	err = ll_temac->in32(ra[TX_CHNL_STS]) & CHNL_STS_ERROR;
102*4882a593Smuzhiyun 	err |= ll_temac->in32(ra[RX_CHNL_STS]) & CHNL_STS_ERROR;
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun 	return err;
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun 
ll_temac_init_sdma(struct eth_device * dev)107*4882a593Smuzhiyun int ll_temac_init_sdma(struct eth_device *dev)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun 	struct ll_temac *ll_temac = dev->priv;
110*4882a593Smuzhiyun 	struct cdmac_bd *rx_dp;
111*4882a593Smuzhiyun 	struct cdmac_bd *tx_dp;
112*4882a593Smuzhiyun 	phys_addr_t *ra = ll_temac->sdma_reg_addr;
113*4882a593Smuzhiyun 	int i;
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun 	printf("%s: SDMA: %d Rx buffers, %d Tx buffers\n",
116*4882a593Smuzhiyun 			dev->name, PKTBUFSRX, TX_BUF_CNT);
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	/* Initialize the Rx Buffer descriptors */
119*4882a593Smuzhiyun 	for (i = 0; i < PKTBUFSRX; i++) {
120*4882a593Smuzhiyun 		rx_dp = &cdmac_bd.rx[i];
121*4882a593Smuzhiyun 		memset(rx_dp, 0, sizeof(*rx_dp));
122*4882a593Smuzhiyun 		rx_dp->next_p = rx_dp;
123*4882a593Smuzhiyun 		rx_dp->buf_len = PKTSIZE_ALIGN;
124*4882a593Smuzhiyun 		rx_dp->phys_buf_p = (u8 *)net_rx_packets[i];
125*4882a593Smuzhiyun 		flush_cache((u32)rx_dp->phys_buf_p, PKTSIZE_ALIGN);
126*4882a593Smuzhiyun 	}
127*4882a593Smuzhiyun 	flush_cache((u32)cdmac_bd.rx, sizeof(cdmac_bd.rx));
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 	/* Initialize the TX Buffer Descriptors */
130*4882a593Smuzhiyun 	for (i = 0; i < TX_BUF_CNT; i++) {
131*4882a593Smuzhiyun 		tx_dp = &cdmac_bd.tx[i];
132*4882a593Smuzhiyun 		memset(tx_dp, 0, sizeof(*tx_dp));
133*4882a593Smuzhiyun 		tx_dp->next_p = tx_dp;
134*4882a593Smuzhiyun 	}
135*4882a593Smuzhiyun 	flush_cache((u32)cdmac_bd.tx, sizeof(cdmac_bd.tx));
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun 	/* Reset index counter to the Rx and Tx Buffer descriptors */
138*4882a593Smuzhiyun 	rx_idx = tx_idx = 0;
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	/* initial Rx DMA start by writing to respective TAILDESC_PTR */
141*4882a593Smuzhiyun 	ll_temac->out32(ra[RX_CURDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
142*4882a593Smuzhiyun 	ll_temac->out32(ra[RX_TAILDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 	return 0;
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun 
ll_temac_halt_sdma(struct eth_device * dev)147*4882a593Smuzhiyun int ll_temac_halt_sdma(struct eth_device *dev)
148*4882a593Smuzhiyun {
149*4882a593Smuzhiyun 	unsigned timeout = 50;	/* 1usec * 50 = 50usec */
150*4882a593Smuzhiyun 	struct ll_temac *ll_temac = dev->priv;
151*4882a593Smuzhiyun 	phys_addr_t *ra = ll_temac->sdma_reg_addr;
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	/*
154*4882a593Smuzhiyun 	 * Soft reset the DMA
155*4882a593Smuzhiyun 	 *
156*4882a593Smuzhiyun 	 * Quote from MPMC documentation: Writing a 1 to this field
157*4882a593Smuzhiyun 	 * forces the DMA engine to shutdown and reset itself. After
158*4882a593Smuzhiyun 	 * setting this bit, software must poll it until the bit is
159*4882a593Smuzhiyun 	 * cleared by the DMA. This indicates that the reset process
160*4882a593Smuzhiyun 	 * is done and the pipeline has been flushed.
161*4882a593Smuzhiyun 	 */
162*4882a593Smuzhiyun 	ll_temac->out32(ra[DMA_CONTROL_REG], DMA_CONTROL_RESET);
163*4882a593Smuzhiyun 	while (timeout && (ll_temac->in32(ra[DMA_CONTROL_REG])
164*4882a593Smuzhiyun 					& DMA_CONTROL_RESET)) {
165*4882a593Smuzhiyun 		timeout--;
166*4882a593Smuzhiyun 		udelay(1);
167*4882a593Smuzhiyun 	}
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	if (!timeout) {
170*4882a593Smuzhiyun 		printf("%s: Timeout\n", __func__);
171*4882a593Smuzhiyun 		return -1;
172*4882a593Smuzhiyun 	}
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	return 0;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun 
ll_temac_reset_sdma(struct eth_device * dev)177*4882a593Smuzhiyun int ll_temac_reset_sdma(struct eth_device *dev)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun 	u32 r;
180*4882a593Smuzhiyun 	struct ll_temac *ll_temac = dev->priv;
181*4882a593Smuzhiyun 	phys_addr_t *ra = ll_temac->sdma_reg_addr;
182*4882a593Smuzhiyun 
183*4882a593Smuzhiyun 	/* Soft reset the DMA.  */
184*4882a593Smuzhiyun 	if (ll_temac_halt_sdma(dev))
185*4882a593Smuzhiyun 		return -1;
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	/* Now clear the interrupts.  */
188*4882a593Smuzhiyun 	r = ll_temac->in32(ra[TX_CHNL_CTRL]);
189*4882a593Smuzhiyun 	r &= ~CHNL_CTRL_IRQ_MASK;
190*4882a593Smuzhiyun 	ll_temac->out32(ra[TX_CHNL_CTRL], r);
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	r = ll_temac->in32(ra[RX_CHNL_CTRL]);
193*4882a593Smuzhiyun 	r &= ~CHNL_CTRL_IRQ_MASK;
194*4882a593Smuzhiyun 	ll_temac->out32(ra[RX_CHNL_CTRL], r);
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	/* Now ACK pending IRQs.  */
197*4882a593Smuzhiyun 	ll_temac->out32(ra[TX_IRQ_REG], IRQ_REG_IRQ_MASK);
198*4882a593Smuzhiyun 	ll_temac->out32(ra[RX_IRQ_REG], IRQ_REG_IRQ_MASK);
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	/* Set tail-ptr mode, disable errors for both channels.  */
201*4882a593Smuzhiyun 	ll_temac->out32(ra[DMA_CONTROL_REG],
202*4882a593Smuzhiyun 			/* Enable use of tail pointer register */
203*4882a593Smuzhiyun 			DMA_CONTROL_TPE |
204*4882a593Smuzhiyun 			/* Disable error when 2 or 4 bit coalesce cnt overfl */
205*4882a593Smuzhiyun 			DMA_CONTROL_RXOCEID |
206*4882a593Smuzhiyun 			/* Disable error when 2 or 4 bit coalesce cnt overfl */
207*4882a593Smuzhiyun 			DMA_CONTROL_TXOCEID);
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	return 0;
210*4882a593Smuzhiyun }
211*4882a593Smuzhiyun 
/*
 * Poll the current RX descriptor and hand a completed frame to the
 * network stack.  Returns 0 (also when no frame is pending), -1 only
 * when an SDMA channel error could not be cleared by a reset.
 */
int ll_temac_recv_sdma(struct eth_device *dev)
{
	int length, pb_idx;
	struct cdmac_bd *rx_dp = &cdmac_bd.rx[rx_idx];
	struct ll_temac *ll_temac = dev->priv;
	phys_addr_t *ra = ll_temac->sdma_reg_addr;

	/* Recover from a TX/RX channel error before touching the ring. */
	if (ll_temac_sdma_error(dev)) {

		if (ll_temac_reset_sdma(dev))
			return -1;

		ll_temac_init_sdma(dev);
	}

	/*
	 * NOTE(review): flush_cache is used here to refresh the CPU's
	 * view of a descriptor the DMA may have written — presumably it
	 * also invalidates on this platform; confirm against the arch's
	 * cache implementation.
	 */
	flush_cache((u32)rx_dp, sizeof(*rx_dp));

	/* Nothing received yet on this descriptor. */
	if (!(rx_dp->sca.stctrl & CDMAC_BD_STCTRL_COMPLETED))
		return 0;

	/* Only whole frames (SOP/EOP on one descriptor) are supported. */
	if (rx_dp->sca.stctrl & (CDMAC_BD_STCTRL_SOP | CDMAC_BD_STCTRL_EOP)) {
		pb_idx = rx_idx;
		/* received byte count is reported in application word 4 */
		length = rx_dp->sca.app[4] & CDMAC_BD_APP4_RXBYTECNT_MASK;
	} else {
		pb_idx = -1;
		length = 0;
		printf("%s: Got part of package, unsupported (%x)\n",
				__func__, rx_dp->sca.stctrl);
	}

	/* flip the buffer */
	flush_cache((u32)rx_dp->phys_buf_p, length);

	/* reset the current descriptor so it can be reused by the DMA */
	rx_dp->sca.stctrl = 0;
	rx_dp->sca.app[4] = 0;
	flush_cache((u32)rx_dp, sizeof(*rx_dp));

	/* Find next empty buffer descriptor, preparation for next iteration */
	rx_idx = (rx_idx + 1) % PKTBUFSRX;
	rx_dp = &cdmac_bd.rx[rx_idx];
	flush_cache((u32)rx_dp, sizeof(*rx_dp));

	/* DMA start by writing to respective TAILDESC_PTR */
	ll_temac->out32(ra[RX_CURDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
	ll_temac->out32(ra[RX_TAILDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);

	/* Deliver the frame only after the ring has been restarted. */
	if (length > 0 && pb_idx != -1)
		net_process_received_packet(net_rx_packets[pb_idx], length);

	return 0;
}
264*4882a593Smuzhiyun 
ll_temac_send_sdma(struct eth_device * dev,void * packet,int length)265*4882a593Smuzhiyun int ll_temac_send_sdma(struct eth_device *dev, void *packet, int length)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	unsigned timeout = 50;	/* 1usec * 50 = 50usec */
268*4882a593Smuzhiyun 	struct cdmac_bd *tx_dp = &cdmac_bd.tx[tx_idx];
269*4882a593Smuzhiyun 	struct ll_temac *ll_temac = dev->priv;
270*4882a593Smuzhiyun 	phys_addr_t *ra = ll_temac->sdma_reg_addr;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	if (ll_temac_sdma_error(dev)) {
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 		if (ll_temac_reset_sdma(dev))
275*4882a593Smuzhiyun 			return -1;
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 		ll_temac_init_sdma(dev);
278*4882a593Smuzhiyun 	}
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun 	tx_dp->phys_buf_p = (u8 *)packet;
281*4882a593Smuzhiyun 	tx_dp->buf_len = length;
282*4882a593Smuzhiyun 	tx_dp->sca.stctrl = CDMAC_BD_STCTRL_SOP | CDMAC_BD_STCTRL_EOP |
283*4882a593Smuzhiyun 			CDMAC_BD_STCTRL_STOP_ON_END;
284*4882a593Smuzhiyun 
285*4882a593Smuzhiyun 	flush_cache((u32)packet, length);
286*4882a593Smuzhiyun 	flush_cache((u32)tx_dp, sizeof(*tx_dp));
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	/* DMA start by writing to respective TAILDESC_PTR */
289*4882a593Smuzhiyun 	ll_temac->out32(ra[TX_CURDESC_PTR], (int)tx_dp);
290*4882a593Smuzhiyun 	ll_temac->out32(ra[TX_TAILDESC_PTR], (int)tx_dp);
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 	/* Find next empty buffer descriptor, preparation for next iteration */
293*4882a593Smuzhiyun 	tx_idx = (tx_idx + 1) % TX_BUF_CNT;
294*4882a593Smuzhiyun 	tx_dp = &cdmac_bd.tx[tx_idx];
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	do {
297*4882a593Smuzhiyun 		flush_cache((u32)tx_dp, sizeof(*tx_dp));
298*4882a593Smuzhiyun 		udelay(1);
299*4882a593Smuzhiyun 	} while (timeout-- && !(tx_dp->sca.stctrl & CDMAC_BD_STCTRL_COMPLETED));
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	if (!timeout) {
302*4882a593Smuzhiyun 		printf("%s: Timeout\n", __func__);
303*4882a593Smuzhiyun 		return -1;
304*4882a593Smuzhiyun 	}
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	return 0;
307*4882a593Smuzhiyun }
308