/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

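/* xgbe_free_ring - release all memory associated with a single ring:
 * unmap and free each descriptor's data, free the rdata array, release
 * any partially consumed Rx page allocations and free the coherent
 * descriptor memory
 */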
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;
	unsigned int i;

	if (!ring)
		return;

	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
}

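/* xgbe_free_ring_resources - free the Tx and Rx rings of every channel */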
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}

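/* xgbe_alloc_node - kzalloc on the requested NUMA node, falling back to
 * an allocation from any node if the node-local attempt fails
 */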
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}

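/* xgbe_dma_alloc_node - dma_alloc_coherent biased to the requested NUMA
 * node by temporarily overriding the device's node, with a second
 * attempt on the device's original node if the first fails
 */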
static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
				 dma_addr_t *dma, int node)
{
	void *mem;
	int cur_node = dev_to_node(dev);

	set_dev_node(dev, node);
	mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, cur_node);

	if (!mem)
		mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return mem;
}

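/* xgbe_init_ring - allocate the coherent descriptor memory and the
 * per-descriptor bookkeeping (rdata) array for one ring
 */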
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	size_t size;

	if (!ring)
		return 0;

	/* Descriptors */
	size = rdesc_count * sizeof(struct xgbe_ring_desc);

	ring->rdesc_count = rdesc_count;
	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
					  ring->node);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Descriptor information */
	size = rdesc_count * sizeof(struct xgbe_ring_data);

	ring->rdata = xgbe_alloc_node(size, ring->node);
	if (!ring->rdata)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);

	return 0;
}

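/* xgbe_alloc_ring_resources - initialize the Tx and Rx rings of every
 * channel, unwinding all rings on the first failure
 */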
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}

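/* xgbe_alloc_pages - allocate a (possibly multi-page) buffer and map it
 * for Rx DMA; the order is lowered until the allocation succeeds, and a
 * node-local request falls back to any node before giving up
 */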
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, int alloc_order,
			    int node)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	gfp_t gfp;
	int order;

again:
	order = alloc_order;

	/* Try to obtain pages, decreasing order if necessary */
	gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages_node(node, gfp, order);
		if (pages)
			break;

		order--;
	}

	/* If we couldn't get local pages, try getting from anywhere */
	if (!pages && (node != NUMA_NO_NODE)) {
		node = NUMA_NO_NODE;
		goto again;
	}

	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
		return -ENOMEM;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}

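/* xgbe_set_buffer_data - carve a slice of @len bytes out of the current
 * page allocation; the descriptor that consumes the final slice takes
 * ownership of unmapping the page(s) via pa_unmap
 */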
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}

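/* xgbe_map_rx_buffer - assign header and data buffer slices to an Rx
 * descriptor, replenishing the ring's page allocations as needed
 */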
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
			      struct xgbe_ring *ring,
			      struct xgbe_ring_data *rdata)
{
	int ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);

	return 0;
}

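/* xgbe_wrapper_tx_descriptor_init - point each Tx rdata entry at its
 * hardware descriptor, reset the ring state and let the hardware layer
 * initialize the descriptors
 */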
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}

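/* xgbe_wrapper_rx_descriptor_init - point each Rx rdata entry at its
 * hardware descriptor, attach Rx buffers, reset the ring state and let
 * the hardware layer initialize the descriptors
 */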
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

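/* xgbe_unmap_rdata - undo whatever a descriptor holds: Tx skb DMA
 * mappings, the skb itself, and Rx page references (including a final
 * unmap when this descriptor owned the page allocation)
 */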
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}

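/* xgbe_map_tx_skb - DMA-map an skb for transmit, splitting the linear
 * data and each fragment into XGBE_TX_MAX_BUF_SIZE chunks and reserving
 * a slot for a context descriptor when the MSS or VLAN tag changes.
 * Returns the number of descriptor entries used, or 0 on failure (any
 * mappings created so far are unwound).
 */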
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	skb_frag_t *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so rdata is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}

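/* xgbe_init_function_ptrs_desc - publish the descriptor operations
 * through the driver's descriptor interface
 */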
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->map_rx_buffer = xgbe_map_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}