// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/hvcall.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

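/* Forward declaration: the kobj_type for the rx buffer pools (used,
 * presumably, for their sysfs attributes) is defined later in the file.
 */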
static struct kobj_type ktype_veth_pool;


static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.06"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, 0444);
MODULE_PARM_DESC(old_large_send,
	"Use old large send method on firmware that supports the new method");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

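/*
 * Each entry pairs an ethtool statistics string with the byte offset
 * of a u64 counter inside struct ibmveth_adapter; IBMVETH_GET_STAT()
 * then reads the counter straight from the adapter at that offset.
 */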
static struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

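/*
 * The toggle bit in flags_off flips each time the rx queue index
 * wraps, so an entry whose toggle matches adapter->rx_queue.toggle is
 * one the firmware has filled since this slot was last consumed (see
 * ibmveth_rxq_pending_buffer() below).
 */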
static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and set up a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

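/*
 * Flush the buffer from the data cache one cache line at a time using
 * the PowerPC dcbfl (data cache block flush) instruction; used when
 * the rx_flush module parameter is set.
 */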
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
					  pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

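		/*
		 * Stash the pool and buffer indices in the first eight
		 * bytes of the buffer so the skb can be matched back to
		 * its pool slot when the entry comes off the rx queue.
		 */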
		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
					       adapter->netdev->mtu +
					       IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/*
 * The final 8 bytes of the buffer list are a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	adapter->rx_no_buffer = be64_to_cpup(p);
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

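	/*
	 * Walk the pools in reverse index order and top up any active
	 * pool that has fallen below its threshold (7/8 of its size).
	 */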
	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	ibmveth_update_rx_no_buffer(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue: returns 0 if the
 * h_add_logical_lan_buffer call failed and the buffer had to be removed
 * from its pool instead, 1 otherwise
 */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld\n", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
					union ibmveth_buf_desc rxq_desc,
					u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

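/* Pack the six MAC address bytes into the low 48 bits of a u64, the
 * form the logical-LAN hcalls expect.
 */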
static u64 ibmveth_encode_mac_addr(u8 *mac)
{
	int i;
	u64 encoded = 0;

	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];

	return encoded;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

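	/* The rx queue needs one entry for every buffer in every pool,
	 * plus one extra slot.
	 */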
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	rc = -ENOMEM;
	adapter->buffer_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
	if (!adapter->buffer_list_addr) {
		netdev_err(netdev, "unable to allocate list pages\n");
		goto out;
	}

	adapter->filter_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
	if (!adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter pages\n");
		goto out_free_buffer_list;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr)
		goto out_free_filter_list;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
		netdev_err(netdev, "unable to map buffer list pages\n");
		goto out_free_queue_mem;
	}

	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->filter_list_dma)) {
		netdev_err(netdev, "unable to map filter list pages\n");
		goto out_unmap_buffer_list;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		rc = -ENONET;
		goto out_unmap_filter_list;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto out_free_buffer_pools;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto out_free_buffer_pools;
	}

	rc = -ENOMEM;
	adapter->bounce_buffer =
		kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto out_free_irq;

	adapter->bounce_buffer_dma =
		dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			       netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		goto out_free_bounce_buffer;
	}

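	/*
	 * Kick the interrupt handler once by hand so the rx buffer
	 * pools get their initial fill before real traffic arrives.
	 */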
	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

out_free_bounce_buffer:
	kfree(adapter->bounce_buffer);
out_free_irq:
	free_irq(netdev->irq, netdev);
out_free_buffer_pools:
	while (--i >= 0) {
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);
	}
out_unmap_filter_list:
	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
out_unmap_buffer_list:
	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
out_free_queue_mem:
	dma_free_coherent(dev, adapter->rx_queue.queue_len,
			  adapter->rx_queue.queue_addr,
			  adapter->rx_queue.queue_dma);
out_free_filter_list:
	free_page((unsigned long)adapter->filter_list_addr);
out_free_buffer_list:
	free_page((unsigned long)adapter->buffer_list_addr);
out:
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	long lpar_rc;
	int i;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	ibmveth_update_rx_no_buffer(adapter);

	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)adapter->buffer_list_addr);

	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)adapter->filter_list_addr);

	dma_free_coherent(dev, adapter->rx_queue.queue_len,
			  adapter->rx_queue.queue_addr,
			  adapter->rx_queue.queue_dma);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
			 adapter->netdev->mtu + IBMVETH_BUFF_OH,
			 DMA_BIDIRECTIONAL);
	kfree(adapter->bounce_buffer);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int ibmveth_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	return ethtool_virtdev_set_link_ksettings(dev, cmd,
						  &adapter->speed,
						  &adapter->duplex);
}

static int ibmveth_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

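/* There is no physical link behind a virtual ethernet device; report
 * fixed initial settings (1Gb/s, full duplex), which userspace may
 * later override through the ksettings hooks above.
 */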
static void ibmveth_init_link_settings(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	adapter->speed = SPEED_1000;
	adapter->duplex = DUPLEX_FULL;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_CSUM_MASK;

	return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

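	/*
	 * The device is closed around the attribute change; setting
	 * pool_config keeps ibmveth_close() from stopping the tx queue
	 * during this brief close/reopen cycle.
	 */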
	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
				   "offload settings. %d rc=%ld\n",
				   data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
				   "offload settings. %d rc=%ld\n",
				   data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
			   " %d rc=%ld ret_attr=%lx\n", data, ret,
			   ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret1, ret2;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
	else
		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret2 != H_SUCCESS) {
			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
				   data, ret2);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
			rc1 = -EIO;

		} else {
			adapter->fw_large_send_support = data;
			adapter->large_send = data;
		}
	} else {
		/* Older firmware version of large send offload does not
		 * support tcp6/ipv6
		 */
		if (data == 1) {
			dev->features &= ~NETIF_F_TSO6;
			netdev_info(dev, "TSO feature requires all partitions to have updated driver\n");
		}
		adapter->large_send = data;
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
	int rc1 = 0, rc2 = 0;

	if (rx_csum != adapter->rx_csum) {
		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
		if (rc1 && !adapter->rx_csum)
			dev->features =
				features & ~(NETIF_F_CSUM_MASK |
					     NETIF_F_RXCSUM);
	}

	if (large_send != adapter->large_send) {
		rc2 = ibmveth_set_tso(dev, large_send);
		if (rc2 && !adapter->large_send)
			dev->features =
				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return rc1 ? rc1 : rc2;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
	.get_link_ksettings	= ibmveth_get_link_ksettings,
	.set_link_ksettings	= ibmveth_set_link_ksettings,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

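/* Hand up to six buffer descriptors to the hypervisor, retrying while
 * it reports H_BUSY. Returns 0 when the frame is accepted (H_SUCCESS)
 * or deliberately dropped (H_DROPPED), and 1 on any other failure.
 */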
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs, unsigned long mss)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator, mss,
					 adapter->fw_large_send_support);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
					 struct net_device *netdev)
{
	struct ethhdr *ether_header;
	int ret = 0;

	ether_header = eth_hdr(skb);

	if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) {
		netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n");
		netdev->stats.tx_dropped++;
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;
	unsigned long mss = 0;

	if (ibmveth_is_packet_unsupported(skb, netdev))
		goto out;

	/* veth doesn't handle frag_list, so linearize the skb.
	 * When GRO is enabled SKB's can have frag_list.
	 */
	if (adapter->is_active_trunk &&
	    skb_has_frag_list(skb) && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Zero out the checksum field (for CHECKSUM_PARTIAL it
		 * holds the pseudo-header seed); the NO_CSUM/CSUM_GOOD
		 * flags let the frame travel without a computed checksum.
		 */
		buf[0] = 0;
		buf[1] = 0;

		if (skb_is_gso(skb) && adapter->fw_large_send_support)
			desc_flags |= IBMVETH_BUF_LRG_SND;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the tx_copybreak threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
1106*4882a593Smuzhiyun if (force_bounce || (!skb_is_nonlinear(skb) &&
1107*4882a593Smuzhiyun (skb->len < tx_copybreak))) {
1108*4882a593Smuzhiyun skb_copy_from_linear_data(skb, adapter->bounce_buffer,
1109*4882a593Smuzhiyun skb->len);
1110*4882a593Smuzhiyun
1111*4882a593Smuzhiyun descs[0].fields.flags_len = desc_flags | skb->len;
1112*4882a593Smuzhiyun descs[0].fields.address = adapter->bounce_buffer_dma;
1113*4882a593Smuzhiyun
1114*4882a593Smuzhiyun if (ibmveth_send(adapter, descs, 0)) {
1115*4882a593Smuzhiyun adapter->tx_send_failed++;
1116*4882a593Smuzhiyun netdev->stats.tx_dropped++;
1117*4882a593Smuzhiyun } else {
1118*4882a593Smuzhiyun netdev->stats.tx_packets++;
1119*4882a593Smuzhiyun netdev->stats.tx_bytes += skb->len;
1120*4882a593Smuzhiyun }
1121*4882a593Smuzhiyun
1122*4882a593Smuzhiyun goto out;
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun /* Map the header */
1126*4882a593Smuzhiyun dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
1127*4882a593Smuzhiyun skb_headlen(skb), DMA_TO_DEVICE);
1128*4882a593Smuzhiyun if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
1129*4882a593Smuzhiyun goto map_failed;
1130*4882a593Smuzhiyun
1131*4882a593Smuzhiyun descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
1132*4882a593Smuzhiyun descs[0].fields.address = dma_addr;
1133*4882a593Smuzhiyun
1134*4882a593Smuzhiyun /* Map the frags */
1135*4882a593Smuzhiyun for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1136*4882a593Smuzhiyun const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1137*4882a593Smuzhiyun
1138*4882a593Smuzhiyun dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
1139*4882a593Smuzhiyun skb_frag_size(frag), DMA_TO_DEVICE);
1140*4882a593Smuzhiyun
1141*4882a593Smuzhiyun if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
1142*4882a593Smuzhiyun goto map_failed_frags;
1143*4882a593Smuzhiyun
1144*4882a593Smuzhiyun descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
1145*4882a593Smuzhiyun descs[i+1].fields.address = dma_addr;
1146*4882a593Smuzhiyun }
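/* At this point descs[0] describes the linear header and descs[1..n]
 * describe the page fragments. Each descriptor packs the buffer length
 * into the low bits of flags_len (recovered with IBMVETH_BUF_LEN_MASK on
 * unmap below) and keeps the IBMVETH_BUF_* flags in the high bits.
 */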
1147*4882a593Smuzhiyun
1148*4882a593Smuzhiyun if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1149*4882a593Smuzhiyun if (adapter->fw_large_send_support) {
1150*4882a593Smuzhiyun mss = (unsigned long)skb_shinfo(skb)->gso_size;
1151*4882a593Smuzhiyun adapter->tx_large_packets++;
1152*4882a593Smuzhiyun } else if (!skb_is_gso_v6(skb)) {
1153*4882a593Smuzhiyun /* Put -1 in the IP checksum to tell PHYP it
1154*4882a593Smuzhiyun * is a largesend packet. Put the mss in
1155*4882a593Smuzhiyun * the TCP checksum.
1156*4882a593Smuzhiyun */
1157*4882a593Smuzhiyun ip_hdr(skb)->check = 0xffff;
1158*4882a593Smuzhiyun tcp_hdr(skb)->check =
1159*4882a593Smuzhiyun cpu_to_be16(skb_shinfo(skb)->gso_size);
1160*4882a593Smuzhiyun adapter->tx_large_packets++;
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun }
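/* To illustrate the legacy branch above with made-up numbers: for a TSO
 * frame with gso_size 1448, PHYP sees 0xffff in the IP checksum and 1448
 * in the TCP checksum field and reconstructs the MSS from there. With
 * large-send capable firmware the MSS is instead passed explicitly as
 * the third argument to ibmveth_send() below.
 */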
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun if (ibmveth_send(adapter, descs, mss)) {
1165*4882a593Smuzhiyun adapter->tx_send_failed++;
1166*4882a593Smuzhiyun netdev->stats.tx_dropped++;
1167*4882a593Smuzhiyun } else {
1168*4882a593Smuzhiyun netdev->stats.tx_packets++;
1169*4882a593Smuzhiyun netdev->stats.tx_bytes += skb->len;
1170*4882a593Smuzhiyun }
1171*4882a593Smuzhiyun
1172*4882a593Smuzhiyun dma_unmap_single(&adapter->vdev->dev,
1173*4882a593Smuzhiyun descs[0].fields.address,
1174*4882a593Smuzhiyun descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1175*4882a593Smuzhiyun DMA_TO_DEVICE);
1176*4882a593Smuzhiyun
1177*4882a593Smuzhiyun for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
1178*4882a593Smuzhiyun dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1179*4882a593Smuzhiyun descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1180*4882a593Smuzhiyun DMA_TO_DEVICE);
1181*4882a593Smuzhiyun
1182*4882a593Smuzhiyun out:
1183*4882a593Smuzhiyun dev_consume_skb_any(skb);
1184*4882a593Smuzhiyun return NETDEV_TX_OK;
1185*4882a593Smuzhiyun
1186*4882a593Smuzhiyun map_failed_frags:
1187*4882a593Smuzhiyun last = i+1;
1188*4882a593Smuzhiyun for (i = 1; i < last; i++)
1189*4882a593Smuzhiyun dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1190*4882a593Smuzhiyun descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1191*4882a593Smuzhiyun DMA_TO_DEVICE);
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun dma_unmap_single(&adapter->vdev->dev,
1194*4882a593Smuzhiyun descs[0].fields.address,
1195*4882a593Smuzhiyun descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1196*4882a593Smuzhiyun DMA_TO_DEVICE);
1197*4882a593Smuzhiyun map_failed:
1198*4882a593Smuzhiyun if (!firmware_has_feature(FW_FEATURE_CMO))
1199*4882a593Smuzhiyun netdev_err(netdev, "tx: unable to map xmit buffer\n");
1200*4882a593Smuzhiyun adapter->tx_map_failed++;
1201*4882a593Smuzhiyun if (skb_linearize(skb)) {
1202*4882a593Smuzhiyun netdev->stats.tx_dropped++;
1203*4882a593Smuzhiyun goto out;
1204*4882a593Smuzhiyun }
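/* Linearizing succeeded, so the whole frame now fits in the linear area
 * and can be retried through the pre-mapped bounce buffer, which needs
 * no new DMA mappings.
 */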
1205*4882a593Smuzhiyun force_bounce = 1;
1206*4882a593Smuzhiyun goto retry_bounce;
1207*4882a593Smuzhiyun }
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
1210*4882a593Smuzhiyun {
1211*4882a593Smuzhiyun struct tcphdr *tcph;
1212*4882a593Smuzhiyun int offset = 0;
1213*4882a593Smuzhiyun int hdr_len;
1214*4882a593Smuzhiyun
1215*4882a593Smuzhiyun /* only TCP packets will be aggregated */
1216*4882a593Smuzhiyun if (skb->protocol == htons(ETH_P_IP)) {
1217*4882a593Smuzhiyun struct iphdr *iph = (struct iphdr *)skb->data;
1218*4882a593Smuzhiyun
1219*4882a593Smuzhiyun if (iph->protocol == IPPROTO_TCP) {
1220*4882a593Smuzhiyun offset = iph->ihl * 4;
1221*4882a593Smuzhiyun skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1222*4882a593Smuzhiyun } else {
1223*4882a593Smuzhiyun return;
1224*4882a593Smuzhiyun }
1225*4882a593Smuzhiyun } else if (skb->protocol == htons(ETH_P_IPV6)) {
1226*4882a593Smuzhiyun struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun if (iph6->nexthdr == IPPROTO_TCP) {
1229*4882a593Smuzhiyun offset = sizeof(struct ipv6hdr);
1230*4882a593Smuzhiyun skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1231*4882a593Smuzhiyun } else {
1232*4882a593Smuzhiyun return;
1233*4882a593Smuzhiyun }
1234*4882a593Smuzhiyun } else {
1235*4882a593Smuzhiyun return;
1236*4882a593Smuzhiyun }
1237*4882a593Smuzhiyun /* If the mss is not set through the Large Packet bit/mss field in the
1238*4882a593Smuzhiyun * rx buffer, expect that it was written into the tcp header checksum.
1239*4882a593Smuzhiyun */
1240*4882a593Smuzhiyun tcph = (struct tcphdr *)(skb->data + offset);
1241*4882a593Smuzhiyun if (lrg_pkt) {
1242*4882a593Smuzhiyun skb_shinfo(skb)->gso_size = mss;
1243*4882a593Smuzhiyun } else if (offset) {
1244*4882a593Smuzhiyun skb_shinfo(skb)->gso_size = ntohs(tcph->check);
1245*4882a593Smuzhiyun tcph->check = 0;
1246*4882a593Smuzhiyun }
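/* Worked example with illustrative numbers: a 14520-byte aggregated IPv4
 * frame with 20-byte IP and TCP headers and an MSS of 1448 yields
 * gso_segs = DIV_ROUND_UP(14520 - 40, 1448) = 10 below.
 */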
1247*4882a593Smuzhiyun
1248*4882a593Smuzhiyun if (skb_shinfo(skb)->gso_size) {
1249*4882a593Smuzhiyun hdr_len = offset + tcph->doff * 4;
1250*4882a593Smuzhiyun skb_shinfo(skb)->gso_segs =
1251*4882a593Smuzhiyun DIV_ROUND_UP(skb->len - hdr_len,
1252*4882a593Smuzhiyun skb_shinfo(skb)->gso_size);
1253*4882a593Smuzhiyun }
1254*4882a593Smuzhiyun }
1255*4882a593Smuzhiyun
1256*4882a593Smuzhiyun static void ibmveth_rx_csum_helper(struct sk_buff *skb,
1257*4882a593Smuzhiyun struct ibmveth_adapter *adapter)
1258*4882a593Smuzhiyun {
1259*4882a593Smuzhiyun struct iphdr *iph = NULL;
1260*4882a593Smuzhiyun struct ipv6hdr *iph6 = NULL;
1261*4882a593Smuzhiyun __be16 skb_proto = 0;
1262*4882a593Smuzhiyun u16 iphlen = 0;
1263*4882a593Smuzhiyun u16 iph_proto = 0;
1264*4882a593Smuzhiyun u16 tcphdrlen = 0;
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun skb_proto = be16_to_cpu(skb->protocol);
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun if (skb_proto == ETH_P_IP) {
1269*4882a593Smuzhiyun iph = (struct iphdr *)skb->data;
1270*4882a593Smuzhiyun
1271*4882a593Smuzhiyun /* If the IP checksum is not offloaded and if the packet
1272*4882a593Smuzhiyun * is large send, the checksum must be rebuilt.
1273*4882a593Smuzhiyun */
1274*4882a593Smuzhiyun if (iph->check == 0xffff) {
1275*4882a593Smuzhiyun iph->check = 0;
1276*4882a593Smuzhiyun iph->check = ip_fast_csum((unsigned char *)iph,
1277*4882a593Smuzhiyun iph->ihl);
1278*4882a593Smuzhiyun }
1279*4882a593Smuzhiyun
1280*4882a593Smuzhiyun iphlen = iph->ihl * 4;
1281*4882a593Smuzhiyun iph_proto = iph->protocol;
1282*4882a593Smuzhiyun } else if (skb_proto == ETH_P_IPV6) {
1283*4882a593Smuzhiyun iph6 = (struct ipv6hdr *)skb->data;
1284*4882a593Smuzhiyun iphlen = sizeof(struct ipv6hdr);
1285*4882a593Smuzhiyun iph_proto = iph6->nexthdr;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun /* In an OVS environment, when a flow is not cached, specifically for a
1289*4882a593Smuzhiyun * new TCP connection, the first packet's information is passed up
1290*4882a593Smuzhiyun * to user space to find a flow. During this process, OVS computes
1291*4882a593Smuzhiyun * the checksum on the first packet when the CHECKSUM_PARTIAL flag is set.
1292*4882a593Smuzhiyun *
1293*4882a593Smuzhiyun * Given that we zeroed out the TCP checksum field on the transmit path
1294*4882a593Smuzhiyun * (see the ibmveth_start_xmit routine) when we set the "no checksum" bit,
1295*4882a593Smuzhiyun * the checksum OVS computes will be incorrect without the TCP pseudo-header
1296*4882a593Smuzhiyun * checksum in the packet. This leads to OVS dropping the packet, and hence
1297*4882a593Smuzhiyun * TCP retransmissions are seen.
1298*4882a593Smuzhiyun *
1299*4882a593Smuzhiyun * So, re-compute the TCP pseudo-header checksum.
1300*4882a593Smuzhiyun */
1301*4882a593Smuzhiyun if (iph_proto == IPPROTO_TCP && adapter->is_active_trunk) {
1302*4882a593Smuzhiyun struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
1303*4882a593Smuzhiyun
1304*4882a593Smuzhiyun tcphdrlen = skb->len - iphlen;
1305*4882a593Smuzhiyun
1306*4882a593Smuzhiyun /* Recompute TCP pseudo header checksum */
1307*4882a593Smuzhiyun if (skb_proto == ETH_P_IP)
1308*4882a593Smuzhiyun tcph->check = ~csum_tcpudp_magic(iph->saddr,
1309*4882a593Smuzhiyun iph->daddr, tcphdrlen, iph_proto, 0);
1310*4882a593Smuzhiyun else if (skb_proto == ETH_P_IPV6)
1311*4882a593Smuzhiyun tcph->check = ~csum_ipv6_magic(&iph6->saddr,
1312*4882a593Smuzhiyun &iph6->daddr, tcphdrlen, iph_proto, 0);
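/* csum_tcpudp_magic()/csum_ipv6_magic() return the folded, complemented
 * checksum, so storing the complement leaves exactly the folded
 * pseudo-header sum that CHECKSUM_PARTIAL consumers expect to find in
 * the TCP checksum field.
 */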
1313*4882a593Smuzhiyun
1314*4882a593Smuzhiyun /* Setup SKB fields for checksum offload */
1315*4882a593Smuzhiyun skb_partial_csum_set(skb, iphlen,
1316*4882a593Smuzhiyun offsetof(struct tcphdr, check));
1317*4882a593Smuzhiyun skb_reset_network_header(skb);
1318*4882a593Smuzhiyun }
1319*4882a593Smuzhiyun }
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun static int ibmveth_poll(struct napi_struct *napi, int budget)
1322*4882a593Smuzhiyun {
1323*4882a593Smuzhiyun struct ibmveth_adapter *adapter =
1324*4882a593Smuzhiyun container_of(napi, struct ibmveth_adapter, napi);
1325*4882a593Smuzhiyun struct net_device *netdev = adapter->netdev;
1326*4882a593Smuzhiyun int frames_processed = 0;
1327*4882a593Smuzhiyun unsigned long lpar_rc;
1328*4882a593Smuzhiyun u16 mss = 0;
1329*4882a593Smuzhiyun
1330*4882a593Smuzhiyun while (frames_processed < budget) {
1331*4882a593Smuzhiyun if (!ibmveth_rxq_pending_buffer(adapter))
1332*4882a593Smuzhiyun break;
1333*4882a593Smuzhiyun
1334*4882a593Smuzhiyun smp_rmb();
1335*4882a593Smuzhiyun if (!ibmveth_rxq_buffer_valid(adapter)) {
1336*4882a593Smuzhiyun wmb(); /* suggested by larson1 */
1337*4882a593Smuzhiyun adapter->rx_invalid_buffer++;
1338*4882a593Smuzhiyun netdev_dbg(netdev, "recycling invalid buffer\n");
1339*4882a593Smuzhiyun ibmveth_rxq_recycle_buffer(adapter);
1340*4882a593Smuzhiyun } else {
1341*4882a593Smuzhiyun struct sk_buff *skb, *new_skb;
1342*4882a593Smuzhiyun int length = ibmveth_rxq_frame_length(adapter);
1343*4882a593Smuzhiyun int offset = ibmveth_rxq_frame_offset(adapter);
1344*4882a593Smuzhiyun int csum_good = ibmveth_rxq_csum_good(adapter);
1345*4882a593Smuzhiyun int lrg_pkt = ibmveth_rxq_large_packet(adapter);
1346*4882a593Smuzhiyun __sum16 iph_check = 0;
1347*4882a593Smuzhiyun
1348*4882a593Smuzhiyun skb = ibmveth_rxq_get_buffer(adapter);
1349*4882a593Smuzhiyun
1350*4882a593Smuzhiyun /* if the large packet bit is set in the rx queue
1351*4882a593Smuzhiyun * descriptor, the mss will be written by PHYP eight
1352*4882a593Smuzhiyun * bytes from the start of the rx buffer, which is
1353*4882a593Smuzhiyun * skb->data at this stage
1354*4882a593Smuzhiyun */
1355*4882a593Smuzhiyun if (lrg_pkt) {
1356*4882a593Smuzhiyun __be64 *rxmss = (__be64 *)(skb->data + 8);
1357*4882a593Smuzhiyun
1358*4882a593Smuzhiyun mss = (u16)be64_to_cpu(*rxmss);
1359*4882a593Smuzhiyun }
1360*4882a593Smuzhiyun
1361*4882a593Smuzhiyun new_skb = NULL;
1362*4882a593Smuzhiyun if (length < rx_copybreak)
1363*4882a593Smuzhiyun new_skb = netdev_alloc_skb(netdev, length);
1364*4882a593Smuzhiyun
1365*4882a593Smuzhiyun if (new_skb) {
1366*4882a593Smuzhiyun skb_copy_to_linear_data(new_skb,
1367*4882a593Smuzhiyun skb->data + offset,
1368*4882a593Smuzhiyun length);
1369*4882a593Smuzhiyun if (rx_flush)
1370*4882a593Smuzhiyun ibmveth_flush_buffer(skb->data,
1371*4882a593Smuzhiyun length + offset);
1372*4882a593Smuzhiyun if (!ibmveth_rxq_recycle_buffer(adapter))
1373*4882a593Smuzhiyun kfree_skb(skb);
1374*4882a593Smuzhiyun skb = new_skb;
1375*4882a593Smuzhiyun } else {
1376*4882a593Smuzhiyun ibmveth_rxq_harvest_buffer(adapter);
1377*4882a593Smuzhiyun skb_reserve(skb, offset);
1378*4882a593Smuzhiyun }
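/* Mirroring tx_copybreak: copying a small frame into a freshly allocated
 * skb lets the original large rx buffer be recycled back to the
 * hypervisor immediately instead of being held by the network stack.
 */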
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun skb_put(skb, length);
1381*4882a593Smuzhiyun skb->protocol = eth_type_trans(skb, netdev);
1382*4882a593Smuzhiyun
1383*4882a593Smuzhiyun /* PHYP without PLSO support places a -1 in the ip
1384*4882a593Smuzhiyun * checksum for large send frames.
1385*4882a593Smuzhiyun */
1386*4882a593Smuzhiyun if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
1387*4882a593Smuzhiyun struct iphdr *iph = (struct iphdr *)skb->data;
1388*4882a593Smuzhiyun
1389*4882a593Smuzhiyun iph_check = iph->check;
1390*4882a593Smuzhiyun }
1391*4882a593Smuzhiyun
1392*4882a593Smuzhiyun if ((length > netdev->mtu + ETH_HLEN) ||
1393*4882a593Smuzhiyun lrg_pkt || iph_check == 0xffff) {
1394*4882a593Smuzhiyun ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
1395*4882a593Smuzhiyun adapter->rx_large_packets++;
1396*4882a593Smuzhiyun }
1397*4882a593Smuzhiyun
1398*4882a593Smuzhiyun if (csum_good) {
1399*4882a593Smuzhiyun skb->ip_summed = CHECKSUM_UNNECESSARY;
1400*4882a593Smuzhiyun ibmveth_rx_csum_helper(skb, adapter);
1401*4882a593Smuzhiyun }
1402*4882a593Smuzhiyun
1403*4882a593Smuzhiyun napi_gro_receive(napi, skb); /* send it up */
1404*4882a593Smuzhiyun
1405*4882a593Smuzhiyun netdev->stats.rx_packets++;
1406*4882a593Smuzhiyun netdev->stats.rx_bytes += length;
1407*4882a593Smuzhiyun frames_processed++;
1408*4882a593Smuzhiyun }
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun ibmveth_replenish_task(adapter);
1412*4882a593Smuzhiyun
1413*4882a593Smuzhiyun if (frames_processed < budget) {
1414*4882a593Smuzhiyun napi_complete_done(napi, frames_processed);
1415*4882a593Smuzhiyun
1416*4882a593Smuzhiyun /* We think we are done - reenable interrupts,
1417*4882a593Smuzhiyun * then check once more to make sure we are done.
1418*4882a593Smuzhiyun */
1419*4882a593Smuzhiyun lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1420*4882a593Smuzhiyun VIO_IRQ_ENABLE);
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun BUG_ON(lpar_rc != H_SUCCESS);
1423*4882a593Smuzhiyun
1424*4882a593Smuzhiyun if (ibmveth_rxq_pending_buffer(adapter) &&
1425*4882a593Smuzhiyun napi_reschedule(napi)) {
1426*4882a593Smuzhiyun lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1427*4882a593Smuzhiyun VIO_IRQ_DISABLE);
1428*4882a593Smuzhiyun }
1429*4882a593Smuzhiyun }
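/* The IRQ disable in the branch above closes a race: if more work arrived
 * between napi_complete_done() and the recheck, polling is rescheduled
 * and interrupts are turned back off so the poll loop runs without a
 * redundant interrupt in between.
 */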
1430*4882a593Smuzhiyun
1431*4882a593Smuzhiyun return frames_processed;
1432*4882a593Smuzhiyun }
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1435*4882a593Smuzhiyun {
1436*4882a593Smuzhiyun struct net_device *netdev = dev_instance;
1437*4882a593Smuzhiyun struct ibmveth_adapter *adapter = netdev_priv(netdev);
1438*4882a593Smuzhiyun unsigned long lpar_rc;
1439*4882a593Smuzhiyun
1440*4882a593Smuzhiyun if (napi_schedule_prep(&adapter->napi)) {
1441*4882a593Smuzhiyun lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1442*4882a593Smuzhiyun VIO_IRQ_DISABLE);
1443*4882a593Smuzhiyun BUG_ON(lpar_rc != H_SUCCESS);
1444*4882a593Smuzhiyun __napi_schedule(&adapter->napi);
1445*4882a593Smuzhiyun }
1446*4882a593Smuzhiyun return IRQ_HANDLED;
1447*4882a593Smuzhiyun }
1448*4882a593Smuzhiyun
1449*4882a593Smuzhiyun static void ibmveth_set_multicast_list(struct net_device *netdev)
1450*4882a593Smuzhiyun {
1451*4882a593Smuzhiyun struct ibmveth_adapter *adapter = netdev_priv(netdev);
1452*4882a593Smuzhiyun unsigned long lpar_rc;
1453*4882a593Smuzhiyun
1454*4882a593Smuzhiyun if ((netdev->flags & IFF_PROMISC) ||
1455*4882a593Smuzhiyun (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
1456*4882a593Smuzhiyun lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1457*4882a593Smuzhiyun IbmVethMcastEnableRecv |
1458*4882a593Smuzhiyun IbmVethMcastDisableFiltering,
1459*4882a593Smuzhiyun 0);
1460*4882a593Smuzhiyun if (lpar_rc != H_SUCCESS) {
1461*4882a593Smuzhiyun netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1462*4882a593Smuzhiyun "entering promisc mode\n", lpar_rc);
1463*4882a593Smuzhiyun }
1464*4882a593Smuzhiyun } else {
1465*4882a593Smuzhiyun struct netdev_hw_addr *ha;
1466*4882a593Smuzhiyun /* clear the filter table & disable filtering */
1467*4882a593Smuzhiyun lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1468*4882a593Smuzhiyun IbmVethMcastEnableRecv |
1469*4882a593Smuzhiyun IbmVethMcastDisableFiltering |
1470*4882a593Smuzhiyun IbmVethMcastClearFilterTable,
1471*4882a593Smuzhiyun 0);
1472*4882a593Smuzhiyun if (lpar_rc != H_SUCCESS) {
1473*4882a593Smuzhiyun netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1474*4882a593Smuzhiyun "attempting to clear filter table\n",
1475*4882a593Smuzhiyun lpar_rc);
1476*4882a593Smuzhiyun }
1477*4882a593Smuzhiyun /* add the addresses to the filter table */
1478*4882a593Smuzhiyun netdev_for_each_mc_addr(ha, netdev) {
1479*4882a593Smuzhiyun /* add the multicast address to the filter table */
1480*4882a593Smuzhiyun u64 mcast_addr;
1481*4882a593Smuzhiyun mcast_addr = ibmveth_encode_mac_addr(ha->addr);
1482*4882a593Smuzhiyun lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1483*4882a593Smuzhiyun IbmVethMcastAddFilter,
1484*4882a593Smuzhiyun mcast_addr);
1485*4882a593Smuzhiyun if (lpar_rc != H_SUCCESS) {
1486*4882a593Smuzhiyun netdev_err(netdev, "h_multicast_ctrl rc=%ld "
1487*4882a593Smuzhiyun "when adding an entry to the filter "
1488*4882a593Smuzhiyun "table\n", lpar_rc);
1489*4882a593Smuzhiyun }
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun
1492*4882a593Smuzhiyun /* re-enable filtering */
1493*4882a593Smuzhiyun lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1494*4882a593Smuzhiyun IbmVethMcastEnableFiltering,
1495*4882a593Smuzhiyun 0);
1496*4882a593Smuzhiyun if (lpar_rc != H_SUCCESS) {
1497*4882a593Smuzhiyun netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1498*4882a593Smuzhiyun "enabling filtering\n", lpar_rc);
1499*4882a593Smuzhiyun }
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun }
1502*4882a593Smuzhiyun
1503*4882a593Smuzhiyun static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1504*4882a593Smuzhiyun {
1505*4882a593Smuzhiyun struct ibmveth_adapter *adapter = netdev_priv(dev);
1506*4882a593Smuzhiyun struct vio_dev *viodev = adapter->vdev;
1507*4882a593Smuzhiyun int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
1508*4882a593Smuzhiyun int i, rc;
1509*4882a593Smuzhiyun int need_restart = 0;
1510*4882a593Smuzhiyun
1511*4882a593Smuzhiyun for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1512*4882a593Smuzhiyun if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
1513*4882a593Smuzhiyun break;
1514*4882a593Smuzhiyun
1515*4882a593Smuzhiyun if (i == IBMVETH_NUM_BUFF_POOLS)
1516*4882a593Smuzhiyun return -EINVAL;
1517*4882a593Smuzhiyun
1518*4882a593Smuzhiyun /* Deactivate all the buffer pools so that the next loop can activate
1519*4882a593Smuzhiyun only the buffer pools necessary to hold the new MTU */
1520*4882a593Smuzhiyun if (netif_running(adapter->netdev)) {
1521*4882a593Smuzhiyun need_restart = 1;
1522*4882a593Smuzhiyun adapter->pool_config = 1;
1523*4882a593Smuzhiyun ibmveth_close(adapter->netdev);
1524*4882a593Smuzhiyun adapter->pool_config = 0;
1525*4882a593Smuzhiyun }
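/* Illustrative example (the real pool sizes are fixed at probe time):
 * with pool buffer sizes of roughly 512B, 2KB, 16KB, 32KB and 64KB, a
 * new MTU of 9000 plus IBMVETH_BUFF_OH first fits the 16KB pool, so the
 * loop below activates pools 0-2 and returns from there.
 */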
1526*4882a593Smuzhiyun
1527*4882a593Smuzhiyun /* Look for an active buffer pool that can hold the new MTU */
1528*4882a593Smuzhiyun for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1529*4882a593Smuzhiyun adapter->rx_buff_pool[i].active = 1;
1530*4882a593Smuzhiyun
1531*4882a593Smuzhiyun if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
1532*4882a593Smuzhiyun dev->mtu = new_mtu;
1533*4882a593Smuzhiyun vio_cmo_set_dev_desired(viodev,
1534*4882a593Smuzhiyun ibmveth_get_desired_dma
1535*4882a593Smuzhiyun (viodev));
1536*4882a593Smuzhiyun if (need_restart) {
1537*4882a593Smuzhiyun return ibmveth_open(adapter->netdev);
1538*4882a593Smuzhiyun }
1539*4882a593Smuzhiyun return 0;
1540*4882a593Smuzhiyun }
1541*4882a593Smuzhiyun }
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun if (need_restart && (rc = ibmveth_open(adapter->netdev)))
1544*4882a593Smuzhiyun return rc;
1545*4882a593Smuzhiyun
1546*4882a593Smuzhiyun return -EINVAL;
1547*4882a593Smuzhiyun }
1548*4882a593Smuzhiyun
1549*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1550*4882a593Smuzhiyun static void ibmveth_poll_controller(struct net_device *dev)
1551*4882a593Smuzhiyun {
1552*4882a593Smuzhiyun ibmveth_replenish_task(netdev_priv(dev));
1553*4882a593Smuzhiyun ibmveth_interrupt(dev->irq, dev);
1554*4882a593Smuzhiyun }
1555*4882a593Smuzhiyun #endif
1556*4882a593Smuzhiyun
1557*4882a593Smuzhiyun /**
1558*4882a593Smuzhiyun * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
1559*4882a593Smuzhiyun *
1560*4882a593Smuzhiyun * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
1561*4882a593Smuzhiyun *
1562*4882a593Smuzhiyun * Return value:
1563*4882a593Smuzhiyun * Number of bytes of IO data the driver will need to perform well.
1564*4882a593Smuzhiyun */
1565*4882a593Smuzhiyun static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1566*4882a593Smuzhiyun {
1567*4882a593Smuzhiyun struct net_device *netdev = dev_get_drvdata(&vdev->dev);
1568*4882a593Smuzhiyun struct ibmveth_adapter *adapter;
1569*4882a593Smuzhiyun struct iommu_table *tbl;
1570*4882a593Smuzhiyun unsigned long ret;
1571*4882a593Smuzhiyun int i;
1572*4882a593Smuzhiyun int rxqentries = 1;
1573*4882a593Smuzhiyun
1574*4882a593Smuzhiyun tbl = get_iommu_table_base(&vdev->dev);
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun /* netdev is initialized at probe time, along with the structures we need below */
1577*4882a593Smuzhiyun if (netdev == NULL)
1578*4882a593Smuzhiyun return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
1579*4882a593Smuzhiyun
1580*4882a593Smuzhiyun adapter = netdev_priv(netdev);
1581*4882a593Smuzhiyun
1582*4882a593Smuzhiyun ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1583*4882a593Smuzhiyun ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
1584*4882a593Smuzhiyun
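/* The loop below adds size * IOMMU_PAGE_ALIGN(buff_size) for every
 * active pool. With illustrative numbers: 512 buffers of 2KB each on a
 * 4KB IOMMU page size account for 512 * 4KB = 2MB of entitlement.
 */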
1585*4882a593Smuzhiyun for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1586*4882a593Smuzhiyun /* add the size of the active receive buffers */
1587*4882a593Smuzhiyun if (adapter->rx_buff_pool[i].active)
1588*4882a593Smuzhiyun ret +=
1589*4882a593Smuzhiyun adapter->rx_buff_pool[i].size *
1590*4882a593Smuzhiyun IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
1591*4882a593Smuzhiyun buff_size, tbl);
1592*4882a593Smuzhiyun rxqentries += adapter->rx_buff_pool[i].size;
1593*4882a593Smuzhiyun }
1594*4882a593Smuzhiyun /* add the size of the receive queue entries */
1595*4882a593Smuzhiyun ret += IOMMU_PAGE_ALIGN(
1596*4882a593Smuzhiyun rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
1597*4882a593Smuzhiyun
1598*4882a593Smuzhiyun return ret;
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun
1601*4882a593Smuzhiyun static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
1602*4882a593Smuzhiyun {
1603*4882a593Smuzhiyun struct ibmveth_adapter *adapter = netdev_priv(dev);
1604*4882a593Smuzhiyun struct sockaddr *addr = p;
1605*4882a593Smuzhiyun u64 mac_address;
1606*4882a593Smuzhiyun int rc;
1607*4882a593Smuzhiyun
1608*4882a593Smuzhiyun if (!is_valid_ether_addr(addr->sa_data))
1609*4882a593Smuzhiyun return -EADDRNOTAVAIL;
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun mac_address = ibmveth_encode_mac_addr(addr->sa_data);
1612*4882a593Smuzhiyun rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1613*4882a593Smuzhiyun if (rc) {
1614*4882a593Smuzhiyun netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1615*4882a593Smuzhiyun return rc;
1616*4882a593Smuzhiyun }
1617*4882a593Smuzhiyun
1618*4882a593Smuzhiyun ether_addr_copy(dev->dev_addr, addr->sa_data);
1619*4882a593Smuzhiyun
1620*4882a593Smuzhiyun return 0;
1621*4882a593Smuzhiyun }
1622*4882a593Smuzhiyun
1623*4882a593Smuzhiyun static const struct net_device_ops ibmveth_netdev_ops = {
1624*4882a593Smuzhiyun .ndo_open = ibmveth_open,
1625*4882a593Smuzhiyun .ndo_stop = ibmveth_close,
1626*4882a593Smuzhiyun .ndo_start_xmit = ibmveth_start_xmit,
1627*4882a593Smuzhiyun .ndo_set_rx_mode = ibmveth_set_multicast_list,
1628*4882a593Smuzhiyun .ndo_do_ioctl = ibmveth_ioctl,
1629*4882a593Smuzhiyun .ndo_change_mtu = ibmveth_change_mtu,
1630*4882a593Smuzhiyun .ndo_fix_features = ibmveth_fix_features,
1631*4882a593Smuzhiyun .ndo_set_features = ibmveth_set_features,
1632*4882a593Smuzhiyun .ndo_validate_addr = eth_validate_addr,
1633*4882a593Smuzhiyun .ndo_set_mac_address = ibmveth_set_mac_addr,
1634*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1635*4882a593Smuzhiyun .ndo_poll_controller = ibmveth_poll_controller,
1636*4882a593Smuzhiyun #endif
1637*4882a593Smuzhiyun };
1638*4882a593Smuzhiyun
1639*4882a593Smuzhiyun static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1640*4882a593Smuzhiyun {
1641*4882a593Smuzhiyun int rc, i, mac_len;
1642*4882a593Smuzhiyun struct net_device *netdev;
1643*4882a593Smuzhiyun struct ibmveth_adapter *adapter;
1644*4882a593Smuzhiyun unsigned char *mac_addr_p;
1645*4882a593Smuzhiyun __be32 *mcastFilterSize_p;
1646*4882a593Smuzhiyun long ret;
1647*4882a593Smuzhiyun unsigned long ret_attr;
1648*4882a593Smuzhiyun
1649*4882a593Smuzhiyun dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
1650*4882a593Smuzhiyun dev->unit_address);
1651*4882a593Smuzhiyun
1652*4882a593Smuzhiyun mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
1653*4882a593Smuzhiyun &mac_len);
1654*4882a593Smuzhiyun if (!mac_addr_p) {
1655*4882a593Smuzhiyun dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
1656*4882a593Smuzhiyun return -EINVAL;
1657*4882a593Smuzhiyun }
1658*4882a593Smuzhiyun /* Workaround for old/broken pHyp */
1659*4882a593Smuzhiyun if (mac_len == 8)
1660*4882a593Smuzhiyun mac_addr_p += 2;
1661*4882a593Smuzhiyun else if (mac_len != 6) {
1662*4882a593Smuzhiyun dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
1663*4882a593Smuzhiyun mac_len);
1664*4882a593Smuzhiyun return -EINVAL;
1665*4882a593Smuzhiyun }
1666*4882a593Smuzhiyun
1667*4882a593Smuzhiyun mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
1668*4882a593Smuzhiyun VETH_MCAST_FILTER_SIZE,
1669*4882a593Smuzhiyun NULL);
1670*4882a593Smuzhiyun if (!mcastFilterSize_p) {
1671*4882a593Smuzhiyun dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
1672*4882a593Smuzhiyun "attribute\n");
1673*4882a593Smuzhiyun return -EINVAL;
1674*4882a593Smuzhiyun }
1675*4882a593Smuzhiyun
1676*4882a593Smuzhiyun netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
1677*4882a593Smuzhiyun
1678*4882a593Smuzhiyun if (!netdev)
1679*4882a593Smuzhiyun return -ENOMEM;
1680*4882a593Smuzhiyun
1681*4882a593Smuzhiyun adapter = netdev_priv(netdev);
1682*4882a593Smuzhiyun dev_set_drvdata(&dev->dev, netdev);
1683*4882a593Smuzhiyun
1684*4882a593Smuzhiyun adapter->vdev = dev;
1685*4882a593Smuzhiyun adapter->netdev = netdev;
1686*4882a593Smuzhiyun adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
1687*4882a593Smuzhiyun adapter->pool_config = 0;
1688*4882a593Smuzhiyun ibmveth_init_link_settings(netdev);
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1691*4882a593Smuzhiyun
1692*4882a593Smuzhiyun netdev->irq = dev->irq;
1693*4882a593Smuzhiyun netdev->netdev_ops = &ibmveth_netdev_ops;
1694*4882a593Smuzhiyun netdev->ethtool_ops = &netdev_ethtool_ops;
1695*4882a593Smuzhiyun SET_NETDEV_DEV(netdev, &dev->dev);
1696*4882a593Smuzhiyun netdev->hw_features = NETIF_F_SG;
1697*4882a593Smuzhiyun if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
1698*4882a593Smuzhiyun netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1699*4882a593Smuzhiyun NETIF_F_RXCSUM;
1700*4882a593Smuzhiyun }
1701*4882a593Smuzhiyun
1702*4882a593Smuzhiyun netdev->features |= netdev->hw_features;
1703*4882a593Smuzhiyun
1704*4882a593Smuzhiyun ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
1705*4882a593Smuzhiyun
1706*4882a593Smuzhiyun /* If running older firmware, TSO should not be enabled by default */
1707*4882a593Smuzhiyun if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
1708*4882a593Smuzhiyun !old_large_send) {
1709*4882a593Smuzhiyun netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1710*4882a593Smuzhiyun netdev->features |= netdev->hw_features;
1711*4882a593Smuzhiyun } else {
1712*4882a593Smuzhiyun netdev->hw_features |= NETIF_F_TSO;
1713*4882a593Smuzhiyun }
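/* On the older-firmware branch above, TSO lands only in hw_features and
 * not in features: it stays off by default but can still be enabled
 * explicitly via ethtool -K if the administrator chooses.
 */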
1714*4882a593Smuzhiyun
1715*4882a593Smuzhiyun adapter->is_active_trunk = false;
1716*4882a593Smuzhiyun if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) {
1717*4882a593Smuzhiyun adapter->is_active_trunk = true;
1718*4882a593Smuzhiyun netdev->hw_features |= NETIF_F_FRAGLIST;
1719*4882a593Smuzhiyun netdev->features |= NETIF_F_FRAGLIST;
1720*4882a593Smuzhiyun }
1721*4882a593Smuzhiyun
1722*4882a593Smuzhiyun netdev->min_mtu = IBMVETH_MIN_MTU;
1723*4882a593Smuzhiyun netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
1724*4882a593Smuzhiyun
1725*4882a593Smuzhiyun memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
1726*4882a593Smuzhiyun
1727*4882a593Smuzhiyun if (firmware_has_feature(FW_FEATURE_CMO))
1728*4882a593Smuzhiyun memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
1729*4882a593Smuzhiyun
1730*4882a593Smuzhiyun for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1731*4882a593Smuzhiyun struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1732*4882a593Smuzhiyun int error;
1733*4882a593Smuzhiyun
1734*4882a593Smuzhiyun ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
1735*4882a593Smuzhiyun pool_count[i], pool_size[i],
1736*4882a593Smuzhiyun pool_active[i]);
1737*4882a593Smuzhiyun error = kobject_init_and_add(kobj, &ktype_veth_pool,
1738*4882a593Smuzhiyun &dev->dev.kobj, "pool%d", i);
1739*4882a593Smuzhiyun if (!error)
1740*4882a593Smuzhiyun kobject_uevent(kobj, KOBJ_ADD);
1741*4882a593Smuzhiyun }
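/* Each pool is now visible in sysfs under the vio device, e.g. (with a
 * hypothetical unit address) /sys/devices/vio/30000002/pool0/ carrying
 * the active, num and size attributes defined further below, so
 *
 *   echo 0 > /sys/devices/vio/30000002/pool0/active
 *
 * deactivates pool 0 at runtime.
 */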
1742*4882a593Smuzhiyun
1743*4882a593Smuzhiyun netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
1744*4882a593Smuzhiyun netdev_dbg(netdev, "registering netdev...\n");
1745*4882a593Smuzhiyun
1746*4882a593Smuzhiyun ibmveth_set_features(netdev, netdev->features);
1747*4882a593Smuzhiyun
1748*4882a593Smuzhiyun rc = register_netdev(netdev);
1749*4882a593Smuzhiyun
1750*4882a593Smuzhiyun if (rc) {
1751*4882a593Smuzhiyun netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
1752*4882a593Smuzhiyun free_netdev(netdev);
1753*4882a593Smuzhiyun return rc;
1754*4882a593Smuzhiyun }
1755*4882a593Smuzhiyun
1756*4882a593Smuzhiyun netdev_dbg(netdev, "registered\n");
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun return 0;
1759*4882a593Smuzhiyun }
1760*4882a593Smuzhiyun
1761*4882a593Smuzhiyun static int ibmveth_remove(struct vio_dev *dev)
1762*4882a593Smuzhiyun {
1763*4882a593Smuzhiyun struct net_device *netdev = dev_get_drvdata(&dev->dev);
1764*4882a593Smuzhiyun struct ibmveth_adapter *adapter = netdev_priv(netdev);
1765*4882a593Smuzhiyun int i;
1766*4882a593Smuzhiyun
1767*4882a593Smuzhiyun for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1768*4882a593Smuzhiyun kobject_put(&adapter->rx_buff_pool[i].kobj);
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun unregister_netdev(netdev);
1771*4882a593Smuzhiyun
1772*4882a593Smuzhiyun free_netdev(netdev);
1773*4882a593Smuzhiyun dev_set_drvdata(&dev->dev, NULL);
1774*4882a593Smuzhiyun
1775*4882a593Smuzhiyun return 0;
1776*4882a593Smuzhiyun }
1777*4882a593Smuzhiyun
1778*4882a593Smuzhiyun static struct attribute veth_active_attr;
1779*4882a593Smuzhiyun static struct attribute veth_num_attr;
1780*4882a593Smuzhiyun static struct attribute veth_size_attr;
1781*4882a593Smuzhiyun
1782*4882a593Smuzhiyun static ssize_t veth_pool_show(struct kobject *kobj,
1783*4882a593Smuzhiyun struct attribute *attr, char *buf)
1784*4882a593Smuzhiyun {
1785*4882a593Smuzhiyun struct ibmveth_buff_pool *pool = container_of(kobj,
1786*4882a593Smuzhiyun struct ibmveth_buff_pool,
1787*4882a593Smuzhiyun kobj);
1788*4882a593Smuzhiyun
1789*4882a593Smuzhiyun if (attr == &veth_active_attr)
1790*4882a593Smuzhiyun return sprintf(buf, "%d\n", pool->active);
1791*4882a593Smuzhiyun else if (attr == &veth_num_attr)
1792*4882a593Smuzhiyun return sprintf(buf, "%d\n", pool->size);
1793*4882a593Smuzhiyun else if (attr == &veth_size_attr)
1794*4882a593Smuzhiyun return sprintf(buf, "%d\n", pool->buff_size);
1795*4882a593Smuzhiyun return 0;
1796*4882a593Smuzhiyun }
1797*4882a593Smuzhiyun
1798*4882a593Smuzhiyun static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
1799*4882a593Smuzhiyun const char *buf, size_t count)
1800*4882a593Smuzhiyun {
1801*4882a593Smuzhiyun struct ibmveth_buff_pool *pool = container_of(kobj,
1802*4882a593Smuzhiyun struct ibmveth_buff_pool,
1803*4882a593Smuzhiyun kobj);
1804*4882a593Smuzhiyun struct net_device *netdev = dev_get_drvdata(
1805*4882a593Smuzhiyun container_of(kobj->parent, struct device, kobj));
1806*4882a593Smuzhiyun struct ibmveth_adapter *adapter = netdev_priv(netdev);
1807*4882a593Smuzhiyun long value = simple_strtol(buf, NULL, 10);
1808*4882a593Smuzhiyun long rc;
1809*4882a593Smuzhiyun
1810*4882a593Smuzhiyun if (attr == &veth_active_attr) {
1811*4882a593Smuzhiyun if (value && !pool->active) {
1812*4882a593Smuzhiyun if (netif_running(netdev)) {
1813*4882a593Smuzhiyun if (ibmveth_alloc_buffer_pool(pool)) {
1814*4882a593Smuzhiyun netdev_err(netdev,
1815*4882a593Smuzhiyun "unable to alloc pool\n");
1816*4882a593Smuzhiyun return -ENOMEM;
1817*4882a593Smuzhiyun }
1818*4882a593Smuzhiyun pool->active = 1;
1819*4882a593Smuzhiyun adapter->pool_config = 1;
1820*4882a593Smuzhiyun ibmveth_close(netdev);
1821*4882a593Smuzhiyun adapter->pool_config = 0;
1822*4882a593Smuzhiyun if ((rc = ibmveth_open(netdev)))
1823*4882a593Smuzhiyun return rc;
1824*4882a593Smuzhiyun } else {
1825*4882a593Smuzhiyun pool->active = 1;
1826*4882a593Smuzhiyun }
1827*4882a593Smuzhiyun } else if (!value && pool->active) {
1828*4882a593Smuzhiyun int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1829*4882a593Smuzhiyun int i;
1830*4882a593Smuzhiyun /* Make sure there is a buffer pool with buffers large
1831*4882a593Smuzhiyun enough to hold an MTU-sized packet */
1832*4882a593Smuzhiyun for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1833*4882a593Smuzhiyun if (pool == &adapter->rx_buff_pool[i])
1834*4882a593Smuzhiyun continue;
1835*4882a593Smuzhiyun if (!adapter->rx_buff_pool[i].active)
1836*4882a593Smuzhiyun continue;
1837*4882a593Smuzhiyun if (mtu <= adapter->rx_buff_pool[i].buff_size)
1838*4882a593Smuzhiyun break;
1839*4882a593Smuzhiyun }
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun if (i == IBMVETH_NUM_BUFF_POOLS) {
1842*4882a593Smuzhiyun netdev_err(netdev, "no active pool >= MTU\n");
1843*4882a593Smuzhiyun return -EPERM;
1844*4882a593Smuzhiyun }
1845*4882a593Smuzhiyun
1846*4882a593Smuzhiyun if (netif_running(netdev)) {
1847*4882a593Smuzhiyun adapter->pool_config = 1;
1848*4882a593Smuzhiyun ibmveth_close(netdev);
1849*4882a593Smuzhiyun pool->active = 0;
1850*4882a593Smuzhiyun adapter->pool_config = 0;
1851*4882a593Smuzhiyun if ((rc = ibmveth_open(netdev)))
1852*4882a593Smuzhiyun return rc;
1853*4882a593Smuzhiyun }
1854*4882a593Smuzhiyun pool->active = 0;
1855*4882a593Smuzhiyun }
1856*4882a593Smuzhiyun } else if (attr == &veth_num_attr) {
1857*4882a593Smuzhiyun if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
1858*4882a593Smuzhiyun return -EINVAL;
1859*4882a593Smuzhiyun } else {
1860*4882a593Smuzhiyun if (netif_running(netdev)) {
1861*4882a593Smuzhiyun adapter->pool_config = 1;
1862*4882a593Smuzhiyun ibmveth_close(netdev);
1863*4882a593Smuzhiyun adapter->pool_config = 0;
1864*4882a593Smuzhiyun pool->size = value;
1865*4882a593Smuzhiyun if ((rc = ibmveth_open(netdev)))
1866*4882a593Smuzhiyun return rc;
1867*4882a593Smuzhiyun } else {
1868*4882a593Smuzhiyun pool->size = value;
1869*4882a593Smuzhiyun }
1870*4882a593Smuzhiyun }
1871*4882a593Smuzhiyun } else if (attr == &veth_size_attr) {
1872*4882a593Smuzhiyun if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
1873*4882a593Smuzhiyun return -EINVAL;
1874*4882a593Smuzhiyun } else {
1875*4882a593Smuzhiyun if (netif_running(netdev)) {
1876*4882a593Smuzhiyun adapter->pool_config = 1;
1877*4882a593Smuzhiyun ibmveth_close(netdev);
1878*4882a593Smuzhiyun adapter->pool_config = 0;
1879*4882a593Smuzhiyun pool->buff_size = value;
1880*4882a593Smuzhiyun if ((rc = ibmveth_open(netdev)))
1881*4882a593Smuzhiyun return rc;
1882*4882a593Smuzhiyun } else {
1883*4882a593Smuzhiyun pool->buff_size = value;
1884*4882a593Smuzhiyun }
1885*4882a593Smuzhiyun }
1886*4882a593Smuzhiyun }
1887*4882a593Smuzhiyun
1888*4882a593Smuzhiyun /* kick the interrupt handler to allocate/deallocate pools */
1889*4882a593Smuzhiyun ibmveth_interrupt(netdev->irq, netdev);
1890*4882a593Smuzhiyun return count;
1891*4882a593Smuzhiyun }
1892*4882a593Smuzhiyun
1893*4882a593Smuzhiyun
1894*4882a593Smuzhiyun #define ATTR(_name, _mode) \
1895*4882a593Smuzhiyun struct attribute veth_##_name##_attr = { \
1896*4882a593Smuzhiyun .name = __stringify(_name), .mode = _mode, \
1897*4882a593Smuzhiyun };
1898*4882a593Smuzhiyun
1899*4882a593Smuzhiyun static ATTR(active, 0644);
1900*4882a593Smuzhiyun static ATTR(num, 0644);
1901*4882a593Smuzhiyun static ATTR(size, 0644);
1902*4882a593Smuzhiyun
1903*4882a593Smuzhiyun static struct attribute *veth_pool_attrs[] = {
1904*4882a593Smuzhiyun &veth_active_attr,
1905*4882a593Smuzhiyun &veth_num_attr,
1906*4882a593Smuzhiyun &veth_size_attr,
1907*4882a593Smuzhiyun NULL,
1908*4882a593Smuzhiyun };
1909*4882a593Smuzhiyun
1910*4882a593Smuzhiyun static const struct sysfs_ops veth_pool_ops = {
1911*4882a593Smuzhiyun .show = veth_pool_show,
1912*4882a593Smuzhiyun .store = veth_pool_store,
1913*4882a593Smuzhiyun };
1914*4882a593Smuzhiyun
1915*4882a593Smuzhiyun static struct kobj_type ktype_veth_pool = {
1916*4882a593Smuzhiyun .release = NULL,
1917*4882a593Smuzhiyun .sysfs_ops = &veth_pool_ops,
1918*4882a593Smuzhiyun .default_attrs = veth_pool_attrs,
1919*4882a593Smuzhiyun };
1920*4882a593Smuzhiyun
1921*4882a593Smuzhiyun static int ibmveth_resume(struct device *dev)
1922*4882a593Smuzhiyun {
1923*4882a593Smuzhiyun struct net_device *netdev = dev_get_drvdata(dev);
1924*4882a593Smuzhiyun ibmveth_interrupt(netdev->irq, netdev);
1925*4882a593Smuzhiyun return 0;
1926*4882a593Smuzhiyun }
1927*4882a593Smuzhiyun
1928*4882a593Smuzhiyun static const struct vio_device_id ibmveth_device_table[] = {
1929*4882a593Smuzhiyun { "network", "IBM,l-lan"},
1930*4882a593Smuzhiyun { "", "" }
1931*4882a593Smuzhiyun };
1932*4882a593Smuzhiyun MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1933*4882a593Smuzhiyun
1934*4882a593Smuzhiyun static const struct dev_pm_ops ibmveth_pm_ops = {
1935*4882a593Smuzhiyun .resume = ibmveth_resume
1936*4882a593Smuzhiyun };
1937*4882a593Smuzhiyun
1938*4882a593Smuzhiyun static struct vio_driver ibmveth_driver = {
1939*4882a593Smuzhiyun .id_table = ibmveth_device_table,
1940*4882a593Smuzhiyun .probe = ibmveth_probe,
1941*4882a593Smuzhiyun .remove = ibmveth_remove,
1942*4882a593Smuzhiyun .get_desired_dma = ibmveth_get_desired_dma,
1943*4882a593Smuzhiyun .name = ibmveth_driver_name,
1944*4882a593Smuzhiyun .pm = &ibmveth_pm_ops,
1945*4882a593Smuzhiyun };
1946*4882a593Smuzhiyun
1947*4882a593Smuzhiyun static int __init ibmveth_module_init(void)
1948*4882a593Smuzhiyun {
1949*4882a593Smuzhiyun printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
1950*4882a593Smuzhiyun ibmveth_driver_string, ibmveth_driver_version);
1951*4882a593Smuzhiyun
1952*4882a593Smuzhiyun return vio_register_driver(&ibmveth_driver);
1953*4882a593Smuzhiyun }
1954*4882a593Smuzhiyun
1955*4882a593Smuzhiyun static void __exit ibmveth_module_exit(void)
1956*4882a593Smuzhiyun {
1957*4882a593Smuzhiyun vio_unregister_driver(&ibmveth_driver);
1958*4882a593Smuzhiyun }
1959*4882a593Smuzhiyun
1960*4882a593Smuzhiyun module_init(ibmveth_module_init);
1961*4882a593Smuzhiyun module_exit(ibmveth_module_exit);
1962