// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2002 Intersil Americas Inc.
 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
 */

#include <linux/module.h>
#include <linux/gfp.h>

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <asm/byteorder.h>

#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_eth.h"
#include "islpci_mgt.h"
#include "oid_mgt.h"

/******************************************************************************
    Network Interface functions
******************************************************************************/
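/*
 * Queue bookkeeping convention, as used throughout this file: the
 * control-block indices driver_curr_frag[]/device_curr_frag[] and the
 * shadow counters priv->free_data_tx/free_data_rx are free-running and
 * only ever incremented. An index is reduced modulo the queue size
 * (ISL38XX_CB_TX_QSIZE / ISL38XX_CB_RX_QSIZE) when a slot is accessed,
 * and producer minus consumer gives the current queue occupancy.
 */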
void
islpci_eth_cleanup_transmit(islpci_private *priv,
			    isl38xx_control_block *control_block)
{
	struct sk_buff *skb;
	u32 index;

	/* compare the control block read pointer with the free pointer */
	while (priv->free_data_tx !=
	       le32_to_cpu(control_block->
			   device_curr_frag[ISL38XX_CB_TX_DATA_LQ])) {
		/* read the index of the first fragment to be freed */
		index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;

		/* check for holes in the arrays caused by multi-fragment
		 * frames; only the last fragment of a frame has a mapping */
		if (priv->pci_map_tx_address[index]) {
			/* entry is the last fragment of a frame:
			 * free the skb structure and unmap PCI memory */
			skb = priv->data_low_tx[index];

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING,
			      "cleanup skb %p skb->data %p skb->len %u truesize %u\n",
			      skb, skb->data, skb->len, skb->truesize);
#endif

			dma_unmap_single(&priv->pdev->dev,
					 priv->pci_map_tx_address[index],
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_irq(skb);
			skb = NULL;
		}
		/* increment the free data low queue pointer */
		priv->free_data_tx++;
	}
}

netdev_tx_t
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb = priv->control_block;
	u32 index;
	dma_addr_t pci_map_address;
	int frame_size;
	isl38xx_fragment *fragment;
	int offset;
	struct sk_buff *newskb;
	int newskb_offset;
	unsigned long flags;
	unsigned char wds_mac[6];
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif

	/* lock the driver code */
	spin_lock_irqsave(&priv->slock, flags);

	/* check whether the destination queue has enough fragments for the frame */
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
	if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
		printk(KERN_ERR "%s: transmit device queue full when awake\n",
		       ndev->name);
		netif_stop_queue(ndev);

		/* trigger the device */
		isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
				  ISL38XX_DEV_INT_REG);
		udelay(ISL38XX_WRITEIO_DELAY);
		goto drop_free;
	}
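	/* Note that a frame arriving on a full queue is dropped (the
	 * drop_free path below counts it in tx_dropped and returns
	 * NETDEV_TX_OK) rather than reported as NETDEV_TX_BUSY; the
	 * netif_stop_queue() above throttles further submissions until
	 * the cleanup path frees slots. */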
	/* Check alignment and WDS frame formatting. The start of the packet should
	 * be aligned on a 4-byte boundary. If WDS is enabled add another 6 bytes
	 * and add WDS address information */
	if (likely(((long) skb->data & 0x03) | init_wds)) {
		/* get the number of bytes to add and re-align */
		offset = (4 - (long) skb->data) & 0x03;
		offset += init_wds ? 6 : 0;

		/* check whether the current skb can be used */
		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
			unsigned char *src = skb->data;

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
			      init_wds);
#endif

			/* align the buffer on 4-byte boundary */
			skb_reserve(skb, (4 - (long) skb->data) & 0x03);
			if (init_wds) {
				/* wds requires an additional address field of 6 bytes */
				skb_put(skb, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
				memmove(skb->data + 6, src, skb->len);
				skb_copy_to_linear_data(skb, wds_mac, 6);
			} else {
				memmove(skb->data, src, skb->len);
			}

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
			      src, skb->len);
#endif
		} else {
			newskb =
			    dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
			if (unlikely(newskb == NULL)) {
				printk(KERN_ERR "%s: Cannot allocate skb\n",
				       ndev->name);
				goto drop_free;
			}
			newskb_offset = (4 - (long) newskb->data) & 0x03;

			/* Check if newskb->data is aligned */
			if (newskb_offset)
				skb_reserve(newskb, newskb_offset);

			skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
			if (init_wds) {
				skb_copy_from_linear_data(skb,
							  newskb->data + 6,
							  skb->len);
				skb_copy_to_linear_data(newskb, wds_mac, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
			} else
				skb_copy_from_linear_data(skb, newskb->data,
							  skb->len);

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
			      newskb->data, skb->data, skb->len, init_wds);
#endif

			newskb->dev = skb->dev;
			dev_kfree_skb_irq(skb);
			skb = newskb;
		}
	}
	/* display the buffer contents for debugging */
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* map the skb buffer to pci memory for DMA operation */
	pci_map_address = dma_map_single(&priv->pdev->dev, (void *)skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, pci_map_address)) {
		printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
		       ndev->name);
		goto drop_free;
	}
	/* Place the fragment in the control block structure. */
	index = curr_frag % ISL38XX_CB_TX_QSIZE;
	fragment = &cb->tx_data_low[index];

	priv->pci_map_tx_address[index] = pci_map_address;
	/* store the skb address for future freeing */
	priv->data_low_tx[index] = skb;
	/* set the proper fragment start address and size information */
	frame_size = skb->len;
	fragment->size = cpu_to_le16(frame_size);
	fragment->flags = cpu_to_le16(0);	/* set to 1 if more fragments */
	fragment->address = cpu_to_le32(pci_map_address);
	curr_frag++;

	/* The fragment address in the control block must have been
	 * written before announcing the frame buffer to device. */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);
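	/* Stop the queue early, while ISL38XX_MIN_QTHRESHOLD slots are
	 * still free; presumably this leaves headroom for frames the
	 * stack may already have in flight toward ndo_start_xmit. */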
	if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
	    > ISL38XX_CB_TX_QSIZE) {
		/* stop sends from upper layers */
		netif_stop_queue(ndev);

		/* set the full flag for the transmission queue */
		priv->data_low_tx_full = 1;
	}

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* trigger the device */
	islpci_trigger(priv);

	/* unlock the driver code */
	spin_unlock_irqrestore(&priv->slock, flags);

	return NETDEV_TX_OK;

      drop_free:
	ndev->stats.tx_dropped++;
	spin_unlock_irqrestore(&priv->slock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static inline int
islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
{
	/* The card reports full 802.11 packets but with a 20-byte
	 * header and without the FCS. But there is a bit that
	 * indicates if the packet is corrupted :-) */
	struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;

	if (hdr->flags & 0x01)
		/* This one is bad. Drop it ! */
		return -1;
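	/* For ARPHRD_IEEE80211_PRISM devices the radio metadata from the
	 * proprietary rfmon header is repackaged below as an AVS capture
	 * header (all fields big-endian) so user-space sniffers can
	 * interpret it; otherwise the rfmon header is simply stripped. */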
	if (priv->ndev->type == ARPHRD_IEEE80211_PRISM) {
		struct avs_80211_1_header *avs;
		/* extract the relevant data from the header */
		u32 clock = le32_to_cpu(hdr->clock);
		u8 rate = hdr->rate;
		u16 freq = le16_to_cpu(hdr->freq);
		u8 rssi = hdr->rssi;

		skb_pull(*skb, sizeof (struct rfmon_header));

		if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) {
			struct sk_buff *newskb = skb_copy_expand(*skb,
								 sizeof (struct
									 avs_80211_1_header),
								 0, GFP_ATOMIC);
			if (newskb) {
				dev_kfree_skb_irq(*skb);
				*skb = newskb;
			} else
				return -1;
			/* This behavior is not very subtle... */
		}

		/* make room for the new header and fill it. */
		avs = skb_push(*skb, sizeof(struct avs_80211_1_header));

		avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
		avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
		avs->mactime = cpu_to_be64(clock);
		avs->hosttime = cpu_to_be64(jiffies);
		avs->phytype = cpu_to_be32(6);	/* OFDM: 6 for (g), 8 for (a) */
		avs->channel = cpu_to_be32(channel_of_freq(freq));
		avs->datarate = cpu_to_be32(rate * 5);
		avs->antenna = cpu_to_be32(0);	/* unknown */
		avs->priority = cpu_to_be32(0);	/* unknown */
		avs->ssi_type = cpu_to_be32(3);	/* 2: dBm, 3: raw RSSI */
		avs->ssi_signal = cpu_to_be32(rssi & 0x7f);
		avs->ssi_noise = cpu_to_be32(priv->local_iwstatistics.qual.noise);	/* better than 'undefined', I assume */
		avs->preamble = cpu_to_be32(0);	/* unknown */
		avs->encoding = cpu_to_be32(0);	/* unknown */
	} else
		skb_pull(*skb, sizeof (struct rfmon_header));

	(*skb)->protocol = htons(ETH_P_802_2);
	skb_reset_mac_header(*skb);
	(*skb)->pkt_type = PACKET_OTHERHOST;

	return 0;
}

int
islpci_eth_receive(islpci_private *priv)
{
	struct net_device *ndev = priv->ndev;
	isl38xx_control_block *control_block = priv->control_block;
	struct sk_buff *skb;
	u16 size;
	u32 index, offset;
	unsigned char *src;
	int discard = 0;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
#endif

	/* the device has written an Ethernet frame in the data area
	 * of the sk_buff without updating the structure, do it now */
	index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE;
	size = le16_to_cpu(control_block->rx_data_low[index].size);
	skb = priv->data_low_rx[index];
	offset = ((unsigned long)
		  le32_to_cpu(control_block->rx_data_low[index].address) -
		  (unsigned long) skb->data) & 3;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING,
	      "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n",
	      control_block->rx_data_low[priv->free_data_rx].address, skb->data,
	      skb->len, offset, skb->truesize);
#endif

	/* delete the streaming DMA mapping before processing the skb */
	dma_unmap_single(&priv->pdev->dev, priv->pci_map_rx_address[index],
			 MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE);
	/* update the skb structure and align the buffer */
	skb_put(skb, size);
	if (offset) {
		/* the frame starts 2 bytes into the buffer: advance
		 * skb->data past the padding (skb_pull) and restore the
		 * length at the tail (skb_put) */
		skb_pull(skb, 2);
		skb_put(skb, 2);
	}
#if VERBOSE > SHOW_ERROR_MESSAGES
	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* check whether WDS is enabled and whether the data frame is a WDS frame */

	if (init_wds) {
		/* WDS enabled: the first 6 bytes of the buffer hold the WDS
		 * address; strip it */
		src = skb->data + 6;
		memmove(skb->data, src, skb->len - 6);
		skb_trim(skb, skb->len - 6);
	}
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
	DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);

	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif
	/* take care of monitor mode and spy monitoring. */
	if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) {
		skb->dev = ndev;
		discard = islpci_monitor_rx(priv, &skb);
	} else {
		if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
			/* The packet has an rx_annex. Read it for spy
			 * monitoring, then remove it, keeping the two
			 * leading MAC addresses.
			 */
			struct iw_quality wstats;
			struct rx_annex_header *annex =
			    (struct rx_annex_header *) skb->data;
			wstats.level = annex->rfmon.rssi;
			/* The noise value can be a bit outdated if nobody's
			 * reading wireless stats... */
			wstats.noise = priv->local_iwstatistics.qual.noise;
			wstats.qual = wstats.level - wstats.noise;
			wstats.updated = 0x07;
			/* Update spy records */
			wireless_spy_update(ndev, annex->addr2, &wstats);

			skb_copy_from_linear_data(skb,
						  (skb->data +
						   sizeof(struct rfmon_header)),
						  2 * ETH_ALEN);
			skb_pull(skb, sizeof (struct rfmon_header));
		}
		skb->protocol = eth_type_trans(skb, ndev);
	}
	skb->ip_summed = CHECKSUM_NONE;
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += size;

	/* deliver the skb to the network layer */
#ifdef ISLPCI_ETH_DEBUG
	printk
	    ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
	     skb->data[0], skb->data[1], skb->data[2], skb->data[3],
	     skb->data[4], skb->data[5]);
#endif
	if (unlikely(discard)) {
		dev_kfree_skb_irq(skb);
		skb = NULL;
	} else
		netif_rx(skb);

	/* increment the read index for the rx data low queue */
	priv->free_data_rx++;

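	/* Refill the receive queue: pre-post fresh buffers until the
	 * driver pointer is a full queue's worth ahead of the slots the
	 * device has consumed, so the device never runs dry. */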
	/* add one or more sk_buff structures */
	while (index =
	       le32_to_cpu(control_block->
			   driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]),
	       index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) {
		/* allocate an sk_buff for received data frame storage,
		 * including any required alignment operations */
		skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
		if (unlikely(skb == NULL)) {
			/* error allocating an sk_buff structure elements */
			DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
			break;
		}
		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
		/* store the new skb structure pointer */
		index = index % ISL38XX_CB_RX_QSIZE;
		priv->data_low_rx[index] = skb;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING,
		      "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n",
		      skb, skb->data, skb->len, index, skb->truesize);
#endif

		/* set the streaming DMA mapping for proper PCI bus operation */
		priv->pci_map_rx_address[index] =
		    dma_map_single(&priv->pdev->dev, (void *)skb->data,
				   MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[index])) {
			/* error mapping the buffer to device accessible memory address */
			DEBUG(SHOW_ERROR_MESSAGES,
			      "Error mapping DMA address\n");

			/* free the skbuf structure before aborting */
			dev_kfree_skb_irq(skb);
			skb = NULL;
			break;
		}
		/* update the fragment address */
		control_block->rx_data_low[index].address =
		    cpu_to_le32((u32)priv->pci_map_rx_address[index]);
		wmb();

		/* increment the driver read pointer */
		le32_add_cpu(&control_block->
			     driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
	}

	/* trigger the device */
	islpci_trigger(priv);

	return 0;
}

void
islpci_do_reset_and_wake(struct work_struct *work)
{
	islpci_private *priv = container_of(work, islpci_private, reset_task);

	islpci_reset(priv, 1);
	priv->reset_task_pending = 0;
	smp_wmb();
	netif_wake_queue(priv->ndev);
}

void
islpci_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	islpci_private *priv = netdev_priv(ndev);

	/* increment the transmit error counter */
	ndev->stats.tx_errors++;

	if (!priv->reset_task_pending) {
		printk(KERN_WARNING
		       "%s: tx_timeout, scheduling reset\n", ndev->name);
		netif_stop_queue(ndev);
		priv->reset_task_pending = 1;
		schedule_work(&priv->reset_task);
	} else {
		printk(KERN_WARNING
		       "%s: tx_timeout, waiting for reset\n", ndev->name);
	}
}