// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************

  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.


  Contact Information:
  Intel Linux Wireless <ilw@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>

#include "libipw.h"

/*

802.11 Data Frame

      ,-------------------------------------------------------------------.
Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
      |------|------|---------|---------|---------|------|---------|------|
Desc. | ctrl | dura |  DA/RA  |   TA    |   SA    | Sequ | Frame   |  fcs |
      |      | tion | (BSSID) |         |         | ence |  data   |      |
      `--------------------------------------------------|         |------'
Total: 28 non-data bytes                                  `----.----'
                                                              |
       .- 'Frame data' expands, if WEP enabled, to <----------'
       |
       V
      ,-----------------------.
Bytes |  4  |   0-2296  |  4  |
      |-----|-----------|-----|
Desc. | IV  | Encrypted | ICV |
      |     | Packet    |     |
      `-----|           |-----'
            `-----.-----'
                  |
       .- 'Encrypted Packet' expands to
       |
       V
      ,---------------------------------------------------.
Bytes |  1   |  1   |    1    |    3     |  2   | 0-2304  |
      |------|------|---------|----------|------|---------|
Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP      |
      | DSAP | SSAP |         |          |      | Packet  |
      | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
      `----------------------------------------------------
Total: 8 non-data bytes

802.3 Ethernet Data Frame

      ,-----------------------------------------.
Bytes |   6   |   6   |  2   |  Variable |   4  |
      |-------|-------|------|-----------|------|
Desc. | Dest. | Source| Type | IP Packet |  fcs |
      |  MAC  |  MAC  |      |           |      |
      `-----------------------------------------'
Total: 18 non-data bytes

In the event that fragmentation is required, the incoming payload is split
into N parts of size ieee->fts.  The first fragment contains the SNAP header
and the remaining fragments are just data.

If encryption is enabled, each fragment payload size is reduced by enough
space to add the prefix and postfix (IV and ICV totalling 8 bytes in the
case of WEP).  So if you have 1500 bytes of payload with ieee->fts set to
500 and no encryption, it will take 3 frames.  With WEP it will take 4
frames, as the payload of each frame is reduced to 492 bytes.  (A small
worked sketch of this arithmetic follows this comment block.)

 *    SKB visualization
 *
 *  ,- skb->data
 * |
 * |    ETHERNET HEADER        ,-<-- PAYLOAD
 * |                           |     14 bytes from skb->data
 * |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
 * |                       | | |
 * |,-Dest.--. ,--Src.---. | | |
 * |  6 bytes| | 6 bytes | | | |
 * v         | |         | | | |
 * 0         | v       1 | v | v           2
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
 *     ^     | ^         | ^ |
 *     |     | |         | | |
 *     |     | |         | `T' <---- 2 bytes for Type
 *     |     | |         |
 *     |     | '---SNAP--' <-------- 6 bytes for SNAP
 *     |     |
 *     `-IV--' <-------------------- 4 bytes for IV (WEP)
 *
 *      SNAP HEADER
 *
 */
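
/*
 * Illustrative sketch of the fragmentation arithmetic described in the
 * comment above.  This helper is not part of the driver and its name is
 * hypothetical; the 8 byte overhead assumes WEP's 4 byte IV prefix plus
 * 4 byte ICV postfix, and fts is assumed to be larger than that overhead.
 */
static inline int libipw_example_nr_frags(int payload, int fts, bool wep)
{
	int per_frag = fts - (wep ? 8 : 0);

	/* 1500 bytes at fts 500: 3 fragments in the clear, 4 with WEP */
	return DIV_ROUND_UP(payload, per_frag);
}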

static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

static int libipw_copy_snap(u8 *data, __be16 h_proto)
{
	struct libipw_snap_hdr *snap;
	u8 *oui;

	snap = (struct libipw_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX))
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16));

	return SNAP_SIZE + sizeof(u16);
}
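
/*
 * For example, an IPv4 frame (h_proto == htons(ETH_P_IP)) ends up with the
 * RFC 1042 encapsulation AA AA 03 00 00 00 08 00 prepended to its payload,
 * which accounts for the eight "non-data bytes" counted in the SNAP diagram
 * above.
 */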

static int libipw_encrypt_fragment(struct libipw_device *ieee,
				   struct sk_buff *frag, int hdr_len)
{
	struct lib80211_crypt_data *crypt =
		ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
	int res;

	if (crypt == NULL)
		return -1;

	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}

void libipw_txb_free(struct libipw_txb *txb)
{
	int i;
	if (unlikely(!txb))
		return;
	for (i = 0; i < txb->nr_frags; i++)
		if (txb->fragments[i])
			dev_kfree_skb_any(txb->fragments[i]);
	kfree(txb);
}

static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size,
					   int headroom, gfp_t gfp_mask)
{
	struct libipw_txb *txb;
	int i;
	txb = kmalloc(sizeof(struct libipw_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	memset(txb, 0, sizeof(struct libipw_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = txb_size;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
						    gfp_mask);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		skb_reserve(txb->fragments[i], headroom);
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}
	return txb;
}

static int libipw_classify(struct sk_buff *skb)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}
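
/*
 * Example of the classification above: an IPv4 packet marked DSCP CS5
 * (tos 0xa0) yields priority 5, while anything that is not IPv4 falls
 * through to best-effort priority 0.
 */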

/* Incoming skb is converted to a txb which consists of
 * a block of 802.11 fragment packets (stored as skbs) */
netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct libipw_device *ieee = netdev_priv(dev);
	struct libipw_txb *txb = NULL;
	struct libipw_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
	    rts_required;
	unsigned long flags;
	int encrypt, host_encrypt, host_encrypt_msdu;
	__be16 ether_type;
	int bytes, fc, hdr_len;
	struct sk_buff *skb_frag;
	struct libipw_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	struct lib80211_crypt_data *crypt;
	int priority = skb->priority;
	int snapped = 0;

	if (ieee->is_queue_full && (*ieee->is_queue_full)(dev, priority))
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if (!ieee->hard_start_xmit) {
		printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
		goto success;
	}
	if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
		printk(KERN_WARNING "%s: skb too small (%d).\n",
		       ieee->dev->name, skb->len);
		goto success;
	}

	ether_type = ((struct ethhdr *)skb->data)->h_proto;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) &&
	    ieee->sec.encrypt;

	host_encrypt = ieee->host_encrypt && encrypt && crypt;
	host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;

	if (!encrypt && ieee->ieee802_1x &&
	    ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) {
		dev->stats.tx_dropped++;
		goto success;
	}

	/* Save source and destination addresses */
	skb_copy_from_linear_data(skb, dest, ETH_ALEN);
	skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);

	if (host_encrypt)
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
		    IEEE80211_FCTL_PROTECTED;
	else
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

	if (ieee->iw_mode == IW_MODE_INFRA) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(header.addr1, ieee->bssid, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, dest, ETH_ALEN);
	} else if (ieee->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		memcpy(header.addr1, dest, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, ieee->bssid, ETH_ALEN);
	}
	hdr_len = LIBIPW_3ADDR_LEN;

	if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
		fc |= IEEE80211_STYPE_QOS_DATA;
		hdr_len += 2;

		skb->priority = libipw_classify(skb);
		header.qos_ctl |= cpu_to_le16(skb->priority & LIBIPW_QCTL_TID);
	}
	header.frame_ctl = cpu_to_le16(fc);

	/* Advance the SKB to the start of the payload */
	skb_pull(skb, sizeof(struct ethhdr));

	/* Determine total amount of storage required for TXB packets */
	bytes = skb->len + SNAP_SIZE + sizeof(u16);

	/* Encrypt msdu first on the whole data packet. */
	if ((host_encrypt || host_encrypt_msdu) &&
	    crypt && crypt->ops && crypt->ops->encrypt_msdu) {
		int res = 0;
		int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		struct sk_buff *skb_new = dev_alloc_skb(len);

		if (unlikely(!skb_new))
			goto failed;

		skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
		skb_put_data(skb_new, &header, hdr_len);
		snapped = 1;
		libipw_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
				 ether_type);
		skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
		res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
		if (res < 0) {
			LIBIPW_ERROR("msdu encryption failed\n");
			dev_kfree_skb_any(skb_new);
			goto failed;
		}
		dev_kfree_skb_any(skb);
		skb = skb_new;
		bytes += crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		skb_pull(skb, hdr_len);
	}

	if (host_encrypt || ieee->host_open_frag) {
		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented) */
		if (is_multicast_ether_addr(dest) ||
		    is_broadcast_ether_addr(dest))
			frag_size = MAX_FRAG_THRESHOLD;
		else
			frag_size = ieee->fts;

		/* Determine amount of payload per fragment.  Regardless of
		 * whether this stack is providing the full 802.11 header, one
		 * will eventually be affixed to this fragment, so we must
		 * account for it when determining the amount of payload
		 * space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			bytes_per_frag -= LIBIPW_FCS_LEN;

		/* Each fragment may need to have room for encryption
		 * pre/postfix */
		if (host_encrypt && crypt && crypt->ops)
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
			    crypt->ops->extra_mpdu_postfix_len;

		/* Number of fragments is the total payload bytes divided by
		 * the payload space available per fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;
	} else {
		nr_frags = 1;
		bytes_per_frag = bytes_last_frag = bytes;
		frag_size = bytes + hdr_len;
	}

	rts_required = (frag_size > ieee->rts
			&& ieee->config & CFG_LIBIPW_RTS);
	if (rts_required)
		nr_frags++;

	/* When we allocate the TXB we allocate enough space for the reserve
	 * and full fragment bytes (bytes_per_frag doesn't include prefix,
	 * postfix, header, FCS, etc.) */
	txb = libipw_alloc_txb(nr_frags, frag_size,
			       ieee->tx_headroom, GFP_ATOMIC);
	if (unlikely(!txb)) {
		printk(KERN_WARNING "%s: Could not allocate TXB\n",
		       ieee->dev->name);
		goto failed;
	}
	txb->encrypted = encrypt;
	if (host_encrypt)
		txb->payload_size = frag_size * (nr_frags - 1) +
		    bytes_last_frag;
	else
		txb->payload_size = bytes;

	if (rts_required) {
		skb_frag = txb->fragments[0];
		frag_hdr = skb_put(skb_frag, hdr_len);

		/*
		 * Set header frame_ctl to the RTS.
		 */
		header.frame_ctl =
		    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
		memcpy(frag_hdr, &header, hdr_len);

		/*
		 * Restore header frame_ctl to the original data setting.
		 */
		header.frame_ctl = cpu_to_le16(fc);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, 4);

		txb->rts_included = 1;
		i = 1;
	} else
		i = 0;

	for (; i < nr_frags; i++) {
		skb_frag = txb->fragments[i];

		if (host_encrypt)
			skb_reserve(skb_frag,
				    crypt->ops->extra_mpdu_prefix_len);

		frag_hdr = skb_put_data(skb_frag, &header, hdr_len);

		/* If this is not the last fragment, then add the MOREFRAGS
		 * bit to the frame control */
		if (i != nr_frags - 1) {
			frag_hdr->frame_ctl =
			    cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
			bytes = bytes_per_frag;
		} else {
			/* The last fragment takes the remaining length */
			bytes = bytes_last_frag;
		}

		if (i == 0 && !snapped) {
			libipw_copy_snap(skb_put
					 (skb_frag, SNAP_SIZE + sizeof(u16)),
					 ether_type);
			bytes -= SNAP_SIZE + sizeof(u16);
		}

		skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes);

		/* Advance the SKB... */
		skb_pull(skb, bytes);

		/* Encryption routine will move the header forward in order
		 * to insert the IV between the header and the payload */
		if (host_encrypt)
			libipw_encrypt_fragment(ieee, skb_frag, hdr_len);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, 4);
	}

success:
	spin_unlock_irqrestore(&ieee->lock, flags);

	dev_kfree_skb_any(skb);

	if (txb) {
		netdev_tx_t ret = (*ieee->hard_start_xmit)(txb, dev, priority);
		if (ret == NETDEV_TX_OK) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += txb->payload_size;
			return NETDEV_TX_OK;
		}

		libipw_txb_free(txb);
	}

	return NETDEV_TX_OK;

failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	dev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL(libipw_xmit);

EXPORT_SYMBOL(libipw_txb_free);