// SPDX-License-Identifier: GPL-2.0-or-later
/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 crypto specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

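/*
 * Map a mac80211 cipher suite onto the rt2x00 hardware cipher constant.
 * CIPHER_NONE is returned for suites the hardware cannot handle, in
 * which case encryption is left to software.
 */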
enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
{
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return CIPHER_WEP64;
	case WLAN_CIPHER_SUITE_WEP104:
		return CIPHER_WEP128;
	case WLAN_CIPHER_SUITE_TKIP:
		return CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return CIPHER_AES;
	default:
		return CIPHER_NONE;
	}
}

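/*
 * Fill the crypto fields of a TX descriptor from the mac80211 key
 * attached to the frame. The IEEE80211_KEY_FLAG_GENERATE_IV/_MMIC
 * flags ask mac80211 to generate the IV and Michael MIC in software;
 * when they are not set, the matching ENTRY_TXD_ENCRYPT_IV/_MMIC
 * descriptor flags leave that work to the hardware.
 */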
void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
				       struct sk_buff *skb,
				       struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;

	if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !hw_key)
		return;

	__set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);

	txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);

	if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
		__set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);

	txdesc->key_idx = hw_key->hw_key_idx;
	txdesc->iv_offset = txdesc->header_length;
	txdesc->iv_len = hw_key->iv_len;

	if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
		__set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);

	if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
		__set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
}

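/*
 * Compute how many bytes of crypto overhead the device adds to a TX
 * frame. For example (using mac80211's standard lengths): TKIP with
 * both GENERATE flags cleared needs 4 (ICV) + 8 (IV/EIV) + 8 (Michael
 * MIC) = 20 bytes, while CCMP needs 8 (MIC) + 8 (PN header) = 16.
 */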
unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
				      struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *key = tx_info->control.hw_key;
	unsigned int overhead = 0;

	if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !key)
		return overhead;

	/*
	 * Extend the frame length to include IV/EIV/ICV/MMIC.
	 * Note that these lengths should only be added when
	 * mac80211 does not generate them itself.
	 */
	overhead += key->icv_len;

	if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
		overhead += key->iv_len;

	if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
		if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
			overhead += 8;
	}

	return overhead;
}

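/*
 * Preserve a copy of the IV/EIV that is present in the frame, so that
 * later TX path code can reference it through the skb descriptor
 * without having to parse the frame again.
 */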
void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (unlikely(!txdesc->iv_len))
		return;

	/* Copy IV/EIV data */
	memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len);
}

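/*
 * Strip the IV/EIV from the frame after saving it in the skb
 * descriptor. For TKIP (iv_len = 8) the transformation is:
 *
 *   before: | header | IV/EIV | payload |
 *   after:  | header | payload |           (IV/EIV kept in skbdesc->iv)
 */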
void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (unlikely(!txdesc->iv_len))
		return;

	/* Copy IV/EIV data */
	memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len);

	/* Move ieee80211 header */
	memmove(skb->data + txdesc->iv_len, skb->data, txdesc->iv_offset);

	/* Pull buffer to correct size */
	skb_pull(skb, txdesc->iv_len);
	txdesc->length -= txdesc->iv_len;

	/* IV/EIV data has officially been stripped */
	skbdesc->flags |= SKBDESC_IV_STRIPPED;
}

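/*
 * Reinsert a previously stripped IV/EIV in front of the payload.
 * The length is inferred from which 4-byte words of skbdesc->iv are
 * non-zero: one word for WEP, two (IV + EIV) for TKIP and AES.
 */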
void rt2x00crypto_tx_insert_iv(struct sk_buff *skb, unsigned int header_length)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
	const unsigned int iv_len =
	    ((!!(skbdesc->iv[0])) * 4) + ((!!(skbdesc->iv[1])) * 4);

	if (!(skbdesc->flags & SKBDESC_IV_STRIPPED))
		return;

	skb_push(skb, iv_len);

	/* Move ieee80211 header */
	memmove(skb->data, skb->data + iv_len, header_length);

	/* Copy IV/EIV data */
	memcpy(skb->data + header_length, skbdesc->iv, iv_len);

	/* IV/EIV data has returned into the frame */
	skbdesc->flags &= ~SKBDESC_IV_STRIPPED;
}

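/*
 * Reconstruct a received frame so mac80211 finds the IV/EIV and ICV
 * at their regular 802.11 offsets, undoing the hardware's payload
 * alignment and L2 padding in the process.
 */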
void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
			       unsigned int header_length,
			       struct rxdone_entry_desc *rxdesc)
{
	unsigned int payload_len = rxdesc->size - header_length;
	unsigned int align = ALIGN_SIZE(skb, header_length);
	unsigned int iv_len;
	unsigned int icv_len;
	unsigned int transfer = 0;

	/*
	 * WEP64/WEP128: Provides IV & ICV
	 * TKIP: Provides IV/EIV & ICV
	 * AES: Provides IV/EIV & ICV
	 */
	switch (rxdesc->cipher) {
	case CIPHER_WEP64:
	case CIPHER_WEP128:
		iv_len = 4;
		icv_len = 4;
		break;
	case CIPHER_TKIP:
		iv_len = 8;
		icv_len = 4;
		break;
	case CIPHER_AES:
		iv_len = 8;
		icv_len = 8;
		break;
	default:
		/* Unsupported cipher type */
		return;
	}

	/*
	 * Make room for the new data. There are two possibilities:
	 * either the alignment padding is already present between
	 * the 802.11 header and payload, in which case the header
	 * has to move less than iv_len bytes since the already
	 * available l2pad bytes can be reused for the IV data;
	 * or the alignment must be added manually, in which case
	 * the header must move more than iv_len bytes to make
	 * room for the payload move as well.
	 */
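	/*
	 * For example, with iv_len = 8 and align = 2:
	 *
	 *   L2PAD set:   | hdr |pad| payload |  ->  | hdr |IV/EIV| payload |
	 *                the header moves toward the buffer start by
	 *                iv_len - align = 6 bytes
	 *   L2PAD unset: | hdr | payload |      ->  | hdr |IV/EIV| payload |
	 *                the header moves by iv_len + align = 10 bytes
	 *                and the payload moves by align = 2 bytes
	 */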
	if (rxdesc->dev_flags & RXDONE_L2PAD) {
		skb_push(skb, iv_len - align);
		skb_put(skb, icv_len);

		/* Move ieee80211 header */
		memmove(skb->data + transfer,
			skb->data + transfer + (iv_len - align),
			header_length);
		transfer += header_length;
	} else {
		skb_push(skb, iv_len + align);
		if (align < icv_len)
			skb_put(skb, icv_len - align);
		else if (align > icv_len)
			skb_trim(skb, rxdesc->size + iv_len + icv_len);

		/* Move ieee80211 header */
		memmove(skb->data + transfer,
			skb->data + transfer + iv_len + align,
			header_length);
		transfer += header_length;
	}

	/* Copy IV/EIV data */
	memcpy(skb->data + transfer, rxdesc->iv, iv_len);
	transfer += iv_len;

	/*
	 * Move payload for alignment purposes. Note that
	 * this is only needed when no l2 padding is present.
	 */
	if (!(rxdesc->dev_flags & RXDONE_L2PAD)) {
		memmove(skb->data + transfer,
			skb->data + transfer + align,
			payload_len);
	}

	/*
	 * NOTE: Always count the payload as transferred,
	 * even when alignment was set to zero. This is required
	 * for determining the correct offset for the ICV data.
	 */
	transfer += payload_len;

	/*
	 * Copy ICV data.
	 * AES appends 8 bytes, we can't fill the upper
	 * 4 bytes, but mac80211 doesn't care about what
	 * we provide here anyway and strips it immediately.
	 */
	memcpy(skb->data + transfer, &rxdesc->icv, 4);
	transfer += icv_len;

	/* IV/EIV/ICV has been inserted into the frame */
	rxdesc->size = transfer;
	rxdesc->flags &= ~RX_FLAG_IV_STRIPPED;
}