1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * This file is part of wl1251
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (c) 1998-2007 Texas Instruments Incorporated
6*4882a593Smuzhiyun * Copyright (C) 2008 Nokia Corporation
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/kernel.h>
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include "wl1251.h"
13*4882a593Smuzhiyun #include "reg.h"
14*4882a593Smuzhiyun #include "tx.h"
15*4882a593Smuzhiyun #include "ps.h"
16*4882a593Smuzhiyun #include "io.h"
17*4882a593Smuzhiyun #include "event.h"
18*4882a593Smuzhiyun
wl1251_tx_double_buffer_busy(struct wl1251 * wl,u32 data_out_count)19*4882a593Smuzhiyun static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
20*4882a593Smuzhiyun {
21*4882a593Smuzhiyun int used, data_in_count;
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun data_in_count = wl->data_in_count;
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun if (data_in_count < data_out_count)
26*4882a593Smuzhiyun /* data_in_count has wrapped */
27*4882a593Smuzhiyun data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1;
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun used = data_in_count - data_out_count;
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun WARN_ON(used < 0);
32*4882a593Smuzhiyun WARN_ON(used > DP_TX_PACKET_RING_CHUNK_NUM);
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun if (used >= DP_TX_PACKET_RING_CHUNK_NUM)
35*4882a593Smuzhiyun return true;
36*4882a593Smuzhiyun else
37*4882a593Smuzhiyun return false;
38*4882a593Smuzhiyun }
39*4882a593Smuzhiyun
wl1251_tx_path_status(struct wl1251 * wl)40*4882a593Smuzhiyun static int wl1251_tx_path_status(struct wl1251 *wl)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun u32 status, addr, data_out_count;
43*4882a593Smuzhiyun bool busy;
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun addr = wl->data_path->tx_control_addr;
46*4882a593Smuzhiyun status = wl1251_mem_read32(wl, addr);
47*4882a593Smuzhiyun data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK;
48*4882a593Smuzhiyun busy = wl1251_tx_double_buffer_busy(wl, data_out_count);
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun if (busy)
51*4882a593Smuzhiyun return -EBUSY;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun return 0;
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun
wl1251_tx_id(struct wl1251 * wl,struct sk_buff * skb)56*4882a593Smuzhiyun static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun int i;
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
61*4882a593Smuzhiyun if (wl->tx_frames[i] == NULL) {
62*4882a593Smuzhiyun wl->tx_frames[i] = skb;
63*4882a593Smuzhiyun return i;
64*4882a593Smuzhiyun }
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun return -EBUSY;
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun
/*
 * Fill in the control word of the TX descriptor from mac80211's
 * per-frame tx info and the frame's 802.11 frame control field.
 */
static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr,
			      struct ieee80211_tx_info *control, u16 fc)
{
	/* Zero the whole bitfield-packed control word in one store. */
	*(u16 *)&tx_hdr->control = 0;

	tx_hdr->control.rate_policy = 0;

	/* 802.11 packets */
	tx_hdr->control.packet_type = 0;

	/* Also disable retry and ACK policy for injected packets */
	if ((control->flags & IEEE80211_TX_CTL_NO_ACK) ||
	    (control->flags & IEEE80211_TX_CTL_INJECTED)) {
		tx_hdr->control.rate_policy = 1;
		tx_hdr->control.ack_policy = 1;
	}

	/* Request a TX-complete entry for this frame. */
	tx_hdr->control.tx_complete = 1;

	/*
	 * NOTE(review): these are bitwise tests, not masked comparisons of
	 * the type/subtype fields, so any data subtype sharing a bit with
	 * QOS_DATA/QOS_NULLFUNC is marked QoS as well -- confirm this is
	 * what the firmware expects.
	 */
	if ((fc & IEEE80211_FTYPE_DATA) &&
	    ((fc & IEEE80211_STYPE_QOS_DATA) ||
	     (fc & IEEE80211_STYPE_QOS_NULLFUNC)))
		tx_hdr->control.qos = 1;
}
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun /* RSN + MIC = 8 + 8 = 16 bytes (worst case - AES). */
95*4882a593Smuzhiyun #define MAX_MSDU_SECURITY_LENGTH 16
96*4882a593Smuzhiyun #define MAX_MPDU_SECURITY_LENGTH 16
97*4882a593Smuzhiyun #define WLAN_QOS_HDR_LEN 26
98*4882a593Smuzhiyun #define MAX_MPDU_HEADER_AND_SECURITY (MAX_MPDU_SECURITY_LENGTH + \
99*4882a593Smuzhiyun WLAN_QOS_HDR_LEN)
100*4882a593Smuzhiyun #define HW_BLOCK_SIZE 252
/*
 * Compute how many HW_BLOCK_SIZE firmware memory blocks this frame will
 * occupy, accounting for fragmentation at IEEE80211_MAX_FRAG_THRESHOLD,
 * and store the result in tx_hdr->num_mem_blocks.
 */
static void wl1251_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr)
{
	u16 payload_len, frag_threshold, mem_blocks;
	u16 num_mpdus, mem_blocks_per_frag;

	frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
	tx_hdr->frag_threshold = cpu_to_le16(frag_threshold);

	/* Worst-case payload: frame body plus MSDU security overhead. */
	payload_len = le16_to_cpu(tx_hdr->length) + MAX_MSDU_SECURITY_LENGTH;

	if (payload_len > frag_threshold) {
		/* Frame will fragment: each full-size fragment carries the
		 * threshold bytes plus per-MPDU header/security overhead. */
		mem_blocks_per_frag =
			((frag_threshold + MAX_MPDU_HEADER_AND_SECURITY) /
			 HW_BLOCK_SIZE) + 1;
		num_mpdus = payload_len / frag_threshold;
		mem_blocks = num_mpdus * mem_blocks_per_frag;
		/* payload_len now holds the tail left for the last MPDU. */
		payload_len -= num_mpdus * frag_threshold;
		num_mpdus++;

	} else {
		/* Single MPDU, no fragmentation overhead. */
		mem_blocks_per_frag = 0;
		mem_blocks = 0;
		num_mpdus = 1;
	}

	/* Blocks for the last (or only) fragment. */
	mem_blocks += (payload_len / HW_BLOCK_SIZE) + 1;

	/* Extra headroom when fragmenting, capped at one fragment's worth. */
	if (num_mpdus > 1)
		mem_blocks += min(num_mpdus, mem_blocks_per_frag);

	tx_hdr->num_mem_blocks = mem_blocks;
}
133*4882a593Smuzhiyun
/*
 * Prepend the wl1251 double-buffer TX descriptor to @skb and fill it in
 * (length, rate, expiry, frame id, xmit queue, control bits, memory
 * block count).
 *
 * Returns 0 on success, -EINVAL for a NULL skb, or -EBUSY when no
 * tx_frames[] slot is free.
 */
static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
			      struct ieee80211_tx_info *control)
{
	struct tx_double_buffer_desc *tx_hdr;
	struct ieee80211_rate *rate;
	int id;
	u16 fc;

	if (!skb)
		return -EINVAL;

	/* Reserve a completion slot; its index is the frame id. */
	id = wl1251_tx_id(wl, skb);
	if (id < 0)
		return id;

	/* Grab the frame control word before skb_push() moves skb->data
	 * to the new descriptor in front of the 802.11 header. */
	fc = *(u16 *)skb->data;
	tx_hdr = skb_push(skb, sizeof(*tx_hdr));

	/* Length of the 802.11 frame only, excluding our descriptor. */
	tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
	rate = ieee80211_get_tx_rate(wl->hw, control);
	tx_hdr->rate = cpu_to_le16(rate->hw_value);
	tx_hdr->expiry_time = cpu_to_le32(1 << 16);
	tx_hdr->id = id;

	tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));

	wl1251_tx_control(tx_hdr, control, fc);
	wl1251_tx_frag_block_num(tx_hdr);

	return 0;
}
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun /* We copy the packet to the target */
/*
 * Copy one fully prepared frame (descriptor already prepended) into the
 * firmware's TX packet ring.  Handles TKIP IV space insertion and the
 * 4-byte DMA alignment requirement along the way.
 *
 * Returns 0 on success, -EINVAL for a NULL skb or a failed skb copy.
 */
static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
				 struct ieee80211_tx_info *control)
{
	struct tx_double_buffer_desc *tx_hdr;
	int len;
	u32 addr;

	if (!skb)
		return -EINVAL;

	tx_hdr = (struct tx_double_buffer_desc *) skb->data;

	if (control->control.hw_key &&
	    control->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen;
		__le16 fc;
		u16 length;
		u8 *pos;

		/* Make room for the firmware-written TKIP IV: bump the
		 * reported length, then shift descriptor + 802.11 header
		 * forward so the gap sits right after the header. */
		fc = *(__le16 *)(skb->data + sizeof(*tx_hdr));
		length = le16_to_cpu(tx_hdr->length) + WL1251_TKIP_IV_SPACE;
		tx_hdr->length = cpu_to_le16(length);

		hdrlen = ieee80211_hdrlen(fc);

		pos = skb_push(skb, WL1251_TKIP_IV_SPACE);
		memmove(pos, pos + WL1251_TKIP_IV_SPACE,
			sizeof(*tx_hdr) + hdrlen);
	}

	/* Revisit. This is a workaround for getting non-aligned packets.
	   This happens at least with EAPOL packets from the user space.
	   Our DMA requires packets to be aligned on a 4-byte boundary.
	*/
	if (unlikely((long)skb->data & 0x03)) {
		int offset = (4 - (long)skb->data) & 0x03;
		wl1251_debug(DEBUG_TX, "skb offset %d", offset);

		/* check whether the current skb can be used */
		if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
			/* Cannot shift in place: take an aligned-capable
			 * copy with 3 bytes of extra tailroom. */
			struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
								 GFP_KERNEL);

			if (unlikely(newskb == NULL))
				return -EINVAL;

			tx_hdr = (struct tx_double_buffer_desc *) newskb->data;

			/* Replace the tracked frame with the copy before
			 * dropping the original. */
			dev_kfree_skb_any(skb);
			wl->tx_frames[tx_hdr->id] = skb = newskb;

			offset = (4 - (long)skb->data) & 0x03;
			wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
		}

		/* align the buffer on a 4-byte boundary */
		if (offset) {
			unsigned char *src = skb->data;
			skb_reserve(skb, offset);
			/* Overlapping regions: memmove, not memcpy. */
			memmove(skb->data, src, skb->len);
			tx_hdr = (struct tx_double_buffer_desc *) skb->data;
		}
	}

	/* Our skb->data at this point includes the HW header */
	len = WL1251_TX_ALIGN(skb->len);

	/* Alternate between the two halves of the double buffer based on
	 * the parity of the host-side counter. */
	if (wl->data_in_count & 0x1)
		addr = wl->data_path->tx_packet_ring_addr +
			wl->data_path->tx_packet_ring_chunk_size;
	else
		addr = wl->data_path->tx_packet_ring_addr;

	wl1251_mem_write(wl, addr, skb->data, len);

	wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
		     "queue %d", tx_hdr->id, skb, tx_hdr->length,
		     tx_hdr->rate, tx_hdr->xmit_queue);

	return 0;
}
248*4882a593Smuzhiyun
wl1251_tx_trigger(struct wl1251 * wl)249*4882a593Smuzhiyun static void wl1251_tx_trigger(struct wl1251 *wl)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun u32 data, addr;
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun if (wl->data_in_count & 0x1) {
254*4882a593Smuzhiyun addr = ACX_REG_INTERRUPT_TRIG_H;
255*4882a593Smuzhiyun data = INTR_TRIG_TX_PROC1;
256*4882a593Smuzhiyun } else {
257*4882a593Smuzhiyun addr = ACX_REG_INTERRUPT_TRIG;
258*4882a593Smuzhiyun data = INTR_TRIG_TX_PROC0;
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun wl1251_reg_write32(wl, addr, data);
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun /* Bumping data in */
264*4882a593Smuzhiyun wl->data_in_count = (wl->data_in_count + 1) &
265*4882a593Smuzhiyun TX_STATUS_DATA_OUT_COUNT_MASK;
266*4882a593Smuzhiyun }
267*4882a593Smuzhiyun
enable_tx_for_packet_injection(struct wl1251 * wl)268*4882a593Smuzhiyun static void enable_tx_for_packet_injection(struct wl1251 *wl)
269*4882a593Smuzhiyun {
270*4882a593Smuzhiyun int ret;
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
273*4882a593Smuzhiyun wl->beacon_int, wl->dtim_period);
274*4882a593Smuzhiyun if (ret < 0) {
275*4882a593Smuzhiyun wl1251_warning("join failed");
276*4882a593Smuzhiyun return;
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
280*4882a593Smuzhiyun if (ret < 0) {
281*4882a593Smuzhiyun wl1251_warning("join timeout");
282*4882a593Smuzhiyun return;
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun wl->joined = true;
286*4882a593Smuzhiyun }
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun /* caller must hold wl->mutex */
/*
 * Push a single frame through the firmware TX path: program the default
 * key if needed, check for buffer space, prepend the descriptor, copy
 * the frame to the chip and trigger transmission.
 *
 * Returns 0 on success, -EBUSY when the firmware buffer is full (caller
 * should requeue), or another negative error.
 */
static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info;
	int ret = 0;
	u8 idx;

	info = IEEE80211_SKB_CB(skb);

	if (info->control.hw_key) {
		/* Hardware crypto is refused while a monitor interface
		 * is present. */
		if (unlikely(wl->monitor_present))
			return -EINVAL;

		idx = info->control.hw_key->hw_key_idx;
		/* Only reprogram the firmware default key on change. */
		if (unlikely(wl->default_key != idx)) {
			ret = wl1251_acx_default_key(wl, idx);
			if (ret < 0)
				return ret;
		}
	}

	/* Enable tx path in monitor mode for packet injection */
	if ((wl->vif == NULL) && !wl->joined)
		enable_tx_for_packet_injection(wl);

	/* -EBUSY here means the firmware double buffer is full. */
	ret = wl1251_tx_path_status(wl);
	if (ret < 0)
		return ret;

	ret = wl1251_tx_fill_hdr(wl, skb, info);
	if (ret < 0)
		return ret;

	ret = wl1251_tx_send_packet(wl, skb, info);
	if (ret < 0)
		return ret;

	wl1251_tx_trigger(wl);

	return ret;
}
329*4882a593Smuzhiyun
/*
 * Work item that drains wl->tx_queue.  Wakes the chip out of ELP once,
 * on the first frame, then transmits until the queue is empty, the
 * firmware buffer fills up (-EBUSY requeues the frame at the head), or
 * a fatal error drops the frame.
 */
void wl1251_tx_work(struct work_struct *work)
{
	struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
	struct sk_buff *skb;
	bool woken_up = false;
	int ret;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1251_STATE_OFF))
		goto out;

	while ((skb = skb_dequeue(&wl->tx_queue))) {
		if (!woken_up) {
			/* Lazy wakeup: only leave power save when there
			 * is actually something to send. */
			ret = wl1251_ps_elp_wakeup(wl);
			if (ret < 0)
				goto out;
			woken_up = true;
		}

		ret = wl1251_tx_frame(wl, skb);
		if (ret == -EBUSY) {
			/* Firmware buffer full: keep the frame for the
			 * next run, triggered from tx_complete. */
			skb_queue_head(&wl->tx_queue, skb);
			goto out;
		} else if (ret < 0) {
			dev_kfree_skb(skb);
			goto out;
		}
	}

out:
	if (woken_up)
		wl1251_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
}
366*4882a593Smuzhiyun
/*
 * Render the firmware TX status byte as a short flag string for debug
 * logging (one character per error bit; bit 0 is unused).
 *
 * Returns a pointer to a static buffer, so the result is only valid
 * until the next call and the helper is not reentrant.
 */
static const char *wl1251_tx_parse_status(u8 status)
{
	/* 8 bit status field, one character per bit plus null */
	static char buf[9];
	char *p = buf;

	if (status & TX_DMA_ERROR)
		*p++ = 'm';
	if (status & TX_DISABLED)
		*p++ = 'd';
	if (status & TX_RETRY_EXCEEDED)
		*p++ = 'r';
	if (status & TX_TIMEOUT)
		*p++ = 't';
	if (status & TX_KEY_NOT_FOUND)
		*p++ = 'k';
	if (status & TX_ENCRYPT_FAIL)
		*p++ = 'e';
	if (status & TX_UNAVAILABLE_PRIORITY)
		*p++ = 'p';

	*p = '\0';

	return buf;
}
394*4882a593Smuzhiyun
/*
 * Complete one transmitted frame: translate the firmware tx_result into
 * mac80211 status flags, strip our private descriptor (and the TKIP IV
 * space, if any) and return the skb to mac80211.
 */
static void wl1251_tx_packet_cb(struct wl1251 *wl,
				struct tx_result *result)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int hdrlen;
	u8 *frame;

	skb = wl->tx_frames[result->id];
	if (skb == NULL) {
		wl1251_error("SKB for packet %d is NULL", result->id);
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	/* Report an ACK only for frames that wanted one and succeeded. */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
	    (result->status == TX_SUCCESS))
		info->flags |= IEEE80211_TX_STAT_ACK;

	/* ack_failures retries plus the final (successful) attempt. */
	info->status.rates[0].count = result->ack_failures + 1;
	wl->stats.retry_count += result->ack_failures;

	/*
	 * We have to remove our private TX header before pushing
	 * the skb back to mac80211.
	 */
	frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		/* Undo the IV-space shift done in wl1251_tx_send_packet:
		 * move the 802.11 header back over the gap. */
		hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen);
		skb_pull(skb, WL1251_TKIP_IV_SPACE);
	}

	wl1251_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x (%s)",
		     result->id, skb, result->ack_failures, result->rate,
		     result->status, wl1251_tx_parse_status(result->status));


	/* mac80211 consumes (and eventually frees) the skb. */
	ieee80211_tx_status(wl->hw, skb);

	wl->tx_frames[result->id] = NULL;
}
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun /* Called upon reception of a TX complete interrupt */
/*
 * Handle a TX-complete interrupt: read the firmware's circular result
 * array, complete every fully written entry, restart queues if space
 * freed up, and write the cleared entries back to acknowledge them to
 * the firmware.
 */
void wl1251_tx_complete(struct wl1251 *wl)
{
	int i, result_index, num_complete = 0, queue_len;
	struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
	unsigned long flags;

	if (unlikely(wl->state != WL1251_STATE_ON))
		return;

	/* First we read the result */
	wl1251_mem_read(wl, wl->data_path->tx_complete_addr,
			result, sizeof(result));

	result_index = wl->next_tx_complete;

	/* Walk the ring from where we left off; an entry is valid only
	 * when both firmware done flags are set. */
	for (i = 0; i < ARRAY_SIZE(result); i++) {
		result_ptr = &result[result_index];

		if (result_ptr->done_1 == 1 &&
		    result_ptr->done_2 == 1) {
			wl1251_tx_packet_cb(wl, result_ptr);

			/* Clear the flags in our local copy; the cleared
			 * entries are written back below as the ack. */
			result_ptr->done_1 = 0;
			result_ptr->done_2 = 0;

			result_index = (result_index + 1) &
				(FW_TX_CMPLT_BLOCK_SIZE - 1);
			num_complete++;
		} else {
			break;
		}
	}

	queue_len = skb_queue_len(&wl->tx_queue);

	if ((num_complete > 0) && (queue_len > 0)) {
		/* firmware buffer has space, reschedule tx_work */
		wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	}

	if (wl->tx_queue_stopped &&
	    queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
		/* tx_queue has space, restart queues */
		wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
		spin_lock_irqsave(&wl->wl_lock, flags);
		ieee80211_wake_queues(wl->hw);
		wl->tx_queue_stopped = false;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	/* Every completed frame needs to be acknowledged */
	if (num_complete) {
		/*
		 * If we've wrapped, we have to clear
		 * the results in 2 steps.
		 */
		if (result_index > wl->next_tx_complete) {
			/* Only 1 write is needed */
			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr +
					 (wl->next_tx_complete *
					  sizeof(struct tx_result)),
					 &result[wl->next_tx_complete],
					 num_complete *
					 sizeof(struct tx_result));


		} else if (result_index < wl->next_tx_complete) {
			/* 2 writes are needed */
			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr +
					 (wl->next_tx_complete *
					  sizeof(struct tx_result)),
					 &result[wl->next_tx_complete],
					 (FW_TX_CMPLT_BLOCK_SIZE -
					  wl->next_tx_complete) *
					 sizeof(struct tx_result));

			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr,
					 result,
					 (num_complete -
					  FW_TX_CMPLT_BLOCK_SIZE +
					  wl->next_tx_complete) *
					 sizeof(struct tx_result));

		} else {
			/* We have to write the whole array */
			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr,
					 result,
					 FW_TX_CMPLT_BLOCK_SIZE *
					 sizeof(struct tx_result));
		}

	}

	wl->next_tx_complete = result_index;
}
543*4882a593Smuzhiyun
544*4882a593Smuzhiyun /* caller must hold wl->mutex */
wl1251_tx_flush(struct wl1251 * wl)545*4882a593Smuzhiyun void wl1251_tx_flush(struct wl1251 *wl)
546*4882a593Smuzhiyun {
547*4882a593Smuzhiyun int i;
548*4882a593Smuzhiyun struct sk_buff *skb;
549*4882a593Smuzhiyun struct ieee80211_tx_info *info;
550*4882a593Smuzhiyun
551*4882a593Smuzhiyun /* TX failure */
552*4882a593Smuzhiyun /* control->flags = 0; FIXME */
553*4882a593Smuzhiyun
554*4882a593Smuzhiyun while ((skb = skb_dequeue(&wl->tx_queue))) {
555*4882a593Smuzhiyun info = IEEE80211_SKB_CB(skb);
556*4882a593Smuzhiyun
557*4882a593Smuzhiyun wl1251_debug(DEBUG_TX, "flushing skb 0x%p", skb);
558*4882a593Smuzhiyun
559*4882a593Smuzhiyun if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
560*4882a593Smuzhiyun continue;
561*4882a593Smuzhiyun
562*4882a593Smuzhiyun ieee80211_tx_status(wl->hw, skb);
563*4882a593Smuzhiyun }
564*4882a593Smuzhiyun
565*4882a593Smuzhiyun for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
566*4882a593Smuzhiyun if (wl->tx_frames[i] != NULL) {
567*4882a593Smuzhiyun skb = wl->tx_frames[i];
568*4882a593Smuzhiyun info = IEEE80211_SKB_CB(skb);
569*4882a593Smuzhiyun
570*4882a593Smuzhiyun if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
571*4882a593Smuzhiyun continue;
572*4882a593Smuzhiyun
573*4882a593Smuzhiyun ieee80211_tx_status(wl->hw, skb);
574*4882a593Smuzhiyun wl->tx_frames[i] = NULL;
575*4882a593Smuzhiyun }
576*4882a593Smuzhiyun }
577