// SPDX-License-Identifier: GPL-2.0-or-later
/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 generic device routines.
 */
13*4882a593Smuzhiyun
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
23*4882a593Smuzhiyun
/*
 * Utility functions.
 */
rt2x00lib_get_bssidx(struct rt2x00_dev * rt2x00dev,struct ieee80211_vif * vif)27*4882a593Smuzhiyun u32 rt2x00lib_get_bssidx(struct rt2x00_dev *rt2x00dev,
28*4882a593Smuzhiyun struct ieee80211_vif *vif)
29*4882a593Smuzhiyun {
30*4882a593Smuzhiyun /*
31*4882a593Smuzhiyun * When in STA mode, bssidx is always 0 otherwise local_address[5]
32*4882a593Smuzhiyun * contains the bss number, see BSS_ID_MASK comments for details.
33*4882a593Smuzhiyun */
34*4882a593Smuzhiyun if (rt2x00dev->intf_sta_count)
35*4882a593Smuzhiyun return 0;
36*4882a593Smuzhiyun return vif->addr[5] & (rt2x00dev->ops->max_ap_intf - 1);
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(rt2x00lib_get_bssidx);
39*4882a593Smuzhiyun
/*
 * Radio control handlers.
 */
/*
 * Bring the radio up: queues, radio state, interrupts and LEDs.
 * Returns 0 on success (or when the radio was already enabled),
 * otherwise the error returned by set_device_state().
 */
int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	int status;

	/*
	 * Don't enable the radio twice.
	 * And check if the hardware button has been disabled.
	 */
	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return 0;

	/*
	 * Initialize all data queues.
	 */
	rt2x00queue_init_queues(rt2x00dev);

	/*
	 * Enable radio.
	 */
	status =
	    rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_ON);
	if (status)
		return status;

	/* Radio is up; interrupts can now be enabled. */
	rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_ON);

	rt2x00leds_led_radio(rt2x00dev, true);
	rt2x00led_led_activity(rt2x00dev, true);

	set_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags);

	/*
	 * Enable queues.
	 */
	rt2x00queue_start_queues(rt2x00dev);
	rt2x00link_start_tuner(rt2x00dev);

	/*
	 * Start watchdog monitoring.
	 */
	rt2x00link_start_watchdog(rt2x00dev);

	return 0;
}
87*4882a593Smuzhiyun
/*
 * Tear the radio down in reverse order of rt2x00lib_enable_radio():
 * watchdog, tuner/queues, radio state, interrupts and LEDs.
 * No-op when the radio was not enabled.
 */
void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	if (!test_and_clear_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return;

	/*
	 * Stop watchdog monitoring.
	 */
	rt2x00link_stop_watchdog(rt2x00dev);

	/*
	 * Stop all queues and flush any pending frames (drop = true).
	 */
	rt2x00link_stop_tuner(rt2x00dev);
	rt2x00queue_stop_queues(rt2x00dev);
	rt2x00queue_flush_queues(rt2x00dev, true);

	/*
	 * Disable radio.
	 */
	rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF);
	rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF);
	rt2x00led_led_activity(rt2x00dev, false);
	rt2x00leds_led_radio(rt2x00dev, false);
}
113*4882a593Smuzhiyun
/*
 * Per-interface worker: perform the delayed configuration requests
 * flagged on the interface (currently only beacon updates).
 */
static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct rt2x00_dev *rt2x00dev = data;
	struct rt2x00_intf *intf = vif_to_intf(vif);

	/*
	 * The radio may have been disabled while this work was scheduled;
	 * bail out immediately in that case. Note that in the spinlock
	 * protected area above the delayed_flags have been cleared
	 * correctly.
	 */
	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return;

	if (!test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags))
		return;

	mutex_lock(&intf->beacon_skb_mutex);
	rt2x00queue_update_beacon(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);
}
135*4882a593Smuzhiyun
/*
 * Work handler for rt2x00dev->intf_work: walk every active interface
 * and apply its pending delayed configuration.
 */
static void rt2x00lib_intf_scheduled(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, intf_work);

	/*
	 * Iterate over each interface and perform the
	 * requested configurations.
	 */
	ieee80211_iterate_active_interfaces(rt2x00dev->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    rt2x00lib_intf_scheduled_iter,
					    rt2x00dev);
}
150*4882a593Smuzhiyun
rt2x00lib_autowakeup(struct work_struct * work)151*4882a593Smuzhiyun static void rt2x00lib_autowakeup(struct work_struct *work)
152*4882a593Smuzhiyun {
153*4882a593Smuzhiyun struct rt2x00_dev *rt2x00dev =
154*4882a593Smuzhiyun container_of(work, struct rt2x00_dev, autowakeup_work.work);
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
157*4882a593Smuzhiyun return;
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
160*4882a593Smuzhiyun rt2x00_err(rt2x00dev, "Device failed to wakeup\n");
161*4882a593Smuzhiyun clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun
/*
 * Interrupt context handlers.
 */
/*
 * Per-interface beacondone helper: transmit all buffered broad- and
 * multicast frames for an AP interface.
 */
static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct ieee80211_tx_control control = {};
	struct rt2x00_dev *rt2x00dev = data;
	struct sk_buff *skb;

	/*
	 * Only AP mode interfaces do broad- and multicast buffering
	 */
	if (vif->type != NL80211_IFTYPE_AP)
		return;

	/*
	 * Send out buffered broad- and multicast frames
	 */
	while ((skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif)) != NULL)
		rt2x00mac_tx(rt2x00dev->hw, &control, skb);
}
189*4882a593Smuzhiyun
/*
 * Per-interface helper: refresh the beacon for interface types that
 * transmit beacons (AP, ad-hoc, mesh, WDS).
 */
static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct rt2x00_dev *rt2x00dev = data;

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_WDS:
		break;
	default:
		return;
	}

	/*
	 * Update the beacon without locking. This is safe on PCI devices
	 * as they only update the beacon periodically here. This should
	 * never be called for USB devices.
	 */
	WARN_ON(rt2x00_is_usb(rt2x00dev));
	rt2x00queue_update_beacon(rt2x00dev, vif);
}
209*4882a593Smuzhiyun
/*
 * Beacon-done interrupt handler: flush buffered bc/mc frames and,
 * for devices without a pre-TBTT interrupt, fetch the next beacon.
 */
void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
{
	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return;

	/* send buffered bc/mc frames out for every bssid */
	ieee80211_iterate_active_interfaces_atomic(
		rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		rt2x00lib_bc_buffer_iter, rt2x00dev);
	/*
	 * Devices with pre tbtt interrupt don't need to update the beacon
	 * here as they will fetch the next beacon directly prior to
	 * transmission.
	 */
	if (rt2x00_has_cap_pre_tbtt_interrupt(rt2x00dev))
		return;

	/* fetch next beacon */
	ieee80211_iterate_active_interfaces_atomic(
		rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		rt2x00lib_beaconupdate_iter, rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
233*4882a593Smuzhiyun
/*
 * Pre-TBTT interrupt handler: fetch the next beacon for all
 * beaconing interfaces just before the beacon transmission time.
 */
void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
{
	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return;

	/* fetch next beacon */
	ieee80211_iterate_active_interfaces_atomic(
		rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		rt2x00lib_beaconupdate_iter, rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
245*4882a593Smuzhiyun
/*
 * Mark the queue entry as owned by the device and advance Q_INDEX,
 * signalling that DMA for this entry has been started.
 */
void rt2x00lib_dmastart(struct queue_entry *entry)
{
	set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
	rt2x00queue_index_inc(entry, Q_INDEX);
}
EXPORT_SYMBOL_GPL(rt2x00lib_dmastart);
252*4882a593Smuzhiyun
/*
 * Mark DMA for the entry as finished: the device no longer owns the
 * data, a status report is now pending, and Q_INDEX_DMA_DONE advances.
 */
void rt2x00lib_dmadone(struct queue_entry *entry)
{
	set_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags);
	clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
	rt2x00queue_index_inc(entry, Q_INDEX_DMA_DONE);
}
EXPORT_SYMBOL_GPL(rt2x00lib_dmadone);
260*4882a593Smuzhiyun
/*
 * If @entry holds a BAR frame, remove it from the BAR check list and
 * return whether the RX path observed a matching BA (1) or not (0).
 * Returns 0 for non-BAR frames.
 */
static inline int rt2x00lib_txdone_bar_status(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) entry->skb->data;
	struct rt2x00_bar_list_entry *bar_entry;
	int ret;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return 0;

	/*
	 * Unlike all other frames, the status report for BARs does
	 * not directly come from the hardware as it is incapable of
	 * matching a BA to a previously send BAR. The hardware will
	 * report all BARs as if they weren't acked at all.
	 *
	 * Instead the RX-path will scan for incoming BAs and set the
	 * block_acked flag if it sees one that was likely caused by
	 * a BAR from us.
	 *
	 * Remove remaining BARs here and return their status for
	 * TX done processing.
	 */
	ret = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(bar_entry, &rt2x00dev->bar_list, list) {
		if (bar_entry->entry != entry)
			continue;

		/* Writers of bar_list are serialized by bar_list_lock. */
		spin_lock_bh(&rt2x00dev->bar_list_lock);
		/* Return whether this BAR was blockacked or not */
		ret = bar_entry->block_acked;
		/* Remove the BAR from our checklist */
		list_del_rcu(&bar_entry->list);
		spin_unlock_bh(&rt2x00dev->bar_list_lock);
		kfree_rcu(bar_entry, head);

		break;
	}
	rcu_read_unlock();

	return ret;
}
304*4882a593Smuzhiyun
/*
 * Translate the rt2x00 TX done descriptor into the mac80211 TX status
 * stored in @tx_info. @success indicates whether the frame was
 * successfully transmitted (acked or block-acked).
 */
static void rt2x00lib_fill_tx_status(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_tx_info *tx_info,
				     struct skb_frame_desc *skbdesc,
				     struct txdone_entry_desc *txdesc,
				     bool success)
{
	u8 rate_idx, rate_flags, retry_rates;
	int i;

	rate_idx = skbdesc->tx_rate_idx;
	rate_flags = skbdesc->tx_rate_flags;
	/* Without rate fallback the frame was sent at a single rate. */
	retry_rates = test_bit(TXDONE_FALLBACK, &txdesc->flags) ?
	    (txdesc->retry + 1) : 1;

	/*
	 * Initialize TX status
	 */
	memset(&tx_info->status, 0, sizeof(tx_info->status));
	tx_info->status.ack_signal = 0;

	/*
	 * Frame was sent with retries, hardware tried
	 * different rates to send out the frame, at each
	 * retry it lowered the rate 1 step except when the
	 * lowest rate was used.
	 */
	for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) {
		tx_info->status.rates[i].idx = rate_idx - i;
		tx_info->status.rates[i].flags = rate_flags;

		if (rate_idx - i == 0) {
			/*
			 * The lowest rate (index 0) was used until the
			 * number of max retries was reached.
			 */
			tx_info->status.rates[i].count = retry_rates - i;
			i++;
			break;
		}
		tx_info->status.rates[i].count = 1;
	}
	if (i < (IEEE80211_TX_MAX_RATES - 1))
		tx_info->status.rates[i].idx = -1; /* terminate */

	if (test_bit(TXDONE_NO_ACK_REQ, &txdesc->flags))
		tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;

	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		if (success)
			tx_info->flags |= IEEE80211_TX_STAT_ACK;
		else
			rt2x00dev->low_level_stats.dot11ACKFailureCount++;
	}

	/*
	 * Every single frame has it's own tx status, hence report
	 * every frame as ampdu of size 1.
	 *
	 * TODO: if we can find out how many frames were aggregated
	 * by the hw we could provide the real ampdu_len to mac80211
	 * which would allow the rc algorithm to better decide on
	 * which rates are suitable.
	 */
	if (test_bit(TXDONE_AMPDU, &txdesc->flags) ||
	    tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU |
				  IEEE80211_TX_CTL_AMPDU;
		tx_info->status.ampdu_len = 1;
		tx_info->status.ampdu_ack_len = success ? 1 : 0;
	}

	if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		if (success)
			rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
		else
			rt2x00dev->low_level_stats.dot11RTSFailureCount++;
	}
}
383*4882a593Smuzhiyun
/*
 * Release a completed queue entry for reuse and, if the queue dropped
 * below its threshold, unpause the corresponding mac80211 queue.
 */
static void rt2x00lib_clear_entry(struct rt2x00_dev *rt2x00dev,
				  struct queue_entry *entry)
{
	/*
	 * Make this entry available for reuse.
	 */
	entry->skb = NULL;
	entry->flags = 0;

	rt2x00dev->ops->lib->clear_entry(entry);

	rt2x00queue_index_inc(entry, Q_INDEX_DONE);

	/*
	 * If the data queue was below the threshold before the txdone
	 * handler we must make sure the packet queue in the mac80211 stack
	 * is reenabled when the txdone handler has finished. This has to be
	 * serialized with rt2x00mac_tx(), otherwise we can wake up queue
	 * before it was stopped.
	 */
	spin_lock_bh(&entry->queue->tx_lock);
	if (!rt2x00queue_threshold(entry->queue))
		rt2x00queue_unpause_queue(entry->queue);
	spin_unlock_bh(&entry->queue->tx_lock);
}
409*4882a593Smuzhiyun
/*
 * Complete a TX entry and report its status via
 * ieee80211_tx_status_noskb() (the skb is freed here rather than
 * handed back to mac80211).
 */
void rt2x00lib_txdone_nomatch(struct queue_entry *entry,
			      struct txdone_entry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct ieee80211_tx_info txinfo = {};
	bool success;

	/*
	 * Unmap the skb.
	 */
	rt2x00queue_unmap_skb(entry);

	/*
	 * Signal that the TX descriptor is no longer in the skb.
	 */
	skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;

	/*
	 * Send frame to debugfs immediately, after this call is completed
	 * we are going to overwrite the skb->cb array.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);

	/*
	 * Determine if the frame has been successfully transmitted and
	 * remove BARs from our check list while checking for their
	 * TX status.
	 */
	success =
	    rt2x00lib_txdone_bar_status(entry) ||
	    test_bit(TXDONE_SUCCESS, &txdesc->flags);

	if (!test_bit(TXDONE_UNKNOWN, &txdesc->flags)) {
		/*
		 * Update TX statistics.
		 */
		rt2x00dev->link.qual.tx_success += success;
		rt2x00dev->link.qual.tx_failed += !success;

		rt2x00lib_fill_tx_status(rt2x00dev, &txinfo, skbdesc, txdesc,
					 success);
		ieee80211_tx_status_noskb(rt2x00dev->hw, skbdesc->sta, &txinfo);
	}

	dev_kfree_skb_any(entry->skb);
	rt2x00lib_clear_entry(rt2x00dev, entry);
}
EXPORT_SYMBOL_GPL(rt2x00lib_txdone_nomatch);
459*4882a593Smuzhiyun
/*
 * Main TX completion handler: restore the skb to the state mac80211
 * handed it to us in (headroom, L2 pad, IV/EIV), fill in the TX status
 * and report it back to mac80211 (or free the skb for frames that did
 * not originate there).
 */
void rt2x00lib_txdone(struct queue_entry *entry,
		      struct txdone_entry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	/* Saved up front: skb->cb is overwritten by the status below. */
	u8 skbdesc_flags = skbdesc->flags;
	unsigned int header_length;
	bool success;

	/*
	 * Unmap the skb.
	 */
	rt2x00queue_unmap_skb(entry);

	/*
	 * Remove the extra tx headroom from the skb.
	 */
	skb_pull(entry->skb, rt2x00dev->extra_tx_headroom);

	/*
	 * Signal that the TX descriptor is no longer in the skb.
	 */
	skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;

	/*
	 * Determine the length of 802.11 header.
	 */
	header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Remove L2 padding which was added during frame preparation.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_remove_l2pad(entry->skb, header_length);

	/*
	 * If the IV/EIV data was stripped from the frame before it was
	 * passed to the hardware, we should now reinsert it again because
	 * mac80211 will expect the same data to be present in the
	 * frame as it was passed to us.
	 */
	if (rt2x00_has_cap_hw_crypto(rt2x00dev))
		rt2x00crypto_tx_insert_iv(entry->skb, header_length);

	/*
	 * Send frame to debugfs immediately, after this call is completed
	 * we are going to overwrite the skb->cb array.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);

	/*
	 * Determine if the frame has been successfully transmitted and
	 * remove BARs from our check list while checking for their
	 * TX status.
	 */
	success =
	    rt2x00lib_txdone_bar_status(entry) ||
	    test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
	    test_bit(TXDONE_UNKNOWN, &txdesc->flags);

	/*
	 * Update TX statistics.
	 */
	rt2x00dev->link.qual.tx_success += success;
	rt2x00dev->link.qual.tx_failed += !success;

	rt2x00lib_fill_tx_status(rt2x00dev, tx_info, skbdesc, txdesc, success);

	/*
	 * Only send the status report to mac80211 when it's a frame
	 * that originated in mac80211. If this was a extra frame coming
	 * through a mac80211 library call (RTS/CTS) then we should not
	 * send the status report back.
	 */
	if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
		if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT))
			ieee80211_tx_status(rt2x00dev->hw, entry->skb);
		else
			ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
	} else {
		dev_kfree_skb_any(entry->skb);
	}

	rt2x00lib_clear_entry(rt2x00dev, entry);
}
EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
547*4882a593Smuzhiyun
/*
 * TX completion helper for callers that only know the status bit:
 * build a minimal TX done descriptor and forward to rt2x00lib_txdone().
 */
void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status)
{
	struct txdone_entry_desc txdesc = { .flags = 0, .retry = 0 };

	__set_bit(status, &txdesc.flags);

	rt2x00lib_txdone(entry, &txdesc);
}
EXPORT_SYMBOL_GPL(rt2x00lib_txdone_noinfo);
559*4882a593Smuzhiyun
/*
 * Scan the variable part of a beacon frame for information element @ie.
 * @data points to the full management frame, @len is its length
 * (the caller passes the length without the FCS).
 *
 * Returns a pointer to the start of the element (id byte) or NULL if
 * the element is absent or the frame is truncated.
 */
static u8 *rt2x00lib_find_ie(u8 *data, unsigned int len, u8 ie)
{
	struct ieee80211_mgmt *mgmt = (void *)data;
	u8 *pos, *end;

	pos = (u8 *)mgmt->u.beacon.variable;
	end = data + len;
	while (pos < end) {
		/*
		 * Make sure both the id and length bytes are within the
		 * frame before reading pos[1], then verify that the
		 * element payload does not run past the end of the frame.
		 */
		if (pos + 2 > end || pos + 2 + pos[1] > end)
			return NULL;

		if (pos[0] == ie)
			return pos;

		pos += 2 + pos[1];
	}

	return NULL;
}
579*4882a593Smuzhiyun
rt2x00lib_sleep(struct work_struct * work)580*4882a593Smuzhiyun static void rt2x00lib_sleep(struct work_struct *work)
581*4882a593Smuzhiyun {
582*4882a593Smuzhiyun struct rt2x00_dev *rt2x00dev =
583*4882a593Smuzhiyun container_of(work, struct rt2x00_dev, sleep_work);
584*4882a593Smuzhiyun
585*4882a593Smuzhiyun if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
586*4882a593Smuzhiyun return;
587*4882a593Smuzhiyun
588*4882a593Smuzhiyun /*
589*4882a593Smuzhiyun * Check again is powersaving is enabled, to prevent races from delayed
590*4882a593Smuzhiyun * work execution.
591*4882a593Smuzhiyun */
592*4882a593Smuzhiyun if (!test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
593*4882a593Smuzhiyun rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf,
594*4882a593Smuzhiyun IEEE80211_CONF_CHANGE_PS);
595*4882a593Smuzhiyun }
596*4882a593Smuzhiyun
/*
 * RX-path helper: when a BA frame arrives, look for a matching BAR we
 * previously sent (same start sequence, TID and reversed addresses)
 * and mark it as block-acked so TX done processing can report success.
 */
static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev,
				      struct sk_buff *skb,
				      struct rxdone_entry_desc *rxdesc)
{
	struct rt2x00_bar_list_entry *entry;
	struct ieee80211_bar *ba = (void *)skb->data;

	if (likely(!ieee80211_is_back(ba->frame_control)))
		return;

	/* Frame too short to contain a complete BA; ignore it. */
	if (rxdesc->size < sizeof(*ba) + FCS_LEN)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &rt2x00dev->bar_list, list) {

		if (ba->start_seq_num != entry->start_seq_num)
			continue;

#define TID_CHECK(a, b) (						\
	((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) ==	\
	((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)))		\

		if (!TID_CHECK(ba->control, entry->control))
			continue;

#undef TID_CHECK

		/* The BA's RA/TA are the BAR's TA/RA swapped. */
		if (!ether_addr_equal_64bits(ba->ra, entry->ta))
			continue;

		if (!ether_addr_equal_64bits(ba->ta, entry->ra))
			continue;

		/* Mark BAR since we received the according BA */
		spin_lock_bh(&rt2x00dev->bar_list_lock);
		entry->block_acked = 1;
		spin_unlock_bh(&rt2x00dev->bar_list_lock);
		break;
	}
	rcu_read_unlock();

}
640*4882a593Smuzhiyun
/*
 * RX-path powersave helper: parse a received beacon from our BSS and,
 * when its TIM shows no pending traffic for us, schedule sleep_work to
 * put the device back to sleep.
 */
static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
				      struct sk_buff *skb,
				      struct rxdone_entry_desc *rxdesc)
{
	struct ieee80211_hdr *hdr = (void *) skb->data;
	struct ieee80211_tim_ie *tim_ie;
	u8 *tim;
	u8 tim_len;
	bool cam;	/* "continuous active mode": stay awake */

	/* If this is not a beacon, or if mac80211 has no powersaving
	 * configured, or if the device is already in powersaving mode
	 * we can exit now. */
	if (likely(!ieee80211_is_beacon(hdr->frame_control) ||
		   !(rt2x00dev->hw->conf.flags & IEEE80211_CONF_PS)))
		return;

	/* min. beacon length + FCS_LEN */
	if (skb->len <= 40 + FCS_LEN)
		return;

	/* and only beacons from the associated BSSID, please */
	if (!(rxdesc->dev_flags & RXDONE_MY_BSS) ||
	    !rt2x00dev->aid)
		return;

	rt2x00dev->last_beacon = jiffies;

	tim = rt2x00lib_find_ie(skb->data, skb->len - FCS_LEN, WLAN_EID_TIM);
	if (!tim)
		return;

	/* TIM element must be large enough for a full ieee80211_tim_ie. */
	if (tim[1] < sizeof(*tim_ie))
		return;

	tim_len = tim[1];
	tim_ie = (struct ieee80211_tim_ie *) &tim[2];

	/* Check whether the PHY can be turned off again. */

	/* 1. What about buffered unicast traffic for our AID? */
	cam = ieee80211_check_tim(tim_ie, tim_len, rt2x00dev->aid);

	/* 2. Maybe the AP wants to send multicast/broadcast data? */
	cam |= (tim_ie->bitmap_ctrl & 0x01);

	if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
		queue_work(rt2x00dev->workqueue, &rt2x00dev->sleep_work);
}
690*4882a593Smuzhiyun
rt2x00lib_rxdone_read_signal(struct rt2x00_dev * rt2x00dev,struct rxdone_entry_desc * rxdesc)691*4882a593Smuzhiyun static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
692*4882a593Smuzhiyun struct rxdone_entry_desc *rxdesc)
693*4882a593Smuzhiyun {
694*4882a593Smuzhiyun struct ieee80211_supported_band *sband;
695*4882a593Smuzhiyun const struct rt2x00_rate *rate;
696*4882a593Smuzhiyun unsigned int i;
697*4882a593Smuzhiyun int signal = rxdesc->signal;
698*4882a593Smuzhiyun int type = (rxdesc->dev_flags & RXDONE_SIGNAL_MASK);
699*4882a593Smuzhiyun
700*4882a593Smuzhiyun switch (rxdesc->rate_mode) {
701*4882a593Smuzhiyun case RATE_MODE_CCK:
702*4882a593Smuzhiyun case RATE_MODE_OFDM:
703*4882a593Smuzhiyun /*
704*4882a593Smuzhiyun * For non-HT rates the MCS value needs to contain the
705*4882a593Smuzhiyun * actually used rate modulation (CCK or OFDM).
706*4882a593Smuzhiyun */
707*4882a593Smuzhiyun if (rxdesc->dev_flags & RXDONE_SIGNAL_MCS)
708*4882a593Smuzhiyun signal = RATE_MCS(rxdesc->rate_mode, signal);
709*4882a593Smuzhiyun
710*4882a593Smuzhiyun sband = &rt2x00dev->bands[rt2x00dev->curr_band];
711*4882a593Smuzhiyun for (i = 0; i < sband->n_bitrates; i++) {
712*4882a593Smuzhiyun rate = rt2x00_get_rate(sband->bitrates[i].hw_value);
713*4882a593Smuzhiyun if (((type == RXDONE_SIGNAL_PLCP) &&
714*4882a593Smuzhiyun (rate->plcp == signal)) ||
715*4882a593Smuzhiyun ((type == RXDONE_SIGNAL_BITRATE) &&
716*4882a593Smuzhiyun (rate->bitrate == signal)) ||
717*4882a593Smuzhiyun ((type == RXDONE_SIGNAL_MCS) &&
718*4882a593Smuzhiyun (rate->mcs == signal))) {
719*4882a593Smuzhiyun return i;
720*4882a593Smuzhiyun }
721*4882a593Smuzhiyun }
722*4882a593Smuzhiyun break;
723*4882a593Smuzhiyun case RATE_MODE_HT_MIX:
724*4882a593Smuzhiyun case RATE_MODE_HT_GREENFIELD:
725*4882a593Smuzhiyun if (signal >= 0 && signal <= 76)
726*4882a593Smuzhiyun return signal;
727*4882a593Smuzhiyun break;
728*4882a593Smuzhiyun default:
729*4882a593Smuzhiyun break;
730*4882a593Smuzhiyun }
731*4882a593Smuzhiyun
732*4882a593Smuzhiyun rt2x00_warn(rt2x00dev, "Frame received with unrecognized signal, mode=0x%.4x, signal=0x%.4x, type=%d\n",
733*4882a593Smuzhiyun rxdesc->rate_mode, signal, type);
734*4882a593Smuzhiyun return 0;
735*4882a593Smuzhiyun }
736*4882a593Smuzhiyun
/*
 * rt2x00lib_rxdone - Process a completed RX queue entry.
 * @entry: RX queue entry whose transfer finished
 * @gfp: allocation flags for the replacement skb
 *
 * Extracts the RX descriptor, repairs the frame (re-inserting stripped
 * crypto IV data or removing L2 padding), feeds powersave and BlockAck
 * bookkeeping, fills mac80211's rx_status and hands the frame over via
 * ieee80211_rx_ni(). The entry's skb is replaced with a freshly
 * allocated one; when that allocation fails the received frame is
 * dropped and the existing skb is reused. In all cases the entry is
 * resubmitted to the hardware when the radio is still enabled.
 */
void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct rxdone_entry_desc rxdesc;
	struct sk_buff *skb;
	struct ieee80211_rx_status *rx_status;
	unsigned int header_length;
	int rate_idx;

	/* Device gone or radio disabled: just recycle the entry. */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
	    !test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		goto submit_entry;

	if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
		goto submit_entry;

	/*
	 * Allocate a new sk_buffer. If no new buffer available, drop the
	 * received frame and reuse the existing buffer.
	 */
	skb = rt2x00queue_alloc_rxskb(entry, gfp);
	if (!skb)
		goto submit_entry;

	/*
	 * Unmap the skb.
	 */
	rt2x00queue_unmap_skb(entry);

	/*
	 * Extract the RXD details.
	 */
	memset(&rxdesc, 0, sizeof(rxdesc));
	rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);

	/*
	 * Check for valid size in case we get corrupted descriptor from
	 * hardware.
	 */
	if (unlikely(rxdesc.size == 0 ||
		     rxdesc.size > entry->queue->data_size)) {
		rt2x00_err(rt2x00dev, "Wrong frame size %d max %d\n",
			   rxdesc.size, entry->queue->data_size);
		dev_kfree_skb(entry->skb);
		goto renew_skb;
	}

	/*
	 * The data behind the ieee80211 header must be
	 * aligned on a 4 byte boundary.
	 */
	header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Hardware might have stripped the IV/EIV/ICV data,
	 * in that case it is possible that the data was
	 * provided separately (through hardware descriptor)
	 * in which case we should reinsert the data into the frame.
	 */
	if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) &&
	    (rxdesc.flags & RX_FLAG_IV_STRIPPED))
		rt2x00crypto_rx_insert_iv(entry->skb, header_length,
					  &rxdesc);
	else if (header_length &&
		 (rxdesc.size > header_length) &&
		 (rxdesc.dev_flags & RXDONE_L2PAD))
		rt2x00queue_remove_l2pad(entry->skb, header_length);

	/* Trim buffer to correct size */
	skb_trim(entry->skb, rxdesc.size);

	/*
	 * Translate the signal to the correct bitrate index.
	 */
	rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc);
	if (rxdesc.rate_mode == RATE_MODE_HT_MIX ||
	    rxdesc.rate_mode == RATE_MODE_HT_GREENFIELD)
		rxdesc.encoding = RX_ENC_HT;

	/*
	 * Check if this is a beacon, and more frames have been
	 * buffered while we were in powersaving mode.
	 */
	rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc);

	/*
	 * Check for incoming BlockAcks to match to the BlockAckReqs
	 * we've send out.
	 */
	rt2x00lib_rxdone_check_ba(rt2x00dev, entry->skb, &rxdesc);

	/*
	 * Update extra components
	 */
	rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
	rt2x00debug_update_crypto(rt2x00dev, &rxdesc);
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry);

	/*
	 * Initialize RX status information, and send frame
	 * to mac80211.
	 */
	rx_status = IEEE80211_SKB_RXCB(entry->skb);

	/* Ensure that all fields of rx_status are initialized
	 * properly. The skb->cb array was used for driver
	 * specific informations, so rx_status might contain
	 * garbage.
	 */
	memset(rx_status, 0, sizeof(*rx_status));

	rx_status->mactime = rxdesc.timestamp;
	rx_status->band = rt2x00dev->curr_band;
	rx_status->freq = rt2x00dev->curr_freq;
	rx_status->rate_idx = rate_idx;
	rx_status->signal = rxdesc.rssi;
	rx_status->flag = rxdesc.flags;
	rx_status->enc_flags = rxdesc.enc_flags;
	rx_status->encoding = rxdesc.encoding;
	rx_status->bw = rxdesc.bw;
	rx_status->antenna = rt2x00dev->link.ant.active.rx;

	ieee80211_rx_ni(rt2x00dev->hw, entry->skb);

renew_skb:
	/*
	 * Replace the skb with the freshly allocated one.
	 */
	entry->skb = skb;

submit_entry:
	entry->flags = 0;
	rt2x00queue_index_inc(entry, Q_INDEX_DONE);
	if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
	    test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2x00dev->ops->lib->clear_entry(entry);
}
EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
875*4882a593Smuzhiyun
876*4882a593Smuzhiyun /*
877*4882a593Smuzhiyun * Driver initialization handlers.
878*4882a593Smuzhiyun */
/*
 * Table of all legacy rates supported by rt2x00 hardware.
 * Entries 0-3 are the CCK (802.11b) rates, entries 4-11 the OFDM
 * (802.11a/g) rates. The .bitrate values follow the mac80211
 * convention of units of 100 kbit/s.
 */
const struct rt2x00_rate rt2x00_supported_rates[12] = {
	{
		/* 1 Mbps CCK */
		.flags = DEV_RATE_CCK,
		.bitrate = 10,
		.ratemask = BIT(0),
		.plcp = 0x00,
		.mcs = RATE_MCS(RATE_MODE_CCK, 0),
	},
	{
		/* 2 Mbps CCK */
		.flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE,
		.bitrate = 20,
		.ratemask = BIT(1),
		.plcp = 0x01,
		.mcs = RATE_MCS(RATE_MODE_CCK, 1),
	},
	{
		/* 5.5 Mbps CCK */
		.flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE,
		.bitrate = 55,
		.ratemask = BIT(2),
		.plcp = 0x02,
		.mcs = RATE_MCS(RATE_MODE_CCK, 2),
	},
	{
		/* 11 Mbps CCK */
		.flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE,
		.bitrate = 110,
		.ratemask = BIT(3),
		.plcp = 0x03,
		.mcs = RATE_MCS(RATE_MODE_CCK, 3),
	},
	{
		/* 6 Mbps OFDM */
		.flags = DEV_RATE_OFDM,
		.bitrate = 60,
		.ratemask = BIT(4),
		.plcp = 0x0b,
		.mcs = RATE_MCS(RATE_MODE_OFDM, 0),
	},
	{
		/* 9 Mbps OFDM */
		.flags = DEV_RATE_OFDM,
		.bitrate = 90,
		.ratemask = BIT(5),
		.plcp = 0x0f,
		.mcs = RATE_MCS(RATE_MODE_OFDM, 1),
	},
	{
		/* 12 Mbps OFDM */
		.flags = DEV_RATE_OFDM,
		.bitrate = 120,
		.ratemask = BIT(6),
		.plcp = 0x0a,
		.mcs = RATE_MCS(RATE_MODE_OFDM, 2),
	},
	{
		/* 18 Mbps OFDM */
		.flags = DEV_RATE_OFDM,
		.bitrate = 180,
		.ratemask = BIT(7),
		.plcp = 0x0e,
		.mcs = RATE_MCS(RATE_MODE_OFDM, 3),
	},
	{
		/* 24 Mbps OFDM */
		.flags = DEV_RATE_OFDM,
		.bitrate = 240,
		.ratemask = BIT(8),
		.plcp = 0x09,
		.mcs = RATE_MCS(RATE_MODE_OFDM, 4),
	},
	{
		/* 36 Mbps OFDM */
		.flags = DEV_RATE_OFDM,
		.bitrate = 360,
		.ratemask = BIT(9),
		.plcp = 0x0d,
		.mcs = RATE_MCS(RATE_MODE_OFDM, 5),
	},
	{
		/* 48 Mbps OFDM */
		.flags = DEV_RATE_OFDM,
		.bitrate = 480,
		.ratemask = BIT(10),
		.plcp = 0x08,
		.mcs = RATE_MCS(RATE_MODE_OFDM, 6),
	},
	{
		/* 54 Mbps OFDM */
		.flags = DEV_RATE_OFDM,
		.bitrate = 540,
		.ratemask = BIT(11),
		.plcp = 0x0c,
		.mcs = RATE_MCS(RATE_MODE_OFDM, 7),
	},
};
965*4882a593Smuzhiyun
rt2x00lib_channel(struct ieee80211_channel * entry,const int channel,const int tx_power,const int value)966*4882a593Smuzhiyun static void rt2x00lib_channel(struct ieee80211_channel *entry,
967*4882a593Smuzhiyun const int channel, const int tx_power,
968*4882a593Smuzhiyun const int value)
969*4882a593Smuzhiyun {
970*4882a593Smuzhiyun /* XXX: this assumption about the band is wrong for 802.11j */
971*4882a593Smuzhiyun entry->band = channel <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
972*4882a593Smuzhiyun entry->center_freq = ieee80211_channel_to_frequency(channel,
973*4882a593Smuzhiyun entry->band);
974*4882a593Smuzhiyun entry->hw_value = value;
975*4882a593Smuzhiyun entry->max_power = tx_power;
976*4882a593Smuzhiyun entry->max_antenna_gain = 0xff;
977*4882a593Smuzhiyun }
978*4882a593Smuzhiyun
rt2x00lib_rate(struct ieee80211_rate * entry,const u16 index,const struct rt2x00_rate * rate)979*4882a593Smuzhiyun static void rt2x00lib_rate(struct ieee80211_rate *entry,
980*4882a593Smuzhiyun const u16 index, const struct rt2x00_rate *rate)
981*4882a593Smuzhiyun {
982*4882a593Smuzhiyun entry->flags = 0;
983*4882a593Smuzhiyun entry->bitrate = rate->bitrate;
984*4882a593Smuzhiyun entry->hw_value = index;
985*4882a593Smuzhiyun entry->hw_value_short = index;
986*4882a593Smuzhiyun
987*4882a593Smuzhiyun if (rate->flags & DEV_RATE_SHORT_PREAMBLE)
988*4882a593Smuzhiyun entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE;
989*4882a593Smuzhiyun }
990*4882a593Smuzhiyun
/*
 * Determine the device MAC address: a device-tree provided address
 * overrides the EEPROM one; if the result is still invalid a random
 * locally-administered address is generated. The final address is
 * written back into @eeprom_mac_addr.
 */
void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr)
{
	const char *of_addr;

	of_addr = of_get_mac_address(rt2x00dev->dev->of_node);
	if (!IS_ERR(of_addr))
		ether_addr_copy(eeprom_mac_addr, of_addr);

	if (is_valid_ether_addr(eeprom_mac_addr))
		return;

	eth_random_addr(eeprom_mac_addr);
	rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", eeprom_mac_addr);
}
EXPORT_SYMBOL_GPL(rt2x00lib_set_mac_address);
1005*4882a593Smuzhiyun
rt2x00lib_probe_hw_modes(struct rt2x00_dev * rt2x00dev,struct hw_mode_spec * spec)1006*4882a593Smuzhiyun static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
1007*4882a593Smuzhiyun struct hw_mode_spec *spec)
1008*4882a593Smuzhiyun {
1009*4882a593Smuzhiyun struct ieee80211_hw *hw = rt2x00dev->hw;
1010*4882a593Smuzhiyun struct ieee80211_channel *channels;
1011*4882a593Smuzhiyun struct ieee80211_rate *rates;
1012*4882a593Smuzhiyun unsigned int num_rates;
1013*4882a593Smuzhiyun unsigned int i;
1014*4882a593Smuzhiyun
1015*4882a593Smuzhiyun num_rates = 0;
1016*4882a593Smuzhiyun if (spec->supported_rates & SUPPORT_RATE_CCK)
1017*4882a593Smuzhiyun num_rates += 4;
1018*4882a593Smuzhiyun if (spec->supported_rates & SUPPORT_RATE_OFDM)
1019*4882a593Smuzhiyun num_rates += 8;
1020*4882a593Smuzhiyun
1021*4882a593Smuzhiyun channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL);
1022*4882a593Smuzhiyun if (!channels)
1023*4882a593Smuzhiyun return -ENOMEM;
1024*4882a593Smuzhiyun
1025*4882a593Smuzhiyun rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL);
1026*4882a593Smuzhiyun if (!rates)
1027*4882a593Smuzhiyun goto exit_free_channels;
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun /*
1030*4882a593Smuzhiyun * Initialize Rate list.
1031*4882a593Smuzhiyun */
1032*4882a593Smuzhiyun for (i = 0; i < num_rates; i++)
1033*4882a593Smuzhiyun rt2x00lib_rate(&rates[i], i, rt2x00_get_rate(i));
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun /*
1036*4882a593Smuzhiyun * Initialize Channel list.
1037*4882a593Smuzhiyun */
1038*4882a593Smuzhiyun for (i = 0; i < spec->num_channels; i++) {
1039*4882a593Smuzhiyun rt2x00lib_channel(&channels[i],
1040*4882a593Smuzhiyun spec->channels[i].channel,
1041*4882a593Smuzhiyun spec->channels_info[i].max_power, i);
1042*4882a593Smuzhiyun }
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun /*
1045*4882a593Smuzhiyun * Intitialize 802.11b, 802.11g
1046*4882a593Smuzhiyun * Rates: CCK, OFDM.
1047*4882a593Smuzhiyun * Channels: 2.4 GHz
1048*4882a593Smuzhiyun */
1049*4882a593Smuzhiyun if (spec->supported_bands & SUPPORT_BAND_2GHZ) {
1050*4882a593Smuzhiyun rt2x00dev->bands[NL80211_BAND_2GHZ].n_channels = 14;
1051*4882a593Smuzhiyun rt2x00dev->bands[NL80211_BAND_2GHZ].n_bitrates = num_rates;
1052*4882a593Smuzhiyun rt2x00dev->bands[NL80211_BAND_2GHZ].channels = channels;
1053*4882a593Smuzhiyun rt2x00dev->bands[NL80211_BAND_2GHZ].bitrates = rates;
1054*4882a593Smuzhiyun hw->wiphy->bands[NL80211_BAND_2GHZ] =
1055*4882a593Smuzhiyun &rt2x00dev->bands[NL80211_BAND_2GHZ];
1056*4882a593Smuzhiyun memcpy(&rt2x00dev->bands[NL80211_BAND_2GHZ].ht_cap,
1057*4882a593Smuzhiyun &spec->ht, sizeof(spec->ht));
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun
1060*4882a593Smuzhiyun /*
1061*4882a593Smuzhiyun * Intitialize 802.11a
1062*4882a593Smuzhiyun * Rates: OFDM.
1063*4882a593Smuzhiyun * Channels: OFDM, UNII, HiperLAN2.
1064*4882a593Smuzhiyun */
1065*4882a593Smuzhiyun if (spec->supported_bands & SUPPORT_BAND_5GHZ) {
1066*4882a593Smuzhiyun rt2x00dev->bands[NL80211_BAND_5GHZ].n_channels =
1067*4882a593Smuzhiyun spec->num_channels - 14;
1068*4882a593Smuzhiyun rt2x00dev->bands[NL80211_BAND_5GHZ].n_bitrates =
1069*4882a593Smuzhiyun num_rates - 4;
1070*4882a593Smuzhiyun rt2x00dev->bands[NL80211_BAND_5GHZ].channels = &channels[14];
1071*4882a593Smuzhiyun rt2x00dev->bands[NL80211_BAND_5GHZ].bitrates = &rates[4];
1072*4882a593Smuzhiyun hw->wiphy->bands[NL80211_BAND_5GHZ] =
1073*4882a593Smuzhiyun &rt2x00dev->bands[NL80211_BAND_5GHZ];
1074*4882a593Smuzhiyun memcpy(&rt2x00dev->bands[NL80211_BAND_5GHZ].ht_cap,
1075*4882a593Smuzhiyun &spec->ht, sizeof(spec->ht));
1076*4882a593Smuzhiyun }
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun return 0;
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun exit_free_channels:
1081*4882a593Smuzhiyun kfree(channels);
1082*4882a593Smuzhiyun rt2x00_err(rt2x00dev, "Allocation ieee80211 modes failed\n");
1083*4882a593Smuzhiyun return -ENOMEM;
1084*4882a593Smuzhiyun }
1085*4882a593Smuzhiyun
rt2x00lib_remove_hw(struct rt2x00_dev * rt2x00dev)1086*4882a593Smuzhiyun static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
1087*4882a593Smuzhiyun {
1088*4882a593Smuzhiyun if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
1089*4882a593Smuzhiyun ieee80211_unregister_hw(rt2x00dev->hw);
1090*4882a593Smuzhiyun
1091*4882a593Smuzhiyun if (likely(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ])) {
1092*4882a593Smuzhiyun kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels);
1093*4882a593Smuzhiyun kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates);
1094*4882a593Smuzhiyun rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
1095*4882a593Smuzhiyun rt2x00dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
1096*4882a593Smuzhiyun }
1097*4882a593Smuzhiyun
1098*4882a593Smuzhiyun kfree(rt2x00dev->spec.channels_info);
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun
/*
 * Register the device with mac80211: build the band/rate tables, set
 * up the required TX headroom, allocate the optional TX status FIFO,
 * initialize the driver tasklets and finally call
 * ieee80211_register_hw(). Calling it again after successful
 * registration is a no-op (returns 0).
 */
static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	struct hw_mode_spec *spec = &rt2x00dev->spec;
	int status;

	if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
		return 0;

	/*
	 * Initialize HW modes.
	 */
	status = rt2x00lib_probe_hw_modes(rt2x00dev, spec);
	if (status)
		return status;

	/*
	 * Initialize HW fields.
	 */
	rt2x00dev->hw->queues = rt2x00dev->ops->tx_queues;

	/*
	 * Initialize extra TX headroom required.
	 */
	rt2x00dev->hw->extra_tx_headroom =
		max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
		      rt2x00dev->extra_tx_headroom);

	/*
	 * Take TX headroom required for alignment into account.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD))
		rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
	else if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA))
		rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;

	/*
	 * Tell mac80211 about the size of our private STA structure.
	 */
	rt2x00dev->hw->sta_data_size = sizeof(struct rt2x00_sta);

	/*
	 * Allocate tx status FIFO for driver use.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO)) {
		/*
		 * Allocate the txstatus fifo. In the worst case the tx
		 * status fifo has to hold the tx status of all entries
		 * in all tx queues. Hence, calculate the kfifo size as
		 * tx_queues * entry_num and round up to the nearest
		 * power of 2.
		 */
		int kfifo_size =
			roundup_pow_of_two(rt2x00dev->ops->tx_queues *
					   rt2x00dev->tx->limit *
					   sizeof(u32));

		status = kfifo_alloc(&rt2x00dev->txstatus_fifo, kfifo_size,
				     GFP_KERNEL);
		if (status)
			return status;
	}

	/*
	 * Initialize tasklets if used by the driver. Tasklets are
	 * disabled until the interrupts are turned on. The driver
	 * has to handle that.
	 */
#define RT2X00_TASKLET_INIT(taskletname) \
	if (rt2x00dev->ops->lib->taskletname) { \
		tasklet_setup(&rt2x00dev->taskletname, \
			      rt2x00dev->ops->lib->taskletname); \
	}

	RT2X00_TASKLET_INIT(txstatus_tasklet);
	RT2X00_TASKLET_INIT(pretbtt_tasklet);
	RT2X00_TASKLET_INIT(tbtt_tasklet);
	RT2X00_TASKLET_INIT(rxdone_tasklet);
	RT2X00_TASKLET_INIT(autowake_tasklet);

#undef RT2X00_TASKLET_INIT

	/*
	 * Register HW.
	 */
	status = ieee80211_register_hw(rt2x00dev->hw);
	if (status)
		return status;

	set_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags);

	return 0;
}
1193*4882a593Smuzhiyun
1194*4882a593Smuzhiyun /*
1195*4882a593Smuzhiyun * Initialization/uninitialization handlers.
1196*4882a593Smuzhiyun */
/*
 * Mirror of rt2x00lib_initialize(). Teardown order matters: rfkill
 * polling is stopped first, then the driver shuts the hardware down,
 * and only afterwards are the queue entries released. A no-op when
 * the device was never initialized.
 */
static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	if (!test_and_clear_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags))
		return;

	/*
	 * Stop rfkill polling.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
		rt2x00rfkill_unregister(rt2x00dev);

	/*
	 * Allow the HW to uninitialize.
	 */
	rt2x00dev->ops->lib->uninitialize(rt2x00dev);

	/*
	 * Free allocated queue entries.
	 */
	rt2x00queue_uninitialize(rt2x00dev);
}
1218*4882a593Smuzhiyun
/*
 * Allocate the queue entries and let the driver initialize the
 * hardware. On success DEVICE_STATE_INITIALIZED is set and rfkill
 * polling is started (when the driver requested delayed rfkill).
 * A no-op returning 0 when already initialized; otherwise returns 0
 * on success or a negative errno, with the queue allocation rolled
 * back when the driver initialization fails.
 */
static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
{
	int status;

	if (test_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags))
		return 0;

	/*
	 * Allocate all queue entries.
	 */
	status = rt2x00queue_initialize(rt2x00dev);
	if (status)
		return status;

	/*
	 * Initialize the device.
	 */
	status = rt2x00dev->ops->lib->initialize(rt2x00dev);
	if (status) {
		/* Roll back the queue allocation on driver failure. */
		rt2x00queue_uninitialize(rt2x00dev);
		return status;
	}

	set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);

	/*
	 * Start rfkill polling.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
		rt2x00rfkill_register(rt2x00dev);

	return 0;
}
1252*4882a593Smuzhiyun
rt2x00lib_start(struct rt2x00_dev * rt2x00dev)1253*4882a593Smuzhiyun int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1254*4882a593Smuzhiyun {
1255*4882a593Smuzhiyun int retval = 0;
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun /*
1258*4882a593Smuzhiyun * If this is the first interface which is added,
1259*4882a593Smuzhiyun * we should load the firmware now.
1260*4882a593Smuzhiyun */
1261*4882a593Smuzhiyun retval = rt2x00lib_load_firmware(rt2x00dev);
1262*4882a593Smuzhiyun if (retval)
1263*4882a593Smuzhiyun goto out;
1264*4882a593Smuzhiyun
1265*4882a593Smuzhiyun /*
1266*4882a593Smuzhiyun * Initialize the device.
1267*4882a593Smuzhiyun */
1268*4882a593Smuzhiyun retval = rt2x00lib_initialize(rt2x00dev);
1269*4882a593Smuzhiyun if (retval)
1270*4882a593Smuzhiyun goto out;
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun rt2x00dev->intf_ap_count = 0;
1273*4882a593Smuzhiyun rt2x00dev->intf_sta_count = 0;
1274*4882a593Smuzhiyun rt2x00dev->intf_associated = 0;
1275*4882a593Smuzhiyun
1276*4882a593Smuzhiyun /* Enable the radio */
1277*4882a593Smuzhiyun retval = rt2x00lib_enable_radio(rt2x00dev);
1278*4882a593Smuzhiyun if (retval)
1279*4882a593Smuzhiyun goto out;
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun out:
1284*4882a593Smuzhiyun return retval;
1285*4882a593Smuzhiyun }
1286*4882a593Smuzhiyun
rt2x00lib_stop(struct rt2x00_dev * rt2x00dev)1287*4882a593Smuzhiyun void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
1288*4882a593Smuzhiyun {
1289*4882a593Smuzhiyun if (!test_and_clear_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
1290*4882a593Smuzhiyun return;
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun /*
1293*4882a593Smuzhiyun * Perhaps we can add something smarter here,
1294*4882a593Smuzhiyun * but for now just disabling the radio should do.
1295*4882a593Smuzhiyun */
1296*4882a593Smuzhiyun rt2x00lib_disable_radio(rt2x00dev);
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun rt2x00dev->intf_ap_count = 0;
1299*4882a593Smuzhiyun rt2x00dev->intf_sta_count = 0;
1300*4882a593Smuzhiyun rt2x00dev->intf_associated = 0;
1301*4882a593Smuzhiyun }
1302*4882a593Smuzhiyun
rt2x00lib_set_if_combinations(struct rt2x00_dev * rt2x00dev)1303*4882a593Smuzhiyun static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
1304*4882a593Smuzhiyun {
1305*4882a593Smuzhiyun struct ieee80211_iface_limit *if_limit;
1306*4882a593Smuzhiyun struct ieee80211_iface_combination *if_combination;
1307*4882a593Smuzhiyun
1308*4882a593Smuzhiyun if (rt2x00dev->ops->max_ap_intf < 2)
1309*4882a593Smuzhiyun return;
1310*4882a593Smuzhiyun
1311*4882a593Smuzhiyun /*
1312*4882a593Smuzhiyun * Build up AP interface limits structure.
1313*4882a593Smuzhiyun */
1314*4882a593Smuzhiyun if_limit = &rt2x00dev->if_limits_ap;
1315*4882a593Smuzhiyun if_limit->max = rt2x00dev->ops->max_ap_intf;
1316*4882a593Smuzhiyun if_limit->types = BIT(NL80211_IFTYPE_AP);
1317*4882a593Smuzhiyun #ifdef CONFIG_MAC80211_MESH
1318*4882a593Smuzhiyun if_limit->types |= BIT(NL80211_IFTYPE_MESH_POINT);
1319*4882a593Smuzhiyun #endif
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun /*
1322*4882a593Smuzhiyun * Build up AP interface combinations structure.
1323*4882a593Smuzhiyun */
1324*4882a593Smuzhiyun if_combination = &rt2x00dev->if_combinations[IF_COMB_AP];
1325*4882a593Smuzhiyun if_combination->limits = if_limit;
1326*4882a593Smuzhiyun if_combination->n_limits = 1;
1327*4882a593Smuzhiyun if_combination->max_interfaces = if_limit->max;
1328*4882a593Smuzhiyun if_combination->num_different_channels = 1;
1329*4882a593Smuzhiyun
1330*4882a593Smuzhiyun /*
1331*4882a593Smuzhiyun * Finally, specify the possible combinations to mac80211.
1332*4882a593Smuzhiyun */
1333*4882a593Smuzhiyun rt2x00dev->hw->wiphy->iface_combinations = rt2x00dev->if_combinations;
1334*4882a593Smuzhiyun rt2x00dev->hw->wiphy->n_iface_combinations = 1;
1335*4882a593Smuzhiyun }
1336*4882a593Smuzhiyun
rt2x00dev_extra_tx_headroom(struct rt2x00_dev * rt2x00dev)1337*4882a593Smuzhiyun static unsigned int rt2x00dev_extra_tx_headroom(struct rt2x00_dev *rt2x00dev)
1338*4882a593Smuzhiyun {
1339*4882a593Smuzhiyun if (WARN_ON(!rt2x00dev->tx))
1340*4882a593Smuzhiyun return 0;
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun if (rt2x00_is_usb(rt2x00dev))
1343*4882a593Smuzhiyun return rt2x00dev->tx[0].winfo_size + rt2x00dev->tx[0].desc_size;
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun return rt2x00dev->tx[0].winfo_size;
1346*4882a593Smuzhiyun }
1347*4882a593Smuzhiyun
1348*4882a593Smuzhiyun /*
1349*4882a593Smuzhiyun * driver allocation handlers.
1350*4882a593Smuzhiyun */
/*
 * rt2x00lib_probe_dev - generic probe routine for all rt2x00 devices.
 *
 * Called by the bus-specific (PCI/USB/SoC) probe code after the
 * rt2x00_dev structure has been allocated. Sets up the generic device
 * state, lets the chipset driver detect the hardware capabilities,
 * allocates the queues and registers the device with mac80211.
 *
 * Returns 0 on success or a negative errno. On any failure the error
 * path funnels through rt2x00lib_remove_dev(), which must therefore
 * cope with a partially initialized device.
 */
int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
{
	int retval = -ENOMEM;

	/*
	 * Set possible interface combinations.
	 */
	rt2x00lib_set_if_combinations(rt2x00dev);

	/*
	 * Allocate the driver data memory, if necessary.
	 */
	if (rt2x00dev->ops->drv_data_size > 0) {
		rt2x00dev->drv_data = kzalloc(rt2x00dev->ops->drv_data_size,
					      GFP_KERNEL);
		if (!rt2x00dev->drv_data) {
			retval = -ENOMEM;
			goto exit;
		}
	}

	/* Initialize the locks, lists and timers used by the generic layer. */
	spin_lock_init(&rt2x00dev->irqmask_lock);
	mutex_init(&rt2x00dev->csr_mutex);
	mutex_init(&rt2x00dev->conf_mutex);
	INIT_LIST_HEAD(&rt2x00dev->bar_list);
	spin_lock_init(&rt2x00dev->bar_list_lock);
	hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);

	/* From here on mac80211 callbacks are allowed to touch the device. */
	set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);

	/*
	 * Make room for rt2x00_intf inside the per-interface
	 * structure ieee80211_vif.
	 */
	rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf);

	/*
	 * rt2x00 devices can only use the last n bits of the MAC address
	 * for virtual interfaces.
	 */
	rt2x00dev->hw->wiphy->addr_mask[ETH_ALEN - 1] =
		(rt2x00dev->ops->max_ap_intf - 1);

	/*
	 * Initialize work.
	 */
	rt2x00dev->workqueue =
	    alloc_ordered_workqueue("%s", 0, wiphy_name(rt2x00dev->hw->wiphy));
	if (!rt2x00dev->workqueue) {
		retval = -ENOMEM;
		goto exit;
	}

	/*
	 * NOTE(review): on a failure before this point,
	 * rt2x00lib_remove_dev() will cancel_work_sync() work items that
	 * were never INIT_WORK()'ed; this relies on the device structure
	 * being zero-allocated by the caller -- TODO confirm.
	 */
	INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
	INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup);
	INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep);

	/*
	 * Let the driver probe the device to detect the capabilities.
	 */
	retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev);
	if (retval) {
		rt2x00_err(rt2x00dev, "Failed to allocate device\n");
		goto exit;
	}

	/*
	 * Allocate queue array.
	 */
	retval = rt2x00queue_allocate(rt2x00dev);
	if (retval)
		goto exit;

	/* Cache TX headroom value (queues must exist at this point). */
	rt2x00dev->extra_tx_headroom = rt2x00dev_extra_tx_headroom(rt2x00dev);

	/*
	 * Determine which operating modes are supported, all modes
	 * which require beaconing, depend on the availability of
	 * beacon entries.
	 */
	rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
	if (rt2x00dev->bcn->limit > 0)
		rt2x00dev->hw->wiphy->interface_modes |=
		    BIT(NL80211_IFTYPE_ADHOC) |
#ifdef CONFIG_MAC80211_MESH
		    BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
#ifdef CONFIG_WIRELESS_WDS
		    BIT(NL80211_IFTYPE_WDS) |
#endif
		    BIT(NL80211_IFTYPE_AP);

	rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	wiphy_ext_feature_set(rt2x00dev->hw->wiphy,
			      NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	/*
	 * Initialize ieee80211 structure.
	 */
	retval = rt2x00lib_probe_hw(rt2x00dev);
	if (retval) {
		rt2x00_err(rt2x00dev, "Failed to initialize hw\n");
		goto exit;
	}

	/*
	 * Register extra components.
	 */
	rt2x00link_register(rt2x00dev);
	rt2x00leds_register(rt2x00dev);
	rt2x00debug_register(rt2x00dev);

	/*
	 * Start rfkill polling.
	 */
	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
		rt2x00rfkill_register(rt2x00dev);

	return 0;

exit:
	/* Tear down whatever was set up so far; must handle partial state. */
	rt2x00lib_remove_dev(rt2x00dev);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00lib_probe_dev);
1480*4882a593Smuzhiyun
/*
 * rt2x00lib_remove_dev - tear down everything rt2x00lib_probe_dev() set up.
 *
 * Also used as the error path of rt2x00lib_probe_dev(), so it may run
 * against a partially initialized device. The teardown order is
 * significant: deferred work and tasklets are stopped before the
 * structures they operate on are freed.
 */
void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
{
	/* Make mac80211 callbacks bail out before dismantling anything. */
	clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);

	/*
	 * Stop rfkill polling.
	 */
	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
		rt2x00rfkill_unregister(rt2x00dev);

	/*
	 * Disable radio.
	 */
	rt2x00lib_disable_radio(rt2x00dev);

	/*
	 * Stop all work.
	 */
	cancel_work_sync(&rt2x00dev->intf_work);
	cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
	cancel_work_sync(&rt2x00dev->sleep_work);

	hrtimer_cancel(&rt2x00dev->txstatus_timer);

	/*
	 * Kill the tx status tasklet.
	 */
	tasklet_kill(&rt2x00dev->txstatus_tasklet);
	tasklet_kill(&rt2x00dev->pretbtt_tasklet);
	tasklet_kill(&rt2x00dev->tbtt_tasklet);
	tasklet_kill(&rt2x00dev->rxdone_tasklet);
	tasklet_kill(&rt2x00dev->autowake_tasklet);

	/*
	 * Uninitialize device.
	 */
	rt2x00lib_uninitialize(rt2x00dev);

	/* May be NULL if probe failed before the workqueue was allocated. */
	if (rt2x00dev->workqueue)
		destroy_workqueue(rt2x00dev->workqueue);

	/*
	 * Free the tx status fifo.
	 */
	kfifo_free(&rt2x00dev->txstatus_fifo);

	/*
	 * Free extra components
	 */
	rt2x00debug_deregister(rt2x00dev);
	rt2x00leds_unregister(rt2x00dev);

	/*
	 * Free ieee80211_hw memory.
	 */
	rt2x00lib_remove_hw(rt2x00dev);

	/*
	 * Free firmware image.
	 */
	rt2x00lib_free_firmware(rt2x00dev);

	/*
	 * Free queue structures.
	 */
	rt2x00queue_free(rt2x00dev);

	/*
	 * Free the driver data (kfree(NULL) is a no-op, so no check needed).
	 */
	kfree(rt2x00dev->drv_data);
}
EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
1554*4882a593Smuzhiyun
1555*4882a593Smuzhiyun /*
1556*4882a593Smuzhiyun * Device state handlers
1557*4882a593Smuzhiyun */
/*
 * rt2x00lib_suspend - prepare the device for system suspend.
 *
 * Clears DEVICE_STATE_PRESENT so mac80211 callbacks become no-ops
 * while the device is suspended, uninitializes the device and asks
 * the driver to enter the sleep power state.
 *
 * Always returns 0: failure to enter the sleep state is only logged,
 * see the comment below for why that is acceptable.
 */
int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev)
{
	rt2x00_dbg(rt2x00dev, "Going to sleep\n");

	/*
	 * Prevent mac80211 from accessing driver while suspended.
	 * If the bit was already clear, the device is not active and
	 * there is nothing further to do.
	 */
	if (!test_and_clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return 0;

	/*
	 * Cleanup as much as possible.
	 */
	rt2x00lib_uninitialize(rt2x00dev);

	/*
	 * Suspend/disable extra components.
	 */
	rt2x00leds_suspend(rt2x00dev);
	rt2x00debug_deregister(rt2x00dev);

	/*
	 * Set device mode to sleep for power management,
	 * on some hardware this call seems to consistently fail.
	 * From the specifications it is hard to tell why it fails,
	 * and if this is a "bad thing".
	 * Overall it is safe to just ignore the failure and
	 * continue suspending. The only downside is that the
	 * device will not be in optimal power save mode, but with
	 * the radio and the other components already disabled the
	 * device is as good as disabled.
	 */
	if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_SLEEP))
		rt2x00_warn(rt2x00dev, "Device failed to enter sleep state, continue suspending\n");

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00lib_suspend);
1596*4882a593Smuzhiyun
/*
 * rt2x00lib_resume - counterpart of rt2x00lib_suspend().
 *
 * Re-registers the components that were suspended and marks the
 * device present again so mac80211 callbacks are serviced. Note the
 * radio itself is not re-enabled here.
 *
 * Always returns 0.
 */
int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
{
	rt2x00_dbg(rt2x00dev, "Waking up\n");

	/*
	 * Restore/enable extra components.
	 */
	rt2x00debug_register(rt2x00dev);
	rt2x00leds_resume(rt2x00dev);

	/*
	 * We are ready again to receive requests from mac80211.
	 */
	set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00lib_resume);
1615*4882a593Smuzhiyun
/*
 * rt2x00lib module information (exposed via modinfo).
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 library");
MODULE_LICENSE("GPL");
1623