// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>

#include "common.h"
#include "3945.h"

/* Send led command */
static int
il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
{
	struct il_host_cmd cmd = {
		.id = C_LEDS,
		.len = sizeof(struct il_led_cmd),
		.data = led_cmd,
		.flags = CMD_ASYNC,
		.callback = NULL,
	};

	return il_send_cmd(il, &cmd);
}

#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)	\
	[RATE_##r##M_IDX] = { RATE_##r##M_PLCP,		\
			      RATE_##r##M_IEEE,		\
			      RATE_##ip##M_IDX,		\
			      RATE_##in##M_IDX,		\
			      RATE_##rp##M_IDX,		\
			      RATE_##rn##M_IDX,		\
			      RATE_##pp##M_IDX,		\
			      RATE_##np##M_IDX,		\
			      RATE_##r##M_IDX_TBL,	\
			      RATE_##ip##M_IDX_TBL }

/*
 * Parameter order:
 *   rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to RATE_INVALID
 *
 */
const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
	IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),	/*  1mbps */
	IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),		/*  2mbps */
	IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),		/*5.5mbps */
	IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),		/* 11mbps */
	IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),		/*  6mbps */
	IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),		/*  9mbps */
	IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),	/* 12mbps */
	IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),	/* 18mbps */
	IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),	/* 24mbps */
	IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),	/* 36mbps */
	IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),	/* 48mbps */
	IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),	/* 54mbps */
};

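/*
 * Return the previous IEEE rate for rate_idx, or rate_idx itself when
 * there is no valid previous rate.
 */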
static inline u8
il3945_get_prev_ieee_rate(u8 rate_idx)
{
	u8 rate = il3945_rates[rate_idx].prev_ieee;

	if (rate == RATE_INVALID)
		rate = rate_idx;
	return rate;
}

/* 1 = enable the il3945_disable_events() function */
#define IL_EVT_DISABLE (0)
#define IL_EVT_DISABLE_SIZE (1532/32)

/*
 * il3945_disable_events - Disable selected events in uCode event log
 *
 * Disable an event by writing "1"s into the "disable" bitmap in SRAM.
 * Bit position corresponds to Event # (id/type).
 * Default values of 0 enable uCode events to be logged.
 * Use only for special debugging.  As-is this function is just a placeholder:
 * you'll need to provide the special bits, and set IL_EVT_DISABLE to 1.
 */
void
il3945_disable_events(struct il_priv *il)
{
	int i;
	u32 base;		/* SRAM address of event log header */
	u32 disable_ptr;	/* SRAM address of event-disable bitmap array */
	u32 array_size;		/* # of u32 entries in array */
	static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
		0x00000000,	/*   31 -    0  Event id numbers */
		0x00000000,	/*   63 -   32 */
		0x00000000,	/*   95 -   64 */
		0x00000000,	/*  127 -   96 */
		0x00000000,	/*  159 -  128 */
		0x00000000,	/*  191 -  160 */
		0x00000000,	/*  223 -  192 */
		0x00000000,	/*  255 -  224 */
		0x00000000,	/*  287 -  256 */
		0x00000000,	/*  319 -  288 */
		0x00000000,	/*  351 -  320 */
		0x00000000,	/*  383 -  352 */
		0x00000000,	/*  415 -  384 */
		0x00000000,	/*  447 -  416 */
		0x00000000,	/*  479 -  448 */
		0x00000000,	/*  511 -  480 */
		0x00000000,	/*  543 -  512 */
		0x00000000,	/*  575 -  544 */
		0x00000000,	/*  607 -  576 */
		0x00000000,	/*  639 -  608 */
		0x00000000,	/*  671 -  640 */
		0x00000000,	/*  703 -  672 */
		0x00000000,	/*  735 -  704 */
		0x00000000,	/*  767 -  736 */
		0x00000000,	/*  799 -  768 */
		0x00000000,	/*  831 -  800 */
		0x00000000,	/*  863 -  832 */
		0x00000000,	/*  895 -  864 */
		0x00000000,	/*  927 -  896 */
		0x00000000,	/*  959 -  928 */
		0x00000000,	/*  991 -  960 */
		0x00000000,	/* 1023 -  992 */
		0x00000000,	/* 1055 - 1024 */
		0x00000000,	/* 1087 - 1056 */
		0x00000000,	/* 1119 - 1088 */
		0x00000000,	/* 1151 - 1120 */
		0x00000000,	/* 1183 - 1152 */
		0x00000000,	/* 1215 - 1184 */
		0x00000000,	/* 1247 - 1216 */
		0x00000000,	/* 1279 - 1248 */
		0x00000000,	/* 1311 - 1280 */
		0x00000000,	/* 1343 - 1312 */
		0x00000000,	/* 1375 - 1344 */
		0x00000000,	/* 1407 - 1376 */
		0x00000000,	/* 1439 - 1408 */
		0x00000000,	/* 1471 - 1440 */
		0x00000000,	/* 1503 - 1472 */
	};

	base = le32_to_cpu(il->card_alive.log_event_table_ptr);
	if (!il3945_hw_valid_rtc_data_addr(base)) {
		IL_ERR("Invalid event log pointer 0x%08X\n", base);
		return;
	}

	disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
	array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));

	if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
		D_INFO("Disabling selected uCode log events at 0x%x\n",
		       disable_ptr);
		for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
			il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
					  evt_disable[i]);

	} else {
		D_INFO("Selected uCode log events may be disabled\n");
		D_INFO("  by writing \"1\"s into disable bitmap\n");
		D_INFO("  in SRAM at 0x%x, size %d u32s\n", disable_ptr,
		       array_size);
	}

}

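/* Convert a hardware PLCP rate value to a rate table index, or -1 if unknown */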
static int
il3945_hwrate_to_plcp_idx(u8 plcp)
{
	int idx;

	for (idx = 0; idx < RATE_COUNT_3945; idx++)
		if (il3945_rates[idx].plcp == plcp)
			return idx;
	return -1;
}

#ifdef CONFIG_IWLEGACY_DEBUG
#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x

static const char *
il3945_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_3945_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
#else
static inline const char *
il3945_get_tx_fail_reason(u32 status)
{
	return "";
}
#endif
/*
 * Get the previous IEEE rate from the rate scale table.
 * For A and B mode we need to override the previous
 * value.
 */
int
il3945_rs_next_rate(struct il_priv *il, int rate)
{
	int next_rate = il3945_get_prev_ieee_rate(rate);

	switch (il->band) {
	case NL80211_BAND_5GHZ:
		if (rate == RATE_12M_IDX)
			next_rate = RATE_9M_IDX;
		else if (rate == RATE_6M_IDX)
			next_rate = RATE_6M_IDX;
		break;
	case NL80211_BAND_2GHZ:
		if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
		    il_is_associated(il)) {
			if (rate == RATE_11M_IDX)
				next_rate = RATE_5M_IDX;
		}
		break;

	default:
		break;
	}

	return next_rate;
}

/*
 * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
 *
 * When FW advances the 'R' idx, all entries between the old and new 'R' idx
 * need to be reclaimed.  As a result, some free space becomes available.  If
 * there is enough free space (> low mark), wake the stack that feeds us.
 */
static void
il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	struct sk_buff *skb;

	BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);

	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		skb = txq->skbs[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(il->hw, skb);
		txq->skbs[txq->q.read_ptr] = NULL;
		il->ops->txq_free_tfd(il, txq);
	}

	if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
	    txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
		il_wake_queue(il, txq);
}

/*
 * il3945_hdl_tx - Handle Tx response
 */
static void
il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->status);
	int rate_idx;
	int fail;

	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
		       "is out of range [0-%d] %d %d\n", txq_id, idx,
		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	/*
	 * Firmware will not transmit a frame on a passive channel if it has
	 * not yet received a valid frame on that channel.  When this error
	 * happens we have to wait until the firmware unblocks itself, i.e.
	 * when we receive a beacon or some other frame.  We unblock the
	 * queues in il3945_pass_packet_to_mac80211 or in
	 * il_mac_bss_info_changed.
	 */
	if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
	    il->iw_mode == NL80211_IFTYPE_STATION) {
		il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Stopped queues - RX waiting on passive channel\n");
	}

	txq->time_stamp = jiffies;
	info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
	ieee80211_tx_info_clear_status(info);

	/* Fill the MRR chain with some info about on-chip retransmissions */
	rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
	if (info->band == NL80211_BAND_5GHZ)
		rate_idx -= IL_FIRST_OFDM_RATE;

	fail = tx_resp->failure_frame;

	info->status.rates[0].idx = rate_idx;
	info->status.rates[0].count = fail + 1;	/* add final attempt */

	/* tx_status->rts_retry_count = tx_resp->failure_rts; */
	info->flags |=
	    ((status & TX_STATUS_MSK) ==
	     TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;

	D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
	     il3945_get_tx_fail_reason(status), status, tx_resp->rate,
	     tx_resp->failure_frame);

	D_TX_REPLY("Tx queue reclaim %d\n", idx);
	il3945_tx_queue_reclaim(il, txq_id, idx);

	if (status & TX_ABORT_REQUIRED_MSK)
		IL_ERR("TODO:  Implement Tx ABORT REQUIRED!!!\n");
}

/*****************************************************************************
 *
 * Intel PRO/Wireless 3945ABG/BG Network Connection
 *
 *  RX handler implementations
 *
 *****************************************************************************/
#ifdef CONFIG_IWLEGACY_DEBUGFS
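/*
 * Accumulate deltas between successive statistics notifications for
 * debugfs, tracking the maximum delta seen for each counter.
 */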
static void
il3945_accumulative_stats(struct il_priv *il, __le32 *stats)
{
	int i;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;

	prev_stats = (__le32 *) &il->_3945.stats;
	accum_stats = (u32 *) &il->_3945.accum_stats;
	delta = (u32 *) &il->_3945.delta_stats;
	max_delta = (u32 *) &il->_3945.max_delta;

	for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
	     i += sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative stats for "no-counter" type stats */
	il->_3945.accum_stats.general.temperature =
	    il->_3945.stats.general.temperature;
	il->_3945.accum_stats.general.ttl_timestamp =
	    il->_3945.stats.general.ttl_timestamp;
}
#endif

void
il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il3945_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
#endif

	memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
}

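/*
 * Reply handler for C_STATS: reset the accumulated debugfs counters if
 * the uCode indicates its statistics were cleared, then handle the
 * notification as usual.
 */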
void
il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	__le32 *flag = (__le32 *) &pkt->u.raw;

	if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
#ifdef CONFIG_IWLEGACY_DEBUGFS
		memset(&il->_3945.accum_stats, 0,
		       sizeof(struct il3945_notif_stats));
		memset(&il->_3945.delta_stats, 0,
		       sizeof(struct il3945_notif_stats));
		memset(&il->_3945.max_delta, 0,
		       sizeof(struct il3945_notif_stats));
#endif
		D_RX("Statistics have been cleared\n");
	}
	il3945_hdl_stats(il, rxb);
}

/******************************************************************************
 *
 * Misc. internal state and helper functions
 *
 ******************************************************************************/

/* This is necessary only for a number of stats, see the caller. */
static int
il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
{
	/* Filter incoming packets to determine if they are targeted toward
	 * this network, discarding packets coming from ourselves */
	switch (il->iw_mode) {
	case NL80211_IFTYPE_ADHOC:	/* Header: Dest. | Source    | BSSID */
		/* packets to our IBSS update information */
		return ether_addr_equal_64bits(header->addr3, il->bssid);
	case NL80211_IFTYPE_STATION:	/* Header: Dest. | AP{BSSID} | Source */
		/* packets to our IBSS update information */
		return ether_addr_equal_64bits(header->addr2, il->bssid);
	default:
		return 1;
	}
}

#define SMALL_PACKET_SIZE 256

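/*
 * Hand one received frame to mac80211.  Frames up to SMALL_PACKET_SIZE
 * bytes are copied into the skb head; larger frames are attached as a
 * page fragment so the payload is not copied.
 */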
static void
il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
	u32 len = le16_to_cpu(rx_hdr->len);
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;
	u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order;

	/* We received data from the HW, so stop the watchdog */
	if (unlikely(len + IL39_RX_FRAME_SIZE > fraglen)) {
		D_DROP("Corruption detected!\n");
		return;
	}

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
		il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Woke queues - frame received on passive channel\n");
	}

	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	if (!il3945_mod_params.sw_crypto)
		il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt,
				      le32_to_cpu(rx_end->status), stats);

	/* If frame is small enough to fit into skb->head, copy it
	 * and do not consume a full page
	 */
	if (len <= SMALL_PACKET_SIZE) {
		skb_put_data(skb, rx_hdr->payload, len);
	} else {
		skb_add_rx_frag(skb, 0, rxb->page,
				(void *)rx_hdr->payload - (void *)pkt, len,
				fraglen);
		il->alloc_rxb_page--;
		rxb->page = NULL;
	}
	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
}

#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)

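/*
 * Handle an Rx frame notification: validate the frame, fill
 * ieee80211_rx_status from the PHY info and pass the frame up to mac80211.
 */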
static void
il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status = {};
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
	u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
	u16 rx_stats_noise_diff __maybe_unused =
	    le16_to_cpu(rx_stats->noise_diff);
	u8 network_packet;

	rx_status.flag = 0;
	rx_status.mactime = le64_to_cpu(rx_end->timestamp);
	rx_status.band =
	    (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
	    NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
	rx_status.freq =
	    ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
					   rx_status.band);

	rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
	if (rx_status.band == NL80211_BAND_5GHZ)
		rx_status.rate_idx -= IL_FIRST_OFDM_RATE;

	rx_status.antenna =
	    (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;

	/* set the preamble flag if appropriate */
	if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;

	if (unlikely(rx_stats->phy_count > 20)) {
		D_DROP("dsp size out of range [0,20]: %d\n",
		       rx_stats->phy_count);
		return;
	}

	if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
		return;
	}

	/* Convert 3945's rssi indicator to dBm */
	rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;

	D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
		rx_stats_sig_avg, rx_stats_noise_diff);

	header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);

	network_packet = il3945_is_network_packet(il, header);

	D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
		network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
		rx_status.signal, rx_status.signal, rx_status.rate_idx);

	if (network_packet) {
		il->_3945.last_beacon_time =
		    le32_to_cpu(rx_end->beacon_timestamp);
		il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
		il->_3945.last_rx_rssi = rx_status.signal;
	}

	il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
}

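/*
 * Attach one DMA buffer (addr/len) as the next chunk of the TFD at the
 * queue's write pointer.  A single TFD holds at most NUM_TFD_CHUNKS buffers.
 */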
int
il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	int count;
	struct il_queue *q;
	struct il3945_tfd *tfd, *tfd_tmp;

	q = &txq->q;
	tfd_tmp = (struct il3945_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));

	if (count >= NUM_TFD_CHUNKS || count < 0) {
		IL_ERR("Error can not send more than %d chunks\n",
		       NUM_TFD_CHUNKS);
		return -EINVAL;
	}

	tfd->tbs[count].addr = cpu_to_le32(addr);
	tfd->tbs[count].len = cpu_to_le32(len);

	count++;

	tfd->control_flags =
	    cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));

	return 0;
}

/*
 * il3945_hw_txq_free_tfd - Free one TFD, those at idx [txq->q.read_ptr]
 *
 * Does NOT advance any idxes
 */
void
il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
	int idx = txq->q.read_ptr;
	struct il3945_tfd *tfd = &tfd_tmp[idx];
	struct pci_dev *dev = il->pci_dev;
	int i;
	int counter;

	/* sanity check */
	counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
	if (counter > NUM_TFD_CHUNKS) {
		IL_ERR("Too many chunks: %i\n", counter);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (counter)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_TODEVICE);

	/* unmap chunks if any */

	for (i = 1; i < counter; i++)
		pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
				 le32_to_cpu(tfd->tbs[i].len),
				 PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[txq->q.read_ptr] = NULL;
		}
	}
}

/*
 * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
 *
 */
void
il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
			    struct ieee80211_tx_info *info,
			    struct ieee80211_hdr *hdr, int sta_id)
{
	u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
	u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945 - 1);
	u16 rate_mask;
	int rate;
	const u8 rts_retry_limit = 7;
	u8 data_retry_limit;
	__le32 tx_flags;
	__le16 fc = hdr->frame_control;
	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;

	rate = il3945_rates[rate_idx].plcp;
	tx_flags = tx_cmd->tx_flags;

	/* We need to figure out how to get the sta->supp_rates while
	 * in this running context */
	rate_mask = RATES_MASK_3945;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IL_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;
	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);

	tx_cmd->rate = rate;
	tx_cmd->tx_flags = tx_flags;

	/* OFDM */
	tx_cmd->supp_rates[0] =
	    ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;

	/* CCK */
	tx_cmd->supp_rates[1] = (rate_mask & 0xF);

	D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
	       "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
	       le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
	       tx_cmd->supp_rates[0]);
}

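/* Tell the firmware the new Tx rate for a station via an async ADD_STA command */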
static u8
il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
{
	unsigned long flags_spin;
	struct il_station_entry *station;

	if (sta_id == IL_INVALID_STATION)
		return IL_INVALID_STATION;

	spin_lock_irqsave(&il->sta_lock, flags_spin);
	station = &il->stations[sta_id];

	station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
	station->sta.rate_n_flags = cpu_to_le16(tx_rate);
	station->sta.mode = STA_CONTROL_MODIFY_MSK;
	il_send_add_sta(il, &station->sta, CMD_ASYNC);
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
	return sta_id;
}

static void
il3945_set_pwr_vmain(struct il_priv *il)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do

		if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
			il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);

			_il_poll_bit(il, CSR_GPIO_IN,
				     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
				     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
		}
 */

	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);

	_il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
		     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
}

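/* Program Rx DMA channel 0: RBD ring base, status write-back address,
 * write pointer and channel configuration. */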
static int
il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
{
	il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
	il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
	il_wr(il, FH39_RCSR_WPTR(0), 0);
	il_wr(il, FH39_RCSR_CONFIG(0),
	      FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
	      FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
	      FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
	      FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
	      (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
	      FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
	      (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
	      FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);

	/* fake read to flush all prev I/O */
	il_rd(il, FH39_RSSR_CTRL);

	return 0;
}

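/* Put the Tx scheduler into bypass mode and set up the Tx DMA message config */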
static int
il3945_tx_reset(struct il_priv *il)
{
	/* bypass mode */
	il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);

	/* RA 0 is active */
	il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);

	/* all 6 fifo are active */
	il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);

	il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
	il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
	il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
	il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);

	il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);

	il_wr(il, FH39_TSSR_MSG_CONFIG,
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);

	return 0;
}

/*
 * il3945_txq_ctx_reset - Reset TX queue context
 *
 * Destroys all DMA structures and initializes them again
 */
static int
il3945_txq_ctx_reset(struct il_priv *il)
{
	int rc, txq_id;

	il3945_hw_txq_ctx_free(il);

	/* allocate tx queue structure */
	rc = il_alloc_txq_mem(il);
	if (rc)
		return rc;

	/* Tx CMD queue */
	rc = il3945_tx_reset(il);
	if (rc)
		goto error;

	/* Tx queue(s) */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
		rc = il_tx_queue_init(il, txq_id);
		if (rc) {
			IL_ERR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return rc;

error:
	il3945_hw_txq_ctx_free(il);
	return rc;
}

/*
 * Start up 3945's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via il_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int
il3945_apm_init(struct il_priv *il)
{
	int ret = il_apm_init(il);

	/* Clear APMG (NIC's internal power management) interrupts */
	il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
	il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);

	/* Reset radio chip */
	il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
	udelay(5);
	il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);

	return ret;
}

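/* Latch EEPROM- and PCI-revision-dependent configuration bits into CSR_HW_IF_CONFIG_REG */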
static void
il3945_nic_config(struct il_priv *il)
{
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
	unsigned long flags;
	u8 rev_id = il->pci_dev->revision;

	spin_lock_irqsave(&il->lock, flags);

	/* Determine HW type */
	D_INFO("HW Revision ID = 0x%X\n", rev_id);

	if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
		D_INFO("RTP type\n");
	else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
		D_INFO("3945 RADIO-MB type\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
	} else {
		D_INFO("3945 RADIO-MM type\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
	}

	if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
		D_INFO("SKU OP mode is mrc\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
	} else
		D_INFO("SKU OP mode is basic\n");

	if ((eeprom->board_revision & 0xF0) == 0xD0) {
		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	} else {
		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
		il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
			     CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	}

	if (eeprom->almgor_m_version <= 1) {
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
		D_INFO("Card M type A version is 0x%X\n",
		       eeprom->almgor_m_version);
	} else {
		D_INFO("Card M type B version is 0x%X\n",
		       eeprom->almgor_m_version);
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
	}
	spin_unlock_irqrestore(&il->lock, flags);

	if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		D_RF_KILL("SW RF KILL supported in EEPROM.\n");

	if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		D_RF_KILL("HW RF KILL supported in EEPROM.\n");
}

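/*
 * Bring the NIC up to a working state: APM init, power source selection,
 * NIC config, Rx queue setup and Tx queue context reset.
 */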
int
il3945_hw_nic_init(struct il_priv *il)
{
	int rc;
	unsigned long flags;
	struct il_rx_queue *rxq = &il->rxq;

	spin_lock_irqsave(&il->lock, flags);
	il3945_apm_init(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il3945_set_pwr_vmain(il);
	il3945_nic_config(il);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		rc = il_rx_queue_alloc(il);
		if (rc) {
			IL_ERR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		il3945_rx_queue_reset(il, rxq);

	il3945_rx_replenish(il);

	il3945_rx_init(il, rxq);

	/* Look at using this instead:
	   rxq->need_update = 1;
	   il_rx_queue_update_write_ptr(il, rxq);
	 */

	il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);

	rc = il3945_txq_ctx_reset(il);
	if (rc)
		return rc;

	set_bit(S_INIT, &il->status);

	return 0;
}

/*
 * il3945_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void
il3945_hw_txq_ctx_free(struct il_priv *il)
{
	int txq_id;

	/* Tx queues */
	if (il->txq) {
		for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
			if (txq_id == IL39_CMD_QUEUE_NUM)
				il_cmd_queue_free(il);
			else
				il_tx_queue_free(il, txq_id);
	}

	/* free tx queue structure */
	il_free_txq_mem(il);
}

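/* Stop the Tx scheduler and wait for each Tx DMA channel to go idle */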
void
il3945_hw_txq_ctx_stop(struct il_priv *il)
{
	int txq_id;

	/* stop SCD */
	_il_wr_prph(il, ALM_SCD_MODE_REG, 0);
	_il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);

	/* reset TFD queues */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
		_il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
		_il_poll_bit(il, FH39_TSSR_TX_STATUS,
			     FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
			     FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
			     1000);
	}
}

/*
 * il3945_hw_reg_adjust_power_by_temp
 * return idx delta into power gain settings table
 */
static int
il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
{
	return (new_reading - old_reading) * (-11) / 100;
}

/*
 * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
 */
static inline int
il3945_hw_reg_temp_out_of_range(int temperature)
{
	return (temperature < -260 || temperature > 25) ? 1 : 0;
}

int
il3945_hw_get_temperature(struct il_priv *il)
{
	return _il_rd(il, CSR_UCODE_DRV_GP2);
}

/*
 * il3945_hw_reg_txpower_get_temperature
 * get the current temperature by reading from NIC
 */
static int
il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
{
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
	int temperature;

	temperature = il3945_hw_get_temperature(il);

	/* driver's okay range is -260 to +25.
	 *   human readable okay range is 0 to +285 */
	D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);

	/* handle insane temp reading */
	if (il3945_hw_reg_temp_out_of_range(temperature)) {
		IL_ERR("Error bad temperature value %d\n", temperature);

		/* if really really hot(?),
		 *   substitute the 3rd band/group's temp measured at factory */
		if (il->last_temperature > 100)
			temperature = eeprom->groups[2].temperature;
		else		/* else use most recent "sane" value from driver */
			temperature = il->last_temperature;
	}

	return temperature;	/* raw, not "human readable" */
}

/* Adjust Txpower only if temperature variance is greater than threshold.
 *
 * Both are lower than older versions' 9 degrees */
#define IL_TEMPERATURE_LIMIT_TIMER 6

/*
 * il3945_is_temp_calib_needed - determines if new calibration is needed
 *
 * records new temperature in tx_mgr->temperature.
 * replaces tx_mgr->last_temperature *only* if calib needed
 *    (assumes caller will actually do the calibration!). */
static int
il3945_is_temp_calib_needed(struct il_priv *il)
{
	int temp_diff;

	il->temperature = il3945_hw_reg_txpower_get_temperature(il);
	temp_diff = il->temperature - il->last_temperature;

	/* get absolute value */
	if (temp_diff < 0) {
		D_POWER("Getting cooler, delta %d,\n", temp_diff);
		temp_diff = -temp_diff;
	} else if (temp_diff == 0)
		D_POWER("Same temp,\n");
	else
		D_POWER("Getting warmer, delta %d,\n", temp_diff);

	/* if we don't need calibration, *don't* update last_temperature */
	if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
		D_POWER("Timed thermal calib not needed\n");
		return 0;
	}

	D_POWER("Timed thermal calib needed\n");

	/* assume that caller will actually do calib ...
	 * update the "last temperature" value */
	il->last_temperature = il->temperature;
	return 1;
}

#define IL_MAX_GAIN_ENTRIES 78
#define IL_CCK_FROM_OFDM_POWER_DIFF -5
#define IL_CCK_FROM_OFDM_IDX_DIFF (10)

/* radio and DSP power table, each step is 1/2 dB.
 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
	{
	 {251, 127},		/* 2.4 GHz, highest power */
	 {251, 127},
	 {251, 127},
	 {251, 127},
	 {251, 125},
	 {251, 110},
	 {251, 105},
	 {251, 98},
	 {187, 125},
	 {187, 115},
	 {187, 108},
	 {187, 99},
	 {243, 119},
	 {243, 111},
	 {243, 105},
	 {243, 97},
	 {243, 92},
	 {211, 106},
	 {211, 100},
	 {179, 120},
	 {179, 113},
	 {179, 107},
	 {147, 125},
	 {147, 119},
	 {147, 112},
	 {147, 106},
	 {147, 101},
	 {147, 97},
	 {147, 91},
	 {115, 107},
	 {235, 121},
	 {235, 115},
	 {235, 109},
	 {203, 127},
	 {203, 121},
	 {203, 115},
	 {203, 108},
	 {203, 102},
	 {203, 96},
	 {203, 92},
	 {171, 110},
	 {171, 104},
	 {171, 98},
	 {139, 116},
	 {227, 125},
	 {227, 119},
	 {227, 113},
	 {227, 107},
	 {227, 101},
	 {227, 96},
	 {195, 113},
	 {195, 106},
	 {195, 102},
	 {195, 95},
	 {163, 113},
	 {163, 106},
	 {163, 102},
	 {163, 95},
	 {131, 113},
	 {131, 106},
	 {131, 102},
	 {131, 95},
	 {99, 113},
	 {99, 106},
	 {99, 102},
	 {99, 95},
	 {67, 113},
	 {67, 106},
	 {67, 102},
	 {67, 95},
	 {35, 113},
	 {35, 106},
	 {35, 102},
	 {35, 95},
	 {3, 113},
	 {3, 106},
	 {3, 102},
	 {3, 95}		/* 2.4 GHz, lowest power */
	},
	{
	 {251, 127},		/* 5.x GHz, highest power */
	 {251, 120},
	 {251, 114},
	 {219, 119},
	 {219, 101},
	 {187, 113},
	 {187, 102},
	 {155, 114},
	 {155, 103},
1233*4882a593Smuzhiyun {123, 117},
1234*4882a593Smuzhiyun {123, 107},
1235*4882a593Smuzhiyun {123, 99},
1236*4882a593Smuzhiyun {123, 92},
1237*4882a593Smuzhiyun {91, 108},
1238*4882a593Smuzhiyun {59, 125},
1239*4882a593Smuzhiyun {59, 118},
1240*4882a593Smuzhiyun {59, 109},
1241*4882a593Smuzhiyun {59, 102},
1242*4882a593Smuzhiyun {59, 96},
1243*4882a593Smuzhiyun {59, 90},
1244*4882a593Smuzhiyun {27, 104},
1245*4882a593Smuzhiyun {27, 98},
1246*4882a593Smuzhiyun {27, 92},
1247*4882a593Smuzhiyun {115, 118},
1248*4882a593Smuzhiyun {115, 111},
1249*4882a593Smuzhiyun {115, 104},
1250*4882a593Smuzhiyun {83, 126},
1251*4882a593Smuzhiyun {83, 121},
1252*4882a593Smuzhiyun {83, 113},
1253*4882a593Smuzhiyun {83, 105},
1254*4882a593Smuzhiyun {83, 99},
1255*4882a593Smuzhiyun {51, 118},
1256*4882a593Smuzhiyun {51, 111},
1257*4882a593Smuzhiyun {51, 104},
1258*4882a593Smuzhiyun {51, 98},
1259*4882a593Smuzhiyun {19, 116},
1260*4882a593Smuzhiyun {19, 109},
1261*4882a593Smuzhiyun {19, 102},
1262*4882a593Smuzhiyun {19, 98},
1263*4882a593Smuzhiyun {19, 93},
1264*4882a593Smuzhiyun {171, 113},
1265*4882a593Smuzhiyun {171, 107},
1266*4882a593Smuzhiyun {171, 99},
1267*4882a593Smuzhiyun {139, 120},
1268*4882a593Smuzhiyun {139, 113},
1269*4882a593Smuzhiyun {139, 107},
1270*4882a593Smuzhiyun {139, 99},
1271*4882a593Smuzhiyun {107, 120},
1272*4882a593Smuzhiyun {107, 113},
1273*4882a593Smuzhiyun {107, 107},
1274*4882a593Smuzhiyun {107, 99},
1275*4882a593Smuzhiyun {75, 120},
1276*4882a593Smuzhiyun {75, 113},
1277*4882a593Smuzhiyun {75, 107},
1278*4882a593Smuzhiyun {75, 99},
1279*4882a593Smuzhiyun {43, 120},
1280*4882a593Smuzhiyun {43, 113},
1281*4882a593Smuzhiyun {43, 107},
1282*4882a593Smuzhiyun {43, 99},
1283*4882a593Smuzhiyun {11, 120},
1284*4882a593Smuzhiyun {11, 113},
1285*4882a593Smuzhiyun {11, 107},
1286*4882a593Smuzhiyun {11, 99},
1287*4882a593Smuzhiyun {131, 107},
1288*4882a593Smuzhiyun {131, 99},
1289*4882a593Smuzhiyun {99, 120},
1290*4882a593Smuzhiyun {99, 113},
1291*4882a593Smuzhiyun {99, 107},
1292*4882a593Smuzhiyun {99, 99},
1293*4882a593Smuzhiyun {67, 120},
1294*4882a593Smuzhiyun {67, 113},
1295*4882a593Smuzhiyun {67, 107},
1296*4882a593Smuzhiyun {67, 99},
1297*4882a593Smuzhiyun {35, 120},
1298*4882a593Smuzhiyun {35, 113},
1299*4882a593Smuzhiyun {35, 107},
1300*4882a593Smuzhiyun {35, 99},
1301*4882a593Smuzhiyun {3, 120} /* 5.x GHz, lowest power */
1302*4882a593Smuzhiyun }
1303*4882a593Smuzhiyun };
1304*4882a593Smuzhiyun
1305*4882a593Smuzhiyun static inline u8
il3945_hw_reg_fix_power_idx(int idx)
1307*4882a593Smuzhiyun {
1308*4882a593Smuzhiyun if (idx < 0)
1309*4882a593Smuzhiyun return 0;
1310*4882a593Smuzhiyun if (idx >= IL_MAX_GAIN_ENTRIES)
1311*4882a593Smuzhiyun return IL_MAX_GAIN_ENTRIES - 1;
1312*4882a593Smuzhiyun return (u8) idx;
1313*4882a593Smuzhiyun }
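/*
 * Clamping examples: an idx of -3 (more output requested than the table can
 * deliver) maps to entry 0, the highest-power entry; an idx of 80 maps to
 * entry IL_MAX_GAIN_ENTRIES - 1 = 77, the lowest-power entry.
 */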
1314*4882a593Smuzhiyun
1315*4882a593Smuzhiyun /* Kick off thermal recalibration check every 60 seconds */
1316*4882a593Smuzhiyun #define REG_RECALIB_PERIOD (60)
1317*4882a593Smuzhiyun
1318*4882a593Smuzhiyun /*
1319*4882a593Smuzhiyun * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
1320*4882a593Smuzhiyun *
1321*4882a593Smuzhiyun * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
1322*4882a593Smuzhiyun * or 6 Mbit (OFDM) rates.
1323*4882a593Smuzhiyun */
1324*4882a593Smuzhiyun static void
il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
1326*4882a593Smuzhiyun const s8 *clip_pwrs,
1327*4882a593Smuzhiyun struct il_channel_info *ch_info, int band_idx)
1328*4882a593Smuzhiyun {
1329*4882a593Smuzhiyun struct il3945_scan_power_info *scan_power_info;
1330*4882a593Smuzhiyun s8 power;
1331*4882a593Smuzhiyun u8 power_idx;
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun /* use this channel group's 6Mbit clipping/saturation pwr,
1336*4882a593Smuzhiyun * but cap at regulatory scan power restriction (set during init
1337*4882a593Smuzhiyun * based on eeprom channel data) for this channel. */
1338*4882a593Smuzhiyun power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun power = min(power, il->tx_power_user_lmt);
1341*4882a593Smuzhiyun scan_power_info->requested_power = power;
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun /* find difference between new scan *power* and current "normal"
1344*4882a593Smuzhiyun * Tx *power* for 6Mb. Use this difference (x2) to adjust the
1345*4882a593Smuzhiyun * current "normal" temperature-compensated Tx power *idx* for
1346*4882a593Smuzhiyun * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
1347*4882a593Smuzhiyun * *idx*. */
1348*4882a593Smuzhiyun power_idx =
1349*4882a593Smuzhiyun ch_info->power_info[rate_idx].power_table_idx - (power -
1350*4882a593Smuzhiyun ch_info->
1351*4882a593Smuzhiyun power_info
1352*4882a593Smuzhiyun [RATE_6M_IDX_TBL].
1353*4882a593Smuzhiyun requested_power) *
1354*4882a593Smuzhiyun 2;
1355*4882a593Smuzhiyun
1356*4882a593Smuzhiyun /* store reference idx that we use when adjusting *all* scan
1357*4882a593Smuzhiyun * powers. So we can accommodate user (all channel) or spectrum
1358*4882a593Smuzhiyun * management (single channel) power changes "between" temperature
1359*4882a593Smuzhiyun * feedback compensation procedures.
1360*4882a593Smuzhiyun * don't force fit this reference idx into gain table; it may be a
1361*4882a593Smuzhiyun * negative number. This will help avoid errors when we're at
1362*4882a593Smuzhiyun * the lower bounds (highest gains, for warmest temperatures)
1363*4882a593Smuzhiyun * of the table. */
1364*4882a593Smuzhiyun
1365*4882a593Smuzhiyun /* don't exceed table bounds for "real" setting */
1366*4882a593Smuzhiyun power_idx = il3945_hw_reg_fix_power_idx(power_idx);
1367*4882a593Smuzhiyun
1368*4882a593Smuzhiyun scan_power_info->power_table_idx = power_idx;
1369*4882a593Smuzhiyun scan_power_info->tpc.tx_gain =
1370*4882a593Smuzhiyun power_gain_table[band_idx][power_idx].tx_gain;
1371*4882a593Smuzhiyun scan_power_info->tpc.dsp_atten =
1372*4882a593Smuzhiyun power_gain_table[band_idx][power_idx].dsp_atten;
1373*4882a593Smuzhiyun }
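/*
 * Worked example of the idx math above (each gain-table step is 1/2 dB, so
 * 1 dBm equals 2 steps); the numbers are illustrative only: if the 6 Mbit
 * rate's current power_table_idx is 20 for a requested 14 dBm, and the
 * capped scan power comes out as 16 dBm, then
 * power_idx = 20 - (16 - 14) * 2 = 16, i.e. four half-dB steps toward
 * higher power before clamping.
 */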
1374*4882a593Smuzhiyun
1375*4882a593Smuzhiyun /*
1376*4882a593Smuzhiyun * il3945_send_tx_power - fill in Tx Power command with gain settings
1377*4882a593Smuzhiyun *
1378*4882a593Smuzhiyun * Configures power settings for all rates for the current channel,
1379*4882a593Smuzhiyun * using values from channel info struct, and send to NIC
1380*4882a593Smuzhiyun */
1381*4882a593Smuzhiyun static int
il3945_send_tx_power(struct il_priv *il)
1383*4882a593Smuzhiyun {
1384*4882a593Smuzhiyun int rate_idx, i;
1385*4882a593Smuzhiyun const struct il_channel_info *ch_info = NULL;
1386*4882a593Smuzhiyun struct il3945_txpowertable_cmd txpower = {
1387*4882a593Smuzhiyun .channel = il->active.channel,
1388*4882a593Smuzhiyun };
1389*4882a593Smuzhiyun u16 chan;
1390*4882a593Smuzhiyun
1391*4882a593Smuzhiyun if (WARN_ONCE
1392*4882a593Smuzhiyun (test_bit(S_SCAN_HW, &il->status),
1393*4882a593Smuzhiyun "TX Power requested while scanning!\n"))
1394*4882a593Smuzhiyun return -EAGAIN;
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun chan = le16_to_cpu(il->active.channel);
1397*4882a593Smuzhiyun
1398*4882a593Smuzhiyun txpower.band = (il->band == NL80211_BAND_5GHZ) ? 0 : 1;
1399*4882a593Smuzhiyun ch_info = il_get_channel_info(il, il->band, chan);
1400*4882a593Smuzhiyun if (!ch_info) {
1401*4882a593Smuzhiyun IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
1402*4882a593Smuzhiyun il->band);
1403*4882a593Smuzhiyun return -EINVAL;
1404*4882a593Smuzhiyun }
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun if (!il_is_channel_valid(ch_info)) {
1407*4882a593Smuzhiyun D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n");
1408*4882a593Smuzhiyun return 0;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun /* fill cmd with power settings for all rates for current channel */
1412*4882a593Smuzhiyun /* Fill OFDM rate */
1413*4882a593Smuzhiyun for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
1414*4882a593Smuzhiyun rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
1415*4882a593Smuzhiyun
1416*4882a593Smuzhiyun txpower.power[i].tpc = ch_info->power_info[i].tpc;
1417*4882a593Smuzhiyun txpower.power[i].rate = il3945_rates[rate_idx].plcp;
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1420*4882a593Smuzhiyun le16_to_cpu(txpower.channel), txpower.band,
1421*4882a593Smuzhiyun txpower.power[i].tpc.tx_gain,
1422*4882a593Smuzhiyun txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
1423*4882a593Smuzhiyun }
1424*4882a593Smuzhiyun /* Fill CCK rates */
1425*4882a593Smuzhiyun for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
1426*4882a593Smuzhiyun rate_idx++, i++) {
1427*4882a593Smuzhiyun txpower.power[i].tpc = ch_info->power_info[i].tpc;
1428*4882a593Smuzhiyun txpower.power[i].rate = il3945_rates[rate_idx].plcp;
1429*4882a593Smuzhiyun
1430*4882a593Smuzhiyun D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1431*4882a593Smuzhiyun le16_to_cpu(txpower.channel), txpower.band,
1432*4882a593Smuzhiyun txpower.power[i].tpc.tx_gain,
1433*4882a593Smuzhiyun txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
1434*4882a593Smuzhiyun }
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun return il_send_cmd_pdu(il, C_TX_PWR_TBL,
1437*4882a593Smuzhiyun sizeof(struct il3945_txpowertable_cmd),
1438*4882a593Smuzhiyun &txpower);
1439*4882a593Smuzhiyun
1440*4882a593Smuzhiyun }
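/*
 * Note on the two fill loops in il3945_send_tx_power(): the entry counter i
 * is deliberately not reset between them, so the CCK entries land in
 * txpower.power[] immediately after the OFDM entries (entries 8..11,
 * assuming the usual 8 OFDM and 4 CCK rates on 3945).
 */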
1441*4882a593Smuzhiyun
1442*4882a593Smuzhiyun /*
1443*4882a593Smuzhiyun * il3945_hw_reg_set_new_power - Configures power tables at new levels
1444*4882a593Smuzhiyun * @ch_info: Channel to update. Uses power_info.requested_power.
1445*4882a593Smuzhiyun *
1446*4882a593Smuzhiyun * Replace requested_power and base_power_idx ch_info fields for
1447*4882a593Smuzhiyun * one channel.
1448*4882a593Smuzhiyun *
1449*4882a593Smuzhiyun * Called if user or spectrum management changes power preferences.
1450*4882a593Smuzhiyun * Takes into account h/w and modulation limitations (clip power).
1451*4882a593Smuzhiyun *
1452*4882a593Smuzhiyun * This does *not* send anything to NIC, just sets up ch_info for one channel.
1453*4882a593Smuzhiyun *
1454*4882a593Smuzhiyun * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
1455*4882a593Smuzhiyun * properly fill out the scan powers, and actual h/w gain settings,
1456*4882a593Smuzhiyun * and send changes to NIC
1457*4882a593Smuzhiyun */
1458*4882a593Smuzhiyun static int
il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
1460*4882a593Smuzhiyun {
1461*4882a593Smuzhiyun struct il3945_channel_power_info *power_info;
1462*4882a593Smuzhiyun int power_changed = 0;
1463*4882a593Smuzhiyun int i;
1464*4882a593Smuzhiyun const s8 *clip_pwrs;
1465*4882a593Smuzhiyun int power;
1466*4882a593Smuzhiyun
1467*4882a593Smuzhiyun /* Get this chnlgrp's rate-to-max/clip-powers table */
1468*4882a593Smuzhiyun clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun /* Get this channel's rate-to-current-power settings table */
1471*4882a593Smuzhiyun power_info = ch_info->power_info;
1472*4882a593Smuzhiyun
1473*4882a593Smuzhiyun /* update OFDM Txpower settings */
1474*4882a593Smuzhiyun for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
1475*4882a593Smuzhiyun int delta_idx;
1476*4882a593Smuzhiyun
1477*4882a593Smuzhiyun /* limit new power to be no more than h/w capability */
1478*4882a593Smuzhiyun power = min(ch_info->curr_txpow, clip_pwrs[i]);
1479*4882a593Smuzhiyun if (power == power_info->requested_power)
1480*4882a593Smuzhiyun continue;
1481*4882a593Smuzhiyun
1482*4882a593Smuzhiyun /* find difference between old and new requested powers,
1483*4882a593Smuzhiyun * update base (non-temp-compensated) power idx */
1484*4882a593Smuzhiyun delta_idx = (power - power_info->requested_power) * 2;
1485*4882a593Smuzhiyun power_info->base_power_idx -= delta_idx;
1486*4882a593Smuzhiyun
1487*4882a593Smuzhiyun /* save new requested power value */
1488*4882a593Smuzhiyun power_info->requested_power = power;
1489*4882a593Smuzhiyun
1490*4882a593Smuzhiyun power_changed = 1;
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun
1493*4882a593Smuzhiyun /* update CCK Txpower settings, based on OFDM 12M setting ...
1494*4882a593Smuzhiyun * ... all CCK power settings for a given channel are the *same*. */
1495*4882a593Smuzhiyun if (power_changed) {
1496*4882a593Smuzhiyun power =
1497*4882a593Smuzhiyun ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
1498*4882a593Smuzhiyun IL_CCK_FROM_OFDM_POWER_DIFF;
1499*4882a593Smuzhiyun
1500*4882a593Smuzhiyun /* do all CCK rates' il3945_channel_power_info structures */
1501*4882a593Smuzhiyun for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
1502*4882a593Smuzhiyun power_info->requested_power = power;
1503*4882a593Smuzhiyun power_info->base_power_idx =
1504*4882a593Smuzhiyun ch_info->power_info[RATE_12M_IDX_TBL].
1505*4882a593Smuzhiyun base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
1506*4882a593Smuzhiyun ++power_info;
1507*4882a593Smuzhiyun }
1508*4882a593Smuzhiyun }
1509*4882a593Smuzhiyun
1510*4882a593Smuzhiyun return 0;
1511*4882a593Smuzhiyun }
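/*
 * Worked example of the CCK derivation above (values illustrative): if the
 * 12 Mbit OFDM rate ends up with requested_power = 15 dBm and
 * base_power_idx = 30, every CCK rate on the channel gets
 * requested_power = 15 + IL_CCK_FROM_OFDM_POWER_DIFF = 10 dBm and
 * base_power_idx = 30 + IL_CCK_FROM_OFDM_IDX_DIFF = 40, i.e. ten extra
 * half-dB steps of attenuation to match the 5 dB lower request.
 */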
1512*4882a593Smuzhiyun
1513*4882a593Smuzhiyun /*
1514*4882a593Smuzhiyun * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
1515*4882a593Smuzhiyun *
1516*4882a593Smuzhiyun * NOTE: Returned power limit may be less (but not more) than requested,
1517*4882a593Smuzhiyun * based strictly on regulatory (eeprom and spectrum mgt) limitations
1518*4882a593Smuzhiyun * (no consideration for h/w clipping limitations).
1519*4882a593Smuzhiyun */
1520*4882a593Smuzhiyun static int
il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
1522*4882a593Smuzhiyun {
1523*4882a593Smuzhiyun s8 max_power;
1524*4882a593Smuzhiyun
1525*4882a593Smuzhiyun #if 0
1526*4882a593Smuzhiyun /* if we're using TGd limits, use lower of TGd or EEPROM */
1527*4882a593Smuzhiyun if (ch_info->tgd_data.max_power != 0)
1528*4882a593Smuzhiyun max_power =
1529*4882a593Smuzhiyun min(ch_info->tgd_data.max_power,
1530*4882a593Smuzhiyun ch_info->eeprom.max_power_avg);
1531*4882a593Smuzhiyun
1532*4882a593Smuzhiyun /* else just use EEPROM limits */
1533*4882a593Smuzhiyun else
1534*4882a593Smuzhiyun #endif
1535*4882a593Smuzhiyun max_power = ch_info->eeprom.max_power_avg;
1536*4882a593Smuzhiyun
1537*4882a593Smuzhiyun return min(max_power, ch_info->max_power_avg);
1538*4882a593Smuzhiyun }
1539*4882a593Smuzhiyun
1540*4882a593Smuzhiyun /*
1541*4882a593Smuzhiyun * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
1542*4882a593Smuzhiyun *
1543*4882a593Smuzhiyun * Compensate txpower settings of *all* channels for temperature.
1544*4882a593Smuzhiyun * This only accounts for the difference between current temperature
1545*4882a593Smuzhiyun * and the factory calibration temperatures, and bases the new settings
1546*4882a593Smuzhiyun * on the channel's base_power_idx.
1547*4882a593Smuzhiyun *
1548*4882a593Smuzhiyun * If RxOn is "associated", this sends the new Txpower to NIC!
1549*4882a593Smuzhiyun */
1550*4882a593Smuzhiyun static int
il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
1552*4882a593Smuzhiyun {
1553*4882a593Smuzhiyun struct il_channel_info *ch_info = NULL;
1554*4882a593Smuzhiyun struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1555*4882a593Smuzhiyun int delta_idx;
1556*4882a593Smuzhiyun const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
1557*4882a593Smuzhiyun u8 a_band;
1558*4882a593Smuzhiyun u8 rate_idx;
1559*4882a593Smuzhiyun u8 scan_tbl_idx;
1560*4882a593Smuzhiyun u8 i;
1561*4882a593Smuzhiyun int ref_temp;
1562*4882a593Smuzhiyun int temperature = il->temperature;
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
1565*4882a593Smuzhiyun /* do not perform tx power calibration */
1566*4882a593Smuzhiyun return 0;
1567*4882a593Smuzhiyun }
1568*4882a593Smuzhiyun /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1569*4882a593Smuzhiyun for (i = 0; i < il->channel_count; i++) {
1570*4882a593Smuzhiyun ch_info = &il->channel_info[i];
1571*4882a593Smuzhiyun a_band = il_is_channel_a_band(ch_info);
1572*4882a593Smuzhiyun
1573*4882a593Smuzhiyun /* Get this chnlgrp's factory calibration temperature */
1574*4882a593Smuzhiyun ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun /* get power idx adjustment based on current and factory
1577*4882a593Smuzhiyun * temps */
1578*4882a593Smuzhiyun delta_idx =
1579*4882a593Smuzhiyun il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
1580*4882a593Smuzhiyun
1581*4882a593Smuzhiyun /* set tx power value for all rates, OFDM and CCK */
1582*4882a593Smuzhiyun for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
1583*4882a593Smuzhiyun int power_idx =
1584*4882a593Smuzhiyun ch_info->power_info[rate_idx].base_power_idx;
1585*4882a593Smuzhiyun
1586*4882a593Smuzhiyun /* temperature compensate */
1587*4882a593Smuzhiyun power_idx += delta_idx;
1588*4882a593Smuzhiyun
1589*4882a593Smuzhiyun /* stay within table range */
1590*4882a593Smuzhiyun power_idx = il3945_hw_reg_fix_power_idx(power_idx);
1591*4882a593Smuzhiyun ch_info->power_info[rate_idx].power_table_idx =
1592*4882a593Smuzhiyun (u8) power_idx;
1593*4882a593Smuzhiyun ch_info->power_info[rate_idx].tpc =
1594*4882a593Smuzhiyun power_gain_table[a_band][power_idx];
1595*4882a593Smuzhiyun }
1596*4882a593Smuzhiyun
1597*4882a593Smuzhiyun /* Get this chnlgrp's rate-to-max/clip-powers table */
1598*4882a593Smuzhiyun clip_pwrs =
1599*4882a593Smuzhiyun il->_3945.clip_groups[ch_info->group_idx].clip_powers;
1600*4882a593Smuzhiyun
1601*4882a593Smuzhiyun /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1602*4882a593Smuzhiyun for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
1603*4882a593Smuzhiyun scan_tbl_idx++) {
1604*4882a593Smuzhiyun s32 actual_idx =
1605*4882a593Smuzhiyun (scan_tbl_idx ==
1606*4882a593Smuzhiyun 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
1607*4882a593Smuzhiyun il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
1608*4882a593Smuzhiyun actual_idx, clip_pwrs,
1609*4882a593Smuzhiyun ch_info, a_band);
1610*4882a593Smuzhiyun }
1611*4882a593Smuzhiyun }
1612*4882a593Smuzhiyun
1613*4882a593Smuzhiyun /* send Txpower command for current channel to ucode */
1614*4882a593Smuzhiyun return il->ops->send_tx_power(il);
1615*4882a593Smuzhiyun }
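/*
 * Compensation example (sign convention per the += above, where a larger
 * idx means less output power): if il3945_hw_reg_adjust_power_by_temp()
 * returns +4 for the current/factory temperature pair, every rate's
 * base_power_idx is shifted four half-dB steps toward lower power, clamped
 * to the table range, and the resulting gain/attenuation pair is looked up
 * in power_gain_table[].
 */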
1616*4882a593Smuzhiyun
1617*4882a593Smuzhiyun int
il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
1619*4882a593Smuzhiyun {
1620*4882a593Smuzhiyun struct il_channel_info *ch_info;
1621*4882a593Smuzhiyun s8 max_power;
1622*4882a593Smuzhiyun u8 i;
1623*4882a593Smuzhiyun
1624*4882a593Smuzhiyun if (il->tx_power_user_lmt == power) {
1625*4882a593Smuzhiyun D_POWER("Requested Tx power same as current " "limit: %ddBm.\n",
1626*4882a593Smuzhiyun power);
1627*4882a593Smuzhiyun return 0;
1628*4882a593Smuzhiyun }
1629*4882a593Smuzhiyun
1630*4882a593Smuzhiyun D_POWER("Setting upper limit clamp to %ddBm.\n", power);
1631*4882a593Smuzhiyun il->tx_power_user_lmt = power;
1632*4882a593Smuzhiyun
1633*4882a593Smuzhiyun /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1634*4882a593Smuzhiyun
1635*4882a593Smuzhiyun for (i = 0; i < il->channel_count; i++) {
1636*4882a593Smuzhiyun ch_info = &il->channel_info[i];
1637*4882a593Smuzhiyun
1638*4882a593Smuzhiyun /* find minimum power of all user and regulatory constraints
1639*4882a593Smuzhiyun * (does not consider h/w clipping limitations) */
1640*4882a593Smuzhiyun max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
1641*4882a593Smuzhiyun max_power = min(power, max_power);
1642*4882a593Smuzhiyun if (max_power != ch_info->curr_txpow) {
1643*4882a593Smuzhiyun ch_info->curr_txpow = max_power;
1644*4882a593Smuzhiyun
1645*4882a593Smuzhiyun /* this considers the h/w clipping limitations */
1646*4882a593Smuzhiyun il3945_hw_reg_set_new_power(il, ch_info);
1647*4882a593Smuzhiyun }
1648*4882a593Smuzhiyun }
1649*4882a593Smuzhiyun
1650*4882a593Smuzhiyun /* update txpower settings for all channels,
1651*4882a593Smuzhiyun * send to NIC if associated. */
1652*4882a593Smuzhiyun il3945_is_temp_calib_needed(il);
1653*4882a593Smuzhiyun il3945_hw_reg_comp_txpower_temp(il);
1654*4882a593Smuzhiyun
1655*4882a593Smuzhiyun return 0;
1656*4882a593Smuzhiyun }
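/*
 * Example (values illustrative): on a channel whose regulatory/EEPROM limit
 * is 16 dBm, il3945_hw_reg_set_txpower(il, 14) sets curr_txpow to 14 dBm,
 * while il3945_hw_reg_set_txpower(il, 20) is clamped to 16 dBm for that
 * channel; in both cases the temperature-compensated tables are rebuilt and,
 * if associated, sent to the NIC.
 */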
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun static int
il3945_send_rxon_assoc(struct il_priv *il)
1660*4882a593Smuzhiyun {
1661*4882a593Smuzhiyun int rc = 0;
1662*4882a593Smuzhiyun struct il_rx_pkt *pkt;
1663*4882a593Smuzhiyun struct il3945_rxon_assoc_cmd rxon_assoc;
1664*4882a593Smuzhiyun struct il_host_cmd cmd = {
1665*4882a593Smuzhiyun .id = C_RXON_ASSOC,
1666*4882a593Smuzhiyun .len = sizeof(rxon_assoc),
1667*4882a593Smuzhiyun .flags = CMD_WANT_SKB,
1668*4882a593Smuzhiyun .data = &rxon_assoc,
1669*4882a593Smuzhiyun };
1670*4882a593Smuzhiyun const struct il_rxon_cmd *rxon1 = &il->staging;
1671*4882a593Smuzhiyun const struct il_rxon_cmd *rxon2 = &il->active;
1672*4882a593Smuzhiyun
1673*4882a593Smuzhiyun if (rxon1->flags == rxon2->flags &&
1674*4882a593Smuzhiyun rxon1->filter_flags == rxon2->filter_flags &&
1675*4882a593Smuzhiyun rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
1676*4882a593Smuzhiyun rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
1677*4882a593Smuzhiyun D_INFO("Using current RXON_ASSOC. Not resending.\n");
1678*4882a593Smuzhiyun return 0;
1679*4882a593Smuzhiyun }
1680*4882a593Smuzhiyun
1681*4882a593Smuzhiyun rxon_assoc.flags = il->staging.flags;
1682*4882a593Smuzhiyun rxon_assoc.filter_flags = il->staging.filter_flags;
1683*4882a593Smuzhiyun rxon_assoc.ofdm_basic_rates = il->staging.ofdm_basic_rates;
1684*4882a593Smuzhiyun rxon_assoc.cck_basic_rates = il->staging.cck_basic_rates;
1685*4882a593Smuzhiyun rxon_assoc.reserved = 0;
1686*4882a593Smuzhiyun
1687*4882a593Smuzhiyun rc = il_send_cmd_sync(il, &cmd);
1688*4882a593Smuzhiyun if (rc)
1689*4882a593Smuzhiyun return rc;
1690*4882a593Smuzhiyun
1691*4882a593Smuzhiyun pkt = (struct il_rx_pkt *)cmd.reply_page;
1692*4882a593Smuzhiyun if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
1693*4882a593Smuzhiyun IL_ERR("Bad return from C_RXON_ASSOC command\n");
1694*4882a593Smuzhiyun rc = -EIO;
1695*4882a593Smuzhiyun }
1696*4882a593Smuzhiyun
1697*4882a593Smuzhiyun il_free_pages(il, cmd.reply_page);
1698*4882a593Smuzhiyun
1699*4882a593Smuzhiyun return rc;
1700*4882a593Smuzhiyun }
1701*4882a593Smuzhiyun
1702*4882a593Smuzhiyun /*
1703*4882a593Smuzhiyun * il3945_commit_rxon - commit staging_rxon to hardware
1704*4882a593Smuzhiyun *
1705*4882a593Smuzhiyun * The RXON command in staging_rxon is committed to the hardware and
1706*4882a593Smuzhiyun * the active_rxon structure is updated with the new data. This
1707*4882a593Smuzhiyun * function correctly transitions out of the RXON_ASSOC_MSK state if
1708*4882a593Smuzhiyun * a HW tune is required based on the RXON structure changes.
1709*4882a593Smuzhiyun */
1710*4882a593Smuzhiyun int
il3945_commit_rxon(struct il_priv *il)
1712*4882a593Smuzhiyun {
1713*4882a593Smuzhiyun /* cast away the const for active_rxon in this function */
1714*4882a593Smuzhiyun struct il3945_rxon_cmd *active_rxon = (void *)&il->active;
1715*4882a593Smuzhiyun struct il3945_rxon_cmd *staging_rxon = (void *)&il->staging;
1716*4882a593Smuzhiyun int rc = 0;
1717*4882a593Smuzhiyun bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
1718*4882a593Smuzhiyun
1719*4882a593Smuzhiyun if (test_bit(S_EXIT_PENDING, &il->status))
1720*4882a593Smuzhiyun return -EINVAL;
1721*4882a593Smuzhiyun
1722*4882a593Smuzhiyun if (!il_is_alive(il))
1723*4882a593Smuzhiyun return -1;
1724*4882a593Smuzhiyun
1725*4882a593Smuzhiyun /* always get timestamp with Rx frame */
1726*4882a593Smuzhiyun staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
1727*4882a593Smuzhiyun
1728*4882a593Smuzhiyun /* select antenna */
1729*4882a593Smuzhiyun staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1730*4882a593Smuzhiyun staging_rxon->flags |= il3945_get_antenna_flags(il);
1731*4882a593Smuzhiyun
1732*4882a593Smuzhiyun rc = il_check_rxon_cmd(il);
1733*4882a593Smuzhiyun if (rc) {
1734*4882a593Smuzhiyun IL_ERR("Invalid RXON configuration. Not committing.\n");
1735*4882a593Smuzhiyun return -EINVAL;
1736*4882a593Smuzhiyun }
1737*4882a593Smuzhiyun
1738*4882a593Smuzhiyun /* If we don't need to send a full RXON, we can use
1739*4882a593Smuzhiyun * il3945_rxon_assoc_cmd which is used to reconfigure filter
1740*4882a593Smuzhiyun * and other flags for the current radio configuration. */
1741*4882a593Smuzhiyun if (!il_full_rxon_required(il)) {
1742*4882a593Smuzhiyun rc = il_send_rxon_assoc(il);
1743*4882a593Smuzhiyun if (rc) {
1744*4882a593Smuzhiyun IL_ERR("Error setting RXON_ASSOC "
1745*4882a593Smuzhiyun "configuration (%d).\n", rc);
1746*4882a593Smuzhiyun return rc;
1747*4882a593Smuzhiyun }
1748*4882a593Smuzhiyun
1749*4882a593Smuzhiyun memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1750*4882a593Smuzhiyun /*
1751*4882a593Smuzhiyun * We do not commit tx power settings while channel changing,
1752*4882a593Smuzhiyun * do it now if tx power changed.
1753*4882a593Smuzhiyun */
1754*4882a593Smuzhiyun il_set_tx_power(il, il->tx_power_next, false);
1755*4882a593Smuzhiyun return 0;
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun /* If we are currently associated and the new config requires
1759*4882a593Smuzhiyun * an RXON_ASSOC and the new config wants the associated mask enabled,
 * we must clear the associated bit from the active configuration
1761*4882a593Smuzhiyun * before we apply the new config */
1762*4882a593Smuzhiyun if (il_is_associated(il) && new_assoc) {
1763*4882a593Smuzhiyun D_INFO("Toggling associated bit on current RXON\n");
1764*4882a593Smuzhiyun active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1765*4882a593Smuzhiyun
1766*4882a593Smuzhiyun /*
1767*4882a593Smuzhiyun * reserved4 and 5 could have been filled by the iwlcore code.
1768*4882a593Smuzhiyun * Let's clear them before pushing to the 3945.
1769*4882a593Smuzhiyun */
1770*4882a593Smuzhiyun active_rxon->reserved4 = 0;
1771*4882a593Smuzhiyun active_rxon->reserved5 = 0;
1772*4882a593Smuzhiyun rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
1773*4882a593Smuzhiyun &il->active);
1774*4882a593Smuzhiyun
1775*4882a593Smuzhiyun /* If the mask clearing failed then we set
1776*4882a593Smuzhiyun * active_rxon back to what it was previously */
1777*4882a593Smuzhiyun if (rc) {
1778*4882a593Smuzhiyun active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1779*4882a593Smuzhiyun IL_ERR("Error clearing ASSOC_MSK on current "
1780*4882a593Smuzhiyun "configuration (%d).\n", rc);
1781*4882a593Smuzhiyun return rc;
1782*4882a593Smuzhiyun }
1783*4882a593Smuzhiyun il_clear_ucode_stations(il);
1784*4882a593Smuzhiyun il_restore_stations(il);
1785*4882a593Smuzhiyun }
1786*4882a593Smuzhiyun
1787*4882a593Smuzhiyun D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
1788*4882a593Smuzhiyun "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
1789*4882a593Smuzhiyun le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr);
1790*4882a593Smuzhiyun
1791*4882a593Smuzhiyun /*
1792*4882a593Smuzhiyun * reserved4 and 5 could have been filled by the iwlcore code.
1793*4882a593Smuzhiyun * Let's clear them before pushing to the 3945.
1794*4882a593Smuzhiyun */
1795*4882a593Smuzhiyun staging_rxon->reserved4 = 0;
1796*4882a593Smuzhiyun staging_rxon->reserved5 = 0;
1797*4882a593Smuzhiyun
1798*4882a593Smuzhiyun il_set_rxon_hwcrypto(il, !il3945_mod_params.sw_crypto);
1799*4882a593Smuzhiyun
1800*4882a593Smuzhiyun /* Apply the new configuration */
1801*4882a593Smuzhiyun rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
1802*4882a593Smuzhiyun staging_rxon);
1803*4882a593Smuzhiyun if (rc) {
1804*4882a593Smuzhiyun IL_ERR("Error setting new configuration (%d).\n", rc);
1805*4882a593Smuzhiyun return rc;
1806*4882a593Smuzhiyun }
1807*4882a593Smuzhiyun
1808*4882a593Smuzhiyun memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1809*4882a593Smuzhiyun
1810*4882a593Smuzhiyun if (!new_assoc) {
1811*4882a593Smuzhiyun il_clear_ucode_stations(il);
1812*4882a593Smuzhiyun il_restore_stations(il);
1813*4882a593Smuzhiyun }
1814*4882a593Smuzhiyun
1815*4882a593Smuzhiyun /* If we issue a new RXON command which required a tune then we must
1816*4882a593Smuzhiyun * send a new TXPOWER command or we won't be able to Tx any frames */
1817*4882a593Smuzhiyun rc = il_set_tx_power(il, il->tx_power_next, true);
1818*4882a593Smuzhiyun if (rc) {
1819*4882a593Smuzhiyun IL_ERR("Error setting Tx power (%d).\n", rc);
1820*4882a593Smuzhiyun return rc;
1821*4882a593Smuzhiyun }
1822*4882a593Smuzhiyun
1823*4882a593Smuzhiyun /* Init the hardware's rate fallback order based on the band */
1824*4882a593Smuzhiyun rc = il3945_init_hw_rate_table(il);
1825*4882a593Smuzhiyun if (rc) {
1826*4882a593Smuzhiyun IL_ERR("Error setting HW rate table: %02X\n", rc);
1827*4882a593Smuzhiyun return -EIO;
1828*4882a593Smuzhiyun }
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyun return 0;
1831*4882a593Smuzhiyun }
1832*4882a593Smuzhiyun
1833*4882a593Smuzhiyun /*
1834*4882a593Smuzhiyun * il3945_reg_txpower_periodic - called when time to check our temperature.
1835*4882a593Smuzhiyun *
1836*4882a593Smuzhiyun * -- reset periodic timer
1837*4882a593Smuzhiyun * -- see if temp has changed enough to warrant re-calibration ... if so:
1838*4882a593Smuzhiyun * -- correct coeffs for temp (can reset temp timer)
1839*4882a593Smuzhiyun * -- save this temp as "last",
1840*4882a593Smuzhiyun * -- send new set of gain settings to NIC
1841*4882a593Smuzhiyun * NOTE: This should continue working, even when we're not associated,
1842*4882a593Smuzhiyun * so we can keep our internal table of scan powers current. */
1843*4882a593Smuzhiyun void
il3945_reg_txpower_periodic(struct il_priv *il)
1845*4882a593Smuzhiyun {
1846*4882a593Smuzhiyun /* This will kick in the "brute force"
1847*4882a593Smuzhiyun * il3945_hw_reg_comp_txpower_temp() below */
1848*4882a593Smuzhiyun if (!il3945_is_temp_calib_needed(il))
1849*4882a593Smuzhiyun goto reschedule;
1850*4882a593Smuzhiyun
1851*4882a593Smuzhiyun /* Set up a new set of temp-adjusted TxPowers, send to NIC.
1852*4882a593Smuzhiyun * This is based *only* on current temperature,
1853*4882a593Smuzhiyun * ignoring any previous power measurements */
1854*4882a593Smuzhiyun il3945_hw_reg_comp_txpower_temp(il);
1855*4882a593Smuzhiyun
1856*4882a593Smuzhiyun reschedule:
1857*4882a593Smuzhiyun queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
1858*4882a593Smuzhiyun REG_RECALIB_PERIOD * HZ);
1859*4882a593Smuzhiyun }
1860*4882a593Smuzhiyun
1861*4882a593Smuzhiyun static void
il3945_bg_reg_txpower_periodic(struct work_struct *work)
1863*4882a593Smuzhiyun {
1864*4882a593Smuzhiyun struct il_priv *il = container_of(work, struct il_priv,
1865*4882a593Smuzhiyun _3945.thermal_periodic.work);
1866*4882a593Smuzhiyun
1867*4882a593Smuzhiyun mutex_lock(&il->mutex);
1868*4882a593Smuzhiyun if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL)
1869*4882a593Smuzhiyun goto out;
1870*4882a593Smuzhiyun
1871*4882a593Smuzhiyun il3945_reg_txpower_periodic(il);
1872*4882a593Smuzhiyun out:
1873*4882a593Smuzhiyun mutex_unlock(&il->mutex);
1874*4882a593Smuzhiyun }
1875*4882a593Smuzhiyun
1876*4882a593Smuzhiyun /*
1877*4882a593Smuzhiyun * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
1878*4882a593Smuzhiyun *
1879*4882a593Smuzhiyun * This function is used when initializing channel-info structs.
1880*4882a593Smuzhiyun *
1881*4882a593Smuzhiyun * NOTE: These channel groups do *NOT* match the bands above!
1882*4882a593Smuzhiyun * These channel groups are based on factory-tested channels;
1883*4882a593Smuzhiyun * on A-band, EEPROM's "group frequency" entries represent the top
 * channel in each group 1-4. All B/G channels are in group 0.
1885*4882a593Smuzhiyun */
1886*4882a593Smuzhiyun static u16
il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
1888*4882a593Smuzhiyun const struct il_channel_info *ch_info)
1889*4882a593Smuzhiyun {
1890*4882a593Smuzhiyun struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1891*4882a593Smuzhiyun struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
1892*4882a593Smuzhiyun u8 group;
1893*4882a593Smuzhiyun u16 group_idx = 0; /* based on factory calib frequencies */
1894*4882a593Smuzhiyun u8 grp_channel;
1895*4882a593Smuzhiyun
	/* Find the group idx for the channel; 5 GHz channels search groups
	 * 1-4, while 2.4 GHz channels always map to group 0 */
1897*4882a593Smuzhiyun if (il_is_channel_a_band(ch_info)) {
1898*4882a593Smuzhiyun for (group = 1; group < 5; group++) {
1899*4882a593Smuzhiyun grp_channel = ch_grp[group].group_channel;
1900*4882a593Smuzhiyun if (ch_info->channel <= grp_channel) {
1901*4882a593Smuzhiyun group_idx = group;
1902*4882a593Smuzhiyun break;
1903*4882a593Smuzhiyun }
1904*4882a593Smuzhiyun }
1905*4882a593Smuzhiyun /* group 4 has a few channels *above* its factory cal freq */
1906*4882a593Smuzhiyun if (group == 5)
1907*4882a593Smuzhiyun group_idx = 4;
1908*4882a593Smuzhiyun } else
1909*4882a593Smuzhiyun group_idx = 0; /* 2.4 GHz, group 0 */
1910*4882a593Smuzhiyun
1911*4882a593Smuzhiyun D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
1912*4882a593Smuzhiyun return group_idx;
1913*4882a593Smuzhiyun }
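/*
 * Mapping example (boundary values made up, not from a real EEPROM): with
 * group_channel boundaries of 43, 70, 124 and 193 in EEPROM groups 1-4, a
 * 5 GHz channel 64 falls into group 2, the first group whose boundary is
 * >= 64; a channel above the group-4 boundary still lands in group 4, and
 * every 2.4 GHz channel maps to group 0.
 */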
1914*4882a593Smuzhiyun
1915*4882a593Smuzhiyun /*
1916*4882a593Smuzhiyun * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
1917*4882a593Smuzhiyun *
1918*4882a593Smuzhiyun * Interpolate to get nominal (i.e. at factory calibration temperature) idx
1919*4882a593Smuzhiyun * into radio/DSP gain settings table for requested power.
1920*4882a593Smuzhiyun */
1921*4882a593Smuzhiyun static int
il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
1923*4882a593Smuzhiyun s32 setting_idx, s32 *new_idx)
1924*4882a593Smuzhiyun {
1925*4882a593Smuzhiyun const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
1926*4882a593Smuzhiyun struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1927*4882a593Smuzhiyun s32 idx0, idx1;
1928*4882a593Smuzhiyun s32 power = 2 * requested_power;
1929*4882a593Smuzhiyun s32 i;
1930*4882a593Smuzhiyun const struct il3945_eeprom_txpower_sample *samples;
1931*4882a593Smuzhiyun s32 gains0, gains1;
1932*4882a593Smuzhiyun s32 res;
1933*4882a593Smuzhiyun s32 denominator;
1934*4882a593Smuzhiyun
1935*4882a593Smuzhiyun chnl_grp = &eeprom->groups[setting_idx];
1936*4882a593Smuzhiyun samples = chnl_grp->samples;
1937*4882a593Smuzhiyun for (i = 0; i < 5; i++) {
1938*4882a593Smuzhiyun if (power == samples[i].power) {
1939*4882a593Smuzhiyun *new_idx = samples[i].gain_idx;
1940*4882a593Smuzhiyun return 0;
1941*4882a593Smuzhiyun }
1942*4882a593Smuzhiyun }
1943*4882a593Smuzhiyun
1944*4882a593Smuzhiyun if (power > samples[1].power) {
1945*4882a593Smuzhiyun idx0 = 0;
1946*4882a593Smuzhiyun idx1 = 1;
1947*4882a593Smuzhiyun } else if (power > samples[2].power) {
1948*4882a593Smuzhiyun idx0 = 1;
1949*4882a593Smuzhiyun idx1 = 2;
1950*4882a593Smuzhiyun } else if (power > samples[3].power) {
1951*4882a593Smuzhiyun idx0 = 2;
1952*4882a593Smuzhiyun idx1 = 3;
1953*4882a593Smuzhiyun } else {
1954*4882a593Smuzhiyun idx0 = 3;
1955*4882a593Smuzhiyun idx1 = 4;
1956*4882a593Smuzhiyun }
1957*4882a593Smuzhiyun
1958*4882a593Smuzhiyun denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
1959*4882a593Smuzhiyun if (denominator == 0)
1960*4882a593Smuzhiyun return -EINVAL;
1961*4882a593Smuzhiyun gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
1962*4882a593Smuzhiyun gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
1963*4882a593Smuzhiyun res =
1964*4882a593Smuzhiyun gains0 + (gains1 - gains0) * ((s32) power -
1965*4882a593Smuzhiyun (s32) samples[idx0].power) /
1966*4882a593Smuzhiyun denominator + (1 << 18);
1967*4882a593Smuzhiyun *new_idx = res >> 19;
1968*4882a593Smuzhiyun return 0;
1969*4882a593Smuzhiyun }
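/*
 * The #if 0 block below is an illustrative, self-contained sketch of the
 * fixed-point interpolation performed above; the sample points and the
 * helper name are made up for the example and are not part of the driver.
 * For requested_power = 12 dBm (power = 24 half-dBm units) it returns 28,
 * halfway between the two sampled gain idxs, with the (1 << 18) term
 * rounding to the nearest entry before the >> 19.
 */
#if 0
static s32
il3945_example_interp_gain_idx(void)
{
	s32 p0 = 28, g0 = 24;	/* sample idx0: power, gain_idx */
	s32 p1 = 20, g1 = 32;	/* sample idx1: power, gain_idx */
	s32 power = 24;		/* 2 * requested_power */
	s32 gains0 = g0 * (1 << 19);
	s32 gains1 = g1 * (1 << 19);

	return (gains0 + (gains1 - gains0) * (power - p0) / (p1 - p0) +
		(1 << 18)) >> 19;
}
#endif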
1970*4882a593Smuzhiyun
1971*4882a593Smuzhiyun static void
il3945_hw_reg_init_channel_groups(struct il_priv *il)
1973*4882a593Smuzhiyun {
1974*4882a593Smuzhiyun u32 i;
1975*4882a593Smuzhiyun s32 rate_idx;
1976*4882a593Smuzhiyun struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1977*4882a593Smuzhiyun const struct il3945_eeprom_txpower_group *group;
1978*4882a593Smuzhiyun
1979*4882a593Smuzhiyun D_POWER("Initializing factory calib info from EEPROM\n");
1980*4882a593Smuzhiyun
1981*4882a593Smuzhiyun for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
1982*4882a593Smuzhiyun s8 *clip_pwrs; /* table of power levels for each rate */
1983*4882a593Smuzhiyun s8 satur_pwr; /* saturation power for each chnl group */
1984*4882a593Smuzhiyun group = &eeprom->groups[i];
1985*4882a593Smuzhiyun
1986*4882a593Smuzhiyun /* sanity check on factory saturation power value */
1987*4882a593Smuzhiyun if (group->saturation_power < 40) {
1988*4882a593Smuzhiyun IL_WARN("Error: saturation power is %d, "
1989*4882a593Smuzhiyun "less than minimum expected 40\n",
1990*4882a593Smuzhiyun group->saturation_power);
1991*4882a593Smuzhiyun return;
1992*4882a593Smuzhiyun }
1993*4882a593Smuzhiyun
1994*4882a593Smuzhiyun /*
1995*4882a593Smuzhiyun * Derive requested power levels for each rate, based on
1996*4882a593Smuzhiyun * hardware capabilities (saturation power for band).
1997*4882a593Smuzhiyun * Basic value is 3dB down from saturation, with further
1998*4882a593Smuzhiyun * power reductions for highest 3 data rates. These
1999*4882a593Smuzhiyun * backoffs provide headroom for high rate modulation
2000*4882a593Smuzhiyun * power peaks, without too much distortion (clipping).
2001*4882a593Smuzhiyun */
2002*4882a593Smuzhiyun /* we'll fill in this array with h/w max power levels */
2003*4882a593Smuzhiyun clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
2004*4882a593Smuzhiyun
2005*4882a593Smuzhiyun /* divide factory saturation power by 2 to find -3dB level */
2006*4882a593Smuzhiyun satur_pwr = (s8) (group->saturation_power >> 1);
2007*4882a593Smuzhiyun
2008*4882a593Smuzhiyun /* fill in channel group's nominal powers for each rate */
2009*4882a593Smuzhiyun for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
2010*4882a593Smuzhiyun rate_idx++, clip_pwrs++) {
2011*4882a593Smuzhiyun switch (rate_idx) {
2012*4882a593Smuzhiyun case RATE_36M_IDX_TBL:
2013*4882a593Smuzhiyun if (i == 0) /* B/G */
2014*4882a593Smuzhiyun *clip_pwrs = satur_pwr;
2015*4882a593Smuzhiyun else /* A */
2016*4882a593Smuzhiyun *clip_pwrs = satur_pwr - 5;
2017*4882a593Smuzhiyun break;
2018*4882a593Smuzhiyun case RATE_48M_IDX_TBL:
2019*4882a593Smuzhiyun if (i == 0)
2020*4882a593Smuzhiyun *clip_pwrs = satur_pwr - 7;
2021*4882a593Smuzhiyun else
2022*4882a593Smuzhiyun *clip_pwrs = satur_pwr - 10;
2023*4882a593Smuzhiyun break;
2024*4882a593Smuzhiyun case RATE_54M_IDX_TBL:
2025*4882a593Smuzhiyun if (i == 0)
2026*4882a593Smuzhiyun *clip_pwrs = satur_pwr - 9;
2027*4882a593Smuzhiyun else
2028*4882a593Smuzhiyun *clip_pwrs = satur_pwr - 12;
2029*4882a593Smuzhiyun break;
2030*4882a593Smuzhiyun default:
2031*4882a593Smuzhiyun *clip_pwrs = satur_pwr;
2032*4882a593Smuzhiyun break;
2033*4882a593Smuzhiyun }
2034*4882a593Smuzhiyun }
2035*4882a593Smuzhiyun }
2036*4882a593Smuzhiyun }
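/*
 * Backoff example for the loop above: a factory saturation_power of 56 gives
 * satur_pwr = 28; the B/G group (i == 0) then gets clip powers of 28 for
 * 36 Mbit and all lower rates, 21 for 48 Mbit and 19 for 54 Mbit, while the
 * 5 GHz groups back the top three rates off by 5, 10 and 12 instead.
 */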
2037*4882a593Smuzhiyun
2038*4882a593Smuzhiyun /*
2039*4882a593Smuzhiyun * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
2040*4882a593Smuzhiyun *
2041*4882a593Smuzhiyun * Second pass (during init) to set up il->channel_info
2042*4882a593Smuzhiyun *
2043*4882a593Smuzhiyun * Set up Tx-power settings in our channel info database for each VALID
2044*4882a593Smuzhiyun * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
2045*4882a593Smuzhiyun * and current temperature.
2046*4882a593Smuzhiyun *
2047*4882a593Smuzhiyun * Since this is based on current temperature (at init time), these values may
2048*4882a593Smuzhiyun * not be valid for very long, but it gives us a starting/default point,
 * and allows us to perform active (i.e. Tx) scans.
2050*4882a593Smuzhiyun *
2051*4882a593Smuzhiyun * This does *not* write values to NIC, just sets up our internal table.
2052*4882a593Smuzhiyun */
2053*4882a593Smuzhiyun int
il3945_txpower_set_from_eeprom(struct il_priv *il)
2055*4882a593Smuzhiyun {
2056*4882a593Smuzhiyun struct il_channel_info *ch_info = NULL;
2057*4882a593Smuzhiyun struct il3945_channel_power_info *pwr_info;
2058*4882a593Smuzhiyun struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
2059*4882a593Smuzhiyun int delta_idx;
2060*4882a593Smuzhiyun u8 rate_idx;
2061*4882a593Smuzhiyun u8 scan_tbl_idx;
2062*4882a593Smuzhiyun const s8 *clip_pwrs; /* array of power levels for each rate */
2063*4882a593Smuzhiyun u8 gain, dsp_atten;
2064*4882a593Smuzhiyun s8 power;
2065*4882a593Smuzhiyun u8 pwr_idx, base_pwr_idx, a_band;
2066*4882a593Smuzhiyun u8 i;
2067*4882a593Smuzhiyun int temperature;
2068*4882a593Smuzhiyun
2069*4882a593Smuzhiyun /* save temperature reference,
2070*4882a593Smuzhiyun * so we can determine next time to calibrate */
2071*4882a593Smuzhiyun temperature = il3945_hw_reg_txpower_get_temperature(il);
2072*4882a593Smuzhiyun il->last_temperature = temperature;
2073*4882a593Smuzhiyun
2074*4882a593Smuzhiyun il3945_hw_reg_init_channel_groups(il);
2075*4882a593Smuzhiyun
2076*4882a593Smuzhiyun /* initialize Tx power info for each and every channel, 2.4 and 5.x */
2077*4882a593Smuzhiyun for (i = 0, ch_info = il->channel_info; i < il->channel_count;
2078*4882a593Smuzhiyun i++, ch_info++) {
2079*4882a593Smuzhiyun a_band = il_is_channel_a_band(ch_info);
2080*4882a593Smuzhiyun if (!il_is_channel_valid(ch_info))
2081*4882a593Smuzhiyun continue;
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun /* find this channel's channel group (*not* "band") idx */
2084*4882a593Smuzhiyun ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);
2085*4882a593Smuzhiyun
2086*4882a593Smuzhiyun /* Get this chnlgrp's rate->max/clip-powers table */
2087*4882a593Smuzhiyun clip_pwrs =
2088*4882a593Smuzhiyun il->_3945.clip_groups[ch_info->group_idx].clip_powers;
2089*4882a593Smuzhiyun
2090*4882a593Smuzhiyun /* calculate power idx *adjustment* value according to
2091*4882a593Smuzhiyun * diff between current temperature and factory temperature */
2092*4882a593Smuzhiyun delta_idx =
2093*4882a593Smuzhiyun il3945_hw_reg_adjust_power_by_temp(temperature,
2094*4882a593Smuzhiyun eeprom->groups[ch_info->
2095*4882a593Smuzhiyun group_idx].
2096*4882a593Smuzhiyun temperature);
2097*4882a593Smuzhiyun
2098*4882a593Smuzhiyun D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
2099*4882a593Smuzhiyun delta_idx, temperature + IL_TEMP_CONVERT);
2100*4882a593Smuzhiyun
2101*4882a593Smuzhiyun /* set tx power value for all OFDM rates */
2102*4882a593Smuzhiyun for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
2103*4882a593Smuzhiyun s32 power_idx;
2104*4882a593Smuzhiyun int rc;
2105*4882a593Smuzhiyun
2106*4882a593Smuzhiyun /* use channel group's clip-power table,
2107*4882a593Smuzhiyun * but don't exceed channel's max power */
2108*4882a593Smuzhiyun s8 pwr = min(ch_info->max_power_avg,
2109*4882a593Smuzhiyun clip_pwrs[rate_idx]);
2110*4882a593Smuzhiyun
2111*4882a593Smuzhiyun pwr_info = &ch_info->power_info[rate_idx];
2112*4882a593Smuzhiyun
2113*4882a593Smuzhiyun /* get base (i.e. at factory-measured temperature)
2114*4882a593Smuzhiyun * power table idx for this rate's power */
2115*4882a593Smuzhiyun rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
2116*4882a593Smuzhiyun ch_info->
2117*4882a593Smuzhiyun group_idx,
2118*4882a593Smuzhiyun &power_idx);
2119*4882a593Smuzhiyun if (rc) {
2120*4882a593Smuzhiyun IL_ERR("Invalid power idx\n");
2121*4882a593Smuzhiyun return rc;
2122*4882a593Smuzhiyun }
2123*4882a593Smuzhiyun pwr_info->base_power_idx = (u8) power_idx;
2124*4882a593Smuzhiyun
2125*4882a593Smuzhiyun /* temperature compensate */
2126*4882a593Smuzhiyun power_idx += delta_idx;
2127*4882a593Smuzhiyun
2128*4882a593Smuzhiyun /* stay within range of gain table */
2129*4882a593Smuzhiyun power_idx = il3945_hw_reg_fix_power_idx(power_idx);
2130*4882a593Smuzhiyun
2131*4882a593Smuzhiyun /* fill 1 OFDM rate's il3945_channel_power_info struct */
2132*4882a593Smuzhiyun pwr_info->requested_power = pwr;
2133*4882a593Smuzhiyun pwr_info->power_table_idx = (u8) power_idx;
2134*4882a593Smuzhiyun pwr_info->tpc.tx_gain =
2135*4882a593Smuzhiyun power_gain_table[a_band][power_idx].tx_gain;
2136*4882a593Smuzhiyun pwr_info->tpc.dsp_atten =
2137*4882a593Smuzhiyun power_gain_table[a_band][power_idx].dsp_atten;
2138*4882a593Smuzhiyun }
2139*4882a593Smuzhiyun
2140*4882a593Smuzhiyun /* set tx power for CCK rates, based on OFDM 12 Mbit settings */
2141*4882a593Smuzhiyun pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
2142*4882a593Smuzhiyun power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
2143*4882a593Smuzhiyun pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
2144*4882a593Smuzhiyun base_pwr_idx =
2145*4882a593Smuzhiyun pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
2146*4882a593Smuzhiyun
2147*4882a593Smuzhiyun /* stay within table range */
2148*4882a593Smuzhiyun pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
2149*4882a593Smuzhiyun gain = power_gain_table[a_band][pwr_idx].tx_gain;
2150*4882a593Smuzhiyun dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;
2151*4882a593Smuzhiyun
2152*4882a593Smuzhiyun /* fill each CCK rate's il3945_channel_power_info structure
2153*4882a593Smuzhiyun * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
2154*4882a593Smuzhiyun * NOTE: CCK rates start at end of OFDM rates! */
2155*4882a593Smuzhiyun for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
2156*4882a593Smuzhiyun pwr_info =
2157*4882a593Smuzhiyun &ch_info->power_info[rate_idx + IL_OFDM_RATES];
2158*4882a593Smuzhiyun pwr_info->requested_power = power;
2159*4882a593Smuzhiyun pwr_info->power_table_idx = pwr_idx;
2160*4882a593Smuzhiyun pwr_info->base_power_idx = base_pwr_idx;
2161*4882a593Smuzhiyun pwr_info->tpc.tx_gain = gain;
2162*4882a593Smuzhiyun pwr_info->tpc.dsp_atten = dsp_atten;
2163*4882a593Smuzhiyun }
2164*4882a593Smuzhiyun
2165*4882a593Smuzhiyun /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
2166*4882a593Smuzhiyun for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
2167*4882a593Smuzhiyun scan_tbl_idx++) {
2168*4882a593Smuzhiyun s32 actual_idx =
2169*4882a593Smuzhiyun (scan_tbl_idx ==
2170*4882a593Smuzhiyun 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
2171*4882a593Smuzhiyun il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
2172*4882a593Smuzhiyun actual_idx, clip_pwrs,
2173*4882a593Smuzhiyun ch_info, a_band);
2174*4882a593Smuzhiyun }
2175*4882a593Smuzhiyun }
2176*4882a593Smuzhiyun
2177*4882a593Smuzhiyun return 0;
2178*4882a593Smuzhiyun }
2179*4882a593Smuzhiyun
2180*4882a593Smuzhiyun int
il3945_hw_rxq_stop(struct il_priv *il)
2182*4882a593Smuzhiyun {
2183*4882a593Smuzhiyun int ret;
2184*4882a593Smuzhiyun
2185*4882a593Smuzhiyun _il_wr(il, FH39_RCSR_CONFIG(0), 0);
2186*4882a593Smuzhiyun ret = _il_poll_bit(il, FH39_RSSR_STATUS,
2187*4882a593Smuzhiyun FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
2188*4882a593Smuzhiyun FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
2189*4882a593Smuzhiyun 1000);
2190*4882a593Smuzhiyun if (ret < 0)
2191*4882a593Smuzhiyun IL_ERR("Can't stop Rx DMA.\n");
2192*4882a593Smuzhiyun
2193*4882a593Smuzhiyun return 0;
2194*4882a593Smuzhiyun }
2195*4882a593Smuzhiyun
2196*4882a593Smuzhiyun int
il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
2198*4882a593Smuzhiyun {
2199*4882a593Smuzhiyun int txq_id = txq->q.id;
2200*4882a593Smuzhiyun
2201*4882a593Smuzhiyun struct il3945_shared *shared_data = il->_3945.shared_virt;
2202*4882a593Smuzhiyun
2203*4882a593Smuzhiyun shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
2204*4882a593Smuzhiyun
2205*4882a593Smuzhiyun il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
2206*4882a593Smuzhiyun il_wr(il, FH39_CBCC_BASE(txq_id), 0);
2207*4882a593Smuzhiyun
2208*4882a593Smuzhiyun il_wr(il, FH39_TCSR_CONFIG(txq_id),
2209*4882a593Smuzhiyun FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
2210*4882a593Smuzhiyun FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
2211*4882a593Smuzhiyun FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
2212*4882a593Smuzhiyun FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
2213*4882a593Smuzhiyun FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
2214*4882a593Smuzhiyun
2215*4882a593Smuzhiyun /* fake read to flush all prev. writes */
2216*4882a593Smuzhiyun _il_rd(il, FH39_TSSR_CBB_BASE);
2217*4882a593Smuzhiyun
2218*4882a593Smuzhiyun return 0;
2219*4882a593Smuzhiyun }
2220*4882a593Smuzhiyun
2221*4882a593Smuzhiyun /*
2222*4882a593Smuzhiyun * HCMD utils
2223*4882a593Smuzhiyun */
2224*4882a593Smuzhiyun static u16
il3945_get_hcmd_size(u8 cmd_id, u16 len)
2226*4882a593Smuzhiyun {
2227*4882a593Smuzhiyun switch (cmd_id) {
2228*4882a593Smuzhiyun case C_RXON:
2229*4882a593Smuzhiyun return sizeof(struct il3945_rxon_cmd);
2230*4882a593Smuzhiyun case C_POWER_TBL:
2231*4882a593Smuzhiyun return sizeof(struct il3945_powertable_cmd);
2232*4882a593Smuzhiyun default:
2233*4882a593Smuzhiyun return len;
2234*4882a593Smuzhiyun }
2235*4882a593Smuzhiyun }
2236*4882a593Smuzhiyun
2237*4882a593Smuzhiyun static u16
il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
2239*4882a593Smuzhiyun {
2240*4882a593Smuzhiyun struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
2241*4882a593Smuzhiyun addsta->mode = cmd->mode;
2242*4882a593Smuzhiyun memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
2243*4882a593Smuzhiyun memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
2244*4882a593Smuzhiyun addsta->station_flags = cmd->station_flags;
2245*4882a593Smuzhiyun addsta->station_flags_msk = cmd->station_flags_msk;
2246*4882a593Smuzhiyun addsta->tid_disable_tx = cpu_to_le16(0);
2247*4882a593Smuzhiyun addsta->rate_n_flags = cmd->rate_n_flags;
2248*4882a593Smuzhiyun addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
2249*4882a593Smuzhiyun addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2250*4882a593Smuzhiyun addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2251*4882a593Smuzhiyun
2252*4882a593Smuzhiyun return (u16) sizeof(struct il3945_addsta_cmd);
2253*4882a593Smuzhiyun }
2254*4882a593Smuzhiyun
2255*4882a593Smuzhiyun static int
il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
2257*4882a593Smuzhiyun {
2258*4882a593Smuzhiyun int ret;
2259*4882a593Smuzhiyun u8 sta_id;
2260*4882a593Smuzhiyun unsigned long flags;
2261*4882a593Smuzhiyun
2262*4882a593Smuzhiyun if (sta_id_r)
2263*4882a593Smuzhiyun *sta_id_r = IL_INVALID_STATION;
2264*4882a593Smuzhiyun
2265*4882a593Smuzhiyun ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
2266*4882a593Smuzhiyun if (ret) {
2267*4882a593Smuzhiyun IL_ERR("Unable to add station %pM\n", addr);
2268*4882a593Smuzhiyun return ret;
2269*4882a593Smuzhiyun }
2270*4882a593Smuzhiyun
2271*4882a593Smuzhiyun if (sta_id_r)
2272*4882a593Smuzhiyun *sta_id_r = sta_id;
2273*4882a593Smuzhiyun
2274*4882a593Smuzhiyun spin_lock_irqsave(&il->sta_lock, flags);
2275*4882a593Smuzhiyun il->stations[sta_id].used |= IL_STA_LOCAL;
2276*4882a593Smuzhiyun spin_unlock_irqrestore(&il->sta_lock, flags);
2277*4882a593Smuzhiyun
2278*4882a593Smuzhiyun return 0;
2279*4882a593Smuzhiyun }
2280*4882a593Smuzhiyun
2281*4882a593Smuzhiyun static int
il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
2283*4882a593Smuzhiyun bool add)
2284*4882a593Smuzhiyun {
2285*4882a593Smuzhiyun struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
2286*4882a593Smuzhiyun int ret;
2287*4882a593Smuzhiyun
2288*4882a593Smuzhiyun if (add) {
2289*4882a593Smuzhiyun ret =
2290*4882a593Smuzhiyun il3945_add_bssid_station(il, vif->bss_conf.bssid,
2291*4882a593Smuzhiyun &vif_priv->ibss_bssid_sta_id);
2292*4882a593Smuzhiyun if (ret)
2293*4882a593Smuzhiyun return ret;
2294*4882a593Smuzhiyun
2295*4882a593Smuzhiyun il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
2296*4882a593Smuzhiyun (il->band ==
2297*4882a593Smuzhiyun NL80211_BAND_5GHZ) ? RATE_6M_PLCP :
2298*4882a593Smuzhiyun RATE_1M_PLCP);
2299*4882a593Smuzhiyun il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
2300*4882a593Smuzhiyun
2301*4882a593Smuzhiyun return 0;
2302*4882a593Smuzhiyun }
2303*4882a593Smuzhiyun
2304*4882a593Smuzhiyun return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
2305*4882a593Smuzhiyun vif->bss_conf.bssid);
2306*4882a593Smuzhiyun }
2307*4882a593Smuzhiyun
2308*4882a593Smuzhiyun /*
2309*4882a593Smuzhiyun * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
2310*4882a593Smuzhiyun */
2311*4882a593Smuzhiyun int
2312*4882a593Smuzhiyun il3945_init_hw_rate_table(struct il_priv *il)
2313*4882a593Smuzhiyun {
2314*4882a593Smuzhiyun int rc, i, idx, prev_idx;
2315*4882a593Smuzhiyun struct il3945_rate_scaling_cmd rate_cmd = {
2316*4882a593Smuzhiyun .reserved = {0, 0, 0},
2317*4882a593Smuzhiyun };
2318*4882a593Smuzhiyun struct il3945_rate_scaling_info *table = rate_cmd.table;
2319*4882a593Smuzhiyun
2320*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
2321*4882a593Smuzhiyun idx = il3945_rates[i].table_rs_idx;
2322*4882a593Smuzhiyun
2323*4882a593Smuzhiyun table[idx].rate_n_flags = cpu_to_le16(il3945_rates[i].plcp);
2324*4882a593Smuzhiyun table[idx].try_cnt = il->retry_rate;
2325*4882a593Smuzhiyun prev_idx = il3945_get_prev_ieee_rate(i);
2326*4882a593Smuzhiyun table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
2327*4882a593Smuzhiyun }
2328*4882a593Smuzhiyun
2329*4882a593Smuzhiyun switch (il->band) {
2330*4882a593Smuzhiyun case NL80211_BAND_5GHZ:
2331*4882a593Smuzhiyun D_RATE("Select A mode rate scale\n");
2332*4882a593Smuzhiyun /* If one of the following CCK rates is used,
2333*4882a593Smuzhiyun * have it fall back to the 6M OFDM rate */
2334*4882a593Smuzhiyun for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
2335*4882a593Smuzhiyun table[i].next_rate_idx =
2336*4882a593Smuzhiyun il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
2337*4882a593Smuzhiyun
2338*4882a593Smuzhiyun /* Don't fall back to CCK rates */
2339*4882a593Smuzhiyun table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
2340*4882a593Smuzhiyun
2341*4882a593Smuzhiyun /* Don't drop out of OFDM rates */
2342*4882a593Smuzhiyun table[RATE_6M_IDX_TBL].next_rate_idx =
2343*4882a593Smuzhiyun il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
2344*4882a593Smuzhiyun break;
2345*4882a593Smuzhiyun
2346*4882a593Smuzhiyun case NL80211_BAND_2GHZ:
2347*4882a593Smuzhiyun D_RATE("Select B/G mode rate scale\n");
2348*4882a593Smuzhiyun /* If an OFDM rate is used, have it fall back to the
2349*4882a593Smuzhiyun * 1M CCK rates */
2350*4882a593Smuzhiyun
2351*4882a593Smuzhiyun if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
2352*4882a593Smuzhiyun il_is_associated(il)) {
2353*4882a593Smuzhiyun
2354*4882a593Smuzhiyun idx = IL_FIRST_CCK_RATE;
2355*4882a593Smuzhiyun for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
2356*4882a593Smuzhiyun table[i].next_rate_idx =
2357*4882a593Smuzhiyun il3945_rates[idx].table_rs_idx;
2358*4882a593Smuzhiyun
2359*4882a593Smuzhiyun idx = RATE_11M_IDX_TBL;
2360*4882a593Smuzhiyun /* CCK shouldn't fall back to OFDM... */
2361*4882a593Smuzhiyun table[idx].next_rate_idx = RATE_5M_IDX_TBL;
2362*4882a593Smuzhiyun }
2363*4882a593Smuzhiyun break;
2364*4882a593Smuzhiyun
2365*4882a593Smuzhiyun default:
2366*4882a593Smuzhiyun WARN_ON(1);
2367*4882a593Smuzhiyun break;
2368*4882a593Smuzhiyun }
2369*4882a593Smuzhiyun
2370*4882a593Smuzhiyun /* Update the rate scaling for control frame Tx */
2371*4882a593Smuzhiyun rate_cmd.table_id = 0;
2372*4882a593Smuzhiyun rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
2373*4882a593Smuzhiyun if (rc)
2374*4882a593Smuzhiyun return rc;
2375*4882a593Smuzhiyun
2376*4882a593Smuzhiyun /* Update the rate scaling for data frame Tx */
2377*4882a593Smuzhiyun rate_cmd.table_id = 1;
2378*4882a593Smuzhiyun return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
2379*4882a593Smuzhiyun }
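
/*
 * Illustrative sketch (not from the original file): the fallback table
 * built above is a linked chain through next_rate_idx.  A debug helper
 * walking that chain could look roughly like this; the helper name and
 * hop limit are made up.
 */
#if 0
static void
il3945_example_print_fallback_chain(struct il3945_rate_scaling_info *table,
				    int start_idx, int max_hops)
{
	int idx = start_idx;

	while (max_hops--) {
		D_RATE("tbl idx %d: plcp 0x%x, falls back to idx %d\n", idx,
		       le16_to_cpu(table[idx].rate_n_flags),
		       table[idx].next_rate_idx);
		idx = table[idx].next_rate_idx;
	}
}
#endif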
2380*4882a593Smuzhiyun
2381*4882a593Smuzhiyun /* Called when initializing driver */
2382*4882a593Smuzhiyun int
2383*4882a593Smuzhiyun il3945_hw_set_hw_params(struct il_priv *il)
2384*4882a593Smuzhiyun {
2385*4882a593Smuzhiyun memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params));
2386*4882a593Smuzhiyun
2387*4882a593Smuzhiyun il->_3945.shared_virt =
2388*4882a593Smuzhiyun dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
2389*4882a593Smuzhiyun &il->_3945.shared_phys, GFP_KERNEL);
2390*4882a593Smuzhiyun if (!il->_3945.shared_virt)
2391*4882a593Smuzhiyun return -ENOMEM;
2392*4882a593Smuzhiyun
2393*4882a593Smuzhiyun il->hw_params.bcast_id = IL3945_BROADCAST_ID;
2394*4882a593Smuzhiyun
2395*4882a593Smuzhiyun /* Assign number of Usable TX queues */
2396*4882a593Smuzhiyun il->hw_params.max_txq_num = il->cfg->num_of_queues;
2397*4882a593Smuzhiyun
2398*4882a593Smuzhiyun il->hw_params.tfd_size = sizeof(struct il3945_tfd);
2399*4882a593Smuzhiyun il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
2400*4882a593Smuzhiyun il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2401*4882a593Smuzhiyun il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2402*4882a593Smuzhiyun il->hw_params.max_stations = IL3945_STATION_COUNT;
2403*4882a593Smuzhiyun
2404*4882a593Smuzhiyun il->sta_key_max_num = STA_KEY_MAX_NUM;
2405*4882a593Smuzhiyun
2406*4882a593Smuzhiyun il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2407*4882a593Smuzhiyun il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
2408*4882a593Smuzhiyun il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;
2409*4882a593Smuzhiyun
2410*4882a593Smuzhiyun return 0;
2411*4882a593Smuzhiyun }
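
/*
 * Illustrative sketch (not from the original file): the shared area
 * allocated above needs a matching dma_free_coherent() on teardown.
 * The driver does this elsewhere; a minimal version would look roughly
 * like this (function name is a placeholder).
 */
#if 0
static void
il3945_example_free_shared(struct il_priv *il)
{
	if (il->_3945.shared_virt) {
		dma_free_coherent(&il->pci_dev->dev,
				  sizeof(struct il3945_shared),
				  il->_3945.shared_virt,
				  il->_3945.shared_phys);
		il->_3945.shared_virt = NULL;
	}
}
#endif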
2412*4882a593Smuzhiyun
2413*4882a593Smuzhiyun unsigned int
2414*4882a593Smuzhiyun il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
2415*4882a593Smuzhiyun u8 rate)
2416*4882a593Smuzhiyun {
2417*4882a593Smuzhiyun struct il3945_tx_beacon_cmd *tx_beacon_cmd;
2418*4882a593Smuzhiyun unsigned int frame_size;
2419*4882a593Smuzhiyun
2420*4882a593Smuzhiyun tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
2421*4882a593Smuzhiyun memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2422*4882a593Smuzhiyun
2423*4882a593Smuzhiyun tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
2424*4882a593Smuzhiyun tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2425*4882a593Smuzhiyun
2426*4882a593Smuzhiyun frame_size =
2427*4882a593Smuzhiyun il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
2428*4882a593Smuzhiyun sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2429*4882a593Smuzhiyun
2430*4882a593Smuzhiyun BUG_ON(frame_size > MAX_MPDU_SIZE);
2431*4882a593Smuzhiyun tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
2432*4882a593Smuzhiyun
2433*4882a593Smuzhiyun tx_beacon_cmd->tx.rate = rate;
2434*4882a593Smuzhiyun tx_beacon_cmd->tx.tx_flags =
2435*4882a593Smuzhiyun (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
2436*4882a593Smuzhiyun
2437*4882a593Smuzhiyun 	/* supp_rates[0] holds OFDM rates, shifted so bit 0 is IL_FIRST_OFDM_RATE */
2438*4882a593Smuzhiyun tx_beacon_cmd->tx.supp_rates[0] =
2439*4882a593Smuzhiyun (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
2440*4882a593Smuzhiyun
2441*4882a593Smuzhiyun tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);
2442*4882a593Smuzhiyun
2443*4882a593Smuzhiyun return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
2444*4882a593Smuzhiyun }
2445*4882a593Smuzhiyun
2446*4882a593Smuzhiyun void
2447*4882a593Smuzhiyun il3945_hw_handler_setup(struct il_priv *il)
2448*4882a593Smuzhiyun {
2449*4882a593Smuzhiyun il->handlers[C_TX] = il3945_hdl_tx;
2450*4882a593Smuzhiyun il->handlers[N_3945_RX] = il3945_hdl_rx;
2451*4882a593Smuzhiyun }
2452*4882a593Smuzhiyun
2453*4882a593Smuzhiyun void
2454*4882a593Smuzhiyun il3945_hw_setup_deferred_work(struct il_priv *il)
2455*4882a593Smuzhiyun {
2456*4882a593Smuzhiyun INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
2457*4882a593Smuzhiyun il3945_bg_reg_txpower_periodic);
2458*4882a593Smuzhiyun }
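
/*
 * Illustrative sketch (not from the original file): the thermal work
 * armed above is periodically (re)queued from the temperature handling
 * code; a minimal scheduling call would look like this.  The one-minute
 * period is a placeholder, not the driver's real interval.
 */
#if 0
static void
il3945_example_queue_thermal_work(struct il_priv *il)
{
	schedule_delayed_work(&il->_3945.thermal_periodic,
			      round_jiffies_relative(60 * HZ));
}
#endif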
2459*4882a593Smuzhiyun
2460*4882a593Smuzhiyun void
2461*4882a593Smuzhiyun il3945_hw_cancel_deferred_work(struct il_priv *il)
2462*4882a593Smuzhiyun {
2463*4882a593Smuzhiyun cancel_delayed_work(&il->_3945.thermal_periodic);
2464*4882a593Smuzhiyun }
2465*4882a593Smuzhiyun
2466*4882a593Smuzhiyun /* check contents of special bootstrap uCode SRAM */
2467*4882a593Smuzhiyun static int
2468*4882a593Smuzhiyun il3945_verify_bsm(struct il_priv *il)
2469*4882a593Smuzhiyun {
2470*4882a593Smuzhiyun __le32 *image = il->ucode_boot.v_addr;
2471*4882a593Smuzhiyun u32 len = il->ucode_boot.len;
2472*4882a593Smuzhiyun u32 reg;
2473*4882a593Smuzhiyun u32 val;
2474*4882a593Smuzhiyun
2475*4882a593Smuzhiyun D_INFO("Begin verify bsm\n");
2476*4882a593Smuzhiyun
2477*4882a593Smuzhiyun /* verify BSM SRAM contents */
2478*4882a593Smuzhiyun val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
2479*4882a593Smuzhiyun for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
2480*4882a593Smuzhiyun reg += sizeof(u32), image++) {
2481*4882a593Smuzhiyun val = il_rd_prph(il, reg);
2482*4882a593Smuzhiyun if (val != le32_to_cpu(*image)) {
2483*4882a593Smuzhiyun IL_ERR("BSM uCode verification failed at "
2484*4882a593Smuzhiyun "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
2485*4882a593Smuzhiyun BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
2486*4882a593Smuzhiyun len, val, le32_to_cpu(*image));
2487*4882a593Smuzhiyun return -EIO;
2488*4882a593Smuzhiyun }
2489*4882a593Smuzhiyun }
2490*4882a593Smuzhiyun
2491*4882a593Smuzhiyun D_INFO("BSM bootstrap uCode image OK\n");
2492*4882a593Smuzhiyun
2493*4882a593Smuzhiyun return 0;
2494*4882a593Smuzhiyun }
2495*4882a593Smuzhiyun
2496*4882a593Smuzhiyun /******************************************************************************
2497*4882a593Smuzhiyun *
2498*4882a593Smuzhiyun * EEPROM related functions
2499*4882a593Smuzhiyun *
2500*4882a593Smuzhiyun ******************************************************************************/
2501*4882a593Smuzhiyun
2502*4882a593Smuzhiyun /*
2503*4882a593Smuzhiyun * Clear the OWNER_MSK, to establish driver (instead of uCode running on
2504*4882a593Smuzhiyun * embedded controller) as EEPROM reader; each read is a series of pulses
2505*4882a593Smuzhiyun * to/from the EEPROM chip, not a single event, so even reads could conflict
2506*4882a593Smuzhiyun * if they weren't arbitrated by some ownership mechanism. Here, the driver
2507*4882a593Smuzhiyun * simply claims ownership, which should be safe when this function is called
2508*4882a593Smuzhiyun * (i.e. before loading uCode!).
2509*4882a593Smuzhiyun */
2510*4882a593Smuzhiyun static int
2511*4882a593Smuzhiyun il3945_eeprom_acquire_semaphore(struct il_priv *il)
2512*4882a593Smuzhiyun {
2513*4882a593Smuzhiyun _il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
2514*4882a593Smuzhiyun return 0;
2515*4882a593Smuzhiyun }
2516*4882a593Smuzhiyun
2517*4882a593Smuzhiyun static void
2518*4882a593Smuzhiyun il3945_eeprom_release_semaphore(struct il_priv *il)
2519*4882a593Smuzhiyun {
2520*4882a593Smuzhiyun return;
2521*4882a593Smuzhiyun }
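
/*
 * Illustrative sketch (not from the original file): on 3945 nothing has
 * to be handed back, so release is a no-op.  If ownership did have to be
 * returned to the embedded controller, the symmetric operation would be
 * setting the owner bit again, roughly as below (helper name is made up,
 * and _il_set_bit is assumed to mirror the _il_clear_bit used above).
 */
#if 0
static void
il3945_example_give_eeprom_to_ucode(struct il_priv *il)
{
	_il_set_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
}
#endif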
2522*4882a593Smuzhiyun
2523*4882a593Smuzhiyun /*
2524*4882a593Smuzhiyun * il3945_load_bsm - Load bootstrap instructions
2525*4882a593Smuzhiyun *
2526*4882a593Smuzhiyun * BSM operation:
2527*4882a593Smuzhiyun *
2528*4882a593Smuzhiyun * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
2529*4882a593Smuzhiyun * in special SRAM that does not power down during RFKILL. When powering back
2530*4882a593Smuzhiyun * up after power-saving sleeps (or during initial uCode load), the BSM loads
2531*4882a593Smuzhiyun * the bootstrap program into the on-board processor, and starts it.
2532*4882a593Smuzhiyun *
2533*4882a593Smuzhiyun * The bootstrap program loads (via DMA) instructions and data for a new
2534*4882a593Smuzhiyun * program from host DRAM locations indicated by the host driver in the
2535*4882a593Smuzhiyun * BSM_DRAM_* registers. Once the new program is loaded, it starts
2536*4882a593Smuzhiyun * automatically.
2537*4882a593Smuzhiyun *
2538*4882a593Smuzhiyun * When initializing the NIC, the host driver points the BSM to the
2539*4882a593Smuzhiyun * "initialize" uCode image. This uCode sets up some internal data, then
2540*4882a593Smuzhiyun * notifies host via "initialize alive" that it is complete.
2541*4882a593Smuzhiyun *
2542*4882a593Smuzhiyun * The host then replaces the BSM_DRAM_* pointer values to point to the
2543*4882a593Smuzhiyun * normal runtime uCode instructions and a backup uCode data cache buffer
2544*4882a593Smuzhiyun * (filled initially with starting data values for the on-board processor),
2545*4882a593Smuzhiyun * then triggers the "initialize" uCode to load and launch the runtime uCode,
2546*4882a593Smuzhiyun * which begins normal operation.
2547*4882a593Smuzhiyun *
2548*4882a593Smuzhiyun * When doing a power-save shutdown, runtime uCode saves data SRAM into
2549*4882a593Smuzhiyun * the backup data cache in DRAM before SRAM is powered down.
2550*4882a593Smuzhiyun *
2551*4882a593Smuzhiyun * When powering back up, the BSM loads the bootstrap program. This reloads
2552*4882a593Smuzhiyun * the runtime uCode instructions and the backup data cache into SRAM,
2553*4882a593Smuzhiyun * and re-launches the runtime uCode from where it left off.
2554*4882a593Smuzhiyun */
2555*4882a593Smuzhiyun static int
2556*4882a593Smuzhiyun il3945_load_bsm(struct il_priv *il)
2557*4882a593Smuzhiyun {
2558*4882a593Smuzhiyun __le32 *image = il->ucode_boot.v_addr;
2559*4882a593Smuzhiyun u32 len = il->ucode_boot.len;
2560*4882a593Smuzhiyun dma_addr_t pinst;
2561*4882a593Smuzhiyun dma_addr_t pdata;
2562*4882a593Smuzhiyun u32 inst_len;
2563*4882a593Smuzhiyun u32 data_len;
2564*4882a593Smuzhiyun int rc;
2565*4882a593Smuzhiyun int i;
2566*4882a593Smuzhiyun u32 done;
2567*4882a593Smuzhiyun u32 reg_offset;
2568*4882a593Smuzhiyun
2569*4882a593Smuzhiyun D_INFO("Begin load bsm\n");
2570*4882a593Smuzhiyun
2571*4882a593Smuzhiyun /* make sure bootstrap program is no larger than BSM's SRAM size */
2572*4882a593Smuzhiyun if (len > IL39_MAX_BSM_SIZE)
2573*4882a593Smuzhiyun return -EINVAL;
2574*4882a593Smuzhiyun
2575*4882a593Smuzhiyun /* Tell bootstrap uCode where to find the "Initialize" uCode
2576*4882a593Smuzhiyun * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
2577*4882a593Smuzhiyun * NOTE: il3945_initialize_alive_start() will replace these values,
2578*4882a593Smuzhiyun * after the "initialize" uCode has run, to point to
2579*4882a593Smuzhiyun * runtime/protocol instructions and backup data cache. */
2580*4882a593Smuzhiyun pinst = il->ucode_init.p_addr;
2581*4882a593Smuzhiyun pdata = il->ucode_init_data.p_addr;
2582*4882a593Smuzhiyun inst_len = il->ucode_init.len;
2583*4882a593Smuzhiyun data_len = il->ucode_init_data.len;
2584*4882a593Smuzhiyun
2585*4882a593Smuzhiyun il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
2586*4882a593Smuzhiyun il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
2587*4882a593Smuzhiyun il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
2588*4882a593Smuzhiyun il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
2589*4882a593Smuzhiyun
2590*4882a593Smuzhiyun /* Fill BSM memory with bootstrap instructions */
2591*4882a593Smuzhiyun for (reg_offset = BSM_SRAM_LOWER_BOUND;
2592*4882a593Smuzhiyun reg_offset < BSM_SRAM_LOWER_BOUND + len;
2593*4882a593Smuzhiyun reg_offset += sizeof(u32), image++)
2594*4882a593Smuzhiyun _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
2595*4882a593Smuzhiyun
2596*4882a593Smuzhiyun rc = il3945_verify_bsm(il);
2597*4882a593Smuzhiyun if (rc)
2598*4882a593Smuzhiyun return rc;
2599*4882a593Smuzhiyun
2600*4882a593Smuzhiyun /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
2601*4882a593Smuzhiyun il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
2602*4882a593Smuzhiyun il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
2603*4882a593Smuzhiyun il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
2604*4882a593Smuzhiyun
2605*4882a593Smuzhiyun /* Load bootstrap code into instruction SRAM now,
2606*4882a593Smuzhiyun * to prepare to load "initialize" uCode */
2607*4882a593Smuzhiyun il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
2608*4882a593Smuzhiyun
2609*4882a593Smuzhiyun /* Wait for load of bootstrap uCode to finish */
2610*4882a593Smuzhiyun for (i = 0; i < 100; i++) {
2611*4882a593Smuzhiyun done = il_rd_prph(il, BSM_WR_CTRL_REG);
2612*4882a593Smuzhiyun if (!(done & BSM_WR_CTRL_REG_BIT_START))
2613*4882a593Smuzhiyun break;
2614*4882a593Smuzhiyun udelay(10);
2615*4882a593Smuzhiyun }
2616*4882a593Smuzhiyun if (i < 100)
2617*4882a593Smuzhiyun D_INFO("BSM write complete, poll %d iterations\n", i);
2618*4882a593Smuzhiyun else {
2619*4882a593Smuzhiyun IL_ERR("BSM write did not complete!\n");
2620*4882a593Smuzhiyun return -EIO;
2621*4882a593Smuzhiyun }
2622*4882a593Smuzhiyun
2623*4882a593Smuzhiyun /* Enable future boot loads whenever power management unit triggers it
2624*4882a593Smuzhiyun * (e.g. when powering back up after power-save shutdown) */
2625*4882a593Smuzhiyun il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
2626*4882a593Smuzhiyun
2627*4882a593Smuzhiyun return 0;
2628*4882a593Smuzhiyun }
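
/*
 * Illustrative sketch (not from the original file): as described in the
 * comment above il3945_load_bsm(), once the "initialize" image reports
 * alive the driver re-points the BSM DRAM registers at the runtime image
 * and backup data cache.  The ucode_code/ucode_data/ucode_data_backup
 * field names here are assumptions mirroring the init-image fields used
 * in il3945_load_bsm().
 */
#if 0
static void
il3945_example_point_bsm_at_runtime_ucode(struct il_priv *il)
{
	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, il->ucode_code.p_addr);
	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, il->ucode_data_backup.p_addr);
	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, il->ucode_code.len);
	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
}
#endif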
2629*4882a593Smuzhiyun
2630*4882a593Smuzhiyun const struct il_ops il3945_ops = {
2631*4882a593Smuzhiyun .txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
2632*4882a593Smuzhiyun .txq_free_tfd = il3945_hw_txq_free_tfd,
2633*4882a593Smuzhiyun .txq_init = il3945_hw_tx_queue_init,
2634*4882a593Smuzhiyun .load_ucode = il3945_load_bsm,
2635*4882a593Smuzhiyun .dump_nic_error_log = il3945_dump_nic_error_log,
2636*4882a593Smuzhiyun .apm_init = il3945_apm_init,
2637*4882a593Smuzhiyun .send_tx_power = il3945_send_tx_power,
2638*4882a593Smuzhiyun .is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,
2639*4882a593Smuzhiyun .eeprom_acquire_semaphore = il3945_eeprom_acquire_semaphore,
2640*4882a593Smuzhiyun .eeprom_release_semaphore = il3945_eeprom_release_semaphore,
2641*4882a593Smuzhiyun
2642*4882a593Smuzhiyun .rxon_assoc = il3945_send_rxon_assoc,
2643*4882a593Smuzhiyun .commit_rxon = il3945_commit_rxon,
2644*4882a593Smuzhiyun
2645*4882a593Smuzhiyun .get_hcmd_size = il3945_get_hcmd_size,
2646*4882a593Smuzhiyun .build_addsta_hcmd = il3945_build_addsta_hcmd,
2647*4882a593Smuzhiyun .request_scan = il3945_request_scan,
2648*4882a593Smuzhiyun .post_scan = il3945_post_scan,
2649*4882a593Smuzhiyun
2650*4882a593Smuzhiyun .post_associate = il3945_post_associate,
2651*4882a593Smuzhiyun .config_ap = il3945_config_ap,
2652*4882a593Smuzhiyun .manage_ibss_station = il3945_manage_ibss_station,
2653*4882a593Smuzhiyun
2654*4882a593Smuzhiyun .send_led_cmd = il3945_send_led_cmd,
2655*4882a593Smuzhiyun };
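
/*
 * Illustrative sketch (not from the original file): the core iwlegacy
 * code dispatches through this ops table rather than calling 3945
 * functions directly.  Assuming struct il_priv carries an "ops" pointer
 * bound to il3945_ops, a call site looks roughly like this.
 */
#if 0
static int
il3945_example_start_ucode(struct il_priv *il)
{
	/* ends up in il3945_load_bsm() via the table above */
	return il->ops->load_ucode(il);
}
#endif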
2656*4882a593Smuzhiyun
2657*4882a593Smuzhiyun static const struct il_cfg il3945_bg_cfg = {
2658*4882a593Smuzhiyun .name = "3945BG",
2659*4882a593Smuzhiyun .fw_name_pre = IL3945_FW_PRE,
2660*4882a593Smuzhiyun .ucode_api_max = IL3945_UCODE_API_MAX,
2661*4882a593Smuzhiyun .ucode_api_min = IL3945_UCODE_API_MIN,
2662*4882a593Smuzhiyun .sku = IL_SKU_G,
2663*4882a593Smuzhiyun .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2664*4882a593Smuzhiyun .mod_params = &il3945_mod_params,
2665*4882a593Smuzhiyun .led_mode = IL_LED_BLINK,
2666*4882a593Smuzhiyun
2667*4882a593Smuzhiyun .eeprom_size = IL3945_EEPROM_IMG_SIZE,
2668*4882a593Smuzhiyun .num_of_queues = IL39_NUM_QUEUES,
2669*4882a593Smuzhiyun .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2670*4882a593Smuzhiyun .set_l0s = false,
2671*4882a593Smuzhiyun .use_bsm = true,
2672*4882a593Smuzhiyun .led_compensation = 64,
2673*4882a593Smuzhiyun .wd_timeout = IL_DEF_WD_TIMEOUT,
2674*4882a593Smuzhiyun
2675*4882a593Smuzhiyun .regulatory_bands = {
2676*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_1_CHANNELS,
2677*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_2_CHANNELS,
2678*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_3_CHANNELS,
2679*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_4_CHANNELS,
2680*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_5_CHANNELS,
2681*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_NO_HT40,
2682*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_NO_HT40,
2683*4882a593Smuzhiyun },
2684*4882a593Smuzhiyun };
2685*4882a593Smuzhiyun
2686*4882a593Smuzhiyun static const struct il_cfg il3945_abg_cfg = {
2687*4882a593Smuzhiyun .name = "3945ABG",
2688*4882a593Smuzhiyun .fw_name_pre = IL3945_FW_PRE,
2689*4882a593Smuzhiyun .ucode_api_max = IL3945_UCODE_API_MAX,
2690*4882a593Smuzhiyun .ucode_api_min = IL3945_UCODE_API_MIN,
2691*4882a593Smuzhiyun .sku = IL_SKU_A | IL_SKU_G,
2692*4882a593Smuzhiyun .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2693*4882a593Smuzhiyun .mod_params = &il3945_mod_params,
2694*4882a593Smuzhiyun .led_mode = IL_LED_BLINK,
2695*4882a593Smuzhiyun
2696*4882a593Smuzhiyun .eeprom_size = IL3945_EEPROM_IMG_SIZE,
2697*4882a593Smuzhiyun .num_of_queues = IL39_NUM_QUEUES,
2698*4882a593Smuzhiyun .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2699*4882a593Smuzhiyun .set_l0s = false,
2700*4882a593Smuzhiyun .use_bsm = true,
2701*4882a593Smuzhiyun .led_compensation = 64,
2702*4882a593Smuzhiyun .wd_timeout = IL_DEF_WD_TIMEOUT,
2703*4882a593Smuzhiyun
2704*4882a593Smuzhiyun .regulatory_bands = {
2705*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_1_CHANNELS,
2706*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_2_CHANNELS,
2707*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_3_CHANNELS,
2708*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_4_CHANNELS,
2709*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_5_CHANNELS,
2710*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_NO_HT40,
2711*4882a593Smuzhiyun EEPROM_REGULATORY_BAND_NO_HT40,
2712*4882a593Smuzhiyun },
2713*4882a593Smuzhiyun };
2714*4882a593Smuzhiyun
2715*4882a593Smuzhiyun const struct pci_device_id il3945_hw_card_ids[] = {
2716*4882a593Smuzhiyun {IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
2717*4882a593Smuzhiyun {IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
2718*4882a593Smuzhiyun {IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
2719*4882a593Smuzhiyun {IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
2720*4882a593Smuzhiyun {IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
2721*4882a593Smuzhiyun {IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
2722*4882a593Smuzhiyun {0}
2723*4882a593Smuzhiyun };
2724*4882a593Smuzhiyun
2725*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
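
/*
 * Illustrative sketch (not from the original file): a PCI driver that
 * consumes the ID table above.  The driver's real probe/remove callbacks
 * and registration live in its mac layer; the names used here are
 * placeholders.
 */
#if 0
static struct pci_driver il3945_example_driver = {
	.name = "il3945_example",
	.id_table = il3945_hw_card_ids,
	.probe = il3945_example_probe,		/* hypothetical */
	.remove = il3945_example_remove,	/* hypothetical */
};
module_pci_driver(il3945_example_driver);
#endif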