// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2002 Intersil Americas Inc.
 * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
 * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
 */

#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/if_arp.h>

#include <asm/io.h>

#include "prismcompat.h"
#include "isl_38xx.h"
#include "isl_ioctl.h"
#include "islpci_dev.h"
#include "islpci_mgt.h"
#include "islpci_eth.h"
#include "oid_mgt.h"

#define ISL3877_IMAGE_FILE "isl3877"
#define ISL3886_IMAGE_FILE "isl3886"
#define ISL3890_IMAGE_FILE "isl3890"
MODULE_FIRMWARE(ISL3877_IMAGE_FILE);
MODULE_FIRMWARE(ISL3886_IMAGE_FILE);
MODULE_FIRMWARE(ISL3890_IMAGE_FILE);

static int prism54_bring_down(islpci_private *);
static int islpci_alloc_memory(islpci_private *);

/* Temporary dummy MAC address to use until firmware is loaded.
 * The idea is that some tools (such as nameif) may query
 * the MAC address before the netdev is 'open'. By using a valid
 * OUI prefix, they can process the netdev properly.
 * Of course, this is not the final/real MAC address. It doesn't
 * matter, as you are supposed to be able to change it at any time via
 * ndev->set_mac_address. Jean II */
static const unsigned char dummy_mac[6] = { 0x00, 0x30, 0xB4, 0x00, 0x00, 0x00 };

static int
isl_upload_firmware(islpci_private *priv)
{
	u32 reg, rc;
	void __iomem *device_base = priv->device_base;

	/* clear the RAMBoot and the Reset bit */
	reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	reg &= ~ISL38XX_CTRL_STAT_RAMBOOT;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* set the Reset bit without reading the register ! */
	reg |= ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* clear the Reset bit */
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();

	/* wait a while for the device to reboot */
	mdelay(50);

	{
		const struct firmware *fw_entry = NULL;
		long fw_len;
		const u32 *fw_ptr;

		rc = request_firmware(&fw_entry, priv->firmware, PRISM_FW_PDEV);
		if (rc) {
			printk(KERN_ERR
			       "%s: request_firmware() failed for '%s'\n",
			       "prism54", priv->firmware);
			return rc;
		}
		/* prepare the Direct Memory Base register */
		reg = ISL38XX_DEV_FIRMWARE_ADDRES;

		fw_ptr = (u32 *) fw_entry->data;
		fw_len = fw_entry->size;

		if (fw_len % 4) {
			printk(KERN_ERR
			       "%s: firmware '%s' size is not a multiple of 32 bits, aborting!\n",
			       "prism54", priv->firmware);
			release_firmware(fw_entry);
			return -EILSEQ; /* Illegal byte sequence */
		}

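		/* Copy the image in chunks of at most ISL38XX_MEMORY_WINDOW_SIZE
		 * bytes; the Direct Memory Base register is advanced between
		 * chunks so the fixed-size window walks over the whole image. */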
		while (fw_len > 0) {
			long _fw_len =
			    (fw_len >
			     ISL38XX_MEMORY_WINDOW_SIZE) ?
			    ISL38XX_MEMORY_WINDOW_SIZE : fw_len;
			u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN;

			/* set the card's base address for writing the data */
			isl38xx_w32_flush(device_base, reg,
					  ISL38XX_DIR_MEM_BASE_REG);
			wmb();	/* be paranoid */

			/* increment the write address for next iteration */
			reg += _fw_len;
			fw_len -= _fw_len;

			/* write the data to the Direct Memory Window 32bit-wise */
			/* memcpy_toio() doesn't guarantee 32bit writes :-| */
			while (_fw_len > 0) {
				/* use non-swapping writel() */
				__raw_writel(*fw_ptr, dev_fw_ptr);
				fw_ptr++, dev_fw_ptr++;
				_fw_len -= 4;
			}

			/* flush PCI posting */
			(void) readl(device_base + ISL38XX_PCI_POSTING_FLUSH);
			wmb();	/* be paranoid again */

			BUG_ON(_fw_len != 0);
		}

		BUG_ON(fw_len != 0);

		/* Firmware version is at offset 40 (also for "newmac") */
		printk(KERN_DEBUG "%s: firmware version: %.8s\n",
		       priv->ndev->name, fw_entry->data + 40);

		release_firmware(fw_entry);
	}

	/* now reset the device
	 * clear the Reset & ClkRun bit, set the RAMBoot bit */
	reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
	reg &= ~ISL38XX_CTRL_STAT_CLKRUN;
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	reg |= ISL38XX_CTRL_STAT_RAMBOOT;
	isl38xx_w32_flush(device_base, reg, ISL38XX_CTRL_STAT_REG);
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* setting the Reset bit latches the host override and RAMBoot bits
	 * into the device; they take effect once the Reset bit is cleared again */
	reg |= ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	/* don't do flush PCI posting here! */
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* clearing the Reset bit should start the whole circus */
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	/* don't do flush PCI posting here! */
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	return 0;
}

/******************************************************************************
    Device Interrupt Handler
******************************************************************************/

irqreturn_t
islpci_interrupt(int irq, void *config)
{
	u32 reg;
	islpci_private *priv = config;
	struct net_device *ndev = priv->ndev;
	void __iomem *device = priv->device_base;
	int powerstate = ISL38XX_PSM_POWERSAVE_STATE;

	/* lock the interrupt handler */
	spin_lock(&priv->slock);

	/* received an interrupt request on a shared IRQ line
	 * first check whether the device is in sleep mode */
	reg = readl(device + ISL38XX_CTRL_STAT_REG);
	if (reg & ISL38XX_CTRL_STAT_SLEEPMODE)
		/* device is in sleep mode, IRQ was generated by someone else */
	{
#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
		spin_unlock(&priv->slock);
		return IRQ_NONE;
	}

	/* check whether there is any source of interrupt on the device */
	reg = readl(device + ISL38XX_INT_IDENT_REG);

	/* also check the contents of the Interrupt Enable Register, because this
	 * will filter out interrupt sources from other devices on the same irq! */
	reg &= readl(device + ISL38XX_INT_EN_REG);
	reg &= ISL38XX_INT_SOURCES;

	if (reg != 0) {
		if (islpci_get_state(priv) != PRV_STATE_SLEEP)
			powerstate = ISL38XX_PSM_ACTIVE_STATE;

		/* reset the request bits in the Identification register */
		isl38xx_w32_flush(device, reg, ISL38XX_INT_ACK_REG);

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_FUNCTION_CALLS,
		      "IRQ: Identification register 0x%p 0x%x\n", device, reg);
#endif

		/* check for each bit in the register separately */
		if (reg & ISL38XX_INT_IDENT_UPDATE) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			/* Queue has been updated */
			DEBUG(SHOW_TRACING, "IRQ: Update flag\n");

			DEBUG(SHOW_QUEUE_INDEXES,
			      "CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[0]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[1]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[2]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[3]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[4]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[5])
			    );

			DEBUG(SHOW_QUEUE_INDEXES,
			      "CB dev Qs: [%i][%i][%i][%i][%i][%i]\n",
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[0]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[1]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[2]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[3]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[4]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[5])
			    );
#endif

			/* cleanup the data low transmit queue */
			islpci_eth_cleanup_transmit(priv, priv->control_block);

			/* device is in active state, update the
			 * powerstate flag if necessary */
			powerstate = ISL38XX_PSM_ACTIVE_STATE;

			/* check all three queues in priority order
			 * call the PIMFOR receive function until the
			 * queue is empty */
			if (isl38xx_in_queue(priv->control_block,
					     ISL38XX_CB_RX_MGMTQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
				DEBUG(SHOW_TRACING,
				      "Received frame in Management Queue\n");
#endif
				islpci_mgt_receive(ndev);

				islpci_mgt_cleanup_transmit(ndev);

				/* Refill slots in receive queue */
				islpci_mgmt_rx_fill(ndev);

				/* no need to trigger the device, next
				   islpci_mgt_transaction does it */
			}

			while (isl38xx_in_queue(priv->control_block,
						ISL38XX_CB_RX_DATA_LQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
				DEBUG(SHOW_TRACING,
				      "Received frame in Data Low Queue\n");
#endif
				islpci_eth_receive(priv);
			}

			/* check whether the data transmit queues were full */
			if (priv->data_low_tx_full) {
				/* check whether the transmit is not full anymore */
				if (ISL38XX_CB_TX_QSIZE -
				    isl38xx_in_queue(priv->control_block,
						     ISL38XX_CB_TX_DATA_LQ) >=
				    ISL38XX_MIN_QTHRESHOLD) {
					/* nope, the driver is ready for more network frames */
					netif_wake_queue(priv->ndev);

					/* reset the full flag */
					priv->data_low_tx_full = 0;
				}
			}
		}

		if (reg & ISL38XX_INT_IDENT_INIT) {
			/* Device has been initialized */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING,
			      "IRQ: Init flag, device initialized\n");
#endif
			wake_up(&priv->reset_done);
		}

		if (reg & ISL38XX_INT_IDENT_SLEEP) {
			/* Device intends to move to powersave state */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "IRQ: Sleep flag\n");
#endif
			isl38xx_handle_sleep_request(priv->control_block,
						     &powerstate,
						     priv->device_base);
		}

		if (reg & ISL38XX_INT_IDENT_WAKEUP) {
			/* Device has been woken up to active state */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "IRQ: Wakeup flag\n");
#endif

			isl38xx_handle_wakeup(priv->control_block,
					      &powerstate, priv->device_base);
		}
	} else {
#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
		spin_unlock(&priv->slock);
		return IRQ_NONE;
	}

	/* sleep -> ready */
	if (islpci_get_state(priv) == PRV_STATE_SLEEP
	    && powerstate == ISL38XX_PSM_ACTIVE_STATE)
		islpci_set_state(priv, PRV_STATE_READY);

	/* !sleep -> sleep */
	if (islpci_get_state(priv) != PRV_STATE_SLEEP
	    && powerstate == ISL38XX_PSM_POWERSAVE_STATE)
		islpci_set_state(priv, PRV_STATE_SLEEP);

	/* unlock the interrupt handler */
	spin_unlock(&priv->slock);

	return IRQ_HANDLED;
}

/******************************************************************************
    Network Interface Control & Statistical functions
******************************************************************************/
static int
islpci_open(struct net_device *ndev)
{
	u32 rc;
	islpci_private *priv = netdev_priv(ndev);

	/* reset data structures, upload firmware and reset device */
	rc = islpci_reset(priv, 1);
	if (rc) {
		prism54_bring_down(priv);
		return rc; /* Returns informative message */
	}

	netif_start_queue(ndev);

	/* Turn off carrier if in STA or Ad-hoc mode. It will be turned on
	 * once the firmware receives a trap of being associated
	 * (GEN_OID_LINKSTATE). In other modes (AP or WDS or monitor) we
	 * should just leave the carrier on, as it's expected that the
	 * firmware won't send us a trigger. */
	if (priv->iw_mode == IW_MODE_INFRA || priv->iw_mode == IW_MODE_ADHOC)
		netif_carrier_off(ndev);
	else
		netif_carrier_on(ndev);

	return 0;
}

static int
islpci_close(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);

	printk(KERN_DEBUG "%s: islpci_close ()\n", ndev->name);

	netif_stop_queue(ndev);

	return prism54_bring_down(priv);
}

static int
prism54_bring_down(islpci_private *priv)
{
	void __iomem *device_base = priv->device_base;
	u32 reg;
	/* we are going to shutdown the device */
	islpci_set_state(priv, PRV_STATE_PREBOOT);

	/* disable all device interrupts in case they weren't */
	isl38xx_disable_interrupts(priv->device_base);

	/* For safety reasons, we may want to ensure that no DMA transfer is
	 * currently in progress by emptying the TX and RX queues. */

	/* wait until interrupts have finished executing on other CPUs */
	synchronize_irq(priv->pdev->irq);

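	/* clear RAMBoot and Reset, then pulse the Reset bit once more to put
	 * the device back into its pre-boot state */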
	reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
	reg &= ~(ISL38XX_CTRL_STAT_RESET | ISL38XX_CTRL_STAT_RAMBOOT);
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	reg |= ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* clear the Reset bit */
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();

	/* wait a while for the device to reset */
	schedule_timeout_uninterruptible(msecs_to_jiffies(50));

	return 0;
}

static int
islpci_upload_fw(islpci_private *priv)
{
	islpci_state_t old_state;
	u32 rc;

	old_state = islpci_set_state(priv, PRV_STATE_BOOT);

	printk(KERN_DEBUG "%s: uploading firmware...\n", priv->ndev->name);

	rc = isl_upload_firmware(priv);
	if (rc) {
		/* error uploading the firmware */
		printk(KERN_ERR "%s: could not upload firmware ('%s')\n",
		       priv->ndev->name, priv->firmware);

		islpci_set_state(priv, old_state);
		return rc;
	}

	printk(KERN_DEBUG "%s: firmware upload complete\n",
	       priv->ndev->name);

	islpci_set_state(priv, PRV_STATE_POSTBOOT);

	return 0;
}

static int
islpci_reset_if(islpci_private *priv)
{
	long remaining;
	int result = -ETIME;
	int count;

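	/* arm the wait before triggering the reset, so the wake_up() issued
	 * by the INIT interrupt in islpci_interrupt() cannot be missed */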
	DEFINE_WAIT(wait);
	prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE);

	/* now the last step is to reset the interface */
	isl38xx_interface_reset(priv->device_base, priv->device_host_address);
	islpci_set_state(priv, PRV_STATE_PREINIT);

	for (count = 0; count < 2 && result; count++) {
		/* The software reset acknowledge needs about 220 msec here.
		 * Be conservative and wait for up to one second. */

		remaining = schedule_timeout_uninterruptible(HZ);

		if (remaining > 0) {
			result = 0;
			break;
		}

		/* If we're here it's because our IRQ hasn't yet gone through.
		 * Retry a bit more...
		 */
		printk(KERN_ERR "%s: no 'reset complete' IRQ seen - retrying\n",
		       priv->ndev->name);
	}

	finish_wait(&priv->reset_done, &wait);

	if (result) {
		printk(KERN_ERR "%s: interface reset failure\n", priv->ndev->name);
		return result;
	}

	islpci_set_state(priv, PRV_STATE_INIT);

	/* Now that the device is 100% up, let's allow
	 * for the other interrupts --
	 * NOTE: this is not *yet* true since we've only allowed the
	 * INIT interrupt on the IRQ line. We can perhaps poll
	 * the IRQ line until we know for sure the reset went through */
	isl38xx_enable_common_interrupts(priv->device_base);

	down_write(&priv->mib_sem);
	result = mgt_commit(priv);
	if (result) {
		printk(KERN_ERR "%s: interface reset failure\n", priv->ndev->name);
		up_write(&priv->mib_sem);
		return result;
	}
	up_write(&priv->mib_sem);

	islpci_set_state(priv, PRV_STATE_READY);

	printk(KERN_DEBUG "%s: interface reset complete\n", priv->ndev->name);
	return 0;
}

int
islpci_reset(islpci_private *priv, int reload_firmware)
{
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	unsigned counter;
	int rc;

	if (reload_firmware)
		islpci_set_state(priv, PRV_STATE_PREBOOT);
	else
		islpci_set_state(priv, PRV_STATE_POSTBOOT);

	printk(KERN_DEBUG "%s: resetting device...\n", priv->ndev->name);

	/* disable all device interrupts in case they weren't */
	isl38xx_disable_interrupts(priv->device_base);

	/* flush all management queues */
	priv->index_mgmt_tx = 0;
	priv->index_mgmt_rx = 0;

	/* clear the indexes in the frame pointer */
	for (counter = 0; counter < ISL38XX_CB_QCOUNT; counter++) {
		cb->driver_curr_frag[counter] = cpu_to_le32(0);
		cb->device_curr_frag[counter] = cpu_to_le32(0);
	}

	/* reset the mgmt receive queue */
	for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
		isl38xx_fragment *frag = &cb->rx_data_mgmt[counter];
		frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
		frag->flags = 0;
		frag->address = cpu_to_le32(priv->mgmt_rx[counter].pci_addr);
	}

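	/* point the data-low receive fragments back at their pre-mapped skbs */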
	for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
		cb->rx_data_low[counter].address =
		    cpu_to_le32((u32) priv->pci_map_rx_address[counter]);
	}

	/* since the receive queues are filled with empty fragments, now we can
	 * set the corresponding indexes in the Control Block */
	priv->control_block->driver_curr_frag[ISL38XX_CB_RX_DATA_LQ] =
	    cpu_to_le32(ISL38XX_CB_RX_QSIZE);
	priv->control_block->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] =
	    cpu_to_le32(ISL38XX_CB_MGMT_QSIZE);

	/* reset the remaining real index registers and full flags */
	priv->free_data_rx = 0;
	priv->free_data_tx = 0;
	priv->data_low_tx_full = 0;

	if (reload_firmware) {	/* Should we load the firmware? */
		/* now that the data structures are cleaned up, upload
		 * firmware and reset interface */
		rc = islpci_upload_fw(priv);
		if (rc) {
			printk(KERN_ERR "%s: islpci_reset: failure\n",
			       priv->ndev->name);
			return rc;
		}
	}

	/* finally reset interface */
	rc = islpci_reset_if(priv);
	if (rc)
		printk(KERN_ERR "prism54: Your card/socket may be faulty, or IRQ line too busy :(\n");
	return rc;
}

/******************************************************************************
    Network device configuration functions
******************************************************************************/
static int
islpci_alloc_memory(islpci_private *priv)
{
	int counter;

#if VERBOSE > SHOW_ERROR_MESSAGES
	printk(KERN_DEBUG "islpci_alloc_memory\n");
#endif

	/* remap the PCI device base address so the device memory is accessible */
	if (!(priv->device_base =
	      ioremap(pci_resource_start(priv->pdev, 0),
		      ISL38XX_PCI_MEM_SIZE))) {
		/* error in remapping the PCI device memory address range */
		printk(KERN_ERR "PCI memory remapping failed\n");
		return -1;
	}

	/* memory layout for consistent DMA region:
	 *
	 * Area 1: Control Block for the device interface
	 * Area 2: Power Save Mode Buffer for temporary frame storage. Be aware that
	 *	   the number of supported stations in the AP determines the minimal
	 *	   size of the buffer !
	 */

	/* perform the allocation */
	priv->driver_mem_address = dma_alloc_coherent(&priv->pdev->dev,
						      HOST_MEM_BLOCK,
						      &priv->device_host_address,
						      GFP_KERNEL);

	if (!priv->driver_mem_address) {
		/* error allocating the block of PCI memory */
		printk(KERN_ERR "%s: could not allocate DMA memory, aborting!\n",
		       "prism54");
		return -1;
	}

	/* assign the Control Block to the first address of the allocated area */
	priv->control_block =
	    (isl38xx_control_block *) priv->driver_mem_address;

	/* set the Power Save Buffer pointer directly behind the CB */
	priv->device_psm_buffer =
	    priv->device_host_address + CONTROL_BLOCK_SIZE;

	/* make sure all buffer pointers are initialized */
	for (counter = 0; counter < ISL38XX_CB_QCOUNT; counter++) {
		priv->control_block->driver_curr_frag[counter] = cpu_to_le32(0);
		priv->control_block->device_curr_frag[counter] = cpu_to_le32(0);
	}

	priv->index_mgmt_rx = 0;
	memset(priv->mgmt_rx, 0, sizeof(priv->mgmt_rx));
	memset(priv->mgmt_tx, 0, sizeof(priv->mgmt_tx));

	/* allocate rx queue for management frames */
	if (islpci_mgmt_rx_fill(priv->ndev) < 0)
		goto out_free;

	/* now get the data rx skb's */
	memset(priv->data_low_rx, 0, sizeof (priv->data_low_rx));
	memset(priv->pci_map_rx_address, 0, sizeof (priv->pci_map_rx_address));

	for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
		struct sk_buff *skb;

		/* allocate an sk_buff for received data frame storage;
		 * each frame on the receive side consists of one fragment,
		 * including any required alignment operations */
		if (!(skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2))) {
			/* error allocating an sk_buff structure elements */
			printk(KERN_ERR "Error allocating skb.\n");
			skb = NULL;
			goto out_free;
		}
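		/* reserve 0-3 bytes so that skb->data starts on a 32-bit boundary */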
		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
		/* add the new allocated sk_buff to the buffer array */
		priv->data_low_rx[counter] = skb;

		/* map the allocated skb data area to pci */
		priv->pci_map_rx_address[counter] =
		    dma_map_single(&priv->pdev->dev, (void *)skb->data,
				   MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[counter])) {
			priv->pci_map_rx_address[counter] = 0;
			/* error mapping the buffer to device
			   accessible memory address */
			printk(KERN_ERR "failed to map skb for DMA\n");
			goto out_free;
		}
	}

	prism54_acl_init(&priv->acl);
	prism54_wpa_bss_ie_init(priv);
	if (mgt_init(priv))
		goto out_free;

	return 0;
 out_free:
	islpci_free_memory(priv);
	return -1;
}

int
islpci_free_memory(islpci_private *priv)
{
	int counter;

	if (priv->device_base)
		iounmap(priv->device_base);
	priv->device_base = NULL;

	/* free consistent DMA area... */
	if (priv->driver_mem_address)
		dma_free_coherent(&priv->pdev->dev, HOST_MEM_BLOCK,
				  priv->driver_mem_address,
				  priv->device_host_address);

	/* clear some dangling pointers */
	priv->driver_mem_address = NULL;
	priv->device_host_address = 0;
	priv->device_psm_buffer = 0;
	priv->control_block = NULL;

	/* clean up mgmt rx buffers */
	for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
		struct islpci_membuf *buf = &priv->mgmt_rx[counter];
		if (buf->pci_addr)
			dma_unmap_single(&priv->pdev->dev, buf->pci_addr,
					 buf->size, DMA_FROM_DEVICE);
		buf->pci_addr = 0;
		kfree(buf->mem);
		buf->size = 0;
		buf->mem = NULL;
	}

	/* clean up data rx buffers */
	for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
		if (priv->pci_map_rx_address[counter])
			dma_unmap_single(&priv->pdev->dev,
					 priv->pci_map_rx_address[counter],
					 MAX_FRAGMENT_SIZE_RX + 2,
					 DMA_FROM_DEVICE);
		priv->pci_map_rx_address[counter] = 0;

		if (priv->data_low_rx[counter])
			dev_kfree_skb(priv->data_low_rx[counter]);
		priv->data_low_rx[counter] = NULL;
	}

	/* Free the access control list and the WPA list */
	prism54_acl_clean(&priv->acl);
	prism54_wpa_bss_ie_clean(priv);
	mgt_clean(priv);

	return 0;
}

#if 0
static void
islpci_set_multicast_list(struct net_device *dev)
{
	/* put device into promisc mode and let network layer handle it */
}
#endif

static void islpci_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops islpci_ethtool_ops = {
	.get_drvinfo = islpci_ethtool_get_drvinfo,
};

static const struct net_device_ops islpci_netdev_ops = {
	.ndo_open = islpci_open,
	.ndo_stop = islpci_close,
	.ndo_start_xmit = islpci_eth_transmit,
	.ndo_tx_timeout = islpci_eth_tx_timeout,
	.ndo_set_mac_address = prism54_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
};

static struct device_type wlan_type = {
	.name = "wlan",
};

struct net_device *
islpci_setup(struct pci_dev *pdev)
{
	islpci_private *priv;
	struct net_device *ndev = alloc_etherdev(sizeof (islpci_private));

	if (!ndev)
		return ndev;

	pci_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	SET_NETDEV_DEVTYPE(ndev, &wlan_type);

	/* setup the structure members */
	ndev->base_addr = pci_resource_start(pdev, 0);
	ndev->irq = pdev->irq;

	/* initialize the function pointers */
	ndev->netdev_ops = &islpci_netdev_ops;
	ndev->wireless_handlers = &prism54_handler_def;
	ndev->ethtool_ops = &islpci_ethtool_ops;

	/* ndev->set_multicast_list = &islpci_set_multicast_list; */
	ndev->addr_len = ETH_ALEN;
	/* Get a non-zero dummy MAC address for nameif. Jean II */
	memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN);

	ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;

	/* allocate a private device structure to the network device */
	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->monitor_type = ARPHRD_IEEE80211;
	priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ?
	    priv->monitor_type : ARPHRD_ETHER;

	/* Add pointers to enable iwspy support. */
	priv->wireless_data.spy_data = &priv->spy_data;
	ndev->wireless_data = &priv->wireless_data;

	/* save the start and end address of the PCI memory area */
	ndev->mem_start = (unsigned long) priv->device_base;
	ndev->mem_end = ndev->mem_start + ISL38XX_PCI_MEM_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "PCI Memory remapped to 0x%p\n", priv->device_base);
#endif

	init_waitqueue_head(&priv->reset_done);

	/* init the queue read locks, process wait counter */
	mutex_init(&priv->mgmt_lock);
	priv->mgmt_received = NULL;
	init_waitqueue_head(&priv->mgmt_wqueue);
	mutex_init(&priv->stats_lock);
	spin_lock_init(&priv->slock);

	/* init state machine with off#1 state */
	priv->state = PRV_STATE_OFF;
	priv->state_off = 1;

	/* initialize workqueues */
	INIT_WORK(&priv->stats_work, prism54_update_stats);
	priv->stats_timestamp = 0;

	INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
	priv->reset_task_pending = 0;

	/* allocate various memory areas */
	if (islpci_alloc_memory(priv))
		goto do_free_netdev;

	/* select the firmware file depending on the device id */
	switch (pdev->device) {
	case 0x3877:
		strcpy(priv->firmware, ISL3877_IMAGE_FILE);
		break;

	case 0x3886:
		strcpy(priv->firmware, ISL3886_IMAGE_FILE);
		break;

	default:
		strcpy(priv->firmware, ISL3890_IMAGE_FILE);
		break;
	}

	if (register_netdev(ndev)) {
		DEBUG(SHOW_ERROR_MESSAGES,
		      "ERROR: register_netdev() failed\n");
		goto do_islpci_free_memory;
	}

	return ndev;

 do_islpci_free_memory:
	islpci_free_memory(priv);
 do_free_netdev:
	free_netdev(ndev);
	priv = NULL;
	return NULL;
}

islpci_state_t
islpci_set_state(islpci_private *priv, islpci_state_t new_state)
{
	islpci_state_t old_state;

	/* lock */
	old_state = priv->state;

	/* this means either a race condition or some serious error in
	 * the driver code */
	switch (new_state) {
	case PRV_STATE_OFF:
		priv->state_off++;
		fallthrough;
	default:
		priv->state = new_state;
		break;

	case PRV_STATE_PREBOOT:
		/* there are actually many off-states, enumerated by
		 * state_off */
		if (old_state == PRV_STATE_OFF)
			priv->state_off--;

		/* only if state_off is zero now does it mean we either
		 * were in off#1 state, or came here from
		 * somewhere else */
		if (!priv->state_off)
			priv->state = new_state;
		break;
	}
#if 0
	printk(KERN_DEBUG "%s: state transition %d -> %d (off#%d)\n",
	       priv->ndev->name, old_state, new_state, priv->state_off);
#endif

	/* invariants */
	BUG_ON(priv->state_off < 0);
	BUG_ON(priv->state_off && (priv->state != PRV_STATE_OFF));
	BUG_ON(!priv->state_off && (priv->state == PRV_STATE_OFF));

	/* unlock */
	return old_state;
}