xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/intersil/p54/p54pci.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only

/*
 * Linux device driver for PCI based Prism54
 *
 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
 * Copyright (c) 2008, Christian Lamparter <chunkeey@web.de>
 *
 * Based on the islsm (softmac prism54) driver, which is:
 * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <net/mac80211.h>

#include "p54.h"
#include "lmac.h"
#include "p54pci.h"

MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_DESCRIPTION("Prism54 PCI wireless driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");

static const struct pci_device_id p54p_table[] = {
	/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3890) },
	/* 3COM 3CRWE154G72 Wireless LAN adapter */
	{ PCI_DEVICE(0x10b7, 0x6001) },
	/* Intersil PRISM Indigo Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3877) },
	/* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3886) },
	/* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
	{ PCI_DEVICE(0x1260, 0xffff) },
	{ },
};

MODULE_DEVICE_TABLE(pci, p54p_table);

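/*
 * Reset the ISL38xx core, check that the loaded image is LM86 (PCI)
 * firmware, copy it into device memory in 4 KiB chunks through the
 * direct memory window, then restart the core so it boots from RAM.
 */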
static int p54p_upload_firmware(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	__le32 reg;
	int err;
	__le32 *data;
	u32 remains, left, device_addr;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();

	/* wait for the firmware to reset properly */
	mdelay(10);

	err = p54_parse_firmware(dev, priv->firmware);
	if (err)
		return err;

	if (priv->common.fw_interface != FW_LM86) {
		dev_err(&priv->pdev->dev, "wrong firmware, "
			"please get a LM86(PCI) firmware and try again.\n");
		return -EINVAL;
	}

	data = (__le32 *) priv->firmware->data;
	remains = priv->firmware->size;
	device_addr = ISL38XX_DEV_FIRMWARE_ADDR;
	while (remains) {
		u32 i = 0;
		left = min((u32)0x1000, remains);
		P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr));
		P54P_READ(int_enable);

		device_addr += 0x1000;
		while (i < left) {
			P54P_WRITE(direct_mem_win[i], *data++);
			i += sizeof(u32);
		}

		remains -= left;
		P54P_READ(int_enable);
	}

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	/* wait for the firmware to boot properly */
	mdelay(100);

	return 0;
}

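/*
 * Top up an RX ring: allocate and DMA-map a fresh skb for every free
 * descriptor (one descriptor is always left unused), then publish the
 * new host index to the device.
 */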
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf, u32 index)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	u32 limit, idx, i;

	idx = le32_to_cpu(ring_control->host_idx[ring_index]);
	limit = idx;
	limit -= index;
	limit = ring_limit - limit;

	i = idx % ring_limit;
	while (limit-- > 1) {
		struct p54p_desc *desc = &ring[i];

		if (!desc->host_addr) {
			struct sk_buff *skb;
			dma_addr_t mapping;
			skb = dev_alloc_skb(priv->common.rx_mtu + 32);
			if (!skb)
				break;

			mapping = dma_map_single(&priv->pdev->dev,
						 skb_tail_pointer(skb),
						 priv->common.rx_mtu + 32,
						 DMA_FROM_DEVICE);

			if (dma_mapping_error(&priv->pdev->dev, mapping)) {
				dev_kfree_skb_any(skb);
				dev_err(&priv->pdev->dev,
					"RX DMA Mapping error\n");
				break;
			}

			desc->host_addr = cpu_to_le32(mapping);
			desc->device_addr = 0;	// FIXME: necessary?
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
			desc->flags = 0;
			rx_buf[i] = skb;
		}

		i++;
		idx++;
		i %= ring_limit;
	}

	wmb();
	ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}

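/*
 * Walk the descriptors the device has completed since the last pass,
 * sync each buffer for the CPU and hand the frame to p54_rx().  If the
 * core kept the skb, unmap it and leave the slot empty for the refill
 * path; otherwise the buffer is reset and reused in place.
 */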
static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;
	while (i != idx) {
		u16 len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		desc = &ring[i];
		len = le16_to_cpu(desc->len);
		skb = rx_buf[i];

		if (!skb) {
			i++;
			i %= ring_limit;
			continue;
		}

		if (unlikely(len > priv->common.rx_mtu)) {
			if (net_ratelimit())
				dev_err(&priv->pdev->dev, "rx'd frame size "
					"exceeds length threshold.\n");

			len = priv->common.rx_mtu;
		}
		dma_addr = le32_to_cpu(desc->host_addr);
		dma_sync_single_for_cpu(&priv->pdev->dev, dma_addr,
					priv->common.rx_mtu + 32,
					DMA_FROM_DEVICE);
		skb_put(skb, len);

		if (p54_rx(dev, skb)) {
			dma_unmap_single(&priv->pdev->dev, dma_addr,
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
			rx_buf[i] = NULL;
			desc->host_addr = cpu_to_le32(0);
		} else {
			skb_trim(skb, 0);
			dma_sync_single_for_device(&priv->pdev->dev, dma_addr,
						   priv->common.rx_mtu + 32,
						   DMA_FROM_DEVICE);
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
		}

		i++;
		i %= ring_limit;
	}

	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}

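/*
 * Reclaim descriptors the device has finished transmitting: unmap the
 * DMA buffer, clear the descriptor and free the skb if it is marked
 * FREE_AFTER_TX.
 */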
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **tx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	struct sk_buff *skb;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;

	while (i != idx) {
		desc = &ring[i];

		skb = tx_buf[i];
		tx_buf[i] = NULL;

		dma_unmap_single(&priv->pdev->dev,
				 le32_to_cpu(desc->host_addr),
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);

		desc->host_addr = 0;
		desc->device_addr = 0;
		desc->len = 0;
		desc->flags = 0;

		if (skb && FREE_AFTER_TX(skb))
			p54_free_skb(dev, skb);

		i++;
		i %= ring_limit;
	}
}

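/*
 * Bottom half: service both TX and both RX rings, then tell the device
 * that the host ring indices have been updated.
 */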
static void p54p_tasklet(struct tasklet_struct *t)
{
	struct p54p_priv *priv = from_tasklet(priv, t, tasklet);
	struct ieee80211_hw *dev = pci_get_drvdata(priv->pdev);
	struct p54p_ring_control *ring_control = priv->ring_control;

	p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
			   ARRAY_SIZE(ring_control->tx_mgmt),
			   priv->tx_buf_mgmt);

	p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
			   ARRAY_SIZE(ring_control->tx_data),
			   priv->tx_buf_data);

	p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
		ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);

	p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data,
		ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data);

	wmb();
	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
}

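/*
 * Top half: read and acknowledge the interrupt cause.  An all-ones
 * read means the register is not reachable (e.g. the card is gone), so
 * bail out.  UPDATE interrupts defer ring processing to the tasklet,
 * INIT signals that the firmware has finished booting.
 */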
static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *dev = dev_id;
	struct p54p_priv *priv = dev->priv;
	__le32 reg;

	reg = P54P_READ(int_ident);
	if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) {
		goto out;
	}
	P54P_WRITE(int_ack, reg);

	reg &= P54P_READ(int_enable);

	if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
		tasklet_schedule(&priv->tasklet);
	else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
		complete(&priv->boot_comp);

out:
	return reg ? IRQ_HANDLED : IRQ_NONE;
}

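/*
 * Queue one frame on the TX data ring: map the skb for DMA, fill the
 * next descriptor under priv->lock, advance the host index and notify
 * the device with an update interrupt.
 */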
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	unsigned long flags;
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	dma_addr_t mapping;
	u32 idx, i;
	__le32 device_addr;

	spin_lock_irqsave(&priv->lock, flags);
	idx = le32_to_cpu(ring_control->host_idx[1]);
	i = idx % ARRAY_SIZE(ring_control->tx_data);
	device_addr = ((struct p54_hdr *)skb->data)->req_id;

	mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, mapping)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		p54_free_skb(dev, skb);
		dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
		return;
	}
	priv->tx_buf_data[i] = skb;

	desc = &ring_control->tx_data[i];
	desc->host_addr = cpu_to_le32(mapping);
	desc->device_addr = device_addr;
	desc->len = cpu_to_le16(skb->len);
	desc->flags = 0;

	wmb();
	ring_control->host_idx[1] = cpu_to_le32(idx + 1);
	spin_unlock_irqrestore(&priv->lock, flags);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
}

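/*
 * Tear the device down: mask interrupts, release the IRQ, kill the
 * tasklet, then unmap and free every buffer still attached to the four
 * rings and clear the shared ring control block.
 */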
static void p54p_stop(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	unsigned int i;
	struct p54p_desc *desc;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	free_irq(priv->pdev->irq, dev);

	tasklet_kill(&priv->tasklet);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
		desc = &ring_control->rx_data[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_buf_data[i]);
		priv->rx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
		desc = &ring_control->rx_mgmt[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_buf_mgmt[i]);
		priv->rx_buf_mgmt[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
		desc = &ring_control->tx_data[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 DMA_TO_DEVICE);

		p54_free_skb(dev, priv->tx_buf_data[i]);
		priv->tx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
		desc = &ring_control->tx_mgmt[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 DMA_TO_DEVICE);

		p54_free_skb(dev, priv->tx_buf_mgmt[i]);
		priv->tx_buf_mgmt[i] = NULL;
	}

	memset(ring_control, 0, sizeof(*ring_control));
}

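/*
 * Bring the device up: request the shared IRQ, upload the firmware,
 * pre-fill both RX rings, hand the ring control block to the device
 * and wait up to one second for the firmware's INIT interrupt before
 * enabling UPDATE interrupts.
 */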
static int p54p_open(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	int err;
	long timeout;

	init_completion(&priv->boot_comp);
	err = request_irq(priv->pdev->irq, p54p_interrupt,
			  IRQF_SHARED, "p54pci", dev);
	if (err) {
		dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
		return err;
	}

	memset(priv->ring_control, 0, sizeof(*priv->ring_control));
	err = p54p_upload_firmware(dev);
	if (err) {
		free_irq(priv->pdev->irq, dev);
		return err;
	}
	priv->rx_idx_data = priv->tx_idx_data = 0;
	priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;

	p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
		ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);

	p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
		ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);

	P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
	P54P_READ(ring_control_base);
	wmb();
	udelay(10);

	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
	P54P_READ(dev_int);

	timeout = wait_for_completion_interruptible_timeout(
			&priv->boot_comp, HZ);
	if (timeout <= 0) {
		wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
		p54p_stop(dev);
		return timeout ? -ERESTARTSYS : -ETIMEDOUT;
	}

	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
	wmb();
	udelay(10);

	return 0;
}

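/*
 * Completion callback for request_firmware_nowait().  With the
 * firmware in hand, boot the device once to read the EEPROM, register
 * with mac80211, and on any failure detach the driver, which ends up
 * in p54p_remove().
 */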
static void p54p_firmware_step2(const struct firmware *fw,
				void *context)
{
	struct p54p_priv *priv = context;
	struct ieee80211_hw *dev = priv->common.hw;
	struct pci_dev *pdev = priv->pdev;
	int err;

	if (!fw) {
		dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
		err = -ENOENT;
		goto out;
	}

	priv->firmware = fw;

	err = p54p_open(dev);
	if (err)
		goto out;
	err = p54_read_eeprom(dev);
	p54p_stop(dev);
	if (err)
		goto out;

	err = p54_register_common(dev, &pdev->dev);
	if (err)
		goto out;

out:

	complete(&priv->fw_loaded);

	if (err) {
		struct device *parent = pdev->dev.parent;

		if (parent)
			device_lock(parent);

		/*
		 * This will indirectly result in a call to p54p_remove.
		 * Hence, we don't need to bother with freeing any
		 * allocated resources at all.
		 */
		device_release_driver(&pdev->dev);

		if (parent)
			device_unlock(parent);
	}

	pci_dev_put(pdev);
}

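/*
 * PCI probe: enable the device, map BAR 0, set up 32-bit DMA masks,
 * allocate the coherent ring control block and kick off the
 * asynchronous firmware request; setup continues in
 * p54p_firmware_step2().
 */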
static int p54p_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	pci_dev_get(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		goto err_put;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "Too short PCI resources\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (!err)
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	init_completion(&priv->fw_loaded);
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	priv->ring_control = dma_alloc_coherent(&pdev->dev,
						sizeof(*priv->ring_control),
						&priv->ring_control_dma, GFP_KERNEL);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_setup(&priv->tasklet, p54p_tasklet);

	err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
				      &priv->pdev->dev, GFP_KERNEL,
				      priv, p54p_firmware_step2);
	if (!err)
		return 0;

	dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
			  priv->ring_control, priv->ring_control_dma);

 err_iounmap:
	iounmap(priv->map);

 err_free_dev:
	p54_free_common(dev);

 err_free_reg:
	pci_release_regions(pdev);
 err_disable_dev:
	pci_disable_device(pdev);
err_put:
	pci_dev_put(pdev);
	return err;
}

static void p54p_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	struct p54p_priv *priv;

	if (!dev)
		return;

	priv = dev->priv;
	wait_for_completion(&priv->fw_loaded);
	p54_unregister_common(dev);
	release_firmware(priv->firmware);
	dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
			  priv->ring_control, priv->ring_control_dma);
	iounmap(priv->map);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	p54_free_common(dev);
}

#ifdef CONFIG_PM_SLEEP
static int p54p_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	pci_disable_device(pdev);
	return 0;
}

static int p54p_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	int err;

	err = pci_reenable_device(pdev);
	if (err)
		return err;
	return pci_set_power_state(pdev, PCI_D0);
}

static SIMPLE_DEV_PM_OPS(p54pci_pm_ops, p54p_suspend, p54p_resume);

#define P54P_PM_OPS (&p54pci_pm_ops)
#else
#define P54P_PM_OPS (NULL)
#endif /* CONFIG_PM_SLEEP */

static struct pci_driver p54p_driver = {
	.name		= "p54pci",
	.id_table	= p54p_table,
	.probe		= p54p_probe,
	.remove		= p54p_remove,
	.driver.pm	= P54P_PM_OPS,
};

module_pci_driver(p54p_driver);