// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2002 Intersil Americas Inc.
 *  Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <linux/if_arp.h>

#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_mgt.h"
#include "isl_oid.h"		/* additional types and defs for isl38xx fw */
#include "isl_ioctl.h"

#include <net/iw_handler.h>

/******************************************************************************
        Global variable definition section
******************************************************************************/
int pc_debug = VERBOSE;
module_param(pc_debug, int, 0);
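
/* pc_debug is a bitmask of the driver's SHOW_* debug flags, consumed by the
 * DEBUG() macro and the verbosity checks below; it can be overridden at
 * module load time via the "pc_debug" module parameter. */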

/******************************************************************************
    Driver general functions
******************************************************************************/
#if VERBOSE > SHOW_ERROR_MESSAGES
void
display_buffer(char *buffer, int length)
{
	if ((pc_debug & SHOW_BUFFER_CONTENTS) == 0)
		return;

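	/* Note: consecutive printk() calls without KERN_CONT may each start
	 * a new log line on recent kernels, splitting up this hex dump. */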
	while (length > 0) {
		printk("[%02x]", *buffer & 255);
		length--;
		buffer++;
	}

	printk("\n");
}
#endif

/*****************************************************************************
    Queue handling for management frames
******************************************************************************/

/*
 * Helper function to create a PIMFOR management frame header.
 */
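/*
 * The oid and length fields travel big-endian on the wire, matching the
 * cpu_to_be32() conversions below; leaving flags at 0 keeps
 * PIMFOR_FLAG_LITTLE_ENDIAN unset.
 */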
static void
pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h)
{
	h->version = PIMFOR_VERSION;
	h->operation = operation;
	h->device_id = PIMFOR_DEV_ID_MHLI_MIB;
	h->flags = 0;
	h->oid = cpu_to_be32(oid);
	h->length = cpu_to_be32(length);
}

/*
 * Helper function to analyze a PIMFOR management frame header.
 */
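/*
 * Frames tunnelled via OID_INL_TUNNEL carry a further PIMFOR header directly
 * behind the outer one.  The loop below byte-swaps each header in place
 * (honouring PIMFOR_FLAG_LITTLE_ENDIAN) and steps over tunnel headers until
 * it finds the innermost header, or returns NULL on overrun.
 */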
static pimfor_header_t *
pimfor_decode_header(void *data, int len)
{
	pimfor_header_t *h = data;

	while ((void *) h < data + len) {
		if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) {
			le32_to_cpus(&h->oid);
			le32_to_cpus(&h->length);
		} else {
			be32_to_cpus(&h->oid);
			be32_to_cpus(&h->length);
		}
		if (h->oid != OID_INL_TUNNEL)
			return h;
		h++;
	}
	return NULL;
}

/*
 * Fill the receive queue for management frames with fresh buffers.
 */
int
islpci_mgmt_rx_fill(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif

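	/* Keep posting buffers until the full window of
	 * ISL38XX_CB_MGMT_QSIZE receive buffers is outstanding.  The u32
	 * subtraction is wrap-safe: it yields the number of buffers posted
	 * to the device but not yet consumed. */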
	while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
		u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		isl38xx_fragment *frag = &cb->rx_data_mgmt[index];

		if (buf->mem == NULL) {
			buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
			if (!buf->mem)
				return -ENOMEM;
			buf->size = MGMT_FRAME_SIZE;
		}
		if (buf->pci_addr == 0) {
			buf->pci_addr = dma_map_single(&priv->pdev->dev,
						       buf->mem,
						       MGMT_FRAME_SIZE,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(&priv->pdev->dev, buf->pci_addr)) {
				printk(KERN_WARNING
				       "Failed to make memory DMA'able.\n");
				return -ENOMEM;
			}
		}

		/* be safe: always reset control block information */
		frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
		frag->flags = 0;
		frag->address = cpu_to_le32(buf->pci_addr);
		curr++;

		/* The fragment address in the control block must have
		 * been written before announcing the frame buffer to the
		 * device. */
		wmb();
		cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
	}
	return 0;
}

/*
 * Create and transmit a management frame using "operation" and "oid",
 * with arguments data/length.
 * We either return an error and free the frame, or we return 0 and
 * islpci_mgt_cleanup_transmit() frees the frame in the tx-done
 * interrupt.
 */
static int
islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
		    void *data, int length)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	void *p;
	int err = -EINVAL;
	unsigned long flags;
	isl38xx_fragment *frag;
	struct islpci_membuf buf;
	u32 curr_frag;
	int index;
	int frag_len = length + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_transmit\n");
#endif

	if (frag_len > MGMT_FRAME_SIZE) {
		printk(KERN_DEBUG "%s: mgmt frame too large %d\n",
		       ndev->name, frag_len);
		goto error;
	}

	err = -ENOMEM;
	p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
	if (!buf.mem)
		goto error;

	buf.size = frag_len;

	/* create the header directly in the fragment data area */
	pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);
	p += PIMFOR_HEADER_SIZE;

	if (data)
		memcpy(p, data, length);
	else
		memset(p, 0, length);

#if VERBOSE > SHOW_ERROR_MESSAGES
	{
		pimfor_header_t *h = buf.mem;
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
		      h->operation, oid, h->device_id, h->flags, length);

		/* display the buffer contents for debugging */
		display_buffer((char *) h, sizeof (pimfor_header_t));
		display_buffer(p, length);
	}
#endif

	err = -ENOMEM;
	buf.pci_addr = dma_map_single(&priv->pdev->dev, buf.mem, frag_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, buf.pci_addr)) {
		printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
		       ndev->name);
		goto error_free;
	}

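	/* Tx bookkeeping mirrors the rx side: driver_curr_frag counts frames
	 * handed to the device, priv->index_mgmt_tx counts frames reclaimed
	 * by islpci_mgt_cleanup_transmit(); their wrap-safe difference is
	 * the current queue occupancy. */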
	/* Protect the control block modifications against interrupts. */
	spin_lock_irqsave(&priv->slock, flags);
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ]);
	if (curr_frag - priv->index_mgmt_tx >= ISL38XX_CB_MGMT_QSIZE) {
		printk(KERN_WARNING "%s: mgmt tx queue is still full\n",
		       ndev->name);
		goto error_unlock;
	}

	/* commit the frame to the tx device queue */
	index = curr_frag % ISL38XX_CB_MGMT_QSIZE;
	priv->mgmt_tx[index] = buf;
	frag = &cb->tx_data_mgmt[index];
	frag->size = cpu_to_le16(frag_len);
	frag->flags = 0;	/* set to 1 for every fragment except the last */
	frag->address = cpu_to_le32(buf.pci_addr);

	/* The fragment address in the control block must have
	 * been written before announcing the frame buffer to the
	 * device. */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ] = cpu_to_le32(curr_frag + 1);
	spin_unlock_irqrestore(&priv->slock, flags);

	/* trigger the device */
	islpci_trigger(priv);
	return 0;

      error_unlock:
	spin_unlock_irqrestore(&priv->slock, flags);
      error_free:
	kfree(buf.mem);
      error:
	return err;
}

/*
 * Receive a management frame from the device.
 * This can be an arbitrary number of traps, and at most one response
 * frame for a previous request sent via islpci_mgt_transmit().
 */
int
islpci_mgt_receive(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
#endif

	/* Only once per interrupt, determine fragment range to
	 * process.  This avoids an endless loop (i.e. lockup) if
	 * frames come in faster than we can process them. */
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
	barrier();

	for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
		pimfor_header_t *header;
		u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		u16 frag_len;
		int size;
		struct islpci_mgmtframe *frame;

		/* I have no idea (and no documentation) if flags != 0
		 * is possible.  Drop the frame, reuse the buffer. */
		if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
			printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
			       ndev->name,
			       le16_to_cpu(cb->rx_data_mgmt[index].flags));
			continue;
		}

		/* The device only returns the size of the header(s) here. */
		frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);

		/*
		 * We appear to have no way to tell the device the
		 * size of a receive buffer.  Thus, if this check
		 * triggers, we likely have kernel heap corruption. */
		if (frag_len > MGMT_FRAME_SIZE) {
			printk(KERN_WARNING
				"%s: Bogus packet size of %d (%#x).\n",
				ndev->name, frag_len, frag_len);
			frag_len = MGMT_FRAME_SIZE;
		}

		/* Ensure the results of device DMA are visible to the CPU. */
		dma_sync_single_for_cpu(&priv->pdev->dev, buf->pci_addr,
					buf->size, DMA_FROM_DEVICE);

		/* Perform endianness conversion for the PIMFOR header in place. */
		header = pimfor_decode_header(buf->mem, frag_len);
		if (!header) {
			printk(KERN_WARNING "%s: no PIMFOR header found\n",
			       ndev->name);
			continue;
		}

		/* The device ID from the PIMFOR packet received from
		 * the MVC is always 0.  We forward a sensible device_id.
		 * Not that anyone upstream would care... */
		header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
		      header->operation, header->oid, header->device_id,
		      header->flags, header->length);

		/* display the buffer contents for debugging */
		display_buffer((char *) header, PIMFOR_HEADER_SIZE);
		display_buffer((char *) header + PIMFOR_HEADER_SIZE,
			       header->length);
#endif

		/* nobody sends these */
		if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
			printk(KERN_DEBUG
			       "%s: errant PIMFOR application frame\n",
			       ndev->name);
			continue;
		}

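		/* Note: header->length is supplied by the device and is not
		 * checked against frag_len here; the copy below trusts the
		 * firmware to report a sane payload length. */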
		/* Determine frame size, skipping OID_INL_TUNNEL headers. */
		size = PIMFOR_HEADER_SIZE + header->length;
		frame = kmalloc(sizeof(struct islpci_mgmtframe) + size,
				GFP_ATOMIC);
		if (!frame)
			continue;

		frame->ndev = ndev;
		memcpy(&frame->buf, header, size);
		frame->header = (pimfor_header_t *) frame->buf;
		frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "frame: header: %p, data: %p, size: %d\n",
		      frame->header, frame->data, size);
#endif

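		/* Traps are asynchronous notifications and are punted to
		 * process context; anything else is treated as the response
		 * to the single outstanding request and handed to the
		 * waiter via priv->mgmt_received. */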
		if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			printk(KERN_DEBUG
			       "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
			       header->oid, header->device_id, header->flags,
			       header->length);
#endif

			/* Create work to handle trap out of interrupt
			 * context. */
			INIT_WORK(&frame->ws, prism54_process_trap);
			schedule_work(&frame->ws);

		} else {
			/* Signal the one waiting process that a response
			 * has been received. */
			if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
				printk(KERN_WARNING
				       "%s: mgmt response not collected\n",
				       ndev->name);
				kfree(frame);
			}
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
			wake_up(&priv->mgmt_wqueue);
		}

	}

	return 0;
}

/*
 * Clean up the transmit queue by freeing all frames handled by the device.
 */
void
islpci_mgt_cleanup_transmit(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_cleanup_transmit\n");
#endif

	/* Only once per cleanup, determine fragment range to
	 * process.  This avoids an endless loop (i.e. lockup) if
	 * the device became confused, incrementing device_curr_frag
	 * rapidly. */
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_TX_MGMTQ]);
	barrier();

	for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) {
		int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_tx[index];
		dma_unmap_single(&priv->pdev->dev, buf->pci_addr, buf->size,
				 DMA_TO_DEVICE);
		buf->pci_addr = 0;
		kfree(buf->mem);
		buf->mem = NULL;
		buf->size = 0;
	}
}

/*
 * Perform one request-response transaction to the device.
 */
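/*
 * Sketch of a typical caller (names per islpci_mgt.h; the release helper
 * is assumed to free the response frame):
 *
 *	struct islpci_mgmtframe *response = NULL;
 *	int err = islpci_mgt_transaction(ndev, PIMFOR_OP_GET, oid,
 *					 NULL, data_len, &response);
 *	if (!err) {
 *		... consume response->data / response->header->length ...
 *		islpci_mgt_release(response);
 *	}
 */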
int
islpci_mgt_transaction(struct net_device *ndev,
		       int operation, unsigned long oid,
		       void *senddata, int sendlen,
		       struct islpci_mgmtframe **recvframe)
{
	islpci_private *priv = netdev_priv(ndev);
	const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
	long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
	int err;
	DEFINE_WAIT(wait);

	*recvframe = NULL;

	if (mutex_lock_interruptible(&priv->mgmt_lock))
		return -ERESTARTSYS;

	prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
	err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
	if (err)
		goto out;

	err = -ETIMEDOUT;
	while (timeout_left > 0) {
		int timeleft;
		struct islpci_mgmtframe *frame;

		timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
		frame = xchg(&priv->mgmt_received, NULL);
		if (frame) {
			if (frame->header->oid == oid) {
				*recvframe = frame;
				err = 0;
				goto out;
			} else {
				printk(KERN_DEBUG
				       "%s: expecting oid 0x%x, received 0x%x.\n",
				       ndev->name, (unsigned int) oid,
				       frame->header->oid);
				kfree(frame);
				frame = NULL;
			}
		}
		if (timeleft == 0) {
			printk(KERN_DEBUG
				"%s: timeout waiting for mgmt response %lu, "
				"triggering device\n",
				ndev->name, timeout_left);
			islpci_trigger(priv);
		}
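		/* timeleft is the unexpired part of the wait cycle (nonzero
		 * when woken early), so charge only the time actually slept
		 * against the overall budget. */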
		timeout_left += timeleft - wait_cycle_jiffies;
	}
	printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
	       ndev->name);

	/* TODO: we should reset the device here */
 out:
	finish_wait(&priv->mgmt_wqueue, &wait);
	mutex_unlock(&priv->mgmt_lock);
	return err;
}