/*
 * File Name:
 *   defxx.c
 *
 * Copyright Information:
 *   Copyright Digital Equipment Corporation 1996.
 *
 *   This software may be used and distributed according to the terms of
 *   the GNU General Public License, incorporated herein by reference.
 *
 * Abstract:
 *   A Linux device driver supporting the Digital Equipment Corporation
 *   FDDI TURBOchannel, EISA and PCI controller families.  Supported
 *   adapters include:
 *
 *		DEC FDDIcontroller/TURBOchannel (DEFTA)
 *		DEC FDDIcontroller/EISA         (DEFEA)
 *		DEC FDDIcontroller/PCI          (DEFPA)
 *
 * The original author:
 *   LVS	Lawrence V. Stefani <lstefani@yahoo.com>
 *
 * Maintainers:
 *   macro	Maciej W. Rozycki <macro@linux-mips.org>
 *
 * Credits:
 *   I'd like to thank Patricia Cross for helping me get started with
 *   Linux, David Davies for a lot of help upgrading and configuring
 *   my development system and for answering many OS and driver
 *   development questions, and Alan Cox for recommendations and
 *   integration help on getting FDDI support into Linux.  LVS
 *
 * Driver Architecture:
 *   The driver architecture is largely based on previous driver work
 *   for other operating systems.  The upper edge interface and
 *   functions were largely taken from existing Linux device drivers
 *   such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
 *   driver.
 *
 *   Adapter Probe -
 *		The driver scans for supported EISA adapters by reading the
 *		SLOT ID register for each EISA slot and making a match
 *		against the expected value.
 *
 *   Bus-Specific Initialization -
 *		This driver currently supports both EISA and PCI controller
 *		families.  While the custom DMA chip and FDDI logic is similar
 *		or identical, the bus logic is very different.  After
 *		initialization, the only bus-specific difference is in how the
 *		driver enables and disables interrupts.  Other than that, the
 *		run-time critical code behaves the same on both families.
 *		It's important to note that both adapter families are configured
 *		to I/O map, rather than memory map, the adapter registers.
 *
 *   Driver Open/Close -
 *		In the driver open routine, the driver ISR (interrupt service
 *		routine) is registered and the adapter is brought to an
 *		operational state.  In the driver close routine, the opposite
 *		occurs; the driver ISR is deregistered and the adapter is
 *		brought to a safe, but closed state.  Users may use consecutive
 *		commands to bring the adapter up and down as in the following
 *		example:
 *					ifconfig fddi0 up
 *					ifconfig fddi0 down
 *					ifconfig fddi0 up
 *
 *   Driver Shutdown -
 *		Apparently, there is no shutdown or halt routine support under
 *		Linux.  This routine would be called during "reboot" or
 *		"shutdown" to allow the driver to place the adapter in a safe
 *		state before a warm reboot occurs.  To be really safe, the user
 *		should close the adapter before shutdown (e.g. ifconfig fddi0 down)
 *		to ensure that the adapter DMA engine is taken off-line.  However,
 *		the current driver code anticipates this problem and always issues
 *		a soft reset of the adapter at the beginning of driver initialization.
 *		A future driver enhancement in this area may occur in 2.1.X where
 *		Alan indicated that a shutdown handler may be implemented.
 *
 *   Interrupt Service Routine -
 *		The driver supports shared interrupts, so the ISR is registered for
 *		each board with the appropriate flag and the pointer to that board's
 *		device structure.  This provides the context during interrupt
 *		processing to support shared interrupts and multiple boards.
 *
 *		Interrupt enabling/disabling can occur at many levels.  At the host
 *		end, you can disable system interrupts, or disable interrupts at the
 *		PIC (on Intel systems).  Across the bus, both EISA and PCI adapters
 *		have a bus-logic chip interrupt enable/disable as well as a DMA
 *		controller interrupt enable/disable.
 *
 *		The driver currently enables and disables adapter interrupts at the
 *		bus-logic chip and assumes that Linux will take care of clearing or
 *		acknowledging any host-based interrupt chips.
 *
 *   Control Functions -
 *		Control functions are those used to support functions such as adding
 *		or deleting multicast addresses, enabling or disabling packet
 *		reception filters, or other custom/proprietary commands.  Presently,
 *		the driver supports the "get statistics", "set multicast list", and
 *		"set mac address" functions defined by Linux.  Possible
 *		enhancements include:
 *
 *				- Custom ioctl interface for executing port interface commands
 *				- Custom ioctl interface for adding unicast addresses to
 *				  adapter CAM (to support bridge functions).
 *				- Custom ioctl interface for supporting firmware upgrades.
 *
 *   Hardware (port interface) Support Routines -
 *		The driver function names that start with "dfx_hw_" represent
 *		low-level port interface routines that are called frequently.  They
 *		include issuing a DMA or port control command to the adapter,
 *		resetting the adapter, or reading the adapter state.  Since the
 *		driver initialization and run-time code must make calls into the
 *		port interface, these routines were written to be as generic and
 *		usable as possible.
 *
 *   Receive Path -
 *		The adapter DMA engine supports a 256 entry receive descriptor block
 *		of which up to 255 entries can be used at any given time.  The
 *		architecture is a standard producer, consumer, completion model in
 *		which the driver "produces" receive buffers to the adapter, the
 *		adapter "consumes" the receive buffers by DMAing incoming packet data,
 *		and the driver "completes" the receive buffers by servicing the
 *		incoming packet, then "produces" a new buffer and starts the cycle
 *		again.  Receive buffers can be fragmented in up to 16 fragments
 *		(descriptor entries).  For simplicity, this driver posts
 *		single-fragment receive buffers of 4608 bytes, then allocates an
 *		sk_buff, copies the data, then reposts the buffer.  To reduce CPU
 *		utilization, a better approach would be to pass up the receive
 *		buffer (no extra copy) then allocate and post a replacement buffer.
 *		This is a performance enhancement that should be looked into at
 *		some point.
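 *
 *		A simplified sketch of that per-buffer cycle (illustrative only,
 *		not the literal receive code):
 *
 *			produce:   post a single-fragment 4608-byte buffer to the
 *			           receive descriptor ring
 *			consume:   the adapter DMAs an incoming frame into it
 *			complete:  allocate an sk_buff, copy the frame out, hand it
 *			           to the stack, then repost the same buffer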
 *
 *   Transmit Path -
 *		Like the receive path, the adapter DMA engine supports a 256 entry
 *		transmit descriptor block of which up to 255 entries can be used at
 *		any given time.  Transmit buffers can be fragmented in up to 255
 *		fragments (descriptor entries).  This driver always posts one
 *		fragment per transmit packet request.
 *
 *		The fragment contains the entire packet from FC to end of data.
 *		Before posting the buffer to the adapter, the driver sets a three-byte
 *		packet request header (PRH) which is required by the Motorola MAC chip
 *		used on the adapters.  The PRH tells the MAC the type of token to
 *		receive/send, whether or not to generate and append the CRC, whether
 *		synchronous or asynchronous framing is used, etc.  Since the PRH
 *		definition is not necessarily consistent across all FDDI chipsets,
 *		the driver, rather than the common FDDI packet handler routines,
 *		sets these bytes.
 *
 *		To reduce the number of descriptor fetches needed per transmit request,
 *		the driver takes advantage of the fact that there are at least three
 *		bytes available before the skb->data field on the outgoing transmit
 *		request.  This is guaranteed by having fddi_setup() in net_init.c set
 *		dev->hard_header_len to 24 bytes.  21 bytes accounts for the largest
 *		header in an 802.2 SNAP frame.  The other 3 bytes are the extra "pad"
 *		bytes which we'll use to store the PRH.
 *
 *		There's a subtle advantage to adding these pad bytes to the
 *		hard_header_len: it ensures that the data portion of the packet for
 *		an 802.2 SNAP frame is longword aligned.  Other FDDI driver
 *		implementations may not need the extra padding and can start copying
 *		or DMAing directly from the FC byte which starts at skb->data.  Should
 *		another driver implementation need ADDITIONAL padding, the net_init.c
 *		module should be updated and dev->hard_header_len should be increased.
 *		NOTE: To maintain the alignment on the data portion of the packet,
 *		dev->hard_header_len should always be evenly divisible by 4 and at
 *		least 24 bytes in size.
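 *
 *		As a worked example of that arithmetic (illustrative only):
 *
 *			FC (1) + DA (6) + SA (6) + LLC (3) + SNAP (5) = 21 bytes
 *			largest 802.2 SNAP header + 3 PRH/pad bytes   = 24 bytes
 *
 *		and since 24 is evenly divisible by 4, the payload that follows
 *		an 802.2 SNAP header ends up longword aligned.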
 *
 * Modification History:
 *		Date		Name	Description
 *		16-Aug-96	LVS		Created.
 *		20-Aug-96	LVS		Updated dfx_probe so that version information
 *							string is only displayed if 1 or more cards are
 *							found.  Changed dfx_rcv_queue_process to copy
 *							3 NULL bytes before FC to ensure that data is
 *							longword aligned in receive buffer.
 *		09-Sep-96	LVS		Updated dfx_ctl_set_multicast_list to enable
 *							LLC group promiscuous mode if multicast list
 *							is too large.  LLC individual/group promiscuous
 *							mode is now disabled if IFF_PROMISC flag not set.
 *							dfx_xmt_queue_pkt no longer checks for NULL skb
 *							on Alan Cox recommendation.  Added node address
 *							override support.
 *		12-Sep-96	LVS		Reset current address to factory address during
 *							device open.  Updated transmit path to post a
 *							single fragment which includes PRH->end of data.
 *		Mar 2000	AC		Did various cleanups for 2.3.x
 *		Jun 2000	jgarzik		PCI and resource alloc cleanups
 *		Jul 2000	tjeerd		Much cleanup and some bug fixes
 *		Sep 2000	tjeerd		Fix leak on unload, cosmetic code cleanup
 *		Feb 2001			Skb allocation fixes
 *		Feb 2001	davej		PCI enable cleanups.
 *		04 Aug 2003	macro		Converted to the DMA API.
 *		14 Aug 2004	macro		Fix device names reported.
 *		14 Jun 2005	macro		Use irqreturn_t.
 *		23 Oct 2006	macro		Big-endian host support.
 *		14 Dec 2006	macro		TURBOchannel support.
 *		01 Jul 2014	macro		Fixes for DMA on 64-bit hosts.
 */

/* Include files */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/eisa.h>
#include <linux/errno.h>
#include <linux/fddidevice.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tc.h>

#include <asm/byteorder.h>
#include <asm/io.h>

#include "defxx.h"

/* Version information string should be updated prior to each new release!  */
#define DRV_NAME "defxx"
#define DRV_VERSION "v1.11"
#define DRV_RELDATE "2014/07/01"

static const char version[] =
	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
	"  Lawrence V. Stefani and others\n";

#define DYNAMIC_BUFFERS 1

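/*
 * Receive copybreak threshold: frames no longer than this are meant to be
 * copied into a freshly allocated sk_buff so that the posted receive buffer
 * can be reused right away (see dfx_rcv_queue_process), rather than handing
 * the original buffer up the stack.
 */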
#define SKBUFF_RX_COPYBREAK 200
/*
 * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
 * alignment for compatibility with old EISA boards.
 */
#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)

#ifdef CONFIG_EISA
#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
#else
#define DFX_BUS_EISA(dev) 0
#endif

#ifdef CONFIG_TC
#define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
#else
#define DFX_BUS_TC(dev) 0
#endif

#ifdef CONFIG_DEFXX_MMIO
#define DFX_MMIO 1
#else
#define DFX_MMIO 0
#endif

/* Define module-wide (static) routines */

static void		dfx_bus_init(struct net_device *dev);
static void		dfx_bus_uninit(struct net_device *dev);
static void		dfx_bus_config_check(DFX_board_t *bp);

static int		dfx_driver_init(struct net_device *dev,
					const char *print_name,
					resource_size_t bar_start);
static int		dfx_adap_init(DFX_board_t *bp, int get_buffers);

static int		dfx_open(struct net_device *dev);
static int		dfx_close(struct net_device *dev);

static void		dfx_int_pr_halt_id(DFX_board_t *bp);
static void		dfx_int_type_0_process(DFX_board_t *bp);
static void		dfx_int_common(struct net_device *dev);
static irqreturn_t	dfx_interrupt(int irq, void *dev_id);

static struct		net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
static void		dfx_ctl_set_multicast_list(struct net_device *dev);
static int		dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
static int		dfx_ctl_update_cam(DFX_board_t *bp);
static int		dfx_ctl_update_filters(DFX_board_t *bp);

static int		dfx_hw_dma_cmd_req(DFX_board_t *bp);
static int		dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32	command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
static void		dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
static int		dfx_hw_adap_state_rd(DFX_board_t *bp);
static int		dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);

static int		dfx_rcv_init(DFX_board_t *bp, int get_buffers);
static void		dfx_rcv_queue_process(DFX_board_t *bp);
#ifdef DYNAMIC_BUFFERS
static void		dfx_rcv_flush(DFX_board_t *bp);
#else
static inline void	dfx_rcv_flush(DFX_board_t *bp) {}
#endif

static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
				     struct net_device *dev);
static int		dfx_xmt_done(DFX_board_t *bp);
static void		dfx_xmt_flush(DFX_board_t *bp);

/* Define module-wide (static) variables */

static struct pci_driver dfx_pci_driver;
static struct eisa_driver dfx_eisa_driver;
static struct tc_driver dfx_tc_driver;


/*
 * =======================
 * = dfx_port_write_long =
 * = dfx_port_read_long  =
 * =======================
 *
 * Overview:
 *   Routines for reading and writing values from/to adapter
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp		- pointer to board information
 *   offset	- register offset from base I/O address
 *   data	- for dfx_port_write_long, this is a value to write;
 *		  for dfx_port_read_long, this is a pointer to store
 *		  the read value
 *
 * Functional Description:
 *   These routines perform the correct operation to read or write
 *   the adapter register.
 *
 *   EISA port block base addresses are based on the slot number in which the
 *   controller is installed.  For example, if the EISA controller is installed
 *   in slot 4, the port block base address is 0x4000.  If the controller is
 *   installed in slot 2, the port block base address is 0x2000, and so on.
 *   This port block can be used to access PDQ, ESIC, and DEFEA on-board
 *   registers using the register offsets defined in DEFXX.H.
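 *   (Effectively, the port block base is just the EISA slot number shifted
 *   into the top nibble of the 16-bit I/O address, i.e. slot << 12.)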
 *
 *   PCI port block base addresses are assigned by the PCI BIOS or system
 *   firmware.  There is one 128 byte port block which can be accessed.  It
 *   allows for I/O mapping of both PDQ and PFI registers using the register
 *   offsets defined in DEFXX.H.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   bp->base is a valid base I/O address for this adapter.
 *   offset is a valid register offset for this adapter.
 *
 * Side Effects:
 *   Rather than produce macros for these functions, these routines
 *   are defined using "inline" to ensure that the compiler will
 *   generate inline code and not waste a procedure call and return.
 *   This provides all the benefits of macros, but with the
 *   advantage of strict data type checking.
 */

static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
{
	writel(data, bp->base.mem + offset);
	mb();
}

static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
{
	outl(data, bp->base.port + offset);
}

static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_use_mmio)
		dfx_writel(bp, offset, data);
	else
		dfx_outl(bp, offset, data);
}


static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
{
	mb();
	*data = readl(bp->base.mem + offset);
}

static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
{
	*data = inl(bp->base.port + offset);
}

static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_use_mmio)
		dfx_readl(bp, offset, data);
	else
		dfx_inl(bp, offset, data);
}
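
/*
 * A typical call site elsewhere in this file: dfx_driver_init() masks all
 * PDQ interrupts before resetting the adapter with
 *
 *	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB,
 *			    PI_HOST_INT_K_DISABLE_ALL_INTS);
 */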


/*
 * ================
 * = dfx_get_bars =
 * ================
 *
 * Overview:
 *   Retrieves the address ranges used to access control and status
 *   registers.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bdev	- pointer to device information
 *   bar_start	- pointer to store the start addresses
 *   bar_len	- pointer to store the lengths of the areas
 *
 * Assumptions:
 *   I am sure there are some.
 *
 * Side Effects:
 *   None
 */
static void dfx_get_bars(struct device *bdev,
			 resource_size_t *bar_start, resource_size_t *bar_len)
{
	int dfx_bus_pci = dev_is_pci(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_bus_pci) {
		int num = dfx_use_mmio ? 0 : 1;

		bar_start[0] = pci_resource_start(to_pci_dev(bdev), num);
		bar_len[0] = pci_resource_len(to_pci_dev(bdev), num);
		bar_start[2] = bar_start[1] = 0;
		bar_len[2] = bar_len[1] = 0;
	}
	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
		resource_size_t bar_lo;
		resource_size_t bar_hi;

		if (dfx_use_mmio) {
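			/*
			 * The ESIC memory decode window is programmed through
			 * three 8-bit address compare registers for each of
			 * the low and high bounds; assembled below they form
			 * bits 31:8 of the respective addresses.
			 */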
			bar_lo = inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_2);
			bar_lo <<= 8;
			bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_1);
			bar_lo <<= 8;
			bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_0);
			bar_lo <<= 8;
			bar_start[0] = bar_lo;
			bar_hi = inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_2);
			bar_hi <<= 8;
			bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_1);
			bar_hi <<= 8;
			bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_0);
			bar_hi <<= 8;
			bar_len[0] = ((bar_hi - bar_lo) | PI_MEM_ADD_MASK_M) +
				     1;
		} else {
			bar_start[0] = base_addr;
			bar_len[0] = PI_ESIC_K_CSR_IO_LEN;
		}
		bar_start[1] = base_addr + PI_DEFEA_K_BURST_HOLDOFF;
		bar_len[1] = PI_ESIC_K_BURST_HOLDOFF_LEN;
		bar_start[2] = base_addr + PI_ESIC_K_ESIC_CSR;
		bar_len[2] = PI_ESIC_K_ESIC_CSR_LEN;
	}
	if (dfx_bus_tc) {
		bar_start[0] = to_tc_dev(bdev)->resource.start +
			       PI_TC_K_CSR_OFFSET;
		bar_len[0] = PI_TC_K_CSR_LEN;
		bar_start[2] = bar_start[1] = 0;
		bar_len[2] = bar_len[1] = 0;
	}
}

static const struct net_device_ops dfx_netdev_ops = {
	.ndo_open		= dfx_open,
	.ndo_stop		= dfx_close,
	.ndo_start_xmit		= dfx_xmt_queue_pkt,
	.ndo_get_stats		= dfx_ctl_get_stats,
	.ndo_set_rx_mode	= dfx_ctl_set_multicast_list,
	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
};

static void dfx_register_res_alloc_err(const char *print_name, bool mmio,
				       bool eisa)
{
	pr_err("%s: Cannot use %s, no address set, aborting\n",
	       print_name, mmio ? "MMIO" : "I/O");
	pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n",
	       print_name, mmio ? 'n' : 'y');
	if (eisa && mmio)
		pr_err("%s: Or run ECU and set adapter's MMIO location\n",
		       print_name);
}

static void dfx_register_res_err(const char *print_name, bool mmio,
				 unsigned long start, unsigned long len)
{
	pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
	       print_name, mmio ? "MMIO" : "I/O", len, start);
}

/*
 * ================
 * = dfx_register =
 * ================
 *
 * Overview:
 *   Initializes a supported FDDI controller
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   bdev - pointer to device information
 *
 * Functional Description:
 *
 * Return Codes:
 *   0		 - This device (fddi0, fddi1, etc) configured successfully
 *   -EBUSY      - Failed to get resources, or dfx_driver_init failed.
 *
 * Assumptions:
 *   It compiles so it should work :-( (PCI cards do :-)
 *
 * Side Effects:
 *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
 *   initialized and the board resources are read and stored in
 *   the device structure.
 */
static int dfx_register(struct device *bdev)
{
	static int version_disp;
	int dfx_bus_pci = dev_is_pci(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	const char *print_name = dev_name(bdev);
	struct net_device *dev;
	DFX_board_t	  *bp;			/* board pointer */
	resource_size_t bar_start[3] = {0};	/* pointers to ports */
	resource_size_t bar_len[3] = {0};	/* resource length */
	int alloc_size;				/* total buffer size used */
	struct resource *region;
	int err = 0;

	if (!version_disp) {	/* display version info if adapter is found */
		version_disp = 1;	/* set display flag to TRUE so that */
		printk(version);	/* we only display this string ONCE */
	}

	dev = alloc_fddidev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
		       print_name);
		return -ENOMEM;
	}

	/* Enable PCI device. */
	if (dfx_bus_pci) {
		err = pci_enable_device(to_pci_dev(bdev));
		if (err) {
			pr_err("%s: Cannot enable PCI device, aborting\n",
			       print_name);
			goto err_out;
		}
	}

	SET_NETDEV_DEV(dev, bdev);

	bp = netdev_priv(dev);
	bp->bus_dev = bdev;
	dev_set_drvdata(bdev, dev);

	dfx_get_bars(bdev, bar_start, bar_len);
	if (bar_len[0] == 0 ||
	    (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
		dfx_register_res_alloc_err(print_name, dfx_use_mmio,
					   dfx_bus_eisa);
		err = -ENXIO;
		goto err_out_disable;
	}

	if (dfx_use_mmio)
		region = request_mem_region(bar_start[0], bar_len[0],
					    print_name);
	else
		region = request_region(bar_start[0], bar_len[0], print_name);
	if (!region) {
		dfx_register_res_err(print_name, dfx_use_mmio,
				     bar_start[0], bar_len[0]);
		err = -EBUSY;
		goto err_out_disable;
	}
	if (bar_start[1] != 0) {
		region = request_region(bar_start[1], bar_len[1], print_name);
		if (!region) {
			dfx_register_res_err(print_name, 0,
					     bar_start[1], bar_len[1]);
			err = -EBUSY;
			goto err_out_csr_region;
		}
	}
	if (bar_start[2] != 0) {
		region = request_region(bar_start[2], bar_len[2], print_name);
		if (!region) {
			dfx_register_res_err(print_name, 0,
					     bar_start[2], bar_len[2]);
			err = -EBUSY;
			goto err_out_bh_region;
		}
	}

	/* Set up I/O base address. */
	if (dfx_use_mmio) {
		bp->base.mem = ioremap(bar_start[0], bar_len[0]);
		if (!bp->base.mem) {
			printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
			err = -ENOMEM;
			goto err_out_esic_region;
		}
	} else {
		bp->base.port = bar_start[0];
		dev->base_addr = bar_start[0];
	}

	/* Initialize new device structure */
	dev->netdev_ops			= &dfx_netdev_ops;

	if (dfx_bus_pci)
		pci_set_master(to_pci_dev(bdev));

	if (dfx_driver_init(dev, print_name, bar_start[0]) != DFX_K_SUCCESS) {
		err = -ENODEV;
		goto err_out_unmap;
	}

	err = register_netdev(dev);
	if (err)
		goto err_out_kfree;

	printk("%s: registered as %s\n", print_name, dev->name);
	return 0;

err_out_kfree:
	alloc_size = sizeof(PI_DESCR_BLOCK) +
		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
		     sizeof(PI_CONSUMER_BLOCK) +
		     (PI_ALIGN_K_DESC_BLK - 1);
	if (bp->kmalloced)
		dma_free_coherent(bdev, alloc_size,
				  bp->kmalloced, bp->kmalloced_dma);

err_out_unmap:
	if (dfx_use_mmio)
		iounmap(bp->base.mem);

err_out_esic_region:
	if (bar_start[2] != 0)
		release_region(bar_start[2], bar_len[2]);

err_out_bh_region:
	if (bar_start[1] != 0)
		release_region(bar_start[1], bar_len[1]);

err_out_csr_region:
	if (dfx_use_mmio)
		release_mem_region(bar_start[0], bar_len[0]);
	else
		release_region(bar_start[0], bar_len[0]);

err_out_disable:
	if (dfx_bus_pci)
		pci_disable_device(to_pci_dev(bdev));

err_out:
	free_netdev(dev);
	return err;
}


/*
 * ================
 * = dfx_bus_init =
 * ================
 *
 * Overview:
 *   Initializes the bus-specific controller logic.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   dev - pointer to device information
 *
 * Functional Description:
 *   Determine and save adapter IRQ in device table,
 *   then perform bus-specific logic initialization.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   bp->base has already been set with the proper
 *	 base I/O address for this device.
 *
 * Side Effects:
 *   Interrupts are enabled at the adapter bus-specific logic.
 *   Note:  Interrupts at the DMA engine (PDQ chip) are not
 *   enabled yet.
 */

static void dfx_bus_init(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = dev_is_pci(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	u8 val;

	DBG_printk("In dfx_bus_init...\n");

	/* Initialize a pointer back to the net_device struct */
	bp->dev = dev;

	/* Initialize adapter based on bus type */

	if (dfx_bus_tc)
		dev->irq = to_tc_dev(bdev)->interrupt;
	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;

		/* Disable the board before fiddling with the decoders.  */
		outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);

		/* Get the interrupt level from the ESIC chip.  */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val &= PI_CONFIG_STAT_0_M_IRQ;
		val >>= PI_CONFIG_STAT_0_V_IRQ;

		switch (val) {
		case PI_CONFIG_STAT_0_IRQ_K_9:
			dev->irq = 9;
			break;

		case PI_CONFIG_STAT_0_IRQ_K_10:
			dev->irq = 10;
			break;

		case PI_CONFIG_STAT_0_IRQ_K_11:
			dev->irq = 11;
			break;

		case PI_CONFIG_STAT_0_IRQ_K_15:
			dev->irq = 15;
			break;
		}

		/*
		 * Enable memory decoding (MEMCS1) and/or port decoding
		 * (IOCS1/IOCS0) as appropriate in Function Control
		 * Register.  MEMCS1 or IOCS0 is used for PDQ registers,
		 * taking 16 32-bit words, while IOCS1 is used for the
		 * Burst Holdoff register, taking a single 32-bit word
		 * only.  We use the slot-specific I/O range as per the
		 * ESIC spec, that is set bits 15:12 in the mask registers
		 * to mask them out.
		 */

		/* Set the decode range of the board.  */
		val = 0;
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
		val = PI_DEFEA_K_CSR_IO;
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);

		val = PI_IO_CMP_M_SLOT;
		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
		val = (PI_ESIC_K_CSR_IO_LEN - 1) & ~3;
		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);

		val = 0;
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
		val = PI_DEFEA_K_BURST_HOLDOFF;
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);

		val = PI_IO_CMP_M_SLOT;
		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
		val = (PI_ESIC_K_BURST_HOLDOFF_LEN - 1) & ~3;
		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);

		/* Enable the decoders.  */
		val = PI_FUNCTION_CNTRL_M_IOCS1;
		if (dfx_use_mmio)
			val |= PI_FUNCTION_CNTRL_M_MEMCS1;
		else
			val |= PI_FUNCTION_CNTRL_M_IOCS0;
		outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);

		/*
		 * Enable access to the rest of the module
		 * (including PDQ and packet memory).
		 */
		val = PI_SLOT_CNTRL_M_ENB;
		outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);

		/*
		 * Map PDQ registers into memory or port space.  This is
		 * done with a bit in the Burst Holdoff register.
		 */
		val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
		if (dfx_use_mmio)
			val |= PI_BURST_HOLDOFF_M_MEM_MAP;
		else
			val &= ~PI_BURST_HOLDOFF_M_MEM_MAP;
		outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);

		/* Enable interrupts at EISA bus interface chip (ESIC) */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val |= PI_CONFIG_STAT_0_M_INT_ENB;
		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
	}
	if (dfx_bus_pci) {
		struct pci_dev *pdev = to_pci_dev(bdev);

		/* Get the interrupt level from the PCI Configuration Table */

		dev->irq = pdev->irq;

		/* Check Latency Timer and set if less than minimal */

		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
		if (val < PFI_K_LAT_TIMER_MIN) {
			val = PFI_K_LAT_TIMER_DEF;
			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
		}

		/* Enable interrupts at PCI bus interface chip (PFI) */
		val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
	}
}

/*
 * ==================
 * = dfx_bus_uninit =
 * ==================
 *
 * Overview:
 *   Uninitializes the bus-specific controller logic.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   dev - pointer to device information
 *
 * Functional Description:
 *   Perform bus-specific logic uninitialization.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   bp->base has already been set with the proper
 *	 base I/O address for this device.
 *
 * Side Effects:
 *   Interrupts are disabled at the adapter bus-specific logic.
 */

static void dfx_bus_uninit(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = dev_is_pci(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	u8 val;

	DBG_printk("In dfx_bus_uninit...\n");

	/* Uninitialize adapter based on bus type */

	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;

		/* Disable interrupts at EISA bus interface chip (ESIC) */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);

		/* Disable the board.  */
		outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);

		/* Disable memory and port decoders.  */
		outb(0, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
	}
	if (dfx_bus_pci) {
		/* Disable interrupts at PCI bus interface chip (PFI) */
		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
	}
}


/*
 * ========================
 * = dfx_bus_config_check =
 * ========================
 *
 * Overview:
 *   Checks the configuration (burst size, full-duplex, etc.).  If any
 *   parameters are illegal, then this routine will set new defaults.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
 *   PDQ, and all FDDI PCI controllers, all values are legal.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   dfx_adap_init has NOT been called yet so burst size and other items have
 *   not been set.
 *
 * Side Effects:
 *   None
 */

static void dfx_bus_config_check(DFX_board_t *bp)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int	status;				/* return code from adapter port control call */
	u32	host_data;			/* LW data returned from port control call */

	DBG_printk("In dfx_bus_config_check...\n");

	/* Configuration check only valid for EISA adapter */

	if (dfx_bus_eisa) {
		/*
		 * First check if revision 2 EISA controller.  Rev. 1 cards used
		 * PDQ revision B, so no workaround needed in this case.  Rev. 3
		 * cards used PDQ revision E, so no workaround needed in this
		 * case, either.  Only Rev. 2 cards used either Rev. D or E
		 * chips, so we must verify the chip revision on Rev. 2 cards.
		 */
		if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
			/*
			 * Revision 2 FDDI EISA controller found,
			 * so let's check PDQ revision of adapter.
			 */
			status = dfx_hw_port_ctrl_req(bp,
											PI_PCTRL_M_SUB_CMD,
											PI_SUB_CMD_K_PDQ_REV_GET,
											0,
											&host_data);
			if ((status != DFX_K_SUCCESS) || (host_data == 2))
				{
				/*
				 * Either we couldn't determine the PDQ revision, or
				 * we determined that it is at revision D.  In either case,
				 * we need to implement the workaround.
				 */

				/* Ensure that the burst size is set to 8 longwords or less */

				switch (bp->burst_size)
					{
					case PI_PDATA_B_DMA_BURST_SIZE_32:
					case PI_PDATA_B_DMA_BURST_SIZE_16:
						bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
						break;

					default:
						break;
					}

				/* Ensure that full-duplex mode is not enabled */

				bp->full_duplex_enb = PI_SNMP_K_FALSE;
				}
			}
		}
	}
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun /*
1013*4882a593Smuzhiyun  * ===================
1014*4882a593Smuzhiyun  * = dfx_driver_init =
1015*4882a593Smuzhiyun  * ===================
1016*4882a593Smuzhiyun  *
1017*4882a593Smuzhiyun  * Overview:
1018*4882a593Smuzhiyun  *   Initializes remaining adapter board structure information
1019*4882a593Smuzhiyun  *   and makes sure adapter is in a safe state prior to dfx_open().
1020*4882a593Smuzhiyun  *
1021*4882a593Smuzhiyun  * Returns:
1022*4882a593Smuzhiyun  *   Condition code
1023*4882a593Smuzhiyun  *
1024*4882a593Smuzhiyun  * Arguments:
1025*4882a593Smuzhiyun  *   dev - pointer to device information
1026*4882a593Smuzhiyun  *   print_name - printable device name
1027*4882a593Smuzhiyun  *
1028*4882a593Smuzhiyun  * Functional Description:
1029*4882a593Smuzhiyun  *   This function allocates additional resources such as the host memory
1030*4882a593Smuzhiyun  *   blocks needed by the adapter (eg. descriptor and consumer blocks).
1031*4882a593Smuzhiyun  *	 Remaining bus initialization steps are also completed.  The adapter
1032*4882a593Smuzhiyun  *   is also reset so that it is in the DMA_UNAVAILABLE state.  The OS
1033*4882a593Smuzhiyun  *   must call dfx_open() to open the adapter and bring it on-line.
1034*4882a593Smuzhiyun  *
1035*4882a593Smuzhiyun  * Return Codes:
1036*4882a593Smuzhiyun  *   DFX_K_SUCCESS	- initialization succeeded
1037*4882a593Smuzhiyun  *   DFX_K_FAILURE	- initialization failed - could not allocate memory
1038*4882a593Smuzhiyun  *						or read adapter MAC address
1039*4882a593Smuzhiyun  *
1040*4882a593Smuzhiyun  * Assumptions:
1041*4882a593Smuzhiyun  *   Memory allocated from pci_alloc_consistent() call is physically
1042*4882a593Smuzhiyun  *   contiguous, locked memory.
1043*4882a593Smuzhiyun  *
1044*4882a593Smuzhiyun  * Side Effects:
1045*4882a593Smuzhiyun  *   Adapter is reset and should be in DMA_UNAVAILABLE state before
1046*4882a593Smuzhiyun  *   returning from this routine.
1047*4882a593Smuzhiyun  */
1048*4882a593Smuzhiyun 
dfx_driver_init(struct net_device * dev,const char * print_name,resource_size_t bar_start)1049*4882a593Smuzhiyun static int dfx_driver_init(struct net_device *dev, const char *print_name,
1050*4882a593Smuzhiyun 			   resource_size_t bar_start)
1051*4882a593Smuzhiyun {
1052*4882a593Smuzhiyun 	DFX_board_t *bp = netdev_priv(dev);
1053*4882a593Smuzhiyun 	struct device *bdev = bp->bus_dev;
1054*4882a593Smuzhiyun 	int dfx_bus_pci = dev_is_pci(bdev);
1055*4882a593Smuzhiyun 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1056*4882a593Smuzhiyun 	int dfx_bus_tc = DFX_BUS_TC(bdev);
1057*4882a593Smuzhiyun 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
1058*4882a593Smuzhiyun 	int alloc_size;			/* total buffer size needed */
1059*4882a593Smuzhiyun 	char *top_v, *curr_v;		/* virtual addrs into memory block */
1060*4882a593Smuzhiyun 	dma_addr_t top_p, curr_p;	/* physical addrs into memory block */
1061*4882a593Smuzhiyun 	u32 data;			/* host data register value */
1062*4882a593Smuzhiyun 	__le32 le32;
1063*4882a593Smuzhiyun 	char *board_name = NULL;
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 	DBG_printk("In dfx_driver_init...\n");
1066*4882a593Smuzhiyun 
1067*4882a593Smuzhiyun 	/* Initialize bus-specific hardware registers */
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	dfx_bus_init(dev);
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 	/*
1072*4882a593Smuzhiyun 	 * Initialize default values for configurable parameters
1073*4882a593Smuzhiyun 	 *
1074*4882a593Smuzhiyun 	 * Note: All of these parameters are ones that a user may
1075*4882a593Smuzhiyun 	 *       want to customize.  It'd be nice to break these
1076*4882a593Smuzhiyun 	 *		 out into Space.c or someplace else that's more
1077*4882a593Smuzhiyun 	 *		 accessible/understandable than this file.
1078*4882a593Smuzhiyun 	 */
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	bp->full_duplex_enb		= PI_SNMP_K_FALSE;
1081*4882a593Smuzhiyun 	bp->req_ttrt			= 8 * 12500;		/* 8ms in 80 nanosec units */
1082*4882a593Smuzhiyun 	bp->burst_size			= PI_PDATA_B_DMA_BURST_SIZE_DEF;
1083*4882a593Smuzhiyun 	bp->rcv_bufs_to_post	= RCV_BUFS_DEF;
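
	/*
	 * Note: 8 ms expressed in 80 ns ticks is 8,000,000 ns / 80 ns =
	 *       100,000 ticks, which is where the 8 * 12500 value above
	 *       comes from.
	 */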
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun 	/*
1086*4882a593Smuzhiyun 	 * Ensure that HW configuration is OK
1087*4882a593Smuzhiyun 	 *
1088*4882a593Smuzhiyun 	 * Note: Depending on the hardware revision, we may need to modify
1089*4882a593Smuzhiyun 	 *       some of the configurable parameters to workaround hardware
1090*4882a593Smuzhiyun 	 *       limitations.  We'll perform this configuration check AFTER
1091*4882a593Smuzhiyun 	 *       setting the parameters to their default values.
1092*4882a593Smuzhiyun 	 */
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 	dfx_bus_config_check(bp);
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	/* Disable PDQ interrupts first */
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	/*  Read the factory MAC address from the adapter then save it */
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
1107*4882a593Smuzhiyun 				 &data) != DFX_K_SUCCESS) {
1108*4882a593Smuzhiyun 		printk("%s: Could not read adapter factory MAC address!\n",
1109*4882a593Smuzhiyun 		       print_name);
1110*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1111*4882a593Smuzhiyun 	}
1112*4882a593Smuzhiyun 	le32 = cpu_to_le32(data);
1113*4882a593Smuzhiyun 	memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
1116*4882a593Smuzhiyun 				 &data) != DFX_K_SUCCESS) {
1117*4882a593Smuzhiyun 		printk("%s: Could not read adapter factory MAC address!\n",
1118*4882a593Smuzhiyun 		       print_name);
1119*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1120*4882a593Smuzhiyun 	}
1121*4882a593Smuzhiyun 	le32 = cpu_to_le32(data);
1122*4882a593Smuzhiyun 	memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
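
	/*
	 * Note: the 48-bit factory address arrives in two port control
	 *       reads -- the MLA LO longword supplies bytes 0-3 and the
	 *       low 16 bits of the MLA HI longword supply bytes 4-5.
	 *       Storing each value via cpu_to_le32() lays the bytes out
	 *       in the same order regardless of host endianness.
	 */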
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	/*
1125*4882a593Smuzhiyun 	 * Set current address to factory address
1126*4882a593Smuzhiyun 	 *
1127*4882a593Smuzhiyun 	 * Note: Node address override support is handled through
1128*4882a593Smuzhiyun 	 *       dfx_ctl_set_mac_address.
1129*4882a593Smuzhiyun 	 */
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1132*4882a593Smuzhiyun 	if (dfx_bus_tc)
1133*4882a593Smuzhiyun 		board_name = "DEFTA";
1134*4882a593Smuzhiyun 	if (dfx_bus_eisa)
1135*4882a593Smuzhiyun 		board_name = "DEFEA";
1136*4882a593Smuzhiyun 	if (dfx_bus_pci)
1137*4882a593Smuzhiyun 		board_name = "DEFPA";
1138*4882a593Smuzhiyun 	pr_info("%s: %s at %s addr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1139*4882a593Smuzhiyun 		print_name, board_name, dfx_use_mmio ? "MMIO" : "I/O",
1140*4882a593Smuzhiyun 		(long long)bar_start, dev->irq, dev->dev_addr);
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	/*
1143*4882a593Smuzhiyun 	 * Get memory for descriptor block, consumer block, and other buffers
1144*4882a593Smuzhiyun 	 * that need to be DMA read or written to by the adapter.
1145*4882a593Smuzhiyun 	 */
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun 	alloc_size = sizeof(PI_DESCR_BLOCK) +
1148*4882a593Smuzhiyun 					PI_CMD_REQ_K_SIZE_MAX +
1149*4882a593Smuzhiyun 					PI_CMD_RSP_K_SIZE_MAX +
1150*4882a593Smuzhiyun #ifndef DYNAMIC_BUFFERS
1151*4882a593Smuzhiyun 					(bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
1152*4882a593Smuzhiyun #endif
1153*4882a593Smuzhiyun 					sizeof(PI_CONSUMER_BLOCK) +
1154*4882a593Smuzhiyun 					(PI_ALIGN_K_DESC_BLK - 1);
1155*4882a593Smuzhiyun 	bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
1156*4882a593Smuzhiyun 						   &bp->kmalloced_dma,
1157*4882a593Smuzhiyun 						   GFP_ATOMIC);
1158*4882a593Smuzhiyun 	if (top_v == NULL)
1159*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	top_p = bp->kmalloced_dma;	/* get physical address of buffer */
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	/*
1164*4882a593Smuzhiyun 	 *  To guarantee the 8K alignment required for the descriptor block, 8K - 1
1165*4882a593Smuzhiyun 	 *  plus the amount of memory needed was allocated.  The physical address
1166*4882a593Smuzhiyun 	 *	is now 8K aligned.  By carving up the memory in a specific order,
1167*4882a593Smuzhiyun 	 *  we'll guarantee the alignment requirements for all other structures.
1168*4882a593Smuzhiyun 	 *
1169*4882a593Smuzhiyun 	 *  Note: If the assumptions change regarding the non-paged, non-cached,
1170*4882a593Smuzhiyun 	 *		  physically contiguous nature of the memory block or the address
1171*4882a593Smuzhiyun 	 *		  alignments, then we'll need to implement a different algorithm
1172*4882a593Smuzhiyun 	 *		  for allocating the needed memory.
1173*4882a593Smuzhiyun 	 */
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
1176*4882a593Smuzhiyun 	curr_v = top_v + (curr_p - top_p);
1177*4882a593Smuzhiyun 
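	/*
	 * Worked example (illustrative values): if top_p happened to be
	 * 0x12345678 and PI_ALIGN_K_DESC_BLK is the 8 Kbyte (0x2000)
	 * boundary described above, ALIGN() rounds curr_p up to
	 * 0x12346000 and curr_v is advanced by the same 0x988-byte
	 * offset, so the virtual and physical cursors stay in lock step.
	 */
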
1178*4882a593Smuzhiyun 	/* Reserve space for descriptor block */
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
1181*4882a593Smuzhiyun 	bp->descr_block_phys = curr_p;
1182*4882a593Smuzhiyun 	curr_v += sizeof(PI_DESCR_BLOCK);
1183*4882a593Smuzhiyun 	curr_p += sizeof(PI_DESCR_BLOCK);
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	/* Reserve space for command request buffer */
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
1188*4882a593Smuzhiyun 	bp->cmd_req_phys = curr_p;
1189*4882a593Smuzhiyun 	curr_v += PI_CMD_REQ_K_SIZE_MAX;
1190*4882a593Smuzhiyun 	curr_p += PI_CMD_REQ_K_SIZE_MAX;
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	/* Reserve space for command response buffer */
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
1195*4882a593Smuzhiyun 	bp->cmd_rsp_phys = curr_p;
1196*4882a593Smuzhiyun 	curr_v += PI_CMD_RSP_K_SIZE_MAX;
1197*4882a593Smuzhiyun 	curr_p += PI_CMD_RSP_K_SIZE_MAX;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	/* Reserve space for the LLC host receive queue buffers */
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	bp->rcv_block_virt = curr_v;
1202*4882a593Smuzhiyun 	bp->rcv_block_phys = curr_p;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun #ifndef DYNAMIC_BUFFERS
1205*4882a593Smuzhiyun 	curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1206*4882a593Smuzhiyun 	curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1207*4882a593Smuzhiyun #endif
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	/* Reserve space for the consumer block */
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
1212*4882a593Smuzhiyun 	bp->cons_block_phys = curr_p;
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	/* Display virtual and physical addresses if debug driver */
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	DBG_printk("%s: Descriptor block virt = %p, phys = %pad\n",
1217*4882a593Smuzhiyun 		   print_name, bp->descr_block_virt, &bp->descr_block_phys);
1218*4882a593Smuzhiyun 	DBG_printk("%s: Command Request buffer virt = %p, phys = %pad\n",
1219*4882a593Smuzhiyun 		   print_name, bp->cmd_req_virt, &bp->cmd_req_phys);
1220*4882a593Smuzhiyun 	DBG_printk("%s: Command Response buffer virt = %p, phys = %pad\n",
1221*4882a593Smuzhiyun 		   print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys);
1222*4882a593Smuzhiyun 	DBG_printk("%s: Receive buffer block virt = %p, phys = %pad\n",
1223*4882a593Smuzhiyun 		   print_name, bp->rcv_block_virt, &bp->rcv_block_phys);
1224*4882a593Smuzhiyun 	DBG_printk("%s: Consumer block virt = %p, phys = %pad\n",
1225*4882a593Smuzhiyun 		   print_name, bp->cons_block_virt, &bp->cons_block_phys);
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	return DFX_K_SUCCESS;
1228*4882a593Smuzhiyun }
1229*4882a593Smuzhiyun 
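/*
 * Illustrative sketch only (not part of the driver): the carve-up above
 * amounts to repeatedly handing out a sub-region and advancing the
 * virtual and DMA cursors together, along the lines of the hypothetical
 * helper below.
 *
 *	static void *dfx_carve(char **curr_v, dma_addr_t *curr_p, size_t len)
 *	{
 *		void *region = *curr_v;
 *
 *		*curr_v += len;
 *		*curr_p += len;
 *		return region;
 *	}
 *
 * With such a helper, e.g. the command request buffer would be obtained
 * as dfx_carve(&curr_v, &curr_p, PI_CMD_REQ_K_SIZE_MAX).
 */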
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun /*
1232*4882a593Smuzhiyun  * =================
1233*4882a593Smuzhiyun  * = dfx_adap_init =
1234*4882a593Smuzhiyun  * =================
1235*4882a593Smuzhiyun  *
1236*4882a593Smuzhiyun  * Overview:
1237*4882a593Smuzhiyun  *   Brings the adapter to the link avail/link unavailable state.
1238*4882a593Smuzhiyun  *
1239*4882a593Smuzhiyun  * Returns:
1240*4882a593Smuzhiyun  *   Condition code
1241*4882a593Smuzhiyun  *
1242*4882a593Smuzhiyun  * Arguments:
1243*4882a593Smuzhiyun  *   bp - pointer to board information
1244*4882a593Smuzhiyun  *   get_buffers - non-zero if buffers to be allocated
1245*4882a593Smuzhiyun  *
1246*4882a593Smuzhiyun  * Functional Description:
1247*4882a593Smuzhiyun  *   Issues the low-level firmware/hardware calls necessary to bring
1248*4882a593Smuzhiyun  *   the adapter up, or to properly reset and restore adapter during
1249*4882a593Smuzhiyun  *   run-time.
1250*4882a593Smuzhiyun  *
1251*4882a593Smuzhiyun  * Return Codes:
1252*4882a593Smuzhiyun  *   DFX_K_SUCCESS - Adapter brought up successfully
1253*4882a593Smuzhiyun  *   DFX_K_FAILURE - Adapter initialization failed
1254*4882a593Smuzhiyun  *
1255*4882a593Smuzhiyun  * Assumptions:
1256*4882a593Smuzhiyun  *   bp->reset_type should be set to a valid reset type value before
1257*4882a593Smuzhiyun  *   calling this routine.
1258*4882a593Smuzhiyun  *
1259*4882a593Smuzhiyun  * Side Effects:
1260*4882a593Smuzhiyun  *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1261*4882a593Smuzhiyun  *   upon a successful return of this routine.
1262*4882a593Smuzhiyun  */
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1265*4882a593Smuzhiyun 	{
1266*4882a593Smuzhiyun 	DBG_printk("In dfx_adap_init...\n");
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	/* Disable PDQ interrupts first */
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 	if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1275*4882a593Smuzhiyun 		{
1276*4882a593Smuzhiyun 		printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1277*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1278*4882a593Smuzhiyun 		}
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun 	/*
1281*4882a593Smuzhiyun 	 * When the PDQ is reset, some false Type 0 interrupts may be pending,
1282*4882a593Smuzhiyun 	 * so we'll acknowledge all Type 0 interrupts now before continuing.
1283*4882a593Smuzhiyun 	 */
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 	/*
1288*4882a593Smuzhiyun 	 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
1289*4882a593Smuzhiyun 	 *
1290*4882a593Smuzhiyun 	 * Note: We only need to clear host copies of these registers.  The PDQ reset
1291*4882a593Smuzhiyun 	 *       takes care of the on-board register values.
1292*4882a593Smuzhiyun 	 */
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	bp->cmd_req_reg.lword	= 0;
1295*4882a593Smuzhiyun 	bp->cmd_rsp_reg.lword	= 0;
1296*4882a593Smuzhiyun 	bp->rcv_xmt_reg.lword	= 0;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	/* Clear consumer block before going to DMA_AVAILABLE state */
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	/* Initialize the DMA Burst Size */
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	if (dfx_hw_port_ctrl_req(bp,
1305*4882a593Smuzhiyun 							PI_PCTRL_M_SUB_CMD,
1306*4882a593Smuzhiyun 							PI_SUB_CMD_K_BURST_SIZE_SET,
1307*4882a593Smuzhiyun 							bp->burst_size,
1308*4882a593Smuzhiyun 							NULL) != DFX_K_SUCCESS)
1309*4882a593Smuzhiyun 		{
1310*4882a593Smuzhiyun 		printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1311*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1312*4882a593Smuzhiyun 		}
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	/*
1315*4882a593Smuzhiyun 	 * Set base address of Consumer Block
1316*4882a593Smuzhiyun 	 *
1317*4882a593Smuzhiyun 	 * Assumption: 32-bit physical address of consumer block is 64 byte
1318*4882a593Smuzhiyun 	 *			   aligned.  That is, bits 0-5 of the address must be zero.
1319*4882a593Smuzhiyun 	 */
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	if (dfx_hw_port_ctrl_req(bp,
1322*4882a593Smuzhiyun 							PI_PCTRL_M_CONS_BLOCK,
1323*4882a593Smuzhiyun 							bp->cons_block_phys,
1324*4882a593Smuzhiyun 							0,
1325*4882a593Smuzhiyun 							NULL) != DFX_K_SUCCESS)
1326*4882a593Smuzhiyun 		{
1327*4882a593Smuzhiyun 		printk("%s: Could not set consumer block address!\n", bp->dev->name);
1328*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1329*4882a593Smuzhiyun 		}
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 	/*
1332*4882a593Smuzhiyun 	 * Set the base address of Descriptor Block and bring adapter
1333*4882a593Smuzhiyun 	 * to DMA_AVAILABLE state.
1334*4882a593Smuzhiyun 	 *
1335*4882a593Smuzhiyun 	 * Note: We also set the literal and data swapping requirements
1336*4882a593Smuzhiyun 	 *       in this command.
1337*4882a593Smuzhiyun 	 *
1338*4882a593Smuzhiyun 	 * Assumption: 32-bit physical address of descriptor block
1339*4882a593Smuzhiyun 	 *       is 8Kbyte aligned.
1340*4882a593Smuzhiyun 	 */
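	/*
	 * Note: because the descriptor block is 8 Kbyte aligned, its
	 *       low-order address bits are zero, which is what allows the
	 *       byte/literal swap mode bits to be OR'd into the same
	 *       longword below without clobbering address bits.
	 */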
1341*4882a593Smuzhiyun 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
1342*4882a593Smuzhiyun 				 (u32)(bp->descr_block_phys |
1343*4882a593Smuzhiyun 				       PI_PDATA_A_INIT_M_BSWAP_INIT),
1344*4882a593Smuzhiyun 				 0, NULL) != DFX_K_SUCCESS) {
1345*4882a593Smuzhiyun 		printk("%s: Could not set descriptor block address!\n",
1346*4882a593Smuzhiyun 		       bp->dev->name);
1347*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1348*4882a593Smuzhiyun 	}
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	/* Set transmit flush timeout value */
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1353*4882a593Smuzhiyun 	bp->cmd_req_virt->char_set.item[0].item_code	= PI_ITEM_K_FLUSH_TIME;
1354*4882a593Smuzhiyun 	bp->cmd_req_virt->char_set.item[0].value		= 3;	/* 3 seconds */
1355*4882a593Smuzhiyun 	bp->cmd_req_virt->char_set.item[0].item_index	= 0;
1356*4882a593Smuzhiyun 	bp->cmd_req_virt->char_set.item[1].item_code	= PI_ITEM_K_EOL;
1357*4882a593Smuzhiyun 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1358*4882a593Smuzhiyun 		{
1359*4882a593Smuzhiyun 		printk("%s: DMA command request failed!\n", bp->dev->name);
1360*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1361*4882a593Smuzhiyun 		}
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	/* Set the initial values for eFDXEnable and MACTReq MIB objects */
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1366*4882a593Smuzhiyun 	bp->cmd_req_virt->snmp_set.item[0].item_code	= PI_ITEM_K_FDX_ENB_DIS;
1367*4882a593Smuzhiyun 	bp->cmd_req_virt->snmp_set.item[0].value		= bp->full_duplex_enb;
1368*4882a593Smuzhiyun 	bp->cmd_req_virt->snmp_set.item[0].item_index	= 0;
1369*4882a593Smuzhiyun 	bp->cmd_req_virt->snmp_set.item[1].item_code	= PI_ITEM_K_MAC_T_REQ;
1370*4882a593Smuzhiyun 	bp->cmd_req_virt->snmp_set.item[1].value		= bp->req_ttrt;
1371*4882a593Smuzhiyun 	bp->cmd_req_virt->snmp_set.item[1].item_index	= 0;
1372*4882a593Smuzhiyun 	bp->cmd_req_virt->snmp_set.item[2].item_code	= PI_ITEM_K_EOL;
1373*4882a593Smuzhiyun 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1374*4882a593Smuzhiyun 		{
1375*4882a593Smuzhiyun 		printk("%s: DMA command request failed!\n", bp->dev->name);
1376*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1377*4882a593Smuzhiyun 		}
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	/* Initialize adapter CAM */
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1382*4882a593Smuzhiyun 		{
1383*4882a593Smuzhiyun 		printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1384*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1385*4882a593Smuzhiyun 		}
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	/* Initialize adapter filters */
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1390*4882a593Smuzhiyun 		{
1391*4882a593Smuzhiyun 		printk("%s: Adapter filters update failed!\n", bp->dev->name);
1392*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1393*4882a593Smuzhiyun 		}
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	/*
1396*4882a593Smuzhiyun 	 * Remove any existing dynamic buffers (i.e. if the adapter is being
1397*4882a593Smuzhiyun 	 * reinitialized)
1398*4882a593Smuzhiyun 	 */
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	if (get_buffers)
1401*4882a593Smuzhiyun 		dfx_rcv_flush(bp);
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	/* Initialize receive descriptor block and produce buffers */
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 	if (dfx_rcv_init(bp, get_buffers))
1406*4882a593Smuzhiyun 	        {
1407*4882a593Smuzhiyun 		printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1408*4882a593Smuzhiyun 		if (get_buffers)
1409*4882a593Smuzhiyun 			dfx_rcv_flush(bp);
1410*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1411*4882a593Smuzhiyun 		}
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	/* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun 	bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1416*4882a593Smuzhiyun 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1417*4882a593Smuzhiyun 		{
1418*4882a593Smuzhiyun 		printk("%s: Start command failed\n", bp->dev->name);
1419*4882a593Smuzhiyun 		if (get_buffers)
1420*4882a593Smuzhiyun 			dfx_rcv_flush(bp);
1421*4882a593Smuzhiyun 		return DFX_K_FAILURE;
1422*4882a593Smuzhiyun 		}
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 	/* Initialization succeeded, reenable PDQ interrupts */
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1427*4882a593Smuzhiyun 	return DFX_K_SUCCESS;
1428*4882a593Smuzhiyun 	}
1429*4882a593Smuzhiyun 
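/*
 * Illustrative note: the command blocks built in dfx_adap_init() all use
 * the same convention -- an array of { item_code, value, item_index }
 * entries terminated by a PI_ITEM_K_EOL item code rather than an explicit
 * count.  A minimal one-item request therefore looks like:
 *
 *	bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
 *	bp->cmd_req_virt->char_set.item[0].item_code  = PI_ITEM_K_FLUSH_TIME;
 *	bp->cmd_req_virt->char_set.item[0].value      = 3;
 *	bp->cmd_req_virt->char_set.item[0].item_index = 0;
 *	bp->cmd_req_virt->char_set.item[1].item_code  = PI_ITEM_K_EOL;
 *
 * and is handed to the adapter with dfx_hw_dma_cmd_req(bp).
 */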
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun /*
1432*4882a593Smuzhiyun  * ============
1433*4882a593Smuzhiyun  * = dfx_open =
1434*4882a593Smuzhiyun  * ============
1435*4882a593Smuzhiyun  *
1436*4882a593Smuzhiyun  * Overview:
1437*4882a593Smuzhiyun  *   Opens the adapter
1438*4882a593Smuzhiyun  *
1439*4882a593Smuzhiyun  * Returns:
1440*4882a593Smuzhiyun  *   Condition code
1441*4882a593Smuzhiyun  *
1442*4882a593Smuzhiyun  * Arguments:
1443*4882a593Smuzhiyun  *   dev - pointer to device information
1444*4882a593Smuzhiyun  *
1445*4882a593Smuzhiyun  * Functional Description:
1446*4882a593Smuzhiyun  *   This function brings the adapter to an operational state.
1447*4882a593Smuzhiyun  *
1448*4882a593Smuzhiyun  * Return Codes:
1449*4882a593Smuzhiyun  *   0		 - Adapter was successfully opened
1450*4882a593Smuzhiyun  *   negative errno - Could not register IRQ (error from request_irq()) or adapter initialization failed (-EAGAIN)
1451*4882a593Smuzhiyun  *
1452*4882a593Smuzhiyun  * Assumptions:
1453*4882a593Smuzhiyun  *   This routine should only be called for a device that was
1454*4882a593Smuzhiyun  *   initialized successfully.
1455*4882a593Smuzhiyun  *
1456*4882a593Smuzhiyun  * Side Effects:
1457*4882a593Smuzhiyun  *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1458*4882a593Smuzhiyun  *   if the open is successful.
1459*4882a593Smuzhiyun  */
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun static int dfx_open(struct net_device *dev)
1462*4882a593Smuzhiyun {
1463*4882a593Smuzhiyun 	DFX_board_t *bp = netdev_priv(dev);
1464*4882a593Smuzhiyun 	int ret;
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	DBG_printk("In dfx_open...\n");
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	/* Register IRQ - support shared interrupts by passing device ptr */
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 	ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1471*4882a593Smuzhiyun 			  dev);
1472*4882a593Smuzhiyun 	if (ret) {
1473*4882a593Smuzhiyun 		printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1474*4882a593Smuzhiyun 		return ret;
1475*4882a593Smuzhiyun 	}
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun 	/*
1478*4882a593Smuzhiyun 	 * Set current address to factory MAC address
1479*4882a593Smuzhiyun 	 *
1480*4882a593Smuzhiyun 	 * Note: We've already done this step in dfx_driver_init.
1481*4882a593Smuzhiyun 	 *       However, it's possible that a user has set a node
1482*4882a593Smuzhiyun 	 *		 address override, then closed and reopened the
1483*4882a593Smuzhiyun 	 *		 adapter.  Unless we reset the device address field
1484*4882a593Smuzhiyun 	 *		 now, we'll continue to use the existing modified
1485*4882a593Smuzhiyun 	 *		 address.
1486*4882a593Smuzhiyun 	 */
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 	/* Clear local unicast/multicast address tables and counts */
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	memset(bp->uc_table, 0, sizeof(bp->uc_table));
1493*4882a593Smuzhiyun 	memset(bp->mc_table, 0, sizeof(bp->mc_table));
1494*4882a593Smuzhiyun 	bp->uc_count = 0;
1495*4882a593Smuzhiyun 	bp->mc_count = 0;
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	/* Disable promiscuous filter settings */
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 	bp->ind_group_prom	= PI_FSTATE_K_BLOCK;
1500*4882a593Smuzhiyun 	bp->group_prom		= PI_FSTATE_K_BLOCK;
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun 	spin_lock_init(&bp->lock);
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	/* Reset and initialize adapter */
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun 	bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST;	/* skip self-test */
1507*4882a593Smuzhiyun 	if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1508*4882a593Smuzhiyun 	{
1509*4882a593Smuzhiyun 		printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1510*4882a593Smuzhiyun 		free_irq(dev->irq, dev);
1511*4882a593Smuzhiyun 		return -EAGAIN;
1512*4882a593Smuzhiyun 	}
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	/* Set device structure info */
1515*4882a593Smuzhiyun 	netif_start_queue(dev);
1516*4882a593Smuzhiyun 	return 0;
1517*4882a593Smuzhiyun }
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun /*
1521*4882a593Smuzhiyun  * =============
1522*4882a593Smuzhiyun  * = dfx_close =
1523*4882a593Smuzhiyun  * =============
1524*4882a593Smuzhiyun  *
1525*4882a593Smuzhiyun  * Overview:
1526*4882a593Smuzhiyun  *   Closes the device/module.
1527*4882a593Smuzhiyun  *
1528*4882a593Smuzhiyun  * Returns:
1529*4882a593Smuzhiyun  *   Condition code
1530*4882a593Smuzhiyun  *
1531*4882a593Smuzhiyun  * Arguments:
1532*4882a593Smuzhiyun  *   dev - pointer to device information
1533*4882a593Smuzhiyun  *
1534*4882a593Smuzhiyun  * Functional Description:
1535*4882a593Smuzhiyun  *   This routine closes the adapter and brings it to a safe state.
1536*4882a593Smuzhiyun  *   The interrupt service routine is deregistered with the OS.
1537*4882a593Smuzhiyun  *   The adapter can be opened again with another call to dfx_open().
1538*4882a593Smuzhiyun  *
1539*4882a593Smuzhiyun  * Return Codes:
1540*4882a593Smuzhiyun  *   Always returns 0.
1541*4882a593Smuzhiyun  *
1542*4882a593Smuzhiyun  * Assumptions:
1543*4882a593Smuzhiyun  *   No further requests for this adapter are made after this routine is
1544*4882a593Smuzhiyun  *   called.  dfx_open() can be called to reset and reinitialize the
1545*4882a593Smuzhiyun  *   adapter.
1546*4882a593Smuzhiyun  *
1547*4882a593Smuzhiyun  * Side Effects:
1548*4882a593Smuzhiyun  *   Adapter should be in DMA_UNAVAILABLE state upon completion of this
1549*4882a593Smuzhiyun  *   routine.
1550*4882a593Smuzhiyun  */
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun static int dfx_close(struct net_device *dev)
1553*4882a593Smuzhiyun {
1554*4882a593Smuzhiyun 	DFX_board_t *bp = netdev_priv(dev);
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	DBG_printk("In dfx_close...\n");
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun 	/* Disable PDQ interrupts first */
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	/*
1567*4882a593Smuzhiyun 	 * Flush any pending transmit buffers
1568*4882a593Smuzhiyun 	 *
1569*4882a593Smuzhiyun 	 * Note: It's important that we flush the transmit buffers
1570*4882a593Smuzhiyun 	 *		 BEFORE we clear our copy of the Type 2 register.
1571*4882a593Smuzhiyun 	 *		 Otherwise, we'll have no idea how many buffers
1572*4882a593Smuzhiyun 	 *		 we need to free.
1573*4882a593Smuzhiyun 	 */
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun 	dfx_xmt_flush(bp);
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	/*
1578*4882a593Smuzhiyun 	 * Clear Type 1 and Type 2 registers after adapter reset
1579*4882a593Smuzhiyun 	 *
1580*4882a593Smuzhiyun 	 * Note: Even though we're closing the adapter, it's
1581*4882a593Smuzhiyun 	 *       possible that an interrupt will occur after
1582*4882a593Smuzhiyun 	 *		 dfx_close is called.  Without some assurance to
1583*4882a593Smuzhiyun 	 *		 the contrary we want to make sure that we don't
1584*4882a593Smuzhiyun 	 *		 process receive and transmit LLC frames and update
1585*4882a593Smuzhiyun 	 *		 the Type 2 register with bad information.
1586*4882a593Smuzhiyun 	 */
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	bp->cmd_req_reg.lword	= 0;
1589*4882a593Smuzhiyun 	bp->cmd_rsp_reg.lword	= 0;
1590*4882a593Smuzhiyun 	bp->rcv_xmt_reg.lword	= 0;
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun 	/* Clear consumer block for the same reason given above */
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun 	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	/* Release all dynamically allocated skbs in the receive ring. */
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 	dfx_rcv_flush(bp);
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	/* Clear device structure flags */
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 	netif_stop_queue(dev);
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	/* Deregister (free) IRQ */
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	free_irq(dev->irq, dev);
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 	return 0;
1609*4882a593Smuzhiyun }
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun /*
1613*4882a593Smuzhiyun  * ======================
1614*4882a593Smuzhiyun  * = dfx_int_pr_halt_id =
1615*4882a593Smuzhiyun  * ======================
1616*4882a593Smuzhiyun  *
1617*4882a593Smuzhiyun  * Overview:
1618*4882a593Smuzhiyun  *   Displays halt ids in string form.
1619*4882a593Smuzhiyun  *
1620*4882a593Smuzhiyun  * Returns:
1621*4882a593Smuzhiyun  *   None
1622*4882a593Smuzhiyun  *
1623*4882a593Smuzhiyun  * Arguments:
1624*4882a593Smuzhiyun  *   bp - pointer to board information
1625*4882a593Smuzhiyun  *
1626*4882a593Smuzhiyun  * Functional Description:
1627*4882a593Smuzhiyun  *   Determine current halt id and display appropriate string.
1628*4882a593Smuzhiyun  *
1629*4882a593Smuzhiyun  * Return Codes:
1630*4882a593Smuzhiyun  *   None
1631*4882a593Smuzhiyun  *
1632*4882a593Smuzhiyun  * Assumptions:
1633*4882a593Smuzhiyun  *   None
1634*4882a593Smuzhiyun  *
1635*4882a593Smuzhiyun  * Side Effects:
1636*4882a593Smuzhiyun  *   None
1637*4882a593Smuzhiyun  */
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun static void dfx_int_pr_halt_id(DFX_board_t	*bp)
1640*4882a593Smuzhiyun 	{
1641*4882a593Smuzhiyun 	PI_UINT32	port_status;			/* PDQ port status register value */
1642*4882a593Smuzhiyun 	PI_UINT32	halt_id;				/* PDQ port status halt ID */
1643*4882a593Smuzhiyun 
1644*4882a593Smuzhiyun 	/* Read the latest port status */
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	/* Display halt state transition information */
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1651*4882a593Smuzhiyun 	switch (halt_id)
1652*4882a593Smuzhiyun 		{
1653*4882a593Smuzhiyun 		case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1654*4882a593Smuzhiyun 			printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1655*4882a593Smuzhiyun 			break;
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 		case PI_HALT_ID_K_PARITY_ERROR:
1658*4882a593Smuzhiyun 			printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1659*4882a593Smuzhiyun 			break;
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun 		case PI_HALT_ID_K_HOST_DIR_HALT:
1662*4882a593Smuzhiyun 			printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1663*4882a593Smuzhiyun 			break;
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun 		case PI_HALT_ID_K_SW_FAULT:
1666*4882a593Smuzhiyun 			printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1667*4882a593Smuzhiyun 			break;
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun 		case PI_HALT_ID_K_HW_FAULT:
1670*4882a593Smuzhiyun 			printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1671*4882a593Smuzhiyun 			break;
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 		case PI_HALT_ID_K_PC_TRACE:
1674*4882a593Smuzhiyun 			printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1675*4882a593Smuzhiyun 			break;
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 		case PI_HALT_ID_K_DMA_ERROR:
1678*4882a593Smuzhiyun 			printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1679*4882a593Smuzhiyun 			break;
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 		case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1682*4882a593Smuzhiyun 			printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1683*4882a593Smuzhiyun 			break;
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 		case PI_HALT_ID_K_BUS_EXCEPTION:
1686*4882a593Smuzhiyun 			printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1687*4882a593Smuzhiyun 			break;
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun 		default:
1690*4882a593Smuzhiyun 			printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1691*4882a593Smuzhiyun 			break;
1692*4882a593Smuzhiyun 		}
1693*4882a593Smuzhiyun 	}
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun /*
1697*4882a593Smuzhiyun  * ==========================
1698*4882a593Smuzhiyun  * = dfx_int_type_0_process =
1699*4882a593Smuzhiyun  * ==========================
1700*4882a593Smuzhiyun  *
1701*4882a593Smuzhiyun  * Overview:
1702*4882a593Smuzhiyun  *   Processes Type 0 interrupts.
1703*4882a593Smuzhiyun  *
1704*4882a593Smuzhiyun  * Returns:
1705*4882a593Smuzhiyun  *   None
1706*4882a593Smuzhiyun  *
1707*4882a593Smuzhiyun  * Arguments:
1708*4882a593Smuzhiyun  *   bp - pointer to board information
1709*4882a593Smuzhiyun  *
1710*4882a593Smuzhiyun  * Functional Description:
1711*4882a593Smuzhiyun  *   Processes all enabled Type 0 interrupts.  If the reason for the interrupt
1712*4882a593Smuzhiyun  *   is a serious fault on the adapter, then an error message is displayed
1713*4882a593Smuzhiyun  *   and the adapter is reset.
1714*4882a593Smuzhiyun  *
1715*4882a593Smuzhiyun  *   One tricky potential timing window is the rapid succession of "link avail"
1716*4882a593Smuzhiyun  *   "link unavail" state change interrupts.  The acknowledgement of the Type 0
1717*4882a593Smuzhiyun  *   interrupt must be done before reading the state from the Port Status
1718*4882a593Smuzhiyun  *   register.  This is true because a state change could occur after reading
1719*4882a593Smuzhiyun  *   the data, but before acknowledging the interrupt.  If this state change
1720*4882a593Smuzhiyun  *   does happen, it would be lost because the driver is using the old state,
1721*4882a593Smuzhiyun  *   and it will never know about the new state because it subsequently
1722*4882a593Smuzhiyun  *   acknowledges the state change interrupt.
1723*4882a593Smuzhiyun  *
1724*4882a593Smuzhiyun  *          INCORRECT                                      CORRECT
1725*4882a593Smuzhiyun  *      read type 0 int reasons                   read type 0 int reasons
1726*4882a593Smuzhiyun  *      read adapter state                        ack type 0 interrupts
1727*4882a593Smuzhiyun  *      ack type 0 interrupts                     read adapter state
1728*4882a593Smuzhiyun  *      ... process interrupt ...                 ... process interrupt ...
1729*4882a593Smuzhiyun  *
1730*4882a593Smuzhiyun  * Return Codes:
1731*4882a593Smuzhiyun  *   None
1732*4882a593Smuzhiyun  *
1733*4882a593Smuzhiyun  * Assumptions:
1734*4882a593Smuzhiyun  *   None
1735*4882a593Smuzhiyun  *
1736*4882a593Smuzhiyun  * Side Effects:
1737*4882a593Smuzhiyun  *   An adapter reset may occur if the adapter has any Type 0 error interrupts
1738*4882a593Smuzhiyun  *   or if the port status indicates that the adapter is halted.  The driver
1739*4882a593Smuzhiyun  *   is responsible for reinitializing the adapter with the current CAM
1740*4882a593Smuzhiyun  *   contents and adapter filter settings.
1741*4882a593Smuzhiyun  */
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun static void dfx_int_type_0_process(DFX_board_t	*bp)
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 	{
1746*4882a593Smuzhiyun 	PI_UINT32	type_0_status;		/* Host Interrupt Type 0 register */
1747*4882a593Smuzhiyun 	PI_UINT32	state;				/* current adap state (from port status) */
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 	/*
1750*4882a593Smuzhiyun 	 * Read host interrupt Type 0 register to determine which Type 0
1751*4882a593Smuzhiyun 	 * interrupts are pending.  Immediately write it back out to clear
1752*4882a593Smuzhiyun 	 * those interrupts.
1753*4882a593Smuzhiyun 	 */
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 	dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1756*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
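
	/*
	 * Note: writing the status back acknowledges the pending Type 0
	 *       interrupts BEFORE the adapter state is read further below,
	 *       i.e. the CORRECT ordering from the header comment.  A state
	 *       change that arrives after the state read then simply raises
	 *       a fresh interrupt instead of being silently lost.
	 */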
1757*4882a593Smuzhiyun 
1758*4882a593Smuzhiyun 	/* Check for Type 0 error interrupts */
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 	if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1761*4882a593Smuzhiyun 							PI_TYPE_0_STAT_M_PM_PAR_ERR |
1762*4882a593Smuzhiyun 							PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1763*4882a593Smuzhiyun 		{
1764*4882a593Smuzhiyun 		/* Check for Non-Existent Memory error */
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 		if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1767*4882a593Smuzhiyun 			printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun 		/* Check for Packet Memory Parity error */
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 		if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1772*4882a593Smuzhiyun 			printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun 		/* Check for Host Bus Parity error */
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 		if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1777*4882a593Smuzhiyun 			printk("%s: Host Bus Parity Error\n", bp->dev->name);
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun 		/* Reset adapter and bring it back on-line */
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun 		bp->link_available = PI_K_FALSE;	/* link is no longer available */
1782*4882a593Smuzhiyun 		bp->reset_type = 0;					/* rerun on-board diagnostics */
1783*4882a593Smuzhiyun 		printk("%s: Resetting adapter...\n", bp->dev->name);
1784*4882a593Smuzhiyun 		if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1785*4882a593Smuzhiyun 			{
1786*4882a593Smuzhiyun 			printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n", bp->dev->name);
1787*4882a593Smuzhiyun 			dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1788*4882a593Smuzhiyun 			return;
1789*4882a593Smuzhiyun 			}
1790*4882a593Smuzhiyun 		printk("%s: Adapter reset successful!\n", bp->dev->name);
1791*4882a593Smuzhiyun 		return;
1792*4882a593Smuzhiyun 		}
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 	/* Check for transmit flush interrupt */
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 	if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1797*4882a593Smuzhiyun 		{
1798*4882a593Smuzhiyun 		/* Flush any pending xmt's and acknowledge the flush interrupt */
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun 		bp->link_available = PI_K_FALSE;		/* link is no longer available */
1801*4882a593Smuzhiyun 		dfx_xmt_flush(bp);						/* flush any outstanding packets */
1802*4882a593Smuzhiyun 		(void) dfx_hw_port_ctrl_req(bp,
1803*4882a593Smuzhiyun 									PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1804*4882a593Smuzhiyun 									0,
1805*4882a593Smuzhiyun 									0,
1806*4882a593Smuzhiyun 									NULL);
1807*4882a593Smuzhiyun 		}
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun 	/* Check for adapter state change */
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 	if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1812*4882a593Smuzhiyun 		{
1813*4882a593Smuzhiyun 		/* Get latest adapter state */
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 		state = dfx_hw_adap_state_rd(bp);	/* get adapter state */
1816*4882a593Smuzhiyun 		if (state == PI_STATE_K_HALTED)
1817*4882a593Smuzhiyun 			{
1818*4882a593Smuzhiyun 			/*
1819*4882a593Smuzhiyun 			 * Adapter has transitioned to HALTED state, try to reset
1820*4882a593Smuzhiyun 			 * adapter to bring it back on-line.  If reset fails,
1821*4882a593Smuzhiyun 			 * leave the adapter in the broken state.
1822*4882a593Smuzhiyun 			 */
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 			printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1825*4882a593Smuzhiyun 			dfx_int_pr_halt_id(bp);			/* display halt id as string */
1826*4882a593Smuzhiyun 
1827*4882a593Smuzhiyun 			/* Reset adapter and bring it back on-line */
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 			bp->link_available = PI_K_FALSE;	/* link is no longer available */
1830*4882a593Smuzhiyun 			bp->reset_type = 0;					/* rerun on-board diagnostics */
1831*4882a593Smuzhiyun 			printk("%s: Resetting adapter...\n", bp->dev->name);
1832*4882a593Smuzhiyun 			if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1833*4882a593Smuzhiyun 				{
1834*4882a593Smuzhiyun 				printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n", bp->dev->name);
1835*4882a593Smuzhiyun 				dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1836*4882a593Smuzhiyun 				return;
1837*4882a593Smuzhiyun 				}
1838*4882a593Smuzhiyun 			printk("%s: Adapter reset successful!\n", bp->dev->name);
1839*4882a593Smuzhiyun 			}
1840*4882a593Smuzhiyun 		else if (state == PI_STATE_K_LINK_AVAIL)
1841*4882a593Smuzhiyun 			{
1842*4882a593Smuzhiyun 			bp->link_available = PI_K_TRUE;		/* set link available flag */
1843*4882a593Smuzhiyun 			}
1844*4882a593Smuzhiyun 		}
1845*4882a593Smuzhiyun 	}
1846*4882a593Smuzhiyun 
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun /*
1849*4882a593Smuzhiyun  * ==================
1850*4882a593Smuzhiyun  * = dfx_int_common =
1851*4882a593Smuzhiyun  * ==================
1852*4882a593Smuzhiyun  *
1853*4882a593Smuzhiyun  * Overview:
1854*4882a593Smuzhiyun  *   Interrupt service routine (ISR)
1855*4882a593Smuzhiyun  *
1856*4882a593Smuzhiyun  * Returns:
1857*4882a593Smuzhiyun  *   None
1858*4882a593Smuzhiyun  *
1859*4882a593Smuzhiyun  * Arguments:
1860*4882a593Smuzhiyun  *   bp - pointer to board information
1861*4882a593Smuzhiyun  *
1862*4882a593Smuzhiyun  * Functional Description:
1863*4882a593Smuzhiyun  *   This is the ISR which processes incoming adapter interrupts.
1864*4882a593Smuzhiyun  *
1865*4882a593Smuzhiyun  * Return Codes:
1866*4882a593Smuzhiyun  *   None
1867*4882a593Smuzhiyun  *
1868*4882a593Smuzhiyun  * Assumptions:
1869*4882a593Smuzhiyun  *   This routine assumes PDQ interrupts have not been disabled.
1870*4882a593Smuzhiyun  *   When interrupts are disabled at the PDQ, the Port Status register
1871*4882a593Smuzhiyun  *   is automatically cleared.  This routine uses the Port Status
1872*4882a593Smuzhiyun  *   register value to determine whether a Type 0 interrupt occurred,
1873*4882a593Smuzhiyun  *   so it's important that adapter interrupts are not normally
1874*4882a593Smuzhiyun  *   enabled/disabled at the PDQ.
1875*4882a593Smuzhiyun  *
1876*4882a593Smuzhiyun  *   It's vital that this routine is NOT reentered for the
1877*4882a593Smuzhiyun  *   same board and that the OS is not in another section of
1878*4882a593Smuzhiyun  *   code (eg. dfx_xmt_queue_pkt) for the same board on a
1879*4882a593Smuzhiyun  *   different thread.
1880*4882a593Smuzhiyun  *
1881*4882a593Smuzhiyun  * Side Effects:
1882*4882a593Smuzhiyun  *   Pending interrupts are serviced.  Depending on the type of
1883*4882a593Smuzhiyun  *   interrupt, acknowledging and clearing the interrupt at the
1884*4882a593Smuzhiyun  *   PDQ involves writing a register to clear the interrupt bit
1885*4882a593Smuzhiyun  *   or updating completion indices.
1886*4882a593Smuzhiyun  */
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun static void dfx_int_common(struct net_device *dev)
1889*4882a593Smuzhiyun {
1890*4882a593Smuzhiyun 	DFX_board_t *bp = netdev_priv(dev);
1891*4882a593Smuzhiyun 	PI_UINT32	port_status;		/* Port Status register */
1892*4882a593Smuzhiyun 
1893*4882a593Smuzhiyun 	/* Process xmt interrupts - frequent case, so always call this routine */
1894*4882a593Smuzhiyun 
1895*4882a593Smuzhiyun 	if(dfx_xmt_done(bp))				/* free consumed xmt packets */
1896*4882a593Smuzhiyun 		netif_wake_queue(dev);
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 	/* Process rcv interrupts - frequent case, so always call this routine */
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 	dfx_rcv_queue_process(bp);		/* service received LLC frames */
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 	/*
1903*4882a593Smuzhiyun 	 * Transmit and receive producer and completion indices are updated on the
1904*4882a593Smuzhiyun 	 * adapter by writing to the Type 2 Producer register.  Since the frequent
1905*4882a593Smuzhiyun 	 * case is that we'll be processing either LLC transmit or receive buffers,
1906*4882a593Smuzhiyun 	 * we'll optimize I/O writes by doing a single register write here.
1907*4882a593Smuzhiyun 	 */
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
1910*4882a593Smuzhiyun 
1911*4882a593Smuzhiyun 	/* Read PDQ Port Status register to find out which interrupts need processing */
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 	/* Process Type 0 interrupts (if any) - infrequent, so only call when needed */
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 	if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
1918*4882a593Smuzhiyun 		dfx_int_type_0_process(bp);	/* process Type 0 interrupts */
1919*4882a593Smuzhiyun 	}
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun /*
1923*4882a593Smuzhiyun  * =================
1924*4882a593Smuzhiyun  * = dfx_interrupt =
1925*4882a593Smuzhiyun  * =================
1926*4882a593Smuzhiyun  *
1927*4882a593Smuzhiyun  * Overview:
1928*4882a593Smuzhiyun  *   Interrupt processing routine
1929*4882a593Smuzhiyun  *
1930*4882a593Smuzhiyun  * Returns:
1931*4882a593Smuzhiyun  *   Whether a valid interrupt was seen.
1932*4882a593Smuzhiyun  *
1933*4882a593Smuzhiyun  * Arguments:
1934*4882a593Smuzhiyun  *   irq	- interrupt vector
1935*4882a593Smuzhiyun  *   dev_id	- pointer to device information
1936*4882a593Smuzhiyun  *
1937*4882a593Smuzhiyun  * Functional Description:
1938*4882a593Smuzhiyun  *   This routine calls the interrupt processing routine for this adapter.  It
1939*4882a593Smuzhiyun  *   disables and reenables adapter interrupts, as appropriate.  We can support
1940*4882a593Smuzhiyun  *   shared interrupts since the incoming dev_id pointer provides our device
1941*4882a593Smuzhiyun  *   structure context.
1942*4882a593Smuzhiyun  *
1943*4882a593Smuzhiyun  * Return Codes:
1944*4882a593Smuzhiyun  *   IRQ_HANDLED - an IRQ was handled.
1945*4882a593Smuzhiyun  *   IRQ_NONE    - no IRQ was handled.
1946*4882a593Smuzhiyun  *
1947*4882a593Smuzhiyun  * Assumptions:
1948*4882a593Smuzhiyun  *   The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
1949*4882a593Smuzhiyun  *   on Intel-based systems) is done by the operating system outside this
1950*4882a593Smuzhiyun  *   routine.
1951*4882a593Smuzhiyun  *
1952*4882a593Smuzhiyun  *	 System interrupts are enabled through this call.
1953*4882a593Smuzhiyun  *
1954*4882a593Smuzhiyun  * Side Effects:
1955*4882a593Smuzhiyun  *   Interrupts are disabled, then reenabled at the adapter.
1956*4882a593Smuzhiyun  */
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1959*4882a593Smuzhiyun {
1960*4882a593Smuzhiyun 	struct net_device *dev = dev_id;
1961*4882a593Smuzhiyun 	DFX_board_t *bp = netdev_priv(dev);
1962*4882a593Smuzhiyun 	struct device *bdev = bp->bus_dev;
1963*4882a593Smuzhiyun 	int dfx_bus_pci = dev_is_pci(bdev);
1964*4882a593Smuzhiyun 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1965*4882a593Smuzhiyun 	int dfx_bus_tc = DFX_BUS_TC(bdev);
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 	/* Service adapter interrupts */
1968*4882a593Smuzhiyun 
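	/*
	 * Note: each bus-specific branch below first checks whether this
	 *       adapter is actually asserting the interrupt and returns
	 *       IRQ_NONE if not, so that on a shared line the kernel can
	 *       offer the interrupt to the other registered handlers.
	 */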
1969*4882a593Smuzhiyun 	if (dfx_bus_pci) {
1970*4882a593Smuzhiyun 		u32 status;
1971*4882a593Smuzhiyun 
1972*4882a593Smuzhiyun 		dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1973*4882a593Smuzhiyun 		if (!(status & PFI_STATUS_M_PDQ_INT))
1974*4882a593Smuzhiyun 			return IRQ_NONE;
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 		spin_lock(&bp->lock);
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun 		/* Disable PDQ-PFI interrupts at PFI */
1979*4882a593Smuzhiyun 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1980*4882a593Smuzhiyun 				    PFI_MODE_M_DMA_ENB);
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun 		/* Call interrupt service routine for this adapter */
1983*4882a593Smuzhiyun 		dfx_int_common(dev);
1984*4882a593Smuzhiyun 
1985*4882a593Smuzhiyun 		/* Clear PDQ interrupt status bit and reenable interrupts */
1986*4882a593Smuzhiyun 		dfx_port_write_long(bp, PFI_K_REG_STATUS,
1987*4882a593Smuzhiyun 				    PFI_STATUS_M_PDQ_INT);
1988*4882a593Smuzhiyun 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1989*4882a593Smuzhiyun 				    (PFI_MODE_M_PDQ_INT_ENB |
1990*4882a593Smuzhiyun 				     PFI_MODE_M_DMA_ENB));
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 		spin_unlock(&bp->lock);
1993*4882a593Smuzhiyun 	}
1994*4882a593Smuzhiyun 	if (dfx_bus_eisa) {
1995*4882a593Smuzhiyun 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1996*4882a593Smuzhiyun 		u8 status;
1997*4882a593Smuzhiyun 
1998*4882a593Smuzhiyun 		status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1999*4882a593Smuzhiyun 		if (!(status & PI_CONFIG_STAT_0_M_PEND))
2000*4882a593Smuzhiyun 			return IRQ_NONE;
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun 		spin_lock(&bp->lock);
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 		/* Disable interrupts at the ESIC */
2005*4882a593Smuzhiyun 		status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
2006*4882a593Smuzhiyun 		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 		/* Call interrupt service routine for this adapter */
2009*4882a593Smuzhiyun 		dfx_int_common(dev);
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 		/* Reenable interrupts at the ESIC */
2012*4882a593Smuzhiyun 		status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2013*4882a593Smuzhiyun 		status |= PI_CONFIG_STAT_0_M_INT_ENB;
2014*4882a593Smuzhiyun 		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun 		spin_unlock(&bp->lock);
2017*4882a593Smuzhiyun 	}
2018*4882a593Smuzhiyun 	if (dfx_bus_tc) {
2019*4882a593Smuzhiyun 		u32 status;
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 		dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
2022*4882a593Smuzhiyun 		if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
2023*4882a593Smuzhiyun 				PI_PSTATUS_M_XMT_DATA_PENDING |
2024*4882a593Smuzhiyun 				PI_PSTATUS_M_SMT_HOST_PENDING |
2025*4882a593Smuzhiyun 				PI_PSTATUS_M_UNSOL_PENDING |
2026*4882a593Smuzhiyun 				PI_PSTATUS_M_CMD_RSP_PENDING |
2027*4882a593Smuzhiyun 				PI_PSTATUS_M_CMD_REQ_PENDING |
2028*4882a593Smuzhiyun 				PI_PSTATUS_M_TYPE_0_PENDING)))
2029*4882a593Smuzhiyun 			return IRQ_NONE;
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun 		spin_lock(&bp->lock);
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun 		/* Call interrupt service routine for this adapter */
2034*4882a593Smuzhiyun 		dfx_int_common(dev);
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 		spin_unlock(&bp->lock);
2037*4882a593Smuzhiyun 	}
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 	return IRQ_HANDLED;
2040*4882a593Smuzhiyun }
2041*4882a593Smuzhiyun 
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun /*
2044*4882a593Smuzhiyun  * =====================
2045*4882a593Smuzhiyun  * = dfx_ctl_get_stats =
2046*4882a593Smuzhiyun  * =====================
2047*4882a593Smuzhiyun  *
2048*4882a593Smuzhiyun  * Overview:
2049*4882a593Smuzhiyun  *   Get statistics for FDDI adapter
2050*4882a593Smuzhiyun  *
2051*4882a593Smuzhiyun  * Returns:
2052*4882a593Smuzhiyun  *   Pointer to FDDI statistics structure
2053*4882a593Smuzhiyun  *
2054*4882a593Smuzhiyun  * Arguments:
2055*4882a593Smuzhiyun  *   dev - pointer to device information
2056*4882a593Smuzhiyun  *
2057*4882a593Smuzhiyun  * Functional Description:
2058*4882a593Smuzhiyun  *   Gets current MIB objects from adapter, then
2059*4882a593Smuzhiyun  *   returns FDDI statistics structure as defined
2060*4882a593Smuzhiyun  *   in if_fddi.h.
2061*4882a593Smuzhiyun  *
2062*4882a593Smuzhiyun  *   Note: Since the FDDI statistics structure is
2063*4882a593Smuzhiyun  *   still new and the device structure doesn't
2064*4882a593Smuzhiyun  *   have an FDDI-specific get statistics handler,
2065*4882a593Smuzhiyun  *   we'll return the FDDI statistics structure as
2066*4882a593Smuzhiyun  *   a pointer to an Ethernet statistics structure.
2067*4882a593Smuzhiyun  *   That way, at least the first part of the statistics
2068*4882a593Smuzhiyun  *   structure can be decoded properly, and it allows
2069*4882a593Smuzhiyun  *   "smart" applications to perform a second cast to
2070*4882a593Smuzhiyun  *   decode the FDDI-specific statistics.
2071*4882a593Smuzhiyun  *
2072*4882a593Smuzhiyun  *   We'll have to pay attention to this routine as the
2073*4882a593Smuzhiyun  *   device structure becomes more mature and LAN media
2074*4882a593Smuzhiyun  *   independent.
2075*4882a593Smuzhiyun  *
2076*4882a593Smuzhiyun  * Return Codes:
2077*4882a593Smuzhiyun  *   None
2078*4882a593Smuzhiyun  *
2079*4882a593Smuzhiyun  * Assumptions:
2080*4882a593Smuzhiyun  *   None
2081*4882a593Smuzhiyun  *
2082*4882a593Smuzhiyun  * Side Effects:
2083*4882a593Smuzhiyun  *   None
2084*4882a593Smuzhiyun  */
2085*4882a593Smuzhiyun 
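/*
 * Illustrative only: code holding the pointer returned below can perform
 * the "second cast" mentioned in the note above, e.g.
 *
 *	struct net_device_stats *ns = dfx_ctl_get_stats(dev);
 *	struct fddi_statistics *fs = (struct fddi_statistics *)ns;
 *
 * This assumes, as the cast in the return statement implies, that the
 * generic counters sit at the very start of the FDDI statistics
 * structure (bp->stats.gen).
 */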
2086*4882a593Smuzhiyun static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2087*4882a593Smuzhiyun 	{
2088*4882a593Smuzhiyun 	DFX_board_t *bp = netdev_priv(dev);
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun 	/* Fill the bp->stats structure with driver-maintained counters */
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	bp->stats.gen.rx_packets = bp->rcv_total_frames;
2093*4882a593Smuzhiyun 	bp->stats.gen.tx_packets = bp->xmt_total_frames;
2094*4882a593Smuzhiyun 	bp->stats.gen.rx_bytes   = bp->rcv_total_bytes;
2095*4882a593Smuzhiyun 	bp->stats.gen.tx_bytes   = bp->xmt_total_bytes;
2096*4882a593Smuzhiyun 	bp->stats.gen.rx_errors  = bp->rcv_crc_errors +
2097*4882a593Smuzhiyun 				   bp->rcv_frame_status_errors +
2098*4882a593Smuzhiyun 				   bp->rcv_length_errors;
2099*4882a593Smuzhiyun 	bp->stats.gen.tx_errors  = bp->xmt_length_errors;
2100*4882a593Smuzhiyun 	bp->stats.gen.rx_dropped = bp->rcv_discards;
2101*4882a593Smuzhiyun 	bp->stats.gen.tx_dropped = bp->xmt_discards;
2102*4882a593Smuzhiyun 	bp->stats.gen.multicast  = bp->rcv_multicast_frames;
2103*4882a593Smuzhiyun 	bp->stats.gen.collisions = 0;		/* always zero (0) for FDDI */
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	/* Get FDDI SMT MIB objects */
2106*4882a593Smuzhiyun 
2107*4882a593Smuzhiyun 	bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2108*4882a593Smuzhiyun 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2109*4882a593Smuzhiyun 		return (struct net_device_stats *)&bp->stats;
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 	/* Fill the bp->stats structure with the SMT MIB object values */
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun 	memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
2114*4882a593Smuzhiyun 	bp->stats.smt_op_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
2115*4882a593Smuzhiyun 	bp->stats.smt_hi_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
2116*4882a593Smuzhiyun 	bp->stats.smt_lo_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
2117*4882a593Smuzhiyun 	memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
2118*4882a593Smuzhiyun 	bp->stats.smt_mib_version_id				= bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
2119*4882a593Smuzhiyun 	bp->stats.smt_mac_cts						= bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
2120*4882a593Smuzhiyun 	bp->stats.smt_non_master_cts				= bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
2121*4882a593Smuzhiyun 	bp->stats.smt_master_cts					= bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
2122*4882a593Smuzhiyun 	bp->stats.smt_available_paths				= bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
2123*4882a593Smuzhiyun 	bp->stats.smt_config_capabilities			= bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
2124*4882a593Smuzhiyun 	bp->stats.smt_config_policy					= bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
2125*4882a593Smuzhiyun 	bp->stats.smt_connection_policy				= bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
2126*4882a593Smuzhiyun 	bp->stats.smt_t_notify						= bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
2127*4882a593Smuzhiyun 	bp->stats.smt_stat_rpt_policy				= bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
2128*4882a593Smuzhiyun 	bp->stats.smt_trace_max_expiration			= bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
2129*4882a593Smuzhiyun 	bp->stats.smt_bypass_present				= bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
2130*4882a593Smuzhiyun 	bp->stats.smt_ecm_state						= bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
2131*4882a593Smuzhiyun 	bp->stats.smt_cf_state						= bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
2132*4882a593Smuzhiyun 	bp->stats.smt_remote_disconnect_flag		= bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
2133*4882a593Smuzhiyun 	bp->stats.smt_station_status				= bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
2134*4882a593Smuzhiyun 	bp->stats.smt_peer_wrap_flag				= bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
2135*4882a593Smuzhiyun 	bp->stats.smt_time_stamp					= bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
2136*4882a593Smuzhiyun 	bp->stats.smt_transition_time_stamp			= bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
2137*4882a593Smuzhiyun 	bp->stats.mac_frame_status_functions		= bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
2138*4882a593Smuzhiyun 	bp->stats.mac_t_max_capability				= bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
2139*4882a593Smuzhiyun 	bp->stats.mac_tvx_capability				= bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
2140*4882a593Smuzhiyun 	bp->stats.mac_available_paths				= bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
2141*4882a593Smuzhiyun 	bp->stats.mac_current_path					= bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
2142*4882a593Smuzhiyun 	memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
2143*4882a593Smuzhiyun 	memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
2144*4882a593Smuzhiyun 	memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
2145*4882a593Smuzhiyun 	memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
2146*4882a593Smuzhiyun 	bp->stats.mac_dup_address_test				= bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
2147*4882a593Smuzhiyun 	bp->stats.mac_requested_paths				= bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
2148*4882a593Smuzhiyun 	bp->stats.mac_downstream_port_type			= bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
2149*4882a593Smuzhiyun 	memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
2150*4882a593Smuzhiyun 	bp->stats.mac_t_req							= bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
2151*4882a593Smuzhiyun 	bp->stats.mac_t_neg							= bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
2152*4882a593Smuzhiyun 	bp->stats.mac_t_max							= bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
2153*4882a593Smuzhiyun 	bp->stats.mac_tvx_value						= bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
2154*4882a593Smuzhiyun 	bp->stats.mac_frame_error_threshold			= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
2155*4882a593Smuzhiyun 	bp->stats.mac_frame_error_ratio				= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
2156*4882a593Smuzhiyun 	bp->stats.mac_rmt_state						= bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
2157*4882a593Smuzhiyun 	bp->stats.mac_da_flag						= bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
2158*4882a593Smuzhiyun 	bp->stats.mac_una_da_flag					= bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
2159*4882a593Smuzhiyun 	bp->stats.mac_frame_error_flag				= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
2160*4882a593Smuzhiyun 	bp->stats.mac_ma_unitdata_available			= bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
2161*4882a593Smuzhiyun 	bp->stats.mac_hardware_present				= bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
2162*4882a593Smuzhiyun 	bp->stats.mac_ma_unitdata_enable			= bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
2163*4882a593Smuzhiyun 	bp->stats.path_tvx_lower_bound				= bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
2164*4882a593Smuzhiyun 	bp->stats.path_t_max_lower_bound			= bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
2165*4882a593Smuzhiyun 	bp->stats.path_max_t_req					= bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
2166*4882a593Smuzhiyun 	memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
2167*4882a593Smuzhiyun 	bp->stats.port_my_type[0]					= bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
2168*4882a593Smuzhiyun 	bp->stats.port_my_type[1]					= bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
2169*4882a593Smuzhiyun 	bp->stats.port_neighbor_type[0]				= bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
2170*4882a593Smuzhiyun 	bp->stats.port_neighbor_type[1]				= bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
2171*4882a593Smuzhiyun 	bp->stats.port_connection_policies[0]		= bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
2172*4882a593Smuzhiyun 	bp->stats.port_connection_policies[1]		= bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
2173*4882a593Smuzhiyun 	bp->stats.port_mac_indicated[0]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
2174*4882a593Smuzhiyun 	bp->stats.port_mac_indicated[1]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
2175*4882a593Smuzhiyun 	bp->stats.port_current_path[0]				= bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
2176*4882a593Smuzhiyun 	bp->stats.port_current_path[1]				= bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
2177*4882a593Smuzhiyun 	memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
2178*4882a593Smuzhiyun 	memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
2179*4882a593Smuzhiyun 	bp->stats.port_mac_placement[0]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
2180*4882a593Smuzhiyun 	bp->stats.port_mac_placement[1]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
2181*4882a593Smuzhiyun 	bp->stats.port_available_paths[0]			= bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
2182*4882a593Smuzhiyun 	bp->stats.port_available_paths[1]			= bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
2183*4882a593Smuzhiyun 	bp->stats.port_pmd_class[0]					= bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
2184*4882a593Smuzhiyun 	bp->stats.port_pmd_class[1]					= bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
2185*4882a593Smuzhiyun 	bp->stats.port_connection_capabilities[0]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
2186*4882a593Smuzhiyun 	bp->stats.port_connection_capabilities[1]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
2187*4882a593Smuzhiyun 	bp->stats.port_bs_flag[0]					= bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
2188*4882a593Smuzhiyun 	bp->stats.port_bs_flag[1]					= bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
2189*4882a593Smuzhiyun 	bp->stats.port_ler_estimate[0]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
2190*4882a593Smuzhiyun 	bp->stats.port_ler_estimate[1]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
2191*4882a593Smuzhiyun 	bp->stats.port_ler_cutoff[0]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
2192*4882a593Smuzhiyun 	bp->stats.port_ler_cutoff[1]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
2193*4882a593Smuzhiyun 	bp->stats.port_ler_alarm[0]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
2194*4882a593Smuzhiyun 	bp->stats.port_ler_alarm[1]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
2195*4882a593Smuzhiyun 	bp->stats.port_connect_state[0]				= bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
2196*4882a593Smuzhiyun 	bp->stats.port_connect_state[1]				= bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
2197*4882a593Smuzhiyun 	bp->stats.port_pcm_state[0]					= bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
2198*4882a593Smuzhiyun 	bp->stats.port_pcm_state[1]					= bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
2199*4882a593Smuzhiyun 	bp->stats.port_pc_withhold[0]				= bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
2200*4882a593Smuzhiyun 	bp->stats.port_pc_withhold[1]				= bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
2201*4882a593Smuzhiyun 	bp->stats.port_ler_flag[0]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
2202*4882a593Smuzhiyun 	bp->stats.port_ler_flag[1]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
2203*4882a593Smuzhiyun 	bp->stats.port_hardware_present[0]			= bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
2204*4882a593Smuzhiyun 	bp->stats.port_hardware_present[1]			= bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
2205*4882a593Smuzhiyun 
2206*4882a593Smuzhiyun 	/* Get FDDI counters */
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun 	bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2209*4882a593Smuzhiyun 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2210*4882a593Smuzhiyun 		return (struct net_device_stats *)&bp->stats;
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun 	/* Fill the bp->stats structure with the FDDI counter values */
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 	bp->stats.mac_frame_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
2215*4882a593Smuzhiyun 	bp->stats.mac_copied_cts			= bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
2216*4882a593Smuzhiyun 	bp->stats.mac_transmit_cts			= bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
2217*4882a593Smuzhiyun 	bp->stats.mac_error_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
2218*4882a593Smuzhiyun 	bp->stats.mac_lost_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
2219*4882a593Smuzhiyun 	bp->stats.port_lct_fail_cts[0]		= bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
2220*4882a593Smuzhiyun 	bp->stats.port_lct_fail_cts[1]		= bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
2221*4882a593Smuzhiyun 	bp->stats.port_lem_reject_cts[0]	= bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
2222*4882a593Smuzhiyun 	bp->stats.port_lem_reject_cts[1]	= bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
2223*4882a593Smuzhiyun 	bp->stats.port_lem_cts[0]			= bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2224*4882a593Smuzhiyun 	bp->stats.port_lem_cts[1]			= bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2225*4882a593Smuzhiyun 
2226*4882a593Smuzhiyun 	return (struct net_device_stats *)&bp->stats;
2227*4882a593Smuzhiyun 	}
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun 
2230*4882a593Smuzhiyun /*
2231*4882a593Smuzhiyun  * ==============================
2232*4882a593Smuzhiyun  * = dfx_ctl_set_multicast_list =
2233*4882a593Smuzhiyun  * ==============================
2234*4882a593Smuzhiyun  *
2235*4882a593Smuzhiyun  * Overview:
2236*4882a593Smuzhiyun  *   Enable/Disable LLC frame promiscuous mode reception
2237*4882a593Smuzhiyun  *   on the adapter and/or update multicast address table.
2238*4882a593Smuzhiyun  *
2239*4882a593Smuzhiyun  * Returns:
2240*4882a593Smuzhiyun  *   None
2241*4882a593Smuzhiyun  *
2242*4882a593Smuzhiyun  * Arguments:
2243*4882a593Smuzhiyun  *   dev - pointer to device information
2244*4882a593Smuzhiyun  *
2245*4882a593Smuzhiyun  * Functional Description:
2246*4882a593Smuzhiyun  *   This routine follows a fairly simple algorithm for setting the
2247*4882a593Smuzhiyun  *   adapter filters and CAM:
2248*4882a593Smuzhiyun  *
2249*4882a593Smuzhiyun  *		if IFF_PROMISC flag is set
2250*4882a593Smuzhiyun  *			enable LLC individual/group promiscuous mode
2251*4882a593Smuzhiyun  *		else
2252*4882a593Smuzhiyun  *			disable LLC individual/group promiscuous mode
2253*4882a593Smuzhiyun  *			if number of incoming multicast addresses >
2254*4882a593Smuzhiyun  *					(CAM max size - number of unicast addresses in CAM)
2255*4882a593Smuzhiyun  *				enable LLC group promiscuous mode
2256*4882a593Smuzhiyun  *				set driver-maintained multicast address count to zero
2257*4882a593Smuzhiyun  *			else
2258*4882a593Smuzhiyun  *				disable LLC group promiscuous mode
2259*4882a593Smuzhiyun  *				set driver-maintained multicast address count to incoming count
2260*4882a593Smuzhiyun  *			update adapter CAM
2261*4882a593Smuzhiyun  *		update adapter filters
2262*4882a593Smuzhiyun  *
2263*4882a593Smuzhiyun  * Return Codes:
2264*4882a593Smuzhiyun  *   None
2265*4882a593Smuzhiyun  *
2266*4882a593Smuzhiyun  * Assumptions:
2267*4882a593Smuzhiyun  *   Multicast addresses are presented in canonical (LSB) format.
2268*4882a593Smuzhiyun  *
2269*4882a593Smuzhiyun  * Side Effects:
2270*4882a593Smuzhiyun  *   On-board adapter CAM and filters are updated.
2271*4882a593Smuzhiyun  */
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun static void dfx_ctl_set_multicast_list(struct net_device *dev)
2274*4882a593Smuzhiyun {
2275*4882a593Smuzhiyun 	DFX_board_t *bp = netdev_priv(dev);
2276*4882a593Smuzhiyun 	int					i;			/* used as index in for loop */
2277*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 	/* Enable LLC frame promiscuous mode, if necessary */
2280*4882a593Smuzhiyun 
2281*4882a593Smuzhiyun 	if (dev->flags & IFF_PROMISC)
2282*4882a593Smuzhiyun 		bp->ind_group_prom = PI_FSTATE_K_PASS;		/* Enable LLC ind/group prom mode */
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	/* Else, update multicast address table */
2285*4882a593Smuzhiyun 
2286*4882a593Smuzhiyun 	else
2287*4882a593Smuzhiyun 		{
2288*4882a593Smuzhiyun 		bp->ind_group_prom = PI_FSTATE_K_BLOCK;		/* Disable LLC ind/group prom mode */
2289*4882a593Smuzhiyun 		/*
2290*4882a593Smuzhiyun 		 * Check whether incoming multicast address count exceeds table size
2291*4882a593Smuzhiyun 		 *
2292*4882a593Smuzhiyun 		 * Note: The adapters utilize an on-board 64 entry CAM for
2293*4882a593Smuzhiyun 		 *       supporting perfect filtering of multicast packets
2294*4882a593Smuzhiyun 		 *		 and bridge functions when adding unicast addresses.
2295*4882a593Smuzhiyun 		 *		 There is no hash function available.  To support
2296*4882a593Smuzhiyun 		 *		 additional multicast addresses, the all multicast
2297*4882a593Smuzhiyun 		 *		 filter (LLC group promiscuous mode) must be enabled.
2298*4882a593Smuzhiyun 		 *
2299*4882a593Smuzhiyun 		 *		 The firmware reserves two CAM entries for SMT-related
2300*4882a593Smuzhiyun 		 *		 multicast addresses, which leaves 62 entries available.
2301*4882a593Smuzhiyun 		 *		 The following code ensures that we're not being asked
2302*4882a593Smuzhiyun 		 *		 to add more than 62 addresses to the CAM.  If we are,
2303*4882a593Smuzhiyun 		 *		 the driver will enable the all multicast filter.
2304*4882a593Smuzhiyun 		 *		 Should the number of multicast addresses drop below
2305*4882a593Smuzhiyun 		 *		 the high water mark, the filter will be disabled and
2306*4882a593Smuzhiyun 		 *		 perfect filtering will be used.
2307*4882a593Smuzhiyun 		 */
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun 		if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2310*4882a593Smuzhiyun 			{
2311*4882a593Smuzhiyun 			bp->group_prom	= PI_FSTATE_K_PASS;		/* Enable LLC group prom mode */
2312*4882a593Smuzhiyun 			bp->mc_count	= 0;					/* Don't add mc addrs to CAM */
2313*4882a593Smuzhiyun 			}
2314*4882a593Smuzhiyun 		else
2315*4882a593Smuzhiyun 			{
2316*4882a593Smuzhiyun 			bp->group_prom	= PI_FSTATE_K_BLOCK;	/* Disable LLC group prom mode */
2317*4882a593Smuzhiyun 			bp->mc_count	= netdev_mc_count(dev);		/* Add mc addrs to CAM */
2318*4882a593Smuzhiyun 			}
2319*4882a593Smuzhiyun 
2320*4882a593Smuzhiyun 		/* Copy addresses to multicast address table, then update adapter CAM */
2321*4882a593Smuzhiyun 
2322*4882a593Smuzhiyun 		i = 0;
2323*4882a593Smuzhiyun 		netdev_for_each_mc_addr(ha, dev)
2324*4882a593Smuzhiyun 			memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2325*4882a593Smuzhiyun 			       ha->addr, FDDI_K_ALEN);
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun 		if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2328*4882a593Smuzhiyun 			{
2329*4882a593Smuzhiyun 			DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2330*4882a593Smuzhiyun 			}
2331*4882a593Smuzhiyun 		else
2332*4882a593Smuzhiyun 			{
2333*4882a593Smuzhiyun 			DBG_printk("%s: Multicast address table updated!  Added %d addresses.\n", dev->name, bp->mc_count);
2334*4882a593Smuzhiyun 			}
2335*4882a593Smuzhiyun 		}
2336*4882a593Smuzhiyun 
2337*4882a593Smuzhiyun 	/* Update adapter filters */
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2340*4882a593Smuzhiyun 		{
2341*4882a593Smuzhiyun 		DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2342*4882a593Smuzhiyun 		}
2343*4882a593Smuzhiyun 	else
2344*4882a593Smuzhiyun 		{
2345*4882a593Smuzhiyun 		DBG_printk("%s: Adapter filters updated!\n", dev->name);
2346*4882a593Smuzhiyun 		}
2347*4882a593Smuzhiyun 	}
2348*4882a593Smuzhiyun 
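/*
 * Minimal sketch of the CAM sizing rule described above (illustration only,
 * not called by the driver): the adapter CAM holds 64 entries, the firmware
 * reserves two for SMT multicast addresses, and PI_CMD_ADDR_FILTER_K_SIZE is
 * the remaining usable capacity (62 per the note above).  When the requested
 * multicast addresses will not fit alongside the unicast entries, the driver
 * falls back to the LLC group promiscuous (all multicast) filter instead of
 * perfect filtering.
 */
static inline int dfx_example_needs_group_prom(unsigned int mc_count,
					       unsigned int uc_count)
{
	/* Non-zero means the CAM would overflow, so pass all multicast. */
	return mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - uc_count);
}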
2349*4882a593Smuzhiyun 
2350*4882a593Smuzhiyun /*
2351*4882a593Smuzhiyun  * ===========================
2352*4882a593Smuzhiyun  * = dfx_ctl_set_mac_address =
2353*4882a593Smuzhiyun  * ===========================
2354*4882a593Smuzhiyun  *
2355*4882a593Smuzhiyun  * Overview:
2356*4882a593Smuzhiyun  *   Add node address override (unicast address) to adapter
2357*4882a593Smuzhiyun  *   CAM and update dev_addr field in device table.
2358*4882a593Smuzhiyun  *
2359*4882a593Smuzhiyun  * Returns:
2360*4882a593Smuzhiyun  *   None
2361*4882a593Smuzhiyun  *
2362*4882a593Smuzhiyun  * Arguments:
2363*4882a593Smuzhiyun  *   dev  - pointer to device information
2364*4882a593Smuzhiyun  *   addr - pointer to sockaddr structure containing unicast address to add
2365*4882a593Smuzhiyun  *
2366*4882a593Smuzhiyun  * Functional Description:
2367*4882a593Smuzhiyun  *   The adapter supports node address overrides by adding one or more
2368*4882a593Smuzhiyun  *   unicast addresses to the adapter CAM.  This is similar to adding
2369*4882a593Smuzhiyun  *   multicast addresses.  In this routine we'll update the driver and
2370*4882a593Smuzhiyun  *   device structures with the new address, then update the adapter CAM
2371*4882a593Smuzhiyun  *   to ensure that the adapter will copy and strip frames destined and
2372*4882a593Smuzhiyun  *   sourced by that address.
2373*4882a593Smuzhiyun  *
2374*4882a593Smuzhiyun  * Return Codes:
2375*4882a593Smuzhiyun  *   Always returns zero.
2376*4882a593Smuzhiyun  *
2377*4882a593Smuzhiyun  * Assumptions:
2378*4882a593Smuzhiyun  *   The address pointed to by addr->sa_data is a valid unicast
2379*4882a593Smuzhiyun  *   address and is presented in canonical (LSB) format.
2380*4882a593Smuzhiyun  *
2381*4882a593Smuzhiyun  * Side Effects:
2382*4882a593Smuzhiyun  *   On-board adapter CAM is updated.  On-board adapter filters
2383*4882a593Smuzhiyun  *   may be updated.
2384*4882a593Smuzhiyun  */
2385*4882a593Smuzhiyun 
2386*4882a593Smuzhiyun static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2387*4882a593Smuzhiyun 	{
2388*4882a593Smuzhiyun 	struct sockaddr	*p_sockaddr = (struct sockaddr *)addr;
2389*4882a593Smuzhiyun 	DFX_board_t *bp = netdev_priv(dev);
2390*4882a593Smuzhiyun 
2391*4882a593Smuzhiyun 	/* Copy unicast address to driver-maintained structs and update count */
2392*4882a593Smuzhiyun 
2393*4882a593Smuzhiyun 	memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);	/* update device struct */
2394*4882a593Smuzhiyun 	memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN);	/* update driver struct */
2395*4882a593Smuzhiyun 	bp->uc_count = 1;
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun 	/*
2398*4882a593Smuzhiyun 	 * Verify we're not exceeding the CAM size by adding unicast address
2399*4882a593Smuzhiyun 	 *
2400*4882a593Smuzhiyun 	 * Note: It's possible that before entering this routine we've
2401*4882a593Smuzhiyun 	 *       already filled the CAM with 62 multicast addresses.
2402*4882a593Smuzhiyun 	 *		 Since we need to place the node address override into
2403*4882a593Smuzhiyun 	 *		 the CAM, we have to check to see that we're not
2404*4882a593Smuzhiyun 	 *		 exceeding the CAM size.  If we are, we have to enable
2405*4882a593Smuzhiyun 	 *		 the LLC group (multicast) promiscuous mode filter as
2406*4882a593Smuzhiyun 	 *		 in dfx_ctl_set_multicast_list.
2407*4882a593Smuzhiyun 	 */
2408*4882a593Smuzhiyun 
2409*4882a593Smuzhiyun 	if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2410*4882a593Smuzhiyun 		{
2411*4882a593Smuzhiyun 		bp->group_prom	= PI_FSTATE_K_PASS;		/* Enable LLC group prom mode */
2412*4882a593Smuzhiyun 		bp->mc_count	= 0;					/* Don't add mc addrs to CAM */
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun 		/* Update adapter filters */
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun 		if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2417*4882a593Smuzhiyun 			{
2418*4882a593Smuzhiyun 			DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2419*4882a593Smuzhiyun 			}
2420*4882a593Smuzhiyun 		else
2421*4882a593Smuzhiyun 			{
2422*4882a593Smuzhiyun 			DBG_printk("%s: Adapter filters updated!\n", dev->name);
2423*4882a593Smuzhiyun 			}
2424*4882a593Smuzhiyun 		}
2425*4882a593Smuzhiyun 
2426*4882a593Smuzhiyun 	/* Update adapter CAM with new unicast address */
2427*4882a593Smuzhiyun 
2428*4882a593Smuzhiyun 	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2429*4882a593Smuzhiyun 		{
2430*4882a593Smuzhiyun 		DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2431*4882a593Smuzhiyun 		}
2432*4882a593Smuzhiyun 	else
2433*4882a593Smuzhiyun 		{
2434*4882a593Smuzhiyun 		DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2435*4882a593Smuzhiyun 		}
2436*4882a593Smuzhiyun 	return 0;			/* always return zero */
2437*4882a593Smuzhiyun 	}
2438*4882a593Smuzhiyun 
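/*
 * Hedged sketch (assumption, not taken from this section of the file):
 * control routines such as dfx_ctl_get_stats, dfx_ctl_set_multicast_list and
 * dfx_ctl_set_mac_address are normally wired into the networking core through
 * a net_device_ops table along the lines shown below.  The table name here is
 * illustrative only.
 */
static const struct net_device_ops dfx_example_netdev_ops = {
	.ndo_get_stats		= dfx_ctl_get_stats,
	.ndo_set_rx_mode	= dfx_ctl_set_multicast_list,
	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
};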
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun /*
2441*4882a593Smuzhiyun  * ======================
2442*4882a593Smuzhiyun  * = dfx_ctl_update_cam =
2443*4882a593Smuzhiyun  * ======================
2444*4882a593Smuzhiyun  *
2445*4882a593Smuzhiyun  * Overview:
2446*4882a593Smuzhiyun  *   Procedure to update adapter CAM (Content Addressable Memory)
2447*4882a593Smuzhiyun  *   with desired unicast and multicast address entries.
2448*4882a593Smuzhiyun  *
2449*4882a593Smuzhiyun  * Returns:
2450*4882a593Smuzhiyun  *   Condition code
2451*4882a593Smuzhiyun  *
2452*4882a593Smuzhiyun  * Arguments:
2453*4882a593Smuzhiyun  *   bp - pointer to board information
2454*4882a593Smuzhiyun  *
2455*4882a593Smuzhiyun  * Functional Description:
2456*4882a593Smuzhiyun  *   Updates adapter CAM with current contents of board structure
2457*4882a593Smuzhiyun  *   unicast and multicast address tables.  Since there are only 62
2458*4882a593Smuzhiyun  *   free entries in CAM, this routine ensures that the command
2459*4882a593Smuzhiyun  *   request buffer is not overrun.
2460*4882a593Smuzhiyun  *
2461*4882a593Smuzhiyun  * Return Codes:
2462*4882a593Smuzhiyun  *   DFX_K_SUCCESS - Request succeeded
2463*4882a593Smuzhiyun  *   DFX_K_FAILURE - Request failed
2464*4882a593Smuzhiyun  *
2465*4882a593Smuzhiyun  * Assumptions:
2466*4882a593Smuzhiyun  *   All addresses being added (unicast and multicast) are in canonical
2467*4882a593Smuzhiyun  *   order.
2468*4882a593Smuzhiyun  *
2469*4882a593Smuzhiyun  * Side Effects:
2470*4882a593Smuzhiyun  *   On-board adapter CAM is updated.
2471*4882a593Smuzhiyun  */
2472*4882a593Smuzhiyun 
2473*4882a593Smuzhiyun static int dfx_ctl_update_cam(DFX_board_t *bp)
2474*4882a593Smuzhiyun 	{
2475*4882a593Smuzhiyun 	int			i;				/* used as index */
2476*4882a593Smuzhiyun 	PI_LAN_ADDR	*p_addr;		/* pointer to CAM entry */
2477*4882a593Smuzhiyun 
2478*4882a593Smuzhiyun 	/*
2479*4882a593Smuzhiyun 	 * Fill in command request information
2480*4882a593Smuzhiyun 	 *
2481*4882a593Smuzhiyun 	 * Note: Even though both the unicast and multicast address
2482*4882a593Smuzhiyun 	 *       table entries are stored as contiguous 6 byte entries,
2483*4882a593Smuzhiyun 	 *		 the firmware address filter set command expects each
2484*4882a593Smuzhiyun 	 *		 entry to be two longwords (8 bytes total).  We must be
2485*4882a593Smuzhiyun 	 *		 careful to only copy the six bytes of each unicast and
2486*4882a593Smuzhiyun 	 *		 multicast table entry into each command entry.  This
2487*4882a593Smuzhiyun 	 *		 is also why we must first clear the entire command
2488*4882a593Smuzhiyun 	 *		 request buffer.
2489*4882a593Smuzhiyun 	 */
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun 	memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX);	/* first clear buffer */
2492*4882a593Smuzhiyun 	bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2493*4882a593Smuzhiyun 	p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2494*4882a593Smuzhiyun 
2495*4882a593Smuzhiyun 	/* Now add unicast addresses to command request buffer, if any */
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun 	for (i=0; i < (int)bp->uc_count; i++)
2498*4882a593Smuzhiyun 		{
2499*4882a593Smuzhiyun 		if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2500*4882a593Smuzhiyun 			{
2501*4882a593Smuzhiyun 			memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2502*4882a593Smuzhiyun 			p_addr++;			/* point to next command entry */
2503*4882a593Smuzhiyun 			}
2504*4882a593Smuzhiyun 		}
2505*4882a593Smuzhiyun 
2506*4882a593Smuzhiyun 	/* Now add multicast addresses to command request buffer, if any */
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun 	for (i=0; i < (int)bp->mc_count; i++)
2509*4882a593Smuzhiyun 		{
2510*4882a593Smuzhiyun 		if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2511*4882a593Smuzhiyun 			{
2512*4882a593Smuzhiyun 			memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2513*4882a593Smuzhiyun 			p_addr++;			/* point to next command entry */
2514*4882a593Smuzhiyun 			}
2515*4882a593Smuzhiyun 		}
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 	/* Issue command to update adapter CAM, then return */
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2520*4882a593Smuzhiyun 		return DFX_K_FAILURE;
2521*4882a593Smuzhiyun 	return DFX_K_SUCCESS;
2522*4882a593Smuzhiyun 	}
2523*4882a593Smuzhiyun 
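/*
 * Illustrative sketch of the entry layout the note above describes (the real
 * PI_LAN_ADDR definition lives in the driver headers): each command entry
 * spans two longwords, of which only the first six bytes carry an FDDI
 * address.  Clearing the whole request buffer first, as dfx_ctl_update_cam()
 * does, guarantees the two pad bytes of every entry are zero.
 */
struct dfx_example_cam_entry {
	u8	fddi_addr[FDDI_K_ALEN];	/* 6-byte address, canonical order */
	u8	pad[2];			/* zeroed by the initial memset    */
};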
2524*4882a593Smuzhiyun 
2525*4882a593Smuzhiyun /*
2526*4882a593Smuzhiyun  * ==========================
2527*4882a593Smuzhiyun  * = dfx_ctl_update_filters =
2528*4882a593Smuzhiyun  * ==========================
2529*4882a593Smuzhiyun  *
2530*4882a593Smuzhiyun  * Overview:
2531*4882a593Smuzhiyun  *   Procedure to update adapter filters with desired
2532*4882a593Smuzhiyun  *   filter settings.
2533*4882a593Smuzhiyun  *
2534*4882a593Smuzhiyun  * Returns:
2535*4882a593Smuzhiyun  *   Condition code
2536*4882a593Smuzhiyun  *
2537*4882a593Smuzhiyun  * Arguments:
2538*4882a593Smuzhiyun  *   bp - pointer to board information
2539*4882a593Smuzhiyun  *
2540*4882a593Smuzhiyun  * Functional Description:
2541*4882a593Smuzhiyun  *   Enables or disables filter using current filter settings.
2542*4882a593Smuzhiyun  *
2543*4882a593Smuzhiyun  * Return Codes:
2544*4882a593Smuzhiyun  *   DFX_K_SUCCESS - Request succeeded.
2545*4882a593Smuzhiyun  *   DFX_K_FAILURE - Request failed.
2546*4882a593Smuzhiyun  *
2547*4882a593Smuzhiyun  * Assumptions:
2548*4882a593Smuzhiyun  *   We must always pass up packets destined to the broadcast
2549*4882a593Smuzhiyun  *   address (FF-FF-FF-FF-FF-FF), so we'll always keep the
2550*4882a593Smuzhiyun  *   broadcast filter enabled.
2551*4882a593Smuzhiyun  *
2552*4882a593Smuzhiyun  * Side Effects:
2553*4882a593Smuzhiyun  *   On-board adapter filters are updated.
2554*4882a593Smuzhiyun  */
2555*4882a593Smuzhiyun 
2556*4882a593Smuzhiyun static int dfx_ctl_update_filters(DFX_board_t *bp)
2557*4882a593Smuzhiyun 	{
2558*4882a593Smuzhiyun 	int	i = 0;					/* used as index */
2559*4882a593Smuzhiyun 
2560*4882a593Smuzhiyun 	/* Fill in command request information */
2561*4882a593Smuzhiyun 
2562*4882a593Smuzhiyun 	bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 	/* Initialize Broadcast filter - * ALWAYS ENABLED * */
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_BROADCAST;
2567*4882a593Smuzhiyun 	bp->cmd_req_virt->filter_set.item[i++].value	= PI_FSTATE_K_PASS;
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun 	/* Initialize LLC Individual/Group Promiscuous filter */
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_IND_GROUP_PROM;
2572*4882a593Smuzhiyun 	bp->cmd_req_virt->filter_set.item[i++].value	= bp->ind_group_prom;
2573*4882a593Smuzhiyun 
2574*4882a593Smuzhiyun 	/* Initialize LLC Group Promiscuous filter */
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_GROUP_PROM;
2577*4882a593Smuzhiyun 	bp->cmd_req_virt->filter_set.item[i++].value	= bp->group_prom;
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun 	/* Terminate the item code list */
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_EOL;
2582*4882a593Smuzhiyun 
2583*4882a593Smuzhiyun 	/* Issue command to update adapter filters, then return */
2584*4882a593Smuzhiyun 
2585*4882a593Smuzhiyun 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2586*4882a593Smuzhiyun 		return DFX_K_FAILURE;
2587*4882a593Smuzhiyun 	return DFX_K_SUCCESS;
2588*4882a593Smuzhiyun 	}
2589*4882a593Smuzhiyun 
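/*
 * Sketch of the {item code, value} list laid out above (illustration only):
 * the firmware consumes item/value pairs until it reaches the PI_ITEM_K_EOL
 * terminator, so a filter set request is simply a short table of entries of
 * the hypothetical shape below.
 */
struct dfx_example_filter_item {
	PI_UINT32 item_code;	/* e.g. PI_ITEM_K_BROADCAST or PI_ITEM_K_EOL */
	PI_UINT32 value;	/* PI_FSTATE_K_PASS or PI_FSTATE_K_BLOCK      */
};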
2590*4882a593Smuzhiyun 
2591*4882a593Smuzhiyun /*
2592*4882a593Smuzhiyun  * ======================
2593*4882a593Smuzhiyun  * = dfx_hw_dma_cmd_req =
2594*4882a593Smuzhiyun  * ======================
2595*4882a593Smuzhiyun  *
2596*4882a593Smuzhiyun  * Overview:
2597*4882a593Smuzhiyun  *   Sends PDQ DMA command to adapter firmware
2598*4882a593Smuzhiyun  *
2599*4882a593Smuzhiyun  * Returns:
2600*4882a593Smuzhiyun  *   Condition code
2601*4882a593Smuzhiyun  *
2602*4882a593Smuzhiyun  * Arguments:
2603*4882a593Smuzhiyun  *   bp - pointer to board information
2604*4882a593Smuzhiyun  *
2605*4882a593Smuzhiyun  * Functional Description:
2606*4882a593Smuzhiyun  *   The command request and response buffers are posted to the adapter in the manner
2607*4882a593Smuzhiyun  *   described in the PDQ Port Specification:
2608*4882a593Smuzhiyun  *
2609*4882a593Smuzhiyun  *		1. Command Response Buffer is posted to adapter.
2610*4882a593Smuzhiyun  *		2. Command Request Buffer is posted to adapter.
2611*4882a593Smuzhiyun  *		3. Command Request consumer index is polled until it indicates that request
2612*4882a593Smuzhiyun  *         buffer has been DMA'd to adapter.
2613*4882a593Smuzhiyun  *		4. Command Response consumer index is polled until it indicates that response
2614*4882a593Smuzhiyun  *         buffer has been DMA'd from adapter.
2615*4882a593Smuzhiyun  *
2616*4882a593Smuzhiyun  *   This ordering ensures that a response buffer is already available for the firmware
2617*4882a593Smuzhiyun  *   to use once it's done processing the request buffer.
2618*4882a593Smuzhiyun  *
2619*4882a593Smuzhiyun  * Return Codes:
2620*4882a593Smuzhiyun  *   DFX_K_SUCCESS	  - DMA command succeeded
2621*4882a593Smuzhiyun  * 	 DFX_K_OUTSTATE   - Adapter is NOT in proper state
2622*4882a593Smuzhiyun  *   DFX_K_HW_TIMEOUT - DMA command timed out
2623*4882a593Smuzhiyun  *
2624*4882a593Smuzhiyun  * Assumptions:
2625*4882a593Smuzhiyun  *   Command request buffer has already been filled with desired DMA command.
2626*4882a593Smuzhiyun  *
2627*4882a593Smuzhiyun  * Side Effects:
2628*4882a593Smuzhiyun  *   None
2629*4882a593Smuzhiyun  */
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2632*4882a593Smuzhiyun 	{
2633*4882a593Smuzhiyun 	int status;			/* adapter status */
2634*4882a593Smuzhiyun 	int timeout_cnt;	/* used in for loops */
2635*4882a593Smuzhiyun 
2636*4882a593Smuzhiyun 	/* Make sure the adapter is in a state that we can issue the DMA command in */
2637*4882a593Smuzhiyun 
2638*4882a593Smuzhiyun 	status = dfx_hw_adap_state_rd(bp);
2639*4882a593Smuzhiyun 	if ((status == PI_STATE_K_RESET)		||
2640*4882a593Smuzhiyun 		(status == PI_STATE_K_HALTED)		||
2641*4882a593Smuzhiyun 		(status == PI_STATE_K_DMA_UNAVAIL)	||
2642*4882a593Smuzhiyun 		(status == PI_STATE_K_UPGRADE))
2643*4882a593Smuzhiyun 		return DFX_K_OUTSTATE;
2644*4882a593Smuzhiyun 
2645*4882a593Smuzhiyun 	/* Put response buffer on the command response queue */
2646*4882a593Smuzhiyun 
2647*4882a593Smuzhiyun 	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2648*4882a593Smuzhiyun 			((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2649*4882a593Smuzhiyun 	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun 	/* Bump (and wrap) the producer index and write out to register */
2652*4882a593Smuzhiyun 
2653*4882a593Smuzhiyun 	bp->cmd_rsp_reg.index.prod += 1;
2654*4882a593Smuzhiyun 	bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2655*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2656*4882a593Smuzhiyun 
2657*4882a593Smuzhiyun 	/* Put request buffer on the command request queue */
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun 	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2660*4882a593Smuzhiyun 			PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2661*4882a593Smuzhiyun 	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun 	/* Bump (and wrap) the producer index and write out to register */
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun 	bp->cmd_req_reg.index.prod += 1;
2666*4882a593Smuzhiyun 	bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2667*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2668*4882a593Smuzhiyun 
2669*4882a593Smuzhiyun 	/*
2670*4882a593Smuzhiyun 	 * Here we wait for the command request consumer index to be equal
2671*4882a593Smuzhiyun 	 * to the producer, indicating that the adapter has DMAed the request.
2672*4882a593Smuzhiyun 	 */
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun 	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2675*4882a593Smuzhiyun 		{
2676*4882a593Smuzhiyun 		if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2677*4882a593Smuzhiyun 			break;
2678*4882a593Smuzhiyun 		udelay(100);			/* wait for 100 microseconds */
2679*4882a593Smuzhiyun 		}
2680*4882a593Smuzhiyun 	if (timeout_cnt == 0)
2681*4882a593Smuzhiyun 		return DFX_K_HW_TIMEOUT;
2682*4882a593Smuzhiyun 
2683*4882a593Smuzhiyun 	/* Bump (and wrap) the completion index and write out to register */
2684*4882a593Smuzhiyun 
2685*4882a593Smuzhiyun 	bp->cmd_req_reg.index.comp += 1;
2686*4882a593Smuzhiyun 	bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2687*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2688*4882a593Smuzhiyun 
2689*4882a593Smuzhiyun 	/*
2690*4882a593Smuzhiyun 	 * Here we wait for the command response consumer index to be equal
2691*4882a593Smuzhiyun 	 * to the producer, indicating that the adapter has DMAed the response.
2692*4882a593Smuzhiyun 	 */
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun 	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2695*4882a593Smuzhiyun 		{
2696*4882a593Smuzhiyun 		if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2697*4882a593Smuzhiyun 			break;
2698*4882a593Smuzhiyun 		udelay(100);			/* wait for 100 microseconds */
2699*4882a593Smuzhiyun 		}
2700*4882a593Smuzhiyun 	if (timeout_cnt == 0)
2701*4882a593Smuzhiyun 		return DFX_K_HW_TIMEOUT;
2702*4882a593Smuzhiyun 
2703*4882a593Smuzhiyun 	/* Bump (and wrap) the completion index and write out to register */
2704*4882a593Smuzhiyun 
2705*4882a593Smuzhiyun 	bp->cmd_rsp_reg.index.comp += 1;
2706*4882a593Smuzhiyun 	bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2707*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2708*4882a593Smuzhiyun 	return DFX_K_SUCCESS;
2709*4882a593Smuzhiyun 	}
2710*4882a593Smuzhiyun 
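/*
 * Minimal sketch of the produce/complete index arithmetic used in
 * dfx_hw_dma_cmd_req() (illustration only).  It relies on the same assumption
 * the driver makes: the ring sizes, such as PI_CMD_REQ_K_NUM_ENTRIES and
 * PI_CMD_RSP_K_NUM_ENTRIES, are powers of two, so wrapping reduces to a mask.
 */
static inline u8 dfx_example_ring_advance(u8 index, u8 num_entries)
{
	return (index + 1) & (num_entries - 1);	/* bump and wrap */
}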
2711*4882a593Smuzhiyun 
2712*4882a593Smuzhiyun /*
2713*4882a593Smuzhiyun  * ========================
2714*4882a593Smuzhiyun  * = dfx_hw_port_ctrl_req =
2715*4882a593Smuzhiyun  * ========================
2716*4882a593Smuzhiyun  *
2717*4882a593Smuzhiyun  * Overview:
2718*4882a593Smuzhiyun  *   Sends PDQ port control command to adapter firmware
2719*4882a593Smuzhiyun  *
2720*4882a593Smuzhiyun  * Returns:
2721*4882a593Smuzhiyun  *   Host data register value in host_data if ptr is not NULL
2722*4882a593Smuzhiyun  *
2723*4882a593Smuzhiyun  * Arguments:
2724*4882a593Smuzhiyun  *   bp			- pointer to board information
2725*4882a593Smuzhiyun  *	 command	- port control command
2726*4882a593Smuzhiyun  *	 data_a		- port data A register value
2727*4882a593Smuzhiyun  *	 data_b		- port data B register value
2728*4882a593Smuzhiyun  *	 host_data	- ptr to host data register value
2729*4882a593Smuzhiyun  *
2730*4882a593Smuzhiyun  * Functional Description:
2731*4882a593Smuzhiyun  *   Send generic port control command to adapter by writing
2732*4882a593Smuzhiyun  *   to various PDQ port registers, then polling for completion.
2733*4882a593Smuzhiyun  *
2734*4882a593Smuzhiyun  * Return Codes:
2735*4882a593Smuzhiyun  *   DFX_K_SUCCESS	  - port control command succeeded
2736*4882a593Smuzhiyun  *   DFX_K_HW_TIMEOUT - port control command timed out
2737*4882a593Smuzhiyun  *
2738*4882a593Smuzhiyun  * Assumptions:
2739*4882a593Smuzhiyun  *   None
2740*4882a593Smuzhiyun  *
2741*4882a593Smuzhiyun  * Side Effects:
2742*4882a593Smuzhiyun  *   None
2743*4882a593Smuzhiyun  */
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun static int dfx_hw_port_ctrl_req(
2746*4882a593Smuzhiyun 	DFX_board_t	*bp,
2747*4882a593Smuzhiyun 	PI_UINT32	command,
2748*4882a593Smuzhiyun 	PI_UINT32	data_a,
2749*4882a593Smuzhiyun 	PI_UINT32	data_b,
2750*4882a593Smuzhiyun 	PI_UINT32	*host_data
2751*4882a593Smuzhiyun 	)
2752*4882a593Smuzhiyun 
2753*4882a593Smuzhiyun 	{
2754*4882a593Smuzhiyun 	PI_UINT32	port_cmd;		/* Port Control command register value */
2755*4882a593Smuzhiyun 	int			timeout_cnt;	/* used in for loops */
2756*4882a593Smuzhiyun 
2757*4882a593Smuzhiyun 	/* Set Command Error bit in command longword */
2758*4882a593Smuzhiyun 
2759*4882a593Smuzhiyun 	port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
2760*4882a593Smuzhiyun 
2761*4882a593Smuzhiyun 	/* Issue port command to the adapter */
2762*4882a593Smuzhiyun 
2763*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2764*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2765*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2766*4882a593Smuzhiyun 
2767*4882a593Smuzhiyun 	/* Now wait for command to complete */
2768*4882a593Smuzhiyun 
2769*4882a593Smuzhiyun 	if (command == PI_PCTRL_M_BLAST_FLASH)
2770*4882a593Smuzhiyun 		timeout_cnt = 600000;	/* set command timeout count to 60 seconds */
2771*4882a593Smuzhiyun 	else
2772*4882a593Smuzhiyun 		timeout_cnt = 20000;	/* set command timeout count to 2 seconds */
2773*4882a593Smuzhiyun 
2774*4882a593Smuzhiyun 	for (; timeout_cnt > 0; timeout_cnt--)
2775*4882a593Smuzhiyun 		{
2776*4882a593Smuzhiyun 		dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2777*4882a593Smuzhiyun 		if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2778*4882a593Smuzhiyun 			break;
2779*4882a593Smuzhiyun 		udelay(100);			/* wait for 100 microseconds */
2780*4882a593Smuzhiyun 		}
2781*4882a593Smuzhiyun 	if (timeout_cnt == 0)
2782*4882a593Smuzhiyun 		return DFX_K_HW_TIMEOUT;
2783*4882a593Smuzhiyun 
2784*4882a593Smuzhiyun 	/*
2785*4882a593Smuzhiyun 	 * If the caller supplied a non-NULL host_data pointer, return the
2786*4882a593Smuzhiyun 	 * contents of the HOST_DATA register through it so the result of the
2787*4882a593Smuzhiyun 	 * port command can be examined.
2788*4882a593Smuzhiyun 	 */
2789*4882a593Smuzhiyun 
2790*4882a593Smuzhiyun 	if (host_data != NULL)
2791*4882a593Smuzhiyun 		dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2792*4882a593Smuzhiyun 	return DFX_K_SUCCESS;
2793*4882a593Smuzhiyun 	}
2794*4882a593Smuzhiyun 
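/*
 * Sketch of how the polling budgets above map to wall-clock time, assuming
 * the udelay(100) step used throughout this driver: 20000 iterations is
 * roughly 2 seconds and 600000 iterations roughly 60 seconds (the flash
 * blast case).  The helper is illustrative only.
 */
static inline int dfx_example_poll_iterations(unsigned int seconds)
{
	return (seconds * 1000000) / 100;	/* iterations of udelay(100) */
}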
2795*4882a593Smuzhiyun 
2796*4882a593Smuzhiyun /*
2797*4882a593Smuzhiyun  * =====================
2798*4882a593Smuzhiyun  * = dfx_hw_adap_reset =
2799*4882a593Smuzhiyun  * =====================
2800*4882a593Smuzhiyun  *
2801*4882a593Smuzhiyun  * Overview:
2802*4882a593Smuzhiyun  *   Resets adapter
2803*4882a593Smuzhiyun  *
2804*4882a593Smuzhiyun  * Returns:
2805*4882a593Smuzhiyun  *   None
2806*4882a593Smuzhiyun  *
2807*4882a593Smuzhiyun  * Arguments:
2808*4882a593Smuzhiyun  *   bp   - pointer to board information
2809*4882a593Smuzhiyun  *   type - type of reset to perform
2810*4882a593Smuzhiyun  *
2811*4882a593Smuzhiyun  * Functional Description:
2812*4882a593Smuzhiyun  *   Issue soft reset to adapter by writing to PDQ Port Reset
2813*4882a593Smuzhiyun  *   register.  Use incoming reset type to tell adapter what
2814*4882a593Smuzhiyun  *   kind of reset operation to perform.
2815*4882a593Smuzhiyun  *
2816*4882a593Smuzhiyun  * Return Codes:
2817*4882a593Smuzhiyun  *   None
2818*4882a593Smuzhiyun  *
2819*4882a593Smuzhiyun  * Assumptions:
2820*4882a593Smuzhiyun  *   This routine merely issues a soft reset to the adapter.
2821*4882a593Smuzhiyun  *   It is expected that after this routine returns, the caller
2822*4882a593Smuzhiyun  *   will appropriately poll the Port Status register for the
2823*4882a593Smuzhiyun  *   adapter to enter the proper state.
2824*4882a593Smuzhiyun  *
2825*4882a593Smuzhiyun  * Side Effects:
2826*4882a593Smuzhiyun  *   Internal adapter registers are cleared.
2827*4882a593Smuzhiyun  */
2828*4882a593Smuzhiyun 
2829*4882a593Smuzhiyun static void dfx_hw_adap_reset(
2830*4882a593Smuzhiyun 	DFX_board_t	*bp,
2831*4882a593Smuzhiyun 	PI_UINT32	type
2832*4882a593Smuzhiyun 	)
2833*4882a593Smuzhiyun 
2834*4882a593Smuzhiyun 	{
2835*4882a593Smuzhiyun 	/* Set Reset type and assert reset */
2836*4882a593Smuzhiyun 
2837*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type);	/* tell adapter type of reset */
2838*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2839*4882a593Smuzhiyun 
2840*4882a593Smuzhiyun 	/* Wait at least 1 microsecond per the spec; we wait 20 just to be safe */
2841*4882a593Smuzhiyun 
2842*4882a593Smuzhiyun 	udelay(20);
2843*4882a593Smuzhiyun 
2844*4882a593Smuzhiyun 	/* Deassert reset */
2845*4882a593Smuzhiyun 
2846*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2847*4882a593Smuzhiyun 	}
2848*4882a593Smuzhiyun 
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun /*
2851*4882a593Smuzhiyun  * ========================
2852*4882a593Smuzhiyun  * = dfx_hw_adap_state_rd =
2853*4882a593Smuzhiyun  * ========================
2854*4882a593Smuzhiyun  *
2855*4882a593Smuzhiyun  * Overview:
2856*4882a593Smuzhiyun  *   Returns current adapter state
2857*4882a593Smuzhiyun  *
2858*4882a593Smuzhiyun  * Returns:
2859*4882a593Smuzhiyun  *   Adapter state per PDQ Port Specification
2860*4882a593Smuzhiyun  *
2861*4882a593Smuzhiyun  * Arguments:
2862*4882a593Smuzhiyun  *   bp - pointer to board information
2863*4882a593Smuzhiyun  *
2864*4882a593Smuzhiyun  * Functional Description:
2865*4882a593Smuzhiyun  *   Reads PDQ Port Status register and returns adapter state.
2866*4882a593Smuzhiyun  *
2867*4882a593Smuzhiyun  * Return Codes:
2868*4882a593Smuzhiyun  *   None
2869*4882a593Smuzhiyun  *
2870*4882a593Smuzhiyun  * Assumptions:
2871*4882a593Smuzhiyun  *   None
2872*4882a593Smuzhiyun  *
2873*4882a593Smuzhiyun  * Side Effects:
2874*4882a593Smuzhiyun  *   None
2875*4882a593Smuzhiyun  */
2876*4882a593Smuzhiyun 
2877*4882a593Smuzhiyun static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2878*4882a593Smuzhiyun 	{
2879*4882a593Smuzhiyun 	PI_UINT32 port_status;		/* Port Status register value */
2880*4882a593Smuzhiyun 
2881*4882a593Smuzhiyun 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2882*4882a593Smuzhiyun 	return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2883*4882a593Smuzhiyun 	}
2884*4882a593Smuzhiyun 
2885*4882a593Smuzhiyun 
2886*4882a593Smuzhiyun /*
2887*4882a593Smuzhiyun  * =====================
2888*4882a593Smuzhiyun  * = dfx_hw_dma_uninit =
2889*4882a593Smuzhiyun  * =====================
2890*4882a593Smuzhiyun  *
2891*4882a593Smuzhiyun  * Overview:
2892*4882a593Smuzhiyun  *   Brings adapter to DMA_UNAVAILABLE state
2893*4882a593Smuzhiyun  *
2894*4882a593Smuzhiyun  * Returns:
2895*4882a593Smuzhiyun  *   Condition code
2896*4882a593Smuzhiyun  *
2897*4882a593Smuzhiyun  * Arguments:
2898*4882a593Smuzhiyun  *   bp   - pointer to board information
2899*4882a593Smuzhiyun  *   type - type of reset to perform
2900*4882a593Smuzhiyun  *
2901*4882a593Smuzhiyun  * Functional Description:
2902*4882a593Smuzhiyun  *   Bring adapter to DMA_UNAVAILABLE state by performing the following:
2903*4882a593Smuzhiyun  *		1. Set reset type bit in Port Data A Register then reset adapter.
2904*4882a593Smuzhiyun  *		2. Check that adapter is in DMA_UNAVAILABLE state.
2905*4882a593Smuzhiyun  *
2906*4882a593Smuzhiyun  * Return Codes:
2907*4882a593Smuzhiyun  *   DFX_K_SUCCESS	  - adapter is in DMA_UNAVAILABLE state
2908*4882a593Smuzhiyun  *   DFX_K_HW_TIMEOUT - adapter did not reset properly
2909*4882a593Smuzhiyun  *
2910*4882a593Smuzhiyun  * Assumptions:
2911*4882a593Smuzhiyun  *   None
2912*4882a593Smuzhiyun  *
2913*4882a593Smuzhiyun  * Side Effects:
2914*4882a593Smuzhiyun  *   Internal adapter registers are cleared.
2915*4882a593Smuzhiyun  */
2916*4882a593Smuzhiyun 
2917*4882a593Smuzhiyun static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2918*4882a593Smuzhiyun 	{
2919*4882a593Smuzhiyun 	int timeout_cnt;	/* used in for loops */
2920*4882a593Smuzhiyun 
2921*4882a593Smuzhiyun 	/* Set reset type bit and reset adapter */
2922*4882a593Smuzhiyun 
2923*4882a593Smuzhiyun 	dfx_hw_adap_reset(bp, type);
2924*4882a593Smuzhiyun 
2925*4882a593Smuzhiyun 	/* Now wait for adapter to enter DMA_UNAVAILABLE state */
2926*4882a593Smuzhiyun 
2927*4882a593Smuzhiyun 	for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2928*4882a593Smuzhiyun 		{
2929*4882a593Smuzhiyun 		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2930*4882a593Smuzhiyun 			break;
2931*4882a593Smuzhiyun 		udelay(100);					/* wait for 100 microseconds */
2932*4882a593Smuzhiyun 		}
2933*4882a593Smuzhiyun 	if (timeout_cnt == 0)
2934*4882a593Smuzhiyun 		return DFX_K_HW_TIMEOUT;
2935*4882a593Smuzhiyun 	return DFX_K_SUCCESS;
2936*4882a593Smuzhiyun 	}
2937*4882a593Smuzhiyun 
2938*4882a593Smuzhiyun /*
2939*4882a593Smuzhiyun  *	Align an sk_buff data pointer to a power-of-2 boundary
2940*4882a593Smuzhiyun  *
2941*4882a593Smuzhiyun  */
2942*4882a593Smuzhiyun #ifdef DYNAMIC_BUFFERS
2943*4882a593Smuzhiyun static void my_skb_align(struct sk_buff *skb, int n)
2944*4882a593Smuzhiyun {
2945*4882a593Smuzhiyun 	unsigned long x = (unsigned long)skb->data;
2946*4882a593Smuzhiyun 	unsigned long v;
2947*4882a593Smuzhiyun 
2948*4882a593Smuzhiyun 	v = ALIGN(x, n);	/* Where we want to be */
2949*4882a593Smuzhiyun 
2950*4882a593Smuzhiyun 	skb_reserve(skb, v - x);
2951*4882a593Smuzhiyun }
2952*4882a593Smuzhiyun #endif
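/*
 * Worked sketch of the alignment performed by my_skb_align() above
 * (illustration only): ALIGN(x, n) rounds x up to the next multiple of n, and
 * skb_reserve() then advances skb->data by the difference.  For a power-of-two
 * n the rounding is just mask arithmetic:
 */
static inline unsigned long dfx_example_align_up(unsigned long x,
						 unsigned long n)
{
	/* Equivalent to ALIGN(x, n) for power-of-two n. */
	return (x + n - 1) & ~(n - 1);
}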
2953*4882a593Smuzhiyun 
2954*4882a593Smuzhiyun /*
2955*4882a593Smuzhiyun  * ================
2956*4882a593Smuzhiyun  * = dfx_rcv_init =
2957*4882a593Smuzhiyun  * ================
2958*4882a593Smuzhiyun  *
2959*4882a593Smuzhiyun  * Overview:
2960*4882a593Smuzhiyun  *   Produces buffers to adapter LLC Host receive descriptor block
2961*4882a593Smuzhiyun  *
2962*4882a593Smuzhiyun  * Returns:
2963*4882a593Smuzhiyun  *   None
2964*4882a593Smuzhiyun  *
2965*4882a593Smuzhiyun  * Arguments:
2966*4882a593Smuzhiyun  *   bp - pointer to board information
2967*4882a593Smuzhiyun  *   get_buffers - non-zero if buffers to be allocated
2968*4882a593Smuzhiyun  *
2969*4882a593Smuzhiyun  * Functional Description:
2970*4882a593Smuzhiyun  *   This routine can be called during dfx_adap_init() or during an adapter
2971*4882a593Smuzhiyun  *	 reset.  It initializes the descriptor block and produces all allocated
2972*4882a593Smuzhiyun  *   LLC Host queue receive buffers.
2973*4882a593Smuzhiyun  *
2974*4882a593Smuzhiyun  * Return Codes:
2975*4882a593Smuzhiyun  *   Return 0 on success or -ENOMEM if buffer allocation failed (when using
2976*4882a593Smuzhiyun  *   dynamic buffer allocation).  On allocation failure the buffers that
2977*4882a593Smuzhiyun  *   were already allocated are not released; the caller is expected to
2978*4882a593Smuzhiyun  *   release them.
2979*4882a593Smuzhiyun  *
2980*4882a593Smuzhiyun  * Assumptions:
2981*4882a593Smuzhiyun  *   The PDQ has been reset and the adapter and driver maintained Type 2
2982*4882a593Smuzhiyun  *   register indices are cleared.
2983*4882a593Smuzhiyun  *
2984*4882a593Smuzhiyun  * Side Effects:
2985*4882a593Smuzhiyun  *   Receive buffers are posted to the adapter LLC queue and the adapter
2986*4882a593Smuzhiyun  *   is notified.
2987*4882a593Smuzhiyun  */
2988*4882a593Smuzhiyun 
2989*4882a593Smuzhiyun static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2990*4882a593Smuzhiyun 	{
2991*4882a593Smuzhiyun 	int	i, j;					/* used in for loop */
2992*4882a593Smuzhiyun 
2993*4882a593Smuzhiyun 	/*
2994*4882a593Smuzhiyun 	 *  Since each receive buffer is a single fragment of same length, initialize
2995*4882a593Smuzhiyun 	 *  first longword in each receive descriptor for entire LLC Host descriptor
2996*4882a593Smuzhiyun 	 *  block.  Also initialize second longword in each receive descriptor with
2997*4882a593Smuzhiyun 	 *  physical address of receive buffer.  We'll always allocate receive
2998*4882a593Smuzhiyun 	 *  buffers in powers of 2 so that we can easily fill the 256 entry descriptor
2999*4882a593Smuzhiyun 	 *  block and produce new receive buffers by simply updating the receive
3000*4882a593Smuzhiyun 	 *  producer index.
3001*4882a593Smuzhiyun 	 *
3002*4882a593Smuzhiyun 	 * 	Assumptions:
3003*4882a593Smuzhiyun 	 *		To support all shipping versions of PDQ, the receive buffer size
3004*4882a593Smuzhiyun 	 *		must be mod 128 in length and the physical address must be 128 byte
3005*4882a593Smuzhiyun 	 *		aligned.  In other words, bits 0-6 of the length and address must
3006*4882a593Smuzhiyun 	 *		be zero for the following descriptor field entries to be correct on
3007*4882a593Smuzhiyun 	 *		all PDQ-based boards.  We guaranteed both requirements during
3008*4882a593Smuzhiyun 	 *		driver initialization when we allocated memory for the receive buffers.
3009*4882a593Smuzhiyun 	 */
3010*4882a593Smuzhiyun 
3011*4882a593Smuzhiyun 	if (get_buffers) {
3012*4882a593Smuzhiyun #ifdef DYNAMIC_BUFFERS
3013*4882a593Smuzhiyun 	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3014*4882a593Smuzhiyun 		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3015*4882a593Smuzhiyun 		{
3016*4882a593Smuzhiyun 			struct sk_buff *newskb;
3017*4882a593Smuzhiyun 			dma_addr_t dma_addr;
3018*4882a593Smuzhiyun 
3019*4882a593Smuzhiyun 			newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE,
3020*4882a593Smuzhiyun 						    GFP_NOIO);
3021*4882a593Smuzhiyun 			if (!newskb)
3022*4882a593Smuzhiyun 				return -ENOMEM;
3023*4882a593Smuzhiyun 			/*
3024*4882a593Smuzhiyun 			 * align to 128 bytes for compatibility with
3025*4882a593Smuzhiyun 			 * the old EISA boards.
3026*4882a593Smuzhiyun 			 */
3027*4882a593Smuzhiyun 
3028*4882a593Smuzhiyun 			my_skb_align(newskb, 128);
3029*4882a593Smuzhiyun 			dma_addr = dma_map_single(bp->bus_dev,
3030*4882a593Smuzhiyun 						  newskb->data,
3031*4882a593Smuzhiyun 						  PI_RCV_DATA_K_SIZE_MAX,
3032*4882a593Smuzhiyun 						  DMA_FROM_DEVICE);
3033*4882a593Smuzhiyun 			if (dma_mapping_error(bp->bus_dev, dma_addr)) {
3034*4882a593Smuzhiyun 				dev_kfree_skb(newskb);
3035*4882a593Smuzhiyun 				return -ENOMEM;
3036*4882a593Smuzhiyun 			}
3037*4882a593Smuzhiyun 			bp->descr_block_virt->rcv_data[i + j].long_0 =
3038*4882a593Smuzhiyun 				(u32)(PI_RCV_DESCR_M_SOP |
3039*4882a593Smuzhiyun 				      ((PI_RCV_DATA_K_SIZE_MAX /
3040*4882a593Smuzhiyun 					PI_ALIGN_K_RCV_DATA_BUFF) <<
3041*4882a593Smuzhiyun 				       PI_RCV_DESCR_V_SEG_LEN));
3042*4882a593Smuzhiyun 			bp->descr_block_virt->rcv_data[i + j].long_1 =
3043*4882a593Smuzhiyun 				(u32)dma_addr;
3044*4882a593Smuzhiyun 
3045*4882a593Smuzhiyun 			/*
3046*4882a593Smuzhiyun 			 * p_rcv_buff_va is only used inside the
3047*4882a593Smuzhiyun 			 * kernel so we put the skb pointer here.
3048*4882a593Smuzhiyun 			 */
3049*4882a593Smuzhiyun 			bp->p_rcv_buff_va[i+j] = (char *) newskb;
3050*4882a593Smuzhiyun 		}
3051*4882a593Smuzhiyun #else
3052*4882a593Smuzhiyun 	for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
3053*4882a593Smuzhiyun 		for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3054*4882a593Smuzhiyun 			{
3055*4882a593Smuzhiyun 			bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
3056*4882a593Smuzhiyun 				((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
3057*4882a593Smuzhiyun 			bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
3058*4882a593Smuzhiyun 			bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
3059*4882a593Smuzhiyun 			}
3060*4882a593Smuzhiyun #endif
3061*4882a593Smuzhiyun 	}
3062*4882a593Smuzhiyun 
3063*4882a593Smuzhiyun 	/* Update receive producer and Type 2 register */
3064*4882a593Smuzhiyun 
3065*4882a593Smuzhiyun 	bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
3066*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3067*4882a593Smuzhiyun 	return 0;
3068*4882a593Smuzhiyun 	}
3069*4882a593Smuzhiyun 
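/*
 * Sketch of the descriptor indexing used by dfx_rcv_init() above
 * (illustration only).  In the non-DYNAMIC_BUFFERS path, driver buffer i is
 * posted at descriptor entries i, i + rcv_bufs_to_post,
 * i + 2 * rcv_bufs_to_post, ... across the 256-entry block, so an entry
 * number maps back to its backing buffer with a simple modulo.
 */
static inline unsigned int dfx_example_entry_to_buffer(unsigned int entry,
						       unsigned int bufs_to_post)
{
	return entry % bufs_to_post;	/* which driver buffer backs this entry */
}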
3070*4882a593Smuzhiyun 
3071*4882a593Smuzhiyun /*
3072*4882a593Smuzhiyun  * =========================
3073*4882a593Smuzhiyun  * = dfx_rcv_queue_process =
3074*4882a593Smuzhiyun  * =========================
3075*4882a593Smuzhiyun  *
3076*4882a593Smuzhiyun  * Overview:
3077*4882a593Smuzhiyun  *   Process received LLC frames.
3078*4882a593Smuzhiyun  *
3079*4882a593Smuzhiyun  * Returns:
3080*4882a593Smuzhiyun  *   None
3081*4882a593Smuzhiyun  *
3082*4882a593Smuzhiyun  * Arguments:
3083*4882a593Smuzhiyun  *   bp - pointer to board information
3084*4882a593Smuzhiyun  *
3085*4882a593Smuzhiyun  * Functional Description:
3086*4882a593Smuzhiyun  *   Received LLC frames are processed until there are no more consumed frames.
3087*4882a593Smuzhiyun  *   Once all frames are processed, the receive buffers are returned to the
3088*4882a593Smuzhiyun  *   adapter.  Note that this algorithm fixes the length of time that can be spent
3089*4882a593Smuzhiyun  *   in this routine, because there are a fixed number of receive buffers to
3090*4882a593Smuzhiyun  *   process and buffers are not produced until this routine exits and returns
3091*4882a593Smuzhiyun  *   to the ISR.
3092*4882a593Smuzhiyun  *
3093*4882a593Smuzhiyun  * Return Codes:
3094*4882a593Smuzhiyun  *   None
3095*4882a593Smuzhiyun  *
3096*4882a593Smuzhiyun  * Assumptions:
3097*4882a593Smuzhiyun  *   None
3098*4882a593Smuzhiyun  *
3099*4882a593Smuzhiyun  * Side Effects:
3100*4882a593Smuzhiyun  *   None
3101*4882a593Smuzhiyun  */
3102*4882a593Smuzhiyun 
3103*4882a593Smuzhiyun static void dfx_rcv_queue_process(
3104*4882a593Smuzhiyun 	DFX_board_t *bp
3105*4882a593Smuzhiyun 	)
3106*4882a593Smuzhiyun 
3107*4882a593Smuzhiyun 	{
3108*4882a593Smuzhiyun 	PI_TYPE_2_CONSUMER	*p_type_2_cons;		/* ptr to rcv/xmt consumer block register */
3109*4882a593Smuzhiyun 	char				*p_buff;			/* ptr to start of packet receive buffer (FMC descriptor) */
3110*4882a593Smuzhiyun 	u32					descr, pkt_len;		/* FMC descriptor field and packet length */
3111*4882a593Smuzhiyun 	struct sk_buff		*skb = NULL;			/* pointer to a sk_buff to hold incoming packet data */
3112*4882a593Smuzhiyun 
3113*4882a593Smuzhiyun 	/* Service all consumed LLC receive frames */
3114*4882a593Smuzhiyun 
3115*4882a593Smuzhiyun 	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3116*4882a593Smuzhiyun 	while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
3117*4882a593Smuzhiyun 		{
3118*4882a593Smuzhiyun 		/* Process any errors */
3119*4882a593Smuzhiyun 		dma_addr_t dma_addr;
3120*4882a593Smuzhiyun 		int entry;
3121*4882a593Smuzhiyun 
3122*4882a593Smuzhiyun 		entry = bp->rcv_xmt_reg.index.rcv_comp;
3123*4882a593Smuzhiyun #ifdef DYNAMIC_BUFFERS
3124*4882a593Smuzhiyun 		p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3125*4882a593Smuzhiyun #else
3126*4882a593Smuzhiyun 		p_buff = bp->p_rcv_buff_va[entry];
3127*4882a593Smuzhiyun #endif
3128*4882a593Smuzhiyun 		dma_addr = bp->descr_block_virt->rcv_data[entry].long_1;
3129*4882a593Smuzhiyun 		dma_sync_single_for_cpu(bp->bus_dev,
3130*4882a593Smuzhiyun 					dma_addr + RCV_BUFF_K_DESCR,
3131*4882a593Smuzhiyun 					sizeof(u32),
3132*4882a593Smuzhiyun 					DMA_FROM_DEVICE);
3133*4882a593Smuzhiyun 		memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3134*4882a593Smuzhiyun 
3135*4882a593Smuzhiyun 		if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
3136*4882a593Smuzhiyun 			{
3137*4882a593Smuzhiyun 			if (descr & PI_FMC_DESCR_M_RCC_CRC)
3138*4882a593Smuzhiyun 				bp->rcv_crc_errors++;
3139*4882a593Smuzhiyun 			else
3140*4882a593Smuzhiyun 				bp->rcv_frame_status_errors++;
3141*4882a593Smuzhiyun 			}
3142*4882a593Smuzhiyun 		else
3143*4882a593Smuzhiyun 		{
3144*4882a593Smuzhiyun 			int rx_in_place = 0;
3145*4882a593Smuzhiyun 
3146*4882a593Smuzhiyun 			/* The frame was received without errors - verify packet length */
3147*4882a593Smuzhiyun 
3148*4882a593Smuzhiyun 			pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
3149*4882a593Smuzhiyun 			pkt_len -= 4;				/* subtract 4 byte CRC */
3150*4882a593Smuzhiyun 			if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3151*4882a593Smuzhiyun 				bp->rcv_length_errors++;
3152*4882a593Smuzhiyun 			else {
3153*4882a593Smuzhiyun #ifdef DYNAMIC_BUFFERS
3154*4882a593Smuzhiyun 				struct sk_buff *newskb = NULL;
3155*4882a593Smuzhiyun 
3156*4882a593Smuzhiyun 				if (pkt_len > SKBUFF_RX_COPYBREAK) {
3157*4882a593Smuzhiyun 					dma_addr_t new_dma_addr;
3158*4882a593Smuzhiyun 
3159*4882a593Smuzhiyun 					newskb = netdev_alloc_skb(bp->dev,
3160*4882a593Smuzhiyun 								  NEW_SKB_SIZE);
3161*4882a593Smuzhiyun 					if (newskb){
3162*4882a593Smuzhiyun 						my_skb_align(newskb, 128);
3163*4882a593Smuzhiyun 						new_dma_addr = dma_map_single(
3164*4882a593Smuzhiyun 								bp->bus_dev,
3165*4882a593Smuzhiyun 								newskb->data,
3166*4882a593Smuzhiyun 								PI_RCV_DATA_K_SIZE_MAX,
3167*4882a593Smuzhiyun 								DMA_FROM_DEVICE);
3168*4882a593Smuzhiyun 						if (dma_mapping_error(
3169*4882a593Smuzhiyun 								bp->bus_dev,
3170*4882a593Smuzhiyun 								new_dma_addr)) {
3171*4882a593Smuzhiyun 							dev_kfree_skb(newskb);
3172*4882a593Smuzhiyun 							newskb = NULL;
3173*4882a593Smuzhiyun 						}
3174*4882a593Smuzhiyun 					}
3175*4882a593Smuzhiyun 					if (newskb) {
3176*4882a593Smuzhiyun 						rx_in_place = 1;
3177*4882a593Smuzhiyun 
3178*4882a593Smuzhiyun 						skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
3179*4882a593Smuzhiyun 						dma_unmap_single(bp->bus_dev,
3180*4882a593Smuzhiyun 							dma_addr,
3181*4882a593Smuzhiyun 							PI_RCV_DATA_K_SIZE_MAX,
3182*4882a593Smuzhiyun 							DMA_FROM_DEVICE);
3183*4882a593Smuzhiyun 						skb_reserve(skb, RCV_BUFF_K_PADDING);
3184*4882a593Smuzhiyun 						bp->p_rcv_buff_va[entry] = (char *)newskb;
3185*4882a593Smuzhiyun 						bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr;
3186*4882a593Smuzhiyun 					}
3187*4882a593Smuzhiyun 				}
3188*4882a593Smuzhiyun 				if (!newskb)
3189*4882a593Smuzhiyun #endif
3190*4882a593Smuzhiyun 					/* Alloc new buffer to pass up,
3191*4882a593Smuzhiyun 					 * add room for PRH. */
3192*4882a593Smuzhiyun 					skb = netdev_alloc_skb(bp->dev,
3193*4882a593Smuzhiyun 							       pkt_len + 3);
3194*4882a593Smuzhiyun 				if (skb == NULL)
3195*4882a593Smuzhiyun 					{
3196*4882a593Smuzhiyun 					printk("%s: Could not allocate receive buffer.  Dropping packet.\n", bp->dev->name);
3197*4882a593Smuzhiyun 					bp->rcv_discards++;
3198*4882a593Smuzhiyun 					break;
3199*4882a593Smuzhiyun 					}
3200*4882a593Smuzhiyun 				else {
3201*4882a593Smuzhiyun 					if (!rx_in_place) {
3202*4882a593Smuzhiyun 						/* Receive buffer allocated, pass receive packet up */
3203*4882a593Smuzhiyun 						dma_sync_single_for_cpu(
3204*4882a593Smuzhiyun 							bp->bus_dev,
3205*4882a593Smuzhiyun 							dma_addr +
3206*4882a593Smuzhiyun 							RCV_BUFF_K_PADDING,
3207*4882a593Smuzhiyun 							pkt_len + 3,
3208*4882a593Smuzhiyun 							DMA_FROM_DEVICE);
3209*4882a593Smuzhiyun 
3210*4882a593Smuzhiyun 						skb_copy_to_linear_data(skb,
3211*4882a593Smuzhiyun 							       p_buff + RCV_BUFF_K_PADDING,
3212*4882a593Smuzhiyun 							       pkt_len + 3);
3213*4882a593Smuzhiyun 					}
3214*4882a593Smuzhiyun 
3215*4882a593Smuzhiyun 					skb_reserve(skb,3);		/* adjust data field so that it points to FC byte */
3216*4882a593Smuzhiyun 					skb_put(skb, pkt_len);		/* pass up packet length, NOT including CRC */
3217*4882a593Smuzhiyun 					skb->protocol = fddi_type_trans(skb, bp->dev);
3218*4882a593Smuzhiyun 					bp->rcv_total_bytes += skb->len;
3219*4882a593Smuzhiyun 					netif_rx(skb);
3220*4882a593Smuzhiyun 
3221*4882a593Smuzhiyun 					/* Update the rcv counters */
3222*4882a593Smuzhiyun 					bp->rcv_total_frames++;
3223*4882a593Smuzhiyun 					if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
3224*4882a593Smuzhiyun 						bp->rcv_multicast_frames++;
3225*4882a593Smuzhiyun 				}
3226*4882a593Smuzhiyun 			}
3227*4882a593Smuzhiyun 			}
3228*4882a593Smuzhiyun 
3229*4882a593Smuzhiyun 		/*
3230*4882a593Smuzhiyun 		 * Advance the producer (for recycling) and advance the completion
3231*4882a593Smuzhiyun 		 * (for servicing received frames).  Note that it is okay to
3232*4882a593Smuzhiyun 		 * advance the producer without checking that it passes the
3233*4882a593Smuzhiyun 		 * completion index because they are both advanced at the same
3234*4882a593Smuzhiyun 		 * rate.
3235*4882a593Smuzhiyun 		 */
3236*4882a593Smuzhiyun 
3237*4882a593Smuzhiyun 		bp->rcv_xmt_reg.index.rcv_prod += 1;
3238*4882a593Smuzhiyun 		bp->rcv_xmt_reg.index.rcv_comp += 1;
3239*4882a593Smuzhiyun 		}
3240*4882a593Smuzhiyun 	}
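
#ifdef DYNAMIC_BUFFERS
/*
 * Editor's note: a minimal sketch, not part of the driver, of the receive
 * copybreak policy used in dfx_rcv_queue_process() above: frames larger than
 * SKBUFF_RX_COPYBREAK are passed up "in place" (the ring skb is handed to the
 * stack and replaced with a freshly mapped one), while smaller frames are
 * copied into a right-sized skb so the ring buffer can be reposted untouched.
 * The helper name is hypothetical.
 */
static int __maybe_unused dfx_example_rcv_use_in_place(u32 pkt_len)
	{
	return pkt_len > SKBUFF_RX_COPYBREAK;
	}
#endif /* DYNAMIC_BUFFERS */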
3241*4882a593Smuzhiyun 
3242*4882a593Smuzhiyun 
3243*4882a593Smuzhiyun /*
3244*4882a593Smuzhiyun  * =====================
3245*4882a593Smuzhiyun  * = dfx_xmt_queue_pkt =
3246*4882a593Smuzhiyun  * =====================
3247*4882a593Smuzhiyun  *
3248*4882a593Smuzhiyun  * Overview:
3249*4882a593Smuzhiyun  *   Queues packets for transmission
3250*4882a593Smuzhiyun  *
3251*4882a593Smuzhiyun  * Returns:
3252*4882a593Smuzhiyun  *   Condition code
3253*4882a593Smuzhiyun  *
3254*4882a593Smuzhiyun  * Arguments:
3255*4882a593Smuzhiyun  *   skb - pointer to sk_buff to queue for transmission
3256*4882a593Smuzhiyun  *   dev - pointer to device information
3257*4882a593Smuzhiyun  *
3258*4882a593Smuzhiyun  * Functional Description:
3259*4882a593Smuzhiyun  *   Here we assume that an incoming skb transmit request
3260*4882a593Smuzhiyun  *   is contained in a single physically contiguous buffer
3261*4882a593Smuzhiyun  *   in which the virtual address of the start of packet
3262*4882a593Smuzhiyun  *   (skb->data) can be converted to a physical address
3263*4882a593Smuzhiyun  *   by using dma_map_single().
3264*4882a593Smuzhiyun  *
3265*4882a593Smuzhiyun  *   Since the adapter architecture requires a three byte
3266*4882a593Smuzhiyun  *   packet request header to prepend the start of packet,
3267*4882a593Smuzhiyun  *   we'll write the three byte field immediately prior to
3268*4882a593Smuzhiyun  *   the FC byte.  This assumption is valid because we've
3269*4882a593Smuzhiyun  *   ensured that dev->hard_header_len includes three pad
3270*4882a593Smuzhiyun  *   bytes.  By posting a single fragment to the adapter,
3271*4882a593Smuzhiyun  *   we'll reduce the number of descriptor fetches and
3272*4882a593Smuzhiyun  *   bus traffic needed to send the request.
3273*4882a593Smuzhiyun  *
3274*4882a593Smuzhiyun  *   Also, we can't free the skb until after it's been DMA'd
3275*4882a593Smuzhiyun  *   out by the adapter, so we'll queue it in the driver and
3276*4882a593Smuzhiyun  *   return it in dfx_xmt_done.
3277*4882a593Smuzhiyun  *
3278*4882a593Smuzhiyun  * Return Codes:
3279*4882a593Smuzhiyun  *   NETDEV_TX_OK (0) - driver queued or dropped the packet (bad length or link unavailable)
3280*4882a593Smuzhiyun  *   NETDEV_TX_BUSY (1) - caller should requeue the sk_buff for later transmission
3281*4882a593Smuzhiyun  *
3282*4882a593Smuzhiyun  * Assumptions:
3283*4882a593Smuzhiyun  *	 First and foremost, we assume the incoming skb pointer
3284*4882a593Smuzhiyun  *   is NOT NULL and is pointing to a valid sk_buff structure.
3285*4882a593Smuzhiyun  *
3286*4882a593Smuzhiyun  *   The outgoing packet is complete, starting with the
3287*4882a593Smuzhiyun  *   frame control byte including the last byte of data,
3288*4882a593Smuzhiyun  *   but NOT including the 4 byte CRC.  We'll let the
3289*4882a593Smuzhiyun  *   adapter hardware generate and append the CRC.
3290*4882a593Smuzhiyun  *
3291*4882a593Smuzhiyun  *   The entire packet is stored in one physically
3292*4882a593Smuzhiyun  *   contiguous buffer which is not cached and whose
3293*4882a593Smuzhiyun  *   32-bit physical address can be determined.
3294*4882a593Smuzhiyun  *
3295*4882a593Smuzhiyun  *   It's vital that this routine is NOT reentered for the
3296*4882a593Smuzhiyun  *   same board and that the OS is not in another section of
3297*4882a593Smuzhiyun  *   code (e.g. dfx_int_common) for the same board on a
3298*4882a593Smuzhiyun  *   different thread.
3299*4882a593Smuzhiyun  *
3300*4882a593Smuzhiyun  * Side Effects:
3301*4882a593Smuzhiyun  *   None
3302*4882a593Smuzhiyun  */
3303*4882a593Smuzhiyun 
3304*4882a593Smuzhiyun static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
3305*4882a593Smuzhiyun 				     struct net_device *dev)
3306*4882a593Smuzhiyun 	{
3307*4882a593Smuzhiyun 	DFX_board_t		*bp = netdev_priv(dev);
3308*4882a593Smuzhiyun 	u8			prod;				/* local transmit producer index */
3309*4882a593Smuzhiyun 	PI_XMT_DESCR		*p_xmt_descr;		/* ptr to transmit descriptor block entry */
3310*4882a593Smuzhiyun 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3311*4882a593Smuzhiyun 	dma_addr_t		dma_addr;
3312*4882a593Smuzhiyun 	unsigned long		flags;
3313*4882a593Smuzhiyun 
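	/* Stop the queue while this frame is handled; it is woken again in
	 * every path below that hands off or drops the skb, and the
	 * NETDEV_TX_BUSY paths leave it stopped. */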
3314*4882a593Smuzhiyun 	netif_stop_queue(dev);
3315*4882a593Smuzhiyun 
3316*4882a593Smuzhiyun 	/*
3317*4882a593Smuzhiyun 	 * Verify that incoming transmit request is OK
3318*4882a593Smuzhiyun 	 *
3319*4882a593Smuzhiyun 	 * Note: The packet size check is consistent with other
3320*4882a593Smuzhiyun 	 *		 Linux device drivers, although the correct packet
3321*4882a593Smuzhiyun 	 *		 size should be verified before calling the
3322*4882a593Smuzhiyun 	 *		 transmit routine.
3323*4882a593Smuzhiyun 	 */
3324*4882a593Smuzhiyun 
3325*4882a593Smuzhiyun 	if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3326*4882a593Smuzhiyun 	{
3327*4882a593Smuzhiyun 		printk("%s: Invalid packet length - %u bytes\n",
3328*4882a593Smuzhiyun 			dev->name, skb->len);
3329*4882a593Smuzhiyun 		bp->xmt_length_errors++;		/* bump error counter */
3330*4882a593Smuzhiyun 		netif_wake_queue(dev);
3331*4882a593Smuzhiyun 		dev_kfree_skb(skb);
3332*4882a593Smuzhiyun 		return NETDEV_TX_OK;			/* return "success" */
3333*4882a593Smuzhiyun 	}
3334*4882a593Smuzhiyun 	/*
3335*4882a593Smuzhiyun 	 * See if adapter link is available, if not, free buffer
3336*4882a593Smuzhiyun 	 *
3337*4882a593Smuzhiyun 	 * Note: If the link isn't available, free buffer and return 0
3338*4882a593Smuzhiyun 	 *		 rather than tell the upper layer to requeue the packet.
3339*4882a593Smuzhiyun 	 *		 The methodology here is that by the time the link
3340*4882a593Smuzhiyun 	 *		 becomes available, the packet to be sent will be
3341*4882a593Smuzhiyun 	 *		 fairly stale.  By simply dropping the packet, the
3342*4882a593Smuzhiyun 	 *		 higher layer protocols will eventually time out
3343*4882a593Smuzhiyun 	 *		 waiting for response packets that they won't receive.
3344*4882a593Smuzhiyun 	 */
3345*4882a593Smuzhiyun 
3346*4882a593Smuzhiyun 	if (bp->link_available == PI_K_FALSE)
3347*4882a593Smuzhiyun 		{
3348*4882a593Smuzhiyun 		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL)	/* is link really available? */
3349*4882a593Smuzhiyun 			bp->link_available = PI_K_TRUE;		/* if so, set flag and continue */
3350*4882a593Smuzhiyun 		else
3351*4882a593Smuzhiyun 			{
3352*4882a593Smuzhiyun 			bp->xmt_discards++;					/* bump error counter */
3353*4882a593Smuzhiyun 			dev_kfree_skb(skb);		/* free sk_buff now */
3354*4882a593Smuzhiyun 			netif_wake_queue(dev);
3355*4882a593Smuzhiyun 			return NETDEV_TX_OK;		/* return "success" */
3356*4882a593Smuzhiyun 			}
3357*4882a593Smuzhiyun 		}
3358*4882a593Smuzhiyun 
3359*4882a593Smuzhiyun 	/* Write the three PRH bytes immediately before the FC byte */
3360*4882a593Smuzhiyun 
3361*4882a593Smuzhiyun 	skb_push(skb, 3);
3362*4882a593Smuzhiyun 	skb->data[0] = DFX_PRH0_BYTE;	/* these byte values are defined */
3363*4882a593Smuzhiyun 	skb->data[1] = DFX_PRH1_BYTE;	/* in the Motorola FDDI MAC chip */
3364*4882a593Smuzhiyun 	skb->data[2] = DFX_PRH2_BYTE;	/* specification */
3365*4882a593Smuzhiyun 
3366*4882a593Smuzhiyun 	dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len,
3367*4882a593Smuzhiyun 				  DMA_TO_DEVICE);
3368*4882a593Smuzhiyun 	if (dma_mapping_error(bp->bus_dev, dma_addr)) {
3369*4882a593Smuzhiyun 		skb_pull(skb, 3);
3370*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
3371*4882a593Smuzhiyun 	}
3372*4882a593Smuzhiyun 
3373*4882a593Smuzhiyun 	spin_lock_irqsave(&bp->lock, flags);
3374*4882a593Smuzhiyun 
3375*4882a593Smuzhiyun 	/* Get the current producer and the next free xmt data descriptor */
3376*4882a593Smuzhiyun 
3377*4882a593Smuzhiyun 	prod		= bp->rcv_xmt_reg.index.xmt_prod;
3378*4882a593Smuzhiyun 	p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
3379*4882a593Smuzhiyun 
3380*4882a593Smuzhiyun 	/*
3381*4882a593Smuzhiyun 	 * Get pointer to auxiliary queue entry to contain information
3382*4882a593Smuzhiyun 	 * for this packet.
3383*4882a593Smuzhiyun 	 *
3384*4882a593Smuzhiyun 	 * Note: The current xmt producer index will become the
3385*4882a593Smuzhiyun 	 *	 current xmt completion index when we complete this
3386*4882a593Smuzhiyun 	 *	 packet later on.  So, we'll get the pointer to the
3387*4882a593Smuzhiyun 	 *	 next auxiliary queue entry now before we bump the
3388*4882a593Smuzhiyun 	 *	 producer index.
3389*4882a593Smuzhiyun 	 */
3390*4882a593Smuzhiyun 
3391*4882a593Smuzhiyun 	p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]);	/* also bump producer index */
3392*4882a593Smuzhiyun 
3393*4882a593Smuzhiyun 	/*
3394*4882a593Smuzhiyun 	 * Write the descriptor with buffer info and bump producer
3395*4882a593Smuzhiyun 	 *
3396*4882a593Smuzhiyun 	 * Note: Since we need to start DMA from the packet request
3397*4882a593Smuzhiyun 	 *		 header, we'll add 3 bytes to the DMA buffer length,
3398*4882a593Smuzhiyun 	 *		 and we'll determine the physical address of the
3399*4882a593Smuzhiyun 	 *		 buffer from the PRH, not skb->data.
3400*4882a593Smuzhiyun 	 *
3401*4882a593Smuzhiyun 	 * Assumptions:
3402*4882a593Smuzhiyun 	 *		 1. Packet starts with the frame control (FC) byte
3403*4882a593Smuzhiyun 	 *		    at skb->data.
3404*4882a593Smuzhiyun 	 *		 2. The 4-byte CRC is not appended to the buffer or
3405*4882a593Smuzhiyun 	 *			included in the length.
3406*4882a593Smuzhiyun 	 *		 3. Packet length (skb->len) is from FC to end of
3407*4882a593Smuzhiyun 	 *			data, inclusive.
3408*4882a593Smuzhiyun 	 *		 4. The packet length does not exceed the maximum
3409*4882a593Smuzhiyun 	 *			FDDI LLC frame length of 4491 bytes.
3410*4882a593Smuzhiyun 	 *		 5. The entire packet is contained in a physically
3411*4882a593Smuzhiyun 	 *			contiguous, non-cached, locked memory space
3412*4882a593Smuzhiyun 	 *			comprised of a single buffer pointed to by
3413*4882a593Smuzhiyun 	 *			skb->data.
3414*4882a593Smuzhiyun 	 *		 6. The physical address of the start of packet
3415*4882a593Smuzhiyun 	 *			can be determined from the virtual address
3416*4882a593Smuzhiyun 	 *			by using dma_map_single() and is only 32 bits
3417*4882a593Smuzhiyun 	 *			wide.
3418*4882a593Smuzhiyun 	 */
3419*4882a593Smuzhiyun 
3420*4882a593Smuzhiyun 	p_xmt_descr->long_0	= (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
3421*4882a593Smuzhiyun 	p_xmt_descr->long_1 = (u32)dma_addr;
3422*4882a593Smuzhiyun 
3423*4882a593Smuzhiyun 	/*
3424*4882a593Smuzhiyun 	 * Verify that descriptor is actually available
3425*4882a593Smuzhiyun 	 *
3426*4882a593Smuzhiyun 	 * Note: If descriptor isn't available, return 1 which tells
3427*4882a593Smuzhiyun 	 *	 the upper layer to requeue the packet for later
3428*4882a593Smuzhiyun 	 *	 transmission.
3429*4882a593Smuzhiyun 	 *
3430*4882a593Smuzhiyun 	 *       We need to ensure that the producer never reaches the
3431*4882a593Smuzhiyun 	 *	 completion, except to indicate that the queue is empty.
3432*4882a593Smuzhiyun 	 */
3433*4882a593Smuzhiyun 
3434*4882a593Smuzhiyun 	if (prod == bp->rcv_xmt_reg.index.xmt_comp)
3435*4882a593Smuzhiyun 	{
3436*4882a593Smuzhiyun 		skb_pull(skb,3);
3437*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bp->lock, flags);
3438*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;	/* requeue packet for later */
3439*4882a593Smuzhiyun 	}
3440*4882a593Smuzhiyun 
3441*4882a593Smuzhiyun 	/*
3442*4882a593Smuzhiyun 	 * Save info for this packet for xmt done indication routine
3443*4882a593Smuzhiyun 	 *
3444*4882a593Smuzhiyun 	 * Normally, we'd save the producer index in the p_xmt_drv_descr
3445*4882a593Smuzhiyun 	 * structure so that we'd have it handy when we complete this
3446*4882a593Smuzhiyun 	 * packet later (in dfx_xmt_done).  However, since the current
3447*4882a593Smuzhiyun 	 * transmit architecture guarantees a single fragment for the
3448*4882a593Smuzhiyun 	 * entire packet, we can simply bump the completion index by
3449*4882a593Smuzhiyun 	 * one (1) for each completed packet.
3450*4882a593Smuzhiyun 	 *
3451*4882a593Smuzhiyun 	 * Note: If this assumption changes and we're presented with
3452*4882a593Smuzhiyun 	 *	 an inconsistent number of transmit fragments for packet
3453*4882a593Smuzhiyun 	 *	 data, we'll need to modify this code to save the current
3454*4882a593Smuzhiyun 	 *	 transmit producer index.
3455*4882a593Smuzhiyun 	 */
3456*4882a593Smuzhiyun 
3457*4882a593Smuzhiyun 	p_xmt_drv_descr->p_skb = skb;
3458*4882a593Smuzhiyun 
3459*4882a593Smuzhiyun 	/* Update Type 2 register */
3460*4882a593Smuzhiyun 
3461*4882a593Smuzhiyun 	bp->rcv_xmt_reg.index.xmt_prod = prod;
3462*4882a593Smuzhiyun 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3463*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bp->lock, flags);
3464*4882a593Smuzhiyun 	netif_wake_queue(dev);
3465*4882a593Smuzhiyun 	return NETDEV_TX_OK;	/* packet queued to adapter */
3466*4882a593Smuzhiyun 	}
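
/*
 * Editor's note: illustrative sketches, not part of the driver, of the two
 * building blocks of dfx_xmt_queue_pkt() above: prepending the three-byte
 * packet request header immediately before the FC byte (headroom for which
 * is guaranteed by the pad bytes counted in dev->hard_header_len), and
 * packing the single-fragment transmit descriptor longword 0.  The helper
 * names are hypothetical; the constants and skb helpers are those used above.
 */
static void __maybe_unused dfx_example_xmt_prepend_prh(struct sk_buff *skb)
	{
	skb_push(skb, 3);
	skb->data[0] = DFX_PRH0_BYTE;
	skb->data[1] = DFX_PRH1_BYTE;
	skb->data[2] = DFX_PRH2_BYTE;
	}

static u32 __maybe_unused dfx_example_xmt_descr_long_0(unsigned int dma_len)
	{
	/* One fragment per packet: mark it both SOP and EOP and encode the
	 * DMA length (PRH plus frame) in the SEG_LEN field. */
	return (u32)(PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP |
		     (dma_len << PI_XMT_DESCR_V_SEG_LEN));
	}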
3467*4882a593Smuzhiyun 
3468*4882a593Smuzhiyun 
3469*4882a593Smuzhiyun /*
3470*4882a593Smuzhiyun  * ================
3471*4882a593Smuzhiyun  * = dfx_xmt_done =
3472*4882a593Smuzhiyun  * ================
3473*4882a593Smuzhiyun  *
3474*4882a593Smuzhiyun  * Overview:
3475*4882a593Smuzhiyun  *   Processes all frames that have been transmitted.
3476*4882a593Smuzhiyun  *
3477*4882a593Smuzhiyun  * Returns:
3478*4882a593Smuzhiyun  *   None
3479*4882a593Smuzhiyun  *
3480*4882a593Smuzhiyun  * Arguments:
3481*4882a593Smuzhiyun  *   bp - pointer to board information
3482*4882a593Smuzhiyun  *
3483*4882a593Smuzhiyun  * Functional Description:
3484*4882a593Smuzhiyun  *   For all consumed transmit descriptors that have not
3485*4882a593Smuzhiyun  *   yet been completed, we'll free the skb we were holding
3486*4882a593Smuzhiyun  *   onto using dev_kfree_skb and bump the appropriate
3487*4882a593Smuzhiyun  *   counters.
3488*4882a593Smuzhiyun  *
3489*4882a593Smuzhiyun  * Return Codes:
3490*4882a593Smuzhiyun  *   None
3491*4882a593Smuzhiyun  *
3492*4882a593Smuzhiyun  * Assumptions:
3493*4882a593Smuzhiyun  *   The Type 2 register is not updated in this routine.  It is
3494*4882a593Smuzhiyun  *   assumed that it will be updated in the ISR when dfx_xmt_done
3495*4882a593Smuzhiyun  *   returns.
3496*4882a593Smuzhiyun  *
3497*4882a593Smuzhiyun  * Side Effects:
3498*4882a593Smuzhiyun  *   None
3499*4882a593Smuzhiyun  */
3500*4882a593Smuzhiyun 
3501*4882a593Smuzhiyun static int dfx_xmt_done(DFX_board_t *bp)
3502*4882a593Smuzhiyun 	{
3503*4882a593Smuzhiyun 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3504*4882a593Smuzhiyun 	PI_TYPE_2_CONSUMER	*p_type_2_cons;		/* ptr to rcv/xmt consumer block register */
3505*4882a593Smuzhiyun 	u8			comp;			/* local transmit completion index */
3506*4882a593Smuzhiyun 	int 			freed = 0;		/* buffers freed */
3507*4882a593Smuzhiyun 
3508*4882a593Smuzhiyun 	/* Service all consumed transmit frames */
3509*4882a593Smuzhiyun 
3510*4882a593Smuzhiyun 	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3511*4882a593Smuzhiyun 	while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3512*4882a593Smuzhiyun 		{
3513*4882a593Smuzhiyun 		/* Get pointer to the transmit driver descriptor block information */
3514*4882a593Smuzhiyun 
3515*4882a593Smuzhiyun 		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3516*4882a593Smuzhiyun 
3517*4882a593Smuzhiyun 		/* Increment transmit counters */
3518*4882a593Smuzhiyun 
3519*4882a593Smuzhiyun 		bp->xmt_total_frames++;
3520*4882a593Smuzhiyun 		bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3521*4882a593Smuzhiyun 
3522*4882a593Smuzhiyun 		/* Return skb to operating system */
3523*4882a593Smuzhiyun 		comp = bp->rcv_xmt_reg.index.xmt_comp;
3524*4882a593Smuzhiyun 		dma_unmap_single(bp->bus_dev,
3525*4882a593Smuzhiyun 				 bp->descr_block_virt->xmt_data[comp].long_1,
3526*4882a593Smuzhiyun 				 p_xmt_drv_descr->p_skb->len,
3527*4882a593Smuzhiyun 				 DMA_TO_DEVICE);
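		/* The frame was sent, so free the skb as "consumed" rather
		 * than dropped; dev_consume_skb_irq() is safe to call from
		 * the interrupt context this completion path runs in. */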
3528*4882a593Smuzhiyun 		dev_consume_skb_irq(p_xmt_drv_descr->p_skb);
3529*4882a593Smuzhiyun 
3530*4882a593Smuzhiyun 		/*
3531*4882a593Smuzhiyun 		 * Move to start of next packet by updating completion index
3532*4882a593Smuzhiyun 		 *
3533*4882a593Smuzhiyun 		 * Here we assume that a transmit packet request is always
3534*4882a593Smuzhiyun 		 * serviced by posting one fragment.  We can therefore
3535*4882a593Smuzhiyun 		 * simplify the completion code by incrementing the
3536*4882a593Smuzhiyun 		 * completion index by one.  This code will need to be
3537*4882a593Smuzhiyun 		 * modified if this assumption changes.  See comments
3538*4882a593Smuzhiyun 		 * in dfx_xmt_queue_pkt for more details.
3539*4882a593Smuzhiyun 		 */
3540*4882a593Smuzhiyun 
3541*4882a593Smuzhiyun 		bp->rcv_xmt_reg.index.xmt_comp += 1;
3542*4882a593Smuzhiyun 		freed++;
3543*4882a593Smuzhiyun 		}
3544*4882a593Smuzhiyun 	return freed;
3545*4882a593Smuzhiyun 	}
3546*4882a593Smuzhiyun 
3547*4882a593Smuzhiyun 
3548*4882a593Smuzhiyun /*
3549*4882a593Smuzhiyun  * =================
3550*4882a593Smuzhiyun  * = dfx_rcv_flush =
3551*4882a593Smuzhiyun  * =================
3552*4882a593Smuzhiyun  *
3553*4882a593Smuzhiyun  * Overview:
3554*4882a593Smuzhiyun  *   Remove all skb's in the receive ring.
3555*4882a593Smuzhiyun  *
3556*4882a593Smuzhiyun  * Returns:
3557*4882a593Smuzhiyun  *   None
3558*4882a593Smuzhiyun  *
3559*4882a593Smuzhiyun  * Arguments:
3560*4882a593Smuzhiyun  *   bp - pointer to board information
3561*4882a593Smuzhiyun  *
3562*4882a593Smuzhiyun  * Functional Description:
3563*4882a593Smuzhiyun  *   Frees all the dynamically allocated skb's that are
3564*4882a593Smuzhiyun  *   currently attached to the device receive ring. This
3565*4882a593Smuzhiyun  *   function is typically only used when the device is
3566*4882a593Smuzhiyun  *   initialized or reinitialized.
3567*4882a593Smuzhiyun  *
3568*4882a593Smuzhiyun  * Return Codes:
3569*4882a593Smuzhiyun  *   None
3570*4882a593Smuzhiyun  *
3571*4882a593Smuzhiyun  * Side Effects:
3572*4882a593Smuzhiyun  *   None
3573*4882a593Smuzhiyun  */
3574*4882a593Smuzhiyun #ifdef DYNAMIC_BUFFERS
3575*4882a593Smuzhiyun static void dfx_rcv_flush( DFX_board_t *bp )
3576*4882a593Smuzhiyun 	{
3577*4882a593Smuzhiyun 	int i, j;
3578*4882a593Smuzhiyun 
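	/* The two loops together visit every ring slot exactly once: for each
	 * i < rcv_bufs_to_post, j steps through i, i + rcv_bufs_to_post,
	 * i + 2 * rcv_bufs_to_post, ... up to PI_RCV_DATA_K_NUM_ENTRIES - 1. */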
3579*4882a593Smuzhiyun 	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3580*4882a593Smuzhiyun 		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3581*4882a593Smuzhiyun 		{
3582*4882a593Smuzhiyun 			struct sk_buff *skb;
3583*4882a593Smuzhiyun 			skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
3584*4882a593Smuzhiyun 			if (skb) {
3585*4882a593Smuzhiyun 				dma_unmap_single(bp->bus_dev,
3586*4882a593Smuzhiyun 						 bp->descr_block_virt->rcv_data[i+j].long_1,
3587*4882a593Smuzhiyun 						 PI_RCV_DATA_K_SIZE_MAX,
3588*4882a593Smuzhiyun 						 DMA_FROM_DEVICE);
3589*4882a593Smuzhiyun 				dev_kfree_skb(skb);
3590*4882a593Smuzhiyun 			}
3591*4882a593Smuzhiyun 			bp->p_rcv_buff_va[i+j] = NULL;
3592*4882a593Smuzhiyun 		}
3593*4882a593Smuzhiyun 
3594*4882a593Smuzhiyun 	}
3595*4882a593Smuzhiyun #endif /* DYNAMIC_BUFFERS */
3596*4882a593Smuzhiyun 
3597*4882a593Smuzhiyun /*
3598*4882a593Smuzhiyun  * =================
3599*4882a593Smuzhiyun  * = dfx_xmt_flush =
3600*4882a593Smuzhiyun  * =================
3601*4882a593Smuzhiyun  *
3602*4882a593Smuzhiyun  * Overview:
3603*4882a593Smuzhiyun  *   Processes all frames whether they've been transmitted
3604*4882a593Smuzhiyun  *   or not.
3605*4882a593Smuzhiyun  *
3606*4882a593Smuzhiyun  * Returns:
3607*4882a593Smuzhiyun  *   None
3608*4882a593Smuzhiyun  *
3609*4882a593Smuzhiyun  * Arguments:
3610*4882a593Smuzhiyun  *   bp - pointer to board information
3611*4882a593Smuzhiyun  *
3612*4882a593Smuzhiyun  * Functional Description:
3613*4882a593Smuzhiyun  *   For all produced transmit descriptors that have not
3614*4882a593Smuzhiyun  *   yet been completed, we'll free the skb we were holding
3615*4882a593Smuzhiyun  *   onto using dev_kfree_skb and bump the appropriate
3616*4882a593Smuzhiyun  *   counters.  Of course, it's possible that some of
3617*4882a593Smuzhiyun  *   these transmit requests actually did go out, but we
3618*4882a593Smuzhiyun  *   won't make that distinction here.  Finally, we'll
3619*4882a593Smuzhiyun  *   update the consumer index to match the producer.
3620*4882a593Smuzhiyun  *
3621*4882a593Smuzhiyun  * Return Codes:
3622*4882a593Smuzhiyun  *   None
3623*4882a593Smuzhiyun  *
3624*4882a593Smuzhiyun  * Assumptions:
3625*4882a593Smuzhiyun  *   This routine does NOT update the Type 2 register.  It
3626*4882a593Smuzhiyun  *   is assumed that this routine is being called during a
3627*4882a593Smuzhiyun  *   transmit flush interrupt, or a shutdown or close routine.
3628*4882a593Smuzhiyun  *
3629*4882a593Smuzhiyun  * Side Effects:
3630*4882a593Smuzhiyun  *   None
3631*4882a593Smuzhiyun  */
3632*4882a593Smuzhiyun 
3633*4882a593Smuzhiyun static void dfx_xmt_flush( DFX_board_t *bp )
3634*4882a593Smuzhiyun 	{
3635*4882a593Smuzhiyun 	u32			prod_cons;		/* rcv/xmt consumer block longword */
3636*4882a593Smuzhiyun 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3637*4882a593Smuzhiyun 	u8			comp;			/* local transmit completion index */
3638*4882a593Smuzhiyun 
3639*4882a593Smuzhiyun 	/* Flush all outstanding transmit frames */
3640*4882a593Smuzhiyun 
3641*4882a593Smuzhiyun 	while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
3642*4882a593Smuzhiyun 		{
3643*4882a593Smuzhiyun 		/* Get pointer to the transmit driver descriptor block information */
3644*4882a593Smuzhiyun 
3645*4882a593Smuzhiyun 		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3646*4882a593Smuzhiyun 
3647*4882a593Smuzhiyun 		/* Return skb to operating system */
3648*4882a593Smuzhiyun 		comp = bp->rcv_xmt_reg.index.xmt_comp;
3649*4882a593Smuzhiyun 		dma_unmap_single(bp->bus_dev,
3650*4882a593Smuzhiyun 				 bp->descr_block_virt->xmt_data[comp].long_1,
3651*4882a593Smuzhiyun 				 p_xmt_drv_descr->p_skb->len,
3652*4882a593Smuzhiyun 				 DMA_TO_DEVICE);
3653*4882a593Smuzhiyun 		dev_kfree_skb(p_xmt_drv_descr->p_skb);
3654*4882a593Smuzhiyun 
3655*4882a593Smuzhiyun 		/* Increment transmit error counter */
3656*4882a593Smuzhiyun 
3657*4882a593Smuzhiyun 		bp->xmt_discards++;
3658*4882a593Smuzhiyun 
3659*4882a593Smuzhiyun 		/*
3660*4882a593Smuzhiyun 		 * Move to start of next packet by updating completion index
3661*4882a593Smuzhiyun 		 *
3662*4882a593Smuzhiyun 		 * Here we assume that a transmit packet request is always
3663*4882a593Smuzhiyun 		 * serviced by posting one fragment.  We can therefore
3664*4882a593Smuzhiyun 		 * simplify the completion code by incrementing the
3665*4882a593Smuzhiyun 		 * completion index by one.  This code will need to be
3666*4882a593Smuzhiyun 		 * modified if this assumption changes.  See comments
3667*4882a593Smuzhiyun 		 * in dfx_xmt_queue_pkt for more details.
3668*4882a593Smuzhiyun 		 */
3669*4882a593Smuzhiyun 
3670*4882a593Smuzhiyun 		bp->rcv_xmt_reg.index.xmt_comp += 1;
3671*4882a593Smuzhiyun 		}
3672*4882a593Smuzhiyun 
3673*4882a593Smuzhiyun 	/* Update the transmit consumer index in the consumer block */
3674*4882a593Smuzhiyun 
3675*4882a593Smuzhiyun 	prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
3676*4882a593Smuzhiyun 	prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
3677*4882a593Smuzhiyun 	bp->cons_block_virt->xmt_rcv_data = prod_cons;
3678*4882a593Smuzhiyun 	}
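
/*
 * Editor's note: a minimal sketch, not part of the driver, of the
 * read-modify-write performed at the end of dfx_xmt_flush() above: the
 * transmit index field of the shared consumer-block longword is cleared
 * with its mask and the current producer is shifted into place, leaving
 * the receive half of the longword untouched.  The helper name is
 * hypothetical.
 */
static u32 __maybe_unused dfx_example_set_xmt_cons(u32 xmt_rcv_data, u8 xmt_prod)
	{
	return (xmt_rcv_data & ~PI_CONS_M_XMT_INDEX) |
	       ((u32)xmt_prod << PI_CONS_V_XMT_INDEX);
	}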
3679*4882a593Smuzhiyun 
3680*4882a593Smuzhiyun /*
3681*4882a593Smuzhiyun  * ==================
3682*4882a593Smuzhiyun  * = dfx_unregister =
3683*4882a593Smuzhiyun  * ==================
3684*4882a593Smuzhiyun  *
3685*4882a593Smuzhiyun  * Overview:
3686*4882a593Smuzhiyun  *   Shuts down an FDDI controller
3687*4882a593Smuzhiyun  *
3688*4882a593Smuzhiyun  * Returns:
3689*4882a593Smuzhiyun  *   Condition code
3690*4882a593Smuzhiyun  *
3691*4882a593Smuzhiyun  * Arguments:
3692*4882a593Smuzhiyun  *   bdev - pointer to device information
3693*4882a593Smuzhiyun  *
3694*4882a593Smuzhiyun  * Functional Description:
3695*4882a593Smuzhiyun  *
3696*4882a593Smuzhiyun  * Return Codes:
3697*4882a593Smuzhiyun  *   None
3698*4882a593Smuzhiyun  *
3699*4882a593Smuzhiyun  * Assumptions:
3700*4882a593Smuzhiyun  *   It compiles so it should work :-( (PCI cards do :-)
3701*4882a593Smuzhiyun  *
3702*4882a593Smuzhiyun  * Side Effects:
3703*4882a593Smuzhiyun  *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
3704*4882a593Smuzhiyun  *   freed.
3705*4882a593Smuzhiyun  */
3706*4882a593Smuzhiyun static void dfx_unregister(struct device *bdev)
3707*4882a593Smuzhiyun {
3708*4882a593Smuzhiyun 	struct net_device *dev = dev_get_drvdata(bdev);
3709*4882a593Smuzhiyun 	DFX_board_t *bp = netdev_priv(dev);
3710*4882a593Smuzhiyun 	int dfx_bus_pci = dev_is_pci(bdev);
3711*4882a593Smuzhiyun 	int dfx_bus_tc = DFX_BUS_TC(bdev);
3712*4882a593Smuzhiyun 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
3713*4882a593Smuzhiyun 	resource_size_t bar_start[3] = {0};	/* pointers to ports */
3714*4882a593Smuzhiyun 	resource_size_t bar_len[3] = {0};	/* resource lengths */
3715*4882a593Smuzhiyun 	int		alloc_size;		/* total buffer size used */
3716*4882a593Smuzhiyun 
3717*4882a593Smuzhiyun 	unregister_netdev(dev);
3718*4882a593Smuzhiyun 
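	/* Recompute the size of the coherent block exactly as it was sized at
	 * probe time; dma_free_coherent() must be given the same size that
	 * was passed to the matching allocation. */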
3719*4882a593Smuzhiyun 	alloc_size = sizeof(PI_DESCR_BLOCK) +
3720*4882a593Smuzhiyun 		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
3721*4882a593Smuzhiyun #ifndef DYNAMIC_BUFFERS
3722*4882a593Smuzhiyun 		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
3723*4882a593Smuzhiyun #endif
3724*4882a593Smuzhiyun 		     sizeof(PI_CONSUMER_BLOCK) +
3725*4882a593Smuzhiyun 		     (PI_ALIGN_K_DESC_BLK - 1);
3726*4882a593Smuzhiyun 	if (bp->kmalloced)
3727*4882a593Smuzhiyun 		dma_free_coherent(bdev, alloc_size,
3728*4882a593Smuzhiyun 				  bp->kmalloced, bp->kmalloced_dma);
3729*4882a593Smuzhiyun 
3730*4882a593Smuzhiyun 	dfx_bus_uninit(dev);
3731*4882a593Smuzhiyun 
3732*4882a593Smuzhiyun 	dfx_get_bars(bdev, bar_start, bar_len);
3733*4882a593Smuzhiyun 	if (bar_start[2] != 0)
3734*4882a593Smuzhiyun 		release_region(bar_start[2], bar_len[2]);
3735*4882a593Smuzhiyun 	if (bar_start[1] != 0)
3736*4882a593Smuzhiyun 		release_region(bar_start[1], bar_len[1]);
3737*4882a593Smuzhiyun 	if (dfx_use_mmio) {
3738*4882a593Smuzhiyun 		iounmap(bp->base.mem);
3739*4882a593Smuzhiyun 		release_mem_region(bar_start[0], bar_len[0]);
3740*4882a593Smuzhiyun 	} else
3741*4882a593Smuzhiyun 		release_region(bar_start[0], bar_len[0]);
3742*4882a593Smuzhiyun 
3743*4882a593Smuzhiyun 	if (dfx_bus_pci)
3744*4882a593Smuzhiyun 		pci_disable_device(to_pci_dev(bdev));
3745*4882a593Smuzhiyun 
3746*4882a593Smuzhiyun 	free_netdev(dev);
3747*4882a593Smuzhiyun }
3748*4882a593Smuzhiyun 
3749*4882a593Smuzhiyun 
3750*4882a593Smuzhiyun static int __maybe_unused dfx_dev_register(struct device *);
3751*4882a593Smuzhiyun static int __maybe_unused dfx_dev_unregister(struct device *);
3752*4882a593Smuzhiyun 
3753*4882a593Smuzhiyun #ifdef CONFIG_PCI
3754*4882a593Smuzhiyun static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
3755*4882a593Smuzhiyun static void dfx_pci_unregister(struct pci_dev *);
3756*4882a593Smuzhiyun 
3757*4882a593Smuzhiyun static const struct pci_device_id dfx_pci_table[] = {
3758*4882a593Smuzhiyun 	{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3759*4882a593Smuzhiyun 	{ }
3760*4882a593Smuzhiyun };
3761*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, dfx_pci_table);
3762*4882a593Smuzhiyun 
3763*4882a593Smuzhiyun static struct pci_driver dfx_pci_driver = {
3764*4882a593Smuzhiyun 	.name		= "defxx",
3765*4882a593Smuzhiyun 	.id_table	= dfx_pci_table,
3766*4882a593Smuzhiyun 	.probe		= dfx_pci_register,
3767*4882a593Smuzhiyun 	.remove		= dfx_pci_unregister,
3768*4882a593Smuzhiyun };
3769*4882a593Smuzhiyun 
3770*4882a593Smuzhiyun static int dfx_pci_register(struct pci_dev *pdev,
3771*4882a593Smuzhiyun 			    const struct pci_device_id *ent)
3772*4882a593Smuzhiyun {
3773*4882a593Smuzhiyun 	return dfx_register(&pdev->dev);
3774*4882a593Smuzhiyun }
3775*4882a593Smuzhiyun 
3776*4882a593Smuzhiyun static void dfx_pci_unregister(struct pci_dev *pdev)
3777*4882a593Smuzhiyun {
3778*4882a593Smuzhiyun 	dfx_unregister(&pdev->dev);
3779*4882a593Smuzhiyun }
3780*4882a593Smuzhiyun #endif /* CONFIG_PCI */
3781*4882a593Smuzhiyun 
3782*4882a593Smuzhiyun #ifdef CONFIG_EISA
3783*4882a593Smuzhiyun static const struct eisa_device_id dfx_eisa_table[] = {
3784*4882a593Smuzhiyun         { "DEC3001", DEFEA_PROD_ID_1 },
3785*4882a593Smuzhiyun         { "DEC3002", DEFEA_PROD_ID_2 },
3786*4882a593Smuzhiyun         { "DEC3003", DEFEA_PROD_ID_3 },
3787*4882a593Smuzhiyun         { "DEC3004", DEFEA_PROD_ID_4 },
3788*4882a593Smuzhiyun         { }
3789*4882a593Smuzhiyun };
3790*4882a593Smuzhiyun MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
3791*4882a593Smuzhiyun 
3792*4882a593Smuzhiyun static struct eisa_driver dfx_eisa_driver = {
3793*4882a593Smuzhiyun 	.id_table	= dfx_eisa_table,
3794*4882a593Smuzhiyun 	.driver		= {
3795*4882a593Smuzhiyun 		.name	= "defxx",
3796*4882a593Smuzhiyun 		.bus	= &eisa_bus_type,
3797*4882a593Smuzhiyun 		.probe	= dfx_dev_register,
3798*4882a593Smuzhiyun 		.remove	= dfx_dev_unregister,
3799*4882a593Smuzhiyun 	},
3800*4882a593Smuzhiyun };
3801*4882a593Smuzhiyun #endif /* CONFIG_EISA */
3802*4882a593Smuzhiyun 
3803*4882a593Smuzhiyun #ifdef CONFIG_TC
3804*4882a593Smuzhiyun static struct tc_device_id const dfx_tc_table[] = {
3805*4882a593Smuzhiyun 	{ "DEC     ", "PMAF-FA " },
3806*4882a593Smuzhiyun 	{ "DEC     ", "PMAF-FD " },
3807*4882a593Smuzhiyun 	{ "DEC     ", "PMAF-FS " },
3808*4882a593Smuzhiyun 	{ "DEC     ", "PMAF-FU " },
3809*4882a593Smuzhiyun 	{ }
3810*4882a593Smuzhiyun };
3811*4882a593Smuzhiyun MODULE_DEVICE_TABLE(tc, dfx_tc_table);
3812*4882a593Smuzhiyun 
3813*4882a593Smuzhiyun static struct tc_driver dfx_tc_driver = {
3814*4882a593Smuzhiyun 	.id_table	= dfx_tc_table,
3815*4882a593Smuzhiyun 	.driver		= {
3816*4882a593Smuzhiyun 		.name	= "defxx",
3817*4882a593Smuzhiyun 		.bus	= &tc_bus_type,
3818*4882a593Smuzhiyun 		.probe	= dfx_dev_register,
3819*4882a593Smuzhiyun 		.remove	= dfx_dev_unregister,
3820*4882a593Smuzhiyun 	},
3821*4882a593Smuzhiyun };
3822*4882a593Smuzhiyun #endif /* CONFIG_TC */
3823*4882a593Smuzhiyun 
3824*4882a593Smuzhiyun static int __maybe_unused dfx_dev_register(struct device *dev)
3825*4882a593Smuzhiyun {
3826*4882a593Smuzhiyun 	int status;
3827*4882a593Smuzhiyun 
3828*4882a593Smuzhiyun 	status = dfx_register(dev);
3829*4882a593Smuzhiyun 	if (!status)
3830*4882a593Smuzhiyun 		get_device(dev);
3831*4882a593Smuzhiyun 	return status;
3832*4882a593Smuzhiyun }
3833*4882a593Smuzhiyun 
3834*4882a593Smuzhiyun static int __maybe_unused dfx_dev_unregister(struct device *dev)
3835*4882a593Smuzhiyun {
3836*4882a593Smuzhiyun 	put_device(dev);
3837*4882a593Smuzhiyun 	dfx_unregister(dev);
3838*4882a593Smuzhiyun 	return 0;
3839*4882a593Smuzhiyun }
3840*4882a593Smuzhiyun 
3841*4882a593Smuzhiyun 
3842*4882a593Smuzhiyun static int dfx_init(void)
3843*4882a593Smuzhiyun {
3844*4882a593Smuzhiyun 	int status;
3845*4882a593Smuzhiyun 
3846*4882a593Smuzhiyun 	status = pci_register_driver(&dfx_pci_driver);
3847*4882a593Smuzhiyun 	if (!status)
3848*4882a593Smuzhiyun 		status = eisa_driver_register(&dfx_eisa_driver);
3849*4882a593Smuzhiyun 	if (!status)
3850*4882a593Smuzhiyun 		status = tc_register_driver(&dfx_tc_driver);
3851*4882a593Smuzhiyun 	return status;
3852*4882a593Smuzhiyun }
3853*4882a593Smuzhiyun 
3854*4882a593Smuzhiyun static void dfx_cleanup(void)
3855*4882a593Smuzhiyun {
3856*4882a593Smuzhiyun 	tc_unregister_driver(&dfx_tc_driver);
3857*4882a593Smuzhiyun 	eisa_driver_unregister(&dfx_eisa_driver);
3858*4882a593Smuzhiyun 	pci_unregister_driver(&dfx_pci_driver);
3859*4882a593Smuzhiyun }
3860*4882a593Smuzhiyun 
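/*
 * Editor's note: a minimal sketch, not part of the driver, of the same
 * three-bus registration sequence written with explicit unwind on partial
 * failure; dfx_init() above simply returns the first non-zero status.  The
 * function name dfx_example_init is hypothetical, and this assumes PCI,
 * EISA and TURBOchannel support are all configured in.
 */
static int __maybe_unused dfx_example_init(void)
{
	int status;

	status = pci_register_driver(&dfx_pci_driver);
	if (status)
		return status;
	status = eisa_driver_register(&dfx_eisa_driver);
	if (status)
		goto err_eisa;
	status = tc_register_driver(&dfx_tc_driver);
	if (status)
		goto err_tc;
	return 0;

err_tc:
	eisa_driver_unregister(&dfx_eisa_driver);
err_eisa:
	pci_unregister_driver(&dfx_pci_driver);
	return status;
}
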
3861*4882a593Smuzhiyun module_init(dfx_init);
3862*4882a593Smuzhiyun module_exit(dfx_cleanup);
3863*4882a593Smuzhiyun MODULE_AUTHOR("Lawrence V. Stefani");
3864*4882a593Smuzhiyun MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
3865*4882a593Smuzhiyun 		   DRV_VERSION " " DRV_RELDATE);
3866*4882a593Smuzhiyun MODULE_LICENSE("GPL");
3867