1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * File Name:
4*4882a593Smuzhiyun * skfddi.c
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Copyright Information:
7*4882a593Smuzhiyun * Copyright SysKonnect 1998,1999.
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * The information in this file is provided "AS IS" without warranty.
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * Abstract:
12*4882a593Smuzhiyun * A Linux device driver supporting the SysKonnect FDDI PCI controller
 * family.
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * Maintainers:
16*4882a593Smuzhiyun * CG Christoph Goos (cgoos@syskonnect.de)
17*4882a593Smuzhiyun *
18*4882a593Smuzhiyun * Contributors:
19*4882a593Smuzhiyun * DM David S. Miller
20*4882a593Smuzhiyun *
 * Address all questions to:
22*4882a593Smuzhiyun * linux@syskonnect.de
23*4882a593Smuzhiyun *
24*4882a593Smuzhiyun * The technical manual for the adapters is available from SysKonnect's
25*4882a593Smuzhiyun * web pages: www.syskonnect.com
26*4882a593Smuzhiyun * Goto "Support" and search Knowledge Base for "manual".
27*4882a593Smuzhiyun *
28*4882a593Smuzhiyun * Driver Architecture:
29*4882a593Smuzhiyun * The driver architecture is based on the DEC FDDI driver by
30*4882a593Smuzhiyun * Lawrence V. Stefani and several ethernet drivers.
31*4882a593Smuzhiyun * I also used an existing Windows NT miniport driver.
32*4882a593Smuzhiyun * All hardware dependent functions are handled by the SysKonnect
33*4882a593Smuzhiyun * Hardware Module.
34*4882a593Smuzhiyun * The only headerfiles that are directly related to this source
35*4882a593Smuzhiyun * are skfddi.c, h/types.h, h/osdef1st.h, h/targetos.h.
36*4882a593Smuzhiyun * The others belong to the SysKonnect FDDI Hardware Module and
37*4882a593Smuzhiyun * should better not be changed.
38*4882a593Smuzhiyun *
39*4882a593Smuzhiyun * Modification History:
40*4882a593Smuzhiyun * Date Name Description
41*4882a593Smuzhiyun * 02-Mar-98 CG Created.
42*4882a593Smuzhiyun *
43*4882a593Smuzhiyun * 10-Mar-99 CG Support for 2.2.x added.
44*4882a593Smuzhiyun * 25-Mar-99 CG Corrected IRQ routing for SMP (APIC)
45*4882a593Smuzhiyun * 26-Oct-99 CG Fixed compilation error on 2.2.13
46*4882a593Smuzhiyun * 12-Nov-99 CG Source code release
47*4882a593Smuzhiyun * 22-Nov-99 CG Included in kernel source.
48*4882a593Smuzhiyun * 07-May-00 DM 64 bit fixes, new dma interface
49*4882a593Smuzhiyun * 31-Jul-03 DB Audit copy_*_user in skfp_ioctl
50*4882a593Smuzhiyun * Daniele Bellucci <bellucda@tiscali.it>
51*4882a593Smuzhiyun * 03-Dec-03 SH Convert to PCI device model
52*4882a593Smuzhiyun *
53*4882a593Smuzhiyun * Compilation options (-Dxxx):
54*4882a593Smuzhiyun * DRIVERDEBUG print lots of messages to log file
55*4882a593Smuzhiyun * DUMPPACKETS print received/transmitted packets to logfile
56*4882a593Smuzhiyun *
57*4882a593Smuzhiyun * Tested cpu architectures:
58*4882a593Smuzhiyun * - i386
59*4882a593Smuzhiyun * - sparc64
60*4882a593Smuzhiyun */
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun /* Version information string - should be updated prior to */
63*4882a593Smuzhiyun /* each new release!!! */
64*4882a593Smuzhiyun #define VERSION "2.07"
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun static const char * const boot_msg =
67*4882a593Smuzhiyun "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
68*4882a593Smuzhiyun " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun /* Include files */
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun #include <linux/capability.h>
73*4882a593Smuzhiyun #include <linux/module.h>
74*4882a593Smuzhiyun #include <linux/kernel.h>
75*4882a593Smuzhiyun #include <linux/errno.h>
76*4882a593Smuzhiyun #include <linux/ioport.h>
77*4882a593Smuzhiyun #include <linux/interrupt.h>
78*4882a593Smuzhiyun #include <linux/pci.h>
79*4882a593Smuzhiyun #include <linux/netdevice.h>
80*4882a593Smuzhiyun #include <linux/fddidevice.h>
81*4882a593Smuzhiyun #include <linux/skbuff.h>
82*4882a593Smuzhiyun #include <linux/bitops.h>
83*4882a593Smuzhiyun #include <linux/gfp.h>
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun #include <asm/byteorder.h>
86*4882a593Smuzhiyun #include <asm/io.h>
87*4882a593Smuzhiyun #include <linux/uaccess.h>
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun #include "h/types.h"
90*4882a593Smuzhiyun #undef ADDR // undo Linux definition
91*4882a593Smuzhiyun #include "h/skfbi.h"
92*4882a593Smuzhiyun #include "h/fddi.h"
93*4882a593Smuzhiyun #include "h/smc.h"
94*4882a593Smuzhiyun #include "h/smtstate.h"
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun // Define module-wide (static) routines
98*4882a593Smuzhiyun static int skfp_driver_init(struct net_device *dev);
99*4882a593Smuzhiyun static int skfp_open(struct net_device *dev);
100*4882a593Smuzhiyun static int skfp_close(struct net_device *dev);
101*4882a593Smuzhiyun static irqreturn_t skfp_interrupt(int irq, void *dev_id);
102*4882a593Smuzhiyun static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
103*4882a593Smuzhiyun static void skfp_ctl_set_multicast_list(struct net_device *dev);
104*4882a593Smuzhiyun static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
105*4882a593Smuzhiyun static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
106*4882a593Smuzhiyun static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
107*4882a593Smuzhiyun static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
108*4882a593Smuzhiyun struct net_device *dev);
109*4882a593Smuzhiyun static void send_queued_packets(struct s_smc *smc);
110*4882a593Smuzhiyun static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
111*4882a593Smuzhiyun static void ResetAdapter(struct s_smc *smc);
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun // Functions needed by the hardware module
115*4882a593Smuzhiyun void *mac_drv_get_space(struct s_smc *smc, u_int size);
116*4882a593Smuzhiyun void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
117*4882a593Smuzhiyun unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
118*4882a593Smuzhiyun unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
119*4882a593Smuzhiyun void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
120*4882a593Smuzhiyun int flag);
121*4882a593Smuzhiyun void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
122*4882a593Smuzhiyun void llc_restart_tx(struct s_smc *smc);
123*4882a593Smuzhiyun void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
124*4882a593Smuzhiyun int frag_count, int len);
125*4882a593Smuzhiyun void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
126*4882a593Smuzhiyun int frag_count);
127*4882a593Smuzhiyun void mac_drv_fill_rxd(struct s_smc *smc);
128*4882a593Smuzhiyun void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
129*4882a593Smuzhiyun int frag_count);
130*4882a593Smuzhiyun int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
131*4882a593Smuzhiyun int la_len);
132*4882a593Smuzhiyun void dump_data(unsigned char *Data, int length);
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun // External functions from the hardware module
135*4882a593Smuzhiyun extern u_int mac_drv_check_space(void);
136*4882a593Smuzhiyun extern int mac_drv_init(struct s_smc *smc);
137*4882a593Smuzhiyun extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
138*4882a593Smuzhiyun int len, int frame_status);
139*4882a593Smuzhiyun extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
140*4882a593Smuzhiyun int frame_len, int frame_status);
141*4882a593Smuzhiyun extern void fddi_isr(struct s_smc *smc);
142*4882a593Smuzhiyun extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
143*4882a593Smuzhiyun int len, int frame_status);
144*4882a593Smuzhiyun extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
145*4882a593Smuzhiyun extern void mac_drv_clear_rx_queue(struct s_smc *smc);
146*4882a593Smuzhiyun extern void enable_tx_irq(struct s_smc *smc, u_short queue);
147*4882a593Smuzhiyun
/* PCI IDs claimed by this driver: all SysKonnect FDDI controllers
 * (vendor SK, device FP), any subsystem vendor/device. */
static const struct pci_device_id skfddi_pci_tbl[] = {
	{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
	{ }			/* Terminating entry */
};
153*4882a593Smuzhiyun MODULE_LICENSE("GPL");
154*4882a593Smuzhiyun MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun // Define module-wide (static) variables
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun static int num_boards; /* total number of adapters configured */
159*4882a593Smuzhiyun
/* net_device callbacks implemented by this driver; all handlers are the
 * static skfp_* functions declared above. */
static const struct net_device_ops skfp_netdev_ops = {
	.ndo_open		= skfp_open,
	.ndo_stop		= skfp_close,
	.ndo_start_xmit		= skfp_send_pkt,
	.ndo_get_stats		= skfp_ctl_get_stats,
	.ndo_set_rx_mode	= skfp_ctl_set_multicast_list,
	.ndo_set_mac_address	= skfp_ctl_set_mac_address,
	.ndo_do_ioctl		= skfp_ioctl,
};
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun /*
171*4882a593Smuzhiyun * =================
172*4882a593Smuzhiyun * = skfp_init_one =
173*4882a593Smuzhiyun * =================
174*4882a593Smuzhiyun *
175*4882a593Smuzhiyun * Overview:
176*4882a593Smuzhiyun * Probes for supported FDDI PCI controllers
177*4882a593Smuzhiyun *
178*4882a593Smuzhiyun * Returns:
179*4882a593Smuzhiyun * Condition code
180*4882a593Smuzhiyun *
181*4882a593Smuzhiyun * Arguments:
182*4882a593Smuzhiyun * pdev - pointer to PCI device information
183*4882a593Smuzhiyun *
184*4882a593Smuzhiyun * Functional Description:
185*4882a593Smuzhiyun * This is now called by PCI driver registration process
186*4882a593Smuzhiyun * for each board found.
187*4882a593Smuzhiyun *
188*4882a593Smuzhiyun * Return Codes:
189*4882a593Smuzhiyun * 0 - This device (fddi0, fddi1, etc) configured successfully
190*4882a593Smuzhiyun * -ENODEV - No devices present, or no SysKonnect FDDI PCI device
191*4882a593Smuzhiyun * present for this device name
192*4882a593Smuzhiyun *
193*4882a593Smuzhiyun *
194*4882a593Smuzhiyun * Side Effects:
195*4882a593Smuzhiyun * Device structures for FDDI adapters (fddi0, fddi1, etc) are
196*4882a593Smuzhiyun * initialized and the board resources are read and stored in
197*4882a593Smuzhiyun * the device structure.
198*4882a593Smuzhiyun */
skfp_init_one(struct pci_dev * pdev,const struct pci_device_id * ent)199*4882a593Smuzhiyun static int skfp_init_one(struct pci_dev *pdev,
200*4882a593Smuzhiyun const struct pci_device_id *ent)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun struct net_device *dev;
203*4882a593Smuzhiyun struct s_smc *smc; /* board pointer */
204*4882a593Smuzhiyun void __iomem *mem;
205*4882a593Smuzhiyun int err;
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun pr_debug("entering skfp_init_one\n");
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun if (num_boards == 0)
210*4882a593Smuzhiyun printk("%s\n", boot_msg);
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun err = pci_enable_device(pdev);
213*4882a593Smuzhiyun if (err)
214*4882a593Smuzhiyun return err;
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun err = pci_request_regions(pdev, "skfddi");
217*4882a593Smuzhiyun if (err)
218*4882a593Smuzhiyun goto err_out1;
219*4882a593Smuzhiyun
220*4882a593Smuzhiyun pci_set_master(pdev);
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun #ifdef MEM_MAPPED_IO
223*4882a593Smuzhiyun if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
224*4882a593Smuzhiyun printk(KERN_ERR "skfp: region is not an MMIO resource\n");
225*4882a593Smuzhiyun err = -EIO;
226*4882a593Smuzhiyun goto err_out2;
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
230*4882a593Smuzhiyun #else
231*4882a593Smuzhiyun if (!(pci_resource_flags(pdev, 1) & IO_RESOURCE_IO)) {
232*4882a593Smuzhiyun printk(KERN_ERR "skfp: region is not PIO resource\n");
233*4882a593Smuzhiyun err = -EIO;
234*4882a593Smuzhiyun goto err_out2;
235*4882a593Smuzhiyun }
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
238*4882a593Smuzhiyun #endif
239*4882a593Smuzhiyun if (!mem) {
240*4882a593Smuzhiyun printk(KERN_ERR "skfp: Unable to map register, "
241*4882a593Smuzhiyun "FDDI adapter will be disabled.\n");
242*4882a593Smuzhiyun err = -EIO;
243*4882a593Smuzhiyun goto err_out2;
244*4882a593Smuzhiyun }
245*4882a593Smuzhiyun
246*4882a593Smuzhiyun dev = alloc_fddidev(sizeof(struct s_smc));
247*4882a593Smuzhiyun if (!dev) {
248*4882a593Smuzhiyun printk(KERN_ERR "skfp: Unable to allocate fddi device, "
249*4882a593Smuzhiyun "FDDI adapter will be disabled.\n");
250*4882a593Smuzhiyun err = -ENOMEM;
251*4882a593Smuzhiyun goto err_out3;
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun dev->irq = pdev->irq;
255*4882a593Smuzhiyun dev->netdev_ops = &skfp_netdev_ops;
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun SET_NETDEV_DEV(dev, &pdev->dev);
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun /* Initialize board structure with bus-specific info */
260*4882a593Smuzhiyun smc = netdev_priv(dev);
261*4882a593Smuzhiyun smc->os.dev = dev;
262*4882a593Smuzhiyun smc->os.bus_type = SK_BUS_TYPE_PCI;
263*4882a593Smuzhiyun smc->os.pdev = *pdev;
264*4882a593Smuzhiyun smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
265*4882a593Smuzhiyun smc->os.MaxFrameSize = MAX_FRAME_SIZE;
266*4882a593Smuzhiyun smc->os.dev = dev;
267*4882a593Smuzhiyun smc->hw.slot = -1;
268*4882a593Smuzhiyun smc->hw.iop = mem;
269*4882a593Smuzhiyun smc->os.ResetRequested = FALSE;
270*4882a593Smuzhiyun skb_queue_head_init(&smc->os.SendSkbQueue);
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun dev->base_addr = (unsigned long)mem;
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun err = skfp_driver_init(dev);
275*4882a593Smuzhiyun if (err)
276*4882a593Smuzhiyun goto err_out4;
277*4882a593Smuzhiyun
278*4882a593Smuzhiyun err = register_netdev(dev);
279*4882a593Smuzhiyun if (err)
280*4882a593Smuzhiyun goto err_out5;
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun ++num_boards;
283*4882a593Smuzhiyun pci_set_drvdata(pdev, dev);
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
286*4882a593Smuzhiyun (pdev->subsystem_device & 0xff00) == 0x5800)
287*4882a593Smuzhiyun printk("%s: SysKonnect FDDI PCI adapter"
288*4882a593Smuzhiyun " found (SK-%04X)\n", dev->name,
289*4882a593Smuzhiyun pdev->subsystem_device);
290*4882a593Smuzhiyun else
291*4882a593Smuzhiyun printk("%s: FDDI PCI adapter found\n", dev->name);
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun return 0;
294*4882a593Smuzhiyun err_out5:
295*4882a593Smuzhiyun if (smc->os.SharedMemAddr)
296*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, smc->os.SharedMemSize,
297*4882a593Smuzhiyun smc->os.SharedMemAddr,
298*4882a593Smuzhiyun smc->os.SharedMemDMA);
299*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, MAX_FRAME_SIZE,
300*4882a593Smuzhiyun smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
301*4882a593Smuzhiyun err_out4:
302*4882a593Smuzhiyun free_netdev(dev);
303*4882a593Smuzhiyun err_out3:
304*4882a593Smuzhiyun #ifdef MEM_MAPPED_IO
305*4882a593Smuzhiyun iounmap(mem);
306*4882a593Smuzhiyun #else
307*4882a593Smuzhiyun ioport_unmap(mem);
308*4882a593Smuzhiyun #endif
309*4882a593Smuzhiyun err_out2:
310*4882a593Smuzhiyun pci_release_regions(pdev);
311*4882a593Smuzhiyun err_out1:
312*4882a593Smuzhiyun pci_disable_device(pdev);
313*4882a593Smuzhiyun return err;
314*4882a593Smuzhiyun }
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun /*
317*4882a593Smuzhiyun * Called for each adapter board from pci_unregister_driver
318*4882a593Smuzhiyun */
skfp_remove_one(struct pci_dev * pdev)319*4882a593Smuzhiyun static void skfp_remove_one(struct pci_dev *pdev)
320*4882a593Smuzhiyun {
321*4882a593Smuzhiyun struct net_device *p = pci_get_drvdata(pdev);
322*4882a593Smuzhiyun struct s_smc *lp = netdev_priv(p);
323*4882a593Smuzhiyun
324*4882a593Smuzhiyun unregister_netdev(p);
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun if (lp->os.SharedMemAddr) {
327*4882a593Smuzhiyun dma_free_coherent(&pdev->dev,
328*4882a593Smuzhiyun lp->os.SharedMemSize,
329*4882a593Smuzhiyun lp->os.SharedMemAddr,
330*4882a593Smuzhiyun lp->os.SharedMemDMA);
331*4882a593Smuzhiyun lp->os.SharedMemAddr = NULL;
332*4882a593Smuzhiyun }
333*4882a593Smuzhiyun if (lp->os.LocalRxBuffer) {
334*4882a593Smuzhiyun dma_free_coherent(&pdev->dev,
335*4882a593Smuzhiyun MAX_FRAME_SIZE,
336*4882a593Smuzhiyun lp->os.LocalRxBuffer,
337*4882a593Smuzhiyun lp->os.LocalRxBufferDMA);
338*4882a593Smuzhiyun lp->os.LocalRxBuffer = NULL;
339*4882a593Smuzhiyun }
340*4882a593Smuzhiyun #ifdef MEM_MAPPED_IO
341*4882a593Smuzhiyun iounmap(lp->hw.iop);
342*4882a593Smuzhiyun #else
343*4882a593Smuzhiyun ioport_unmap(lp->hw.iop);
344*4882a593Smuzhiyun #endif
345*4882a593Smuzhiyun pci_release_regions(pdev);
346*4882a593Smuzhiyun free_netdev(p);
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun pci_disable_device(pdev);
349*4882a593Smuzhiyun }
350*4882a593Smuzhiyun
351*4882a593Smuzhiyun /*
352*4882a593Smuzhiyun * ====================
353*4882a593Smuzhiyun * = skfp_driver_init =
354*4882a593Smuzhiyun * ====================
355*4882a593Smuzhiyun *
356*4882a593Smuzhiyun * Overview:
357*4882a593Smuzhiyun * Initializes remaining adapter board structure information
358*4882a593Smuzhiyun * and makes sure adapter is in a safe state prior to skfp_open().
359*4882a593Smuzhiyun *
360*4882a593Smuzhiyun * Returns:
361*4882a593Smuzhiyun * Condition code
362*4882a593Smuzhiyun *
363*4882a593Smuzhiyun * Arguments:
364*4882a593Smuzhiyun * dev - pointer to device information
365*4882a593Smuzhiyun *
366*4882a593Smuzhiyun * Functional Description:
367*4882a593Smuzhiyun * This function allocates additional resources such as the host memory
368*4882a593Smuzhiyun * blocks needed by the adapter.
369*4882a593Smuzhiyun * The adapter is also reset. The OS must call skfp_open() to open
370*4882a593Smuzhiyun * the adapter and bring it on-line.
371*4882a593Smuzhiyun *
372*4882a593Smuzhiyun * Return Codes:
373*4882a593Smuzhiyun * 0 - initialization succeeded
374*4882a593Smuzhiyun * -1 - initialization failed
375*4882a593Smuzhiyun */
static int skfp_driver_init(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;
	int err = -EIO;		/* default: any failure below reports -EIO */

	pr_debug("entering skfp_driver_init\n");

	// set the io address in private structures
	bp->base_addr = dev->base_addr;

	// Get the interrupt level from the PCI Configuration Table
	smc->hw.irq = dev->irq;

	spin_lock_init(&bp->DriverLock);

	// Allocate invalid frame
	// NOTE(review): GFP_ATOMIC presumably because callers may not be
	// allowed to sleep here — confirm before relaxing to GFP_KERNEL.
	bp->LocalRxBuffer = dma_alloc_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
					       &bp->LocalRxBufferDMA,
					       GFP_ATOMIC);
	if (!bp->LocalRxBuffer) {
		printk("could not allocate mem for ");
		printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
		goto fail;
	}

	// Determine the required size of the 'shared' memory area.
	bp->SharedMemSize = mac_drv_check_space();
	pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
	if (bp->SharedMemSize > 0) {
		bp->SharedMemSize += 16;	// for descriptor alignment

		bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
						       bp->SharedMemSize,
						       &bp->SharedMemDMA,
						       GFP_ATOMIC);
		if (!bp->SharedMemAddr) {
			printk("could not allocate mem for ");
			printk("hardware module: %ld byte\n",
			       bp->SharedMemSize);
			goto fail;
		}

	} else {
		// Hardware module needs no shared memory.
		bp->SharedMemAddr = NULL;
	}

	// Reset allocation offset into the shared area (used by
	// mac_drv_get_space()).
	bp->SharedMemHeap = 0;

	card_stop(smc);		// Reset adapter.

	pr_debug("mac_drv_init()..\n");
	if (mac_drv_init(smc) != 0) {
		pr_debug("mac_drv_init() failed\n");
		goto fail;
	}
	// Read the factory MAC address from the adapter and publish it
	// as the net_device address.
	read_address(smc, NULL);
	pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);

	smt_reset_defaults(smc, 0);

	return 0;

fail:
	// Undo whichever allocations succeeded; NULL the pointers so the
	// caller's error path does not free them a second time.
	if (bp->SharedMemAddr) {
		dma_free_coherent(&bp->pdev.dev,
				  bp->SharedMemSize,
				  bp->SharedMemAddr,
				  bp->SharedMemDMA);
		bp->SharedMemAddr = NULL;
	}
	if (bp->LocalRxBuffer) {
		dma_free_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
				  bp->LocalRxBuffer, bp->LocalRxBufferDMA);
		bp->LocalRxBuffer = NULL;
	}
	return err;
}				// skfp_driver_init
455*4882a593Smuzhiyun
456*4882a593Smuzhiyun
457*4882a593Smuzhiyun /*
458*4882a593Smuzhiyun * =============
459*4882a593Smuzhiyun * = skfp_open =
460*4882a593Smuzhiyun * =============
461*4882a593Smuzhiyun *
462*4882a593Smuzhiyun * Overview:
463*4882a593Smuzhiyun * Opens the adapter
464*4882a593Smuzhiyun *
465*4882a593Smuzhiyun * Returns:
466*4882a593Smuzhiyun * Condition code
467*4882a593Smuzhiyun *
468*4882a593Smuzhiyun * Arguments:
469*4882a593Smuzhiyun * dev - pointer to device information
470*4882a593Smuzhiyun *
471*4882a593Smuzhiyun * Functional Description:
472*4882a593Smuzhiyun * This function brings the adapter to an operational state.
473*4882a593Smuzhiyun *
474*4882a593Smuzhiyun * Return Codes:
475*4882a593Smuzhiyun * 0 - Adapter was successfully opened
476*4882a593Smuzhiyun * -EAGAIN - Could not register IRQ
477*4882a593Smuzhiyun */
skfp_open(struct net_device * dev)478*4882a593Smuzhiyun static int skfp_open(struct net_device *dev)
479*4882a593Smuzhiyun {
480*4882a593Smuzhiyun struct s_smc *smc = netdev_priv(dev);
481*4882a593Smuzhiyun int err;
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun pr_debug("entering skfp_open\n");
484*4882a593Smuzhiyun /* Register IRQ - support shared interrupts by passing device ptr */
485*4882a593Smuzhiyun err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
486*4882a593Smuzhiyun dev->name, dev);
487*4882a593Smuzhiyun if (err)
488*4882a593Smuzhiyun return err;
489*4882a593Smuzhiyun
490*4882a593Smuzhiyun /*
491*4882a593Smuzhiyun * Set current address to factory MAC address
492*4882a593Smuzhiyun *
493*4882a593Smuzhiyun * Note: We've already done this step in skfp_driver_init.
494*4882a593Smuzhiyun * However, it's possible that a user has set a node
495*4882a593Smuzhiyun * address override, then closed and reopened the
496*4882a593Smuzhiyun * adapter. Unless we reset the device address field
497*4882a593Smuzhiyun * now, we'll continue to use the existing modified
498*4882a593Smuzhiyun * address.
499*4882a593Smuzhiyun */
500*4882a593Smuzhiyun read_address(smc, NULL);
501*4882a593Smuzhiyun memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
502*4882a593Smuzhiyun
503*4882a593Smuzhiyun init_smt(smc, NULL);
504*4882a593Smuzhiyun smt_online(smc, 1);
505*4882a593Smuzhiyun STI_FBI();
506*4882a593Smuzhiyun
507*4882a593Smuzhiyun /* Clear local multicast address tables */
508*4882a593Smuzhiyun mac_clear_multicast(smc);
509*4882a593Smuzhiyun
510*4882a593Smuzhiyun /* Disable promiscuous filter settings */
511*4882a593Smuzhiyun mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
512*4882a593Smuzhiyun
513*4882a593Smuzhiyun netif_start_queue(dev);
514*4882a593Smuzhiyun return 0;
515*4882a593Smuzhiyun } // skfp_open
516*4882a593Smuzhiyun
517*4882a593Smuzhiyun
518*4882a593Smuzhiyun /*
519*4882a593Smuzhiyun * ==============
520*4882a593Smuzhiyun * = skfp_close =
521*4882a593Smuzhiyun * ==============
522*4882a593Smuzhiyun *
523*4882a593Smuzhiyun * Overview:
524*4882a593Smuzhiyun * Closes the device/module.
525*4882a593Smuzhiyun *
526*4882a593Smuzhiyun * Returns:
527*4882a593Smuzhiyun * Condition code
528*4882a593Smuzhiyun *
529*4882a593Smuzhiyun * Arguments:
530*4882a593Smuzhiyun * dev - pointer to device information
531*4882a593Smuzhiyun *
532*4882a593Smuzhiyun * Functional Description:
533*4882a593Smuzhiyun * This routine closes the adapter and brings it to a safe state.
534*4882a593Smuzhiyun * The interrupt service routine is deregistered with the OS.
535*4882a593Smuzhiyun * The adapter can be opened again with another call to skfp_open().
536*4882a593Smuzhiyun *
537*4882a593Smuzhiyun * Return Codes:
538*4882a593Smuzhiyun * Always return 0.
539*4882a593Smuzhiyun *
540*4882a593Smuzhiyun * Assumptions:
541*4882a593Smuzhiyun * No further requests for this adapter are made after this routine is
542*4882a593Smuzhiyun * called. skfp_open() can be called to reset and reinitialize the
543*4882a593Smuzhiyun * adapter.
544*4882a593Smuzhiyun */
skfp_close(struct net_device * dev)545*4882a593Smuzhiyun static int skfp_close(struct net_device *dev)
546*4882a593Smuzhiyun {
547*4882a593Smuzhiyun struct s_smc *smc = netdev_priv(dev);
548*4882a593Smuzhiyun skfddi_priv *bp = &smc->os;
549*4882a593Smuzhiyun
550*4882a593Smuzhiyun CLI_FBI();
551*4882a593Smuzhiyun smt_reset_defaults(smc, 1);
552*4882a593Smuzhiyun card_stop(smc);
553*4882a593Smuzhiyun mac_drv_clear_tx_queue(smc);
554*4882a593Smuzhiyun mac_drv_clear_rx_queue(smc);
555*4882a593Smuzhiyun
556*4882a593Smuzhiyun netif_stop_queue(dev);
557*4882a593Smuzhiyun /* Deregister (free) IRQ */
558*4882a593Smuzhiyun free_irq(dev->irq, dev);
559*4882a593Smuzhiyun
560*4882a593Smuzhiyun skb_queue_purge(&bp->SendSkbQueue);
561*4882a593Smuzhiyun bp->QueueSkb = MAX_TX_QUEUE_LEN;
562*4882a593Smuzhiyun
563*4882a593Smuzhiyun return 0;
564*4882a593Smuzhiyun } // skfp_close
565*4882a593Smuzhiyun
566*4882a593Smuzhiyun
567*4882a593Smuzhiyun /*
568*4882a593Smuzhiyun * ==================
569*4882a593Smuzhiyun * = skfp_interrupt =
570*4882a593Smuzhiyun * ==================
571*4882a593Smuzhiyun *
572*4882a593Smuzhiyun * Overview:
573*4882a593Smuzhiyun * Interrupt processing routine
574*4882a593Smuzhiyun *
575*4882a593Smuzhiyun * Returns:
576*4882a593Smuzhiyun * None
577*4882a593Smuzhiyun *
578*4882a593Smuzhiyun * Arguments:
579*4882a593Smuzhiyun * irq - interrupt vector
580*4882a593Smuzhiyun * dev_id - pointer to device information
581*4882a593Smuzhiyun *
582*4882a593Smuzhiyun * Functional Description:
583*4882a593Smuzhiyun * This routine calls the interrupt processing routine for this adapter. It
584*4882a593Smuzhiyun * disables and reenables adapter interrupts, as appropriate. We can support
585*4882a593Smuzhiyun * shared interrupts since the incoming dev_id pointer provides our device
586*4882a593Smuzhiyun * structure context. All the real work is done in the hardware module.
587*4882a593Smuzhiyun *
588*4882a593Smuzhiyun * Return Codes:
589*4882a593Smuzhiyun * None
590*4882a593Smuzhiyun *
591*4882a593Smuzhiyun * Assumptions:
592*4882a593Smuzhiyun * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
593*4882a593Smuzhiyun * on Intel-based systems) is done by the operating system outside this
594*4882a593Smuzhiyun * routine.
595*4882a593Smuzhiyun *
596*4882a593Smuzhiyun * System interrupts are enabled through this call.
597*4882a593Smuzhiyun *
598*4882a593Smuzhiyun * Side Effects:
599*4882a593Smuzhiyun * Interrupts are disabled, then reenabled at the adapter.
600*4882a593Smuzhiyun */
601*4882a593Smuzhiyun
static irqreturn_t skfp_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct s_smc *smc;	/* private board structure pointer */
	skfddi_priv *bp;

	smc = netdev_priv(dev);
	bp = &smc->os;

	// IRQs enabled or disabled ?
	if (inpd(ADDR(B0_IMSK)) == 0) {
		// IRQs are disabled: must be shared interrupt
		return IRQ_NONE;
	}
	// Note: At this point, IRQs are enabled.
	if ((inpd(ISR_A) & smc->hw.is_imask) == 0) {	// IRQ?
		// Adapter did not issue an IRQ: must be shared interrupt
		return IRQ_NONE;
	}
	CLI_FBI();		// Disable IRQs from our adapter.
	spin_lock(&bp->DriverLock);

	// Call interrupt handler in hardware module (HWM).
	fddi_isr(smc);

	// The HWM may have flagged a fatal condition; reset the adapter
	// here, while we still hold DriverLock with IRQs masked.
	if (smc->os.ResetRequested) {
		ResetAdapter(smc);
		smc->os.ResetRequested = FALSE;
	}
	spin_unlock(&bp->DriverLock);
	STI_FBI();		// Enable IRQs from our adapter.

	return IRQ_HANDLED;
}				// skfp_interrupt
636*4882a593Smuzhiyun
637*4882a593Smuzhiyun
638*4882a593Smuzhiyun /*
639*4882a593Smuzhiyun * ======================
640*4882a593Smuzhiyun * = skfp_ctl_get_stats =
641*4882a593Smuzhiyun * ======================
642*4882a593Smuzhiyun *
643*4882a593Smuzhiyun * Overview:
644*4882a593Smuzhiyun * Get statistics for FDDI adapter
645*4882a593Smuzhiyun *
646*4882a593Smuzhiyun * Returns:
647*4882a593Smuzhiyun * Pointer to FDDI statistics structure
648*4882a593Smuzhiyun *
649*4882a593Smuzhiyun * Arguments:
650*4882a593Smuzhiyun * dev - pointer to device information
651*4882a593Smuzhiyun *
652*4882a593Smuzhiyun * Functional Description:
653*4882a593Smuzhiyun * Gets current MIB objects from adapter, then
654*4882a593Smuzhiyun * returns FDDI statistics structure as defined
655*4882a593Smuzhiyun * in if_fddi.h.
656*4882a593Smuzhiyun *
657*4882a593Smuzhiyun * Note: Since the FDDI statistics structure is
658*4882a593Smuzhiyun * still new and the device structure doesn't
659*4882a593Smuzhiyun * have an FDDI-specific get statistics handler,
660*4882a593Smuzhiyun * we'll return the FDDI statistics structure as
661*4882a593Smuzhiyun * a pointer to an Ethernet statistics structure.
662*4882a593Smuzhiyun * That way, at least the first part of the statistics
663*4882a593Smuzhiyun * structure can be decoded properly.
664*4882a593Smuzhiyun * We'll have to pay attention to this routine as the
665*4882a593Smuzhiyun * device structure becomes more mature and LAN media
666*4882a593Smuzhiyun * independent.
667*4882a593Smuzhiyun *
668*4882a593Smuzhiyun */
skfp_ctl_get_stats(struct net_device * dev)669*4882a593Smuzhiyun static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
670*4882a593Smuzhiyun {
671*4882a593Smuzhiyun struct s_smc *bp = netdev_priv(dev);
672*4882a593Smuzhiyun
673*4882a593Smuzhiyun /* Fill the bp->stats structure with driver-maintained counters */
674*4882a593Smuzhiyun
675*4882a593Smuzhiyun bp->os.MacStat.port_bs_flag[0] = 0x1234;
676*4882a593Smuzhiyun bp->os.MacStat.port_bs_flag[1] = 0x5678;
677*4882a593Smuzhiyun // goos: need to fill out fddi statistic
678*4882a593Smuzhiyun #if 0
679*4882a593Smuzhiyun /* Get FDDI SMT MIB objects */
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun /* Fill the bp->stats structure with the SMT MIB object values */
682*4882a593Smuzhiyun
683*4882a593Smuzhiyun memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
684*4882a593Smuzhiyun bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
685*4882a593Smuzhiyun bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
686*4882a593Smuzhiyun bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
687*4882a593Smuzhiyun memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
688*4882a593Smuzhiyun bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
689*4882a593Smuzhiyun bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
690*4882a593Smuzhiyun bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
691*4882a593Smuzhiyun bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
692*4882a593Smuzhiyun bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
693*4882a593Smuzhiyun bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
694*4882a593Smuzhiyun bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
695*4882a593Smuzhiyun bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
696*4882a593Smuzhiyun bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
697*4882a593Smuzhiyun bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
698*4882a593Smuzhiyun bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
699*4882a593Smuzhiyun bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
700*4882a593Smuzhiyun bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
701*4882a593Smuzhiyun bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
702*4882a593Smuzhiyun bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
703*4882a593Smuzhiyun bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
704*4882a593Smuzhiyun bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
705*4882a593Smuzhiyun bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
706*4882a593Smuzhiyun bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
707*4882a593Smuzhiyun bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
708*4882a593Smuzhiyun bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
709*4882a593Smuzhiyun bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
710*4882a593Smuzhiyun bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
711*4882a593Smuzhiyun bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
712*4882a593Smuzhiyun memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
713*4882a593Smuzhiyun memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
714*4882a593Smuzhiyun memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
715*4882a593Smuzhiyun memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
716*4882a593Smuzhiyun bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
717*4882a593Smuzhiyun bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
718*4882a593Smuzhiyun bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
719*4882a593Smuzhiyun memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
720*4882a593Smuzhiyun bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
721*4882a593Smuzhiyun bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
722*4882a593Smuzhiyun bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
723*4882a593Smuzhiyun bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
724*4882a593Smuzhiyun bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
725*4882a593Smuzhiyun bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
726*4882a593Smuzhiyun bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
727*4882a593Smuzhiyun bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
728*4882a593Smuzhiyun bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
729*4882a593Smuzhiyun bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
730*4882a593Smuzhiyun bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
731*4882a593Smuzhiyun bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
732*4882a593Smuzhiyun bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
733*4882a593Smuzhiyun bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
734*4882a593Smuzhiyun bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
735*4882a593Smuzhiyun bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
736*4882a593Smuzhiyun memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
737*4882a593Smuzhiyun bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
738*4882a593Smuzhiyun bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
739*4882a593Smuzhiyun bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
740*4882a593Smuzhiyun bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
741*4882a593Smuzhiyun bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
742*4882a593Smuzhiyun bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
743*4882a593Smuzhiyun bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
744*4882a593Smuzhiyun bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
745*4882a593Smuzhiyun bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
746*4882a593Smuzhiyun bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
747*4882a593Smuzhiyun memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
748*4882a593Smuzhiyun memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
749*4882a593Smuzhiyun bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
750*4882a593Smuzhiyun bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
751*4882a593Smuzhiyun bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
752*4882a593Smuzhiyun bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
753*4882a593Smuzhiyun bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
754*4882a593Smuzhiyun bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
755*4882a593Smuzhiyun bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
756*4882a593Smuzhiyun bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
757*4882a593Smuzhiyun bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
758*4882a593Smuzhiyun bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
759*4882a593Smuzhiyun bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
760*4882a593Smuzhiyun bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
761*4882a593Smuzhiyun bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
762*4882a593Smuzhiyun bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
763*4882a593Smuzhiyun bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
764*4882a593Smuzhiyun bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
765*4882a593Smuzhiyun bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
766*4882a593Smuzhiyun bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
767*4882a593Smuzhiyun bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
768*4882a593Smuzhiyun bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
769*4882a593Smuzhiyun bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
770*4882a593Smuzhiyun bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
771*4882a593Smuzhiyun bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
772*4882a593Smuzhiyun bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
773*4882a593Smuzhiyun bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
774*4882a593Smuzhiyun bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
775*4882a593Smuzhiyun
776*4882a593Smuzhiyun
777*4882a593Smuzhiyun /* Fill the bp->stats structure with the FDDI counter values */
778*4882a593Smuzhiyun
779*4882a593Smuzhiyun bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
780*4882a593Smuzhiyun bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
781*4882a593Smuzhiyun bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
782*4882a593Smuzhiyun bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
783*4882a593Smuzhiyun bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
784*4882a593Smuzhiyun bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
785*4882a593Smuzhiyun bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
786*4882a593Smuzhiyun bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
787*4882a593Smuzhiyun bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
788*4882a593Smuzhiyun bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
789*4882a593Smuzhiyun bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
790*4882a593Smuzhiyun
791*4882a593Smuzhiyun #endif
792*4882a593Smuzhiyun return (struct net_device_stats *)&bp->os.MacStat;
793*4882a593Smuzhiyun } // ctl_get_stat
794*4882a593Smuzhiyun
795*4882a593Smuzhiyun
796*4882a593Smuzhiyun /*
797*4882a593Smuzhiyun * ==============================
798*4882a593Smuzhiyun * = skfp_ctl_set_multicast_list =
799*4882a593Smuzhiyun * ==============================
800*4882a593Smuzhiyun *
801*4882a593Smuzhiyun * Overview:
802*4882a593Smuzhiyun * Enable/Disable LLC frame promiscuous mode reception
803*4882a593Smuzhiyun * on the adapter and/or update multicast address table.
804*4882a593Smuzhiyun *
805*4882a593Smuzhiyun * Returns:
806*4882a593Smuzhiyun * None
807*4882a593Smuzhiyun *
808*4882a593Smuzhiyun * Arguments:
809*4882a593Smuzhiyun * dev - pointer to device information
810*4882a593Smuzhiyun *
811*4882a593Smuzhiyun * Functional Description:
812*4882a593Smuzhiyun * This function acquires the driver lock and only calls
813*4882a593Smuzhiyun * skfp_ctl_set_multicast_list_wo_lock then.
814*4882a593Smuzhiyun * This routine follows a fairly simple algorithm for setting the
815*4882a593Smuzhiyun * adapter filters and CAM:
816*4882a593Smuzhiyun *
817*4882a593Smuzhiyun * if IFF_PROMISC flag is set
818*4882a593Smuzhiyun * enable promiscuous mode
819*4882a593Smuzhiyun * else
820*4882a593Smuzhiyun * disable promiscuous mode
821*4882a593Smuzhiyun * if number of multicast addresses <= max. multicast number
822*4882a593Smuzhiyun * add mc addresses to adapter table
823*4882a593Smuzhiyun * else
824*4882a593Smuzhiyun * enable promiscuous mode
825*4882a593Smuzhiyun * update adapter filters
826*4882a593Smuzhiyun *
827*4882a593Smuzhiyun * Assumptions:
828*4882a593Smuzhiyun * Multicast addresses are presented in canonical (LSB) format.
829*4882a593Smuzhiyun *
830*4882a593Smuzhiyun * Side Effects:
831*4882a593Smuzhiyun * On-board adapter filters are updated.
832*4882a593Smuzhiyun */
skfp_ctl_set_multicast_list(struct net_device * dev)833*4882a593Smuzhiyun static void skfp_ctl_set_multicast_list(struct net_device *dev)
834*4882a593Smuzhiyun {
835*4882a593Smuzhiyun struct s_smc *smc = netdev_priv(dev);
836*4882a593Smuzhiyun skfddi_priv *bp = &smc->os;
837*4882a593Smuzhiyun unsigned long Flags;
838*4882a593Smuzhiyun
839*4882a593Smuzhiyun spin_lock_irqsave(&bp->DriverLock, Flags);
840*4882a593Smuzhiyun skfp_ctl_set_multicast_list_wo_lock(dev);
841*4882a593Smuzhiyun spin_unlock_irqrestore(&bp->DriverLock, Flags);
842*4882a593Smuzhiyun } // skfp_ctl_set_multicast_list
843*4882a593Smuzhiyun
844*4882a593Smuzhiyun
845*4882a593Smuzhiyun
skfp_ctl_set_multicast_list_wo_lock(struct net_device * dev)846*4882a593Smuzhiyun static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
847*4882a593Smuzhiyun {
848*4882a593Smuzhiyun struct s_smc *smc = netdev_priv(dev);
849*4882a593Smuzhiyun struct netdev_hw_addr *ha;
850*4882a593Smuzhiyun
851*4882a593Smuzhiyun /* Enable promiscuous mode, if necessary */
852*4882a593Smuzhiyun if (dev->flags & IFF_PROMISC) {
853*4882a593Smuzhiyun mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
854*4882a593Smuzhiyun pr_debug("PROMISCUOUS MODE ENABLED\n");
855*4882a593Smuzhiyun }
856*4882a593Smuzhiyun /* Else, update multicast address table */
857*4882a593Smuzhiyun else {
858*4882a593Smuzhiyun mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
859*4882a593Smuzhiyun pr_debug("PROMISCUOUS MODE DISABLED\n");
860*4882a593Smuzhiyun
861*4882a593Smuzhiyun // Reset all MC addresses
862*4882a593Smuzhiyun mac_clear_multicast(smc);
863*4882a593Smuzhiyun mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
864*4882a593Smuzhiyun
865*4882a593Smuzhiyun if (dev->flags & IFF_ALLMULTI) {
866*4882a593Smuzhiyun mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
867*4882a593Smuzhiyun pr_debug("ENABLE ALL MC ADDRESSES\n");
868*4882a593Smuzhiyun } else if (!netdev_mc_empty(dev)) {
869*4882a593Smuzhiyun if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
870*4882a593Smuzhiyun /* use exact filtering */
871*4882a593Smuzhiyun
872*4882a593Smuzhiyun // point to first multicast addr
873*4882a593Smuzhiyun netdev_for_each_mc_addr(ha, dev) {
874*4882a593Smuzhiyun mac_add_multicast(smc,
875*4882a593Smuzhiyun (struct fddi_addr *)ha->addr,
876*4882a593Smuzhiyun 1);
877*4882a593Smuzhiyun
878*4882a593Smuzhiyun pr_debug("ENABLE MC ADDRESS: %pMF\n",
879*4882a593Smuzhiyun ha->addr);
880*4882a593Smuzhiyun }
881*4882a593Smuzhiyun
882*4882a593Smuzhiyun } else { // more MC addresses than HW supports
883*4882a593Smuzhiyun
884*4882a593Smuzhiyun mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
885*4882a593Smuzhiyun pr_debug("ENABLE ALL MC ADDRESSES\n");
886*4882a593Smuzhiyun }
887*4882a593Smuzhiyun } else { // no MC addresses
888*4882a593Smuzhiyun
889*4882a593Smuzhiyun pr_debug("DISABLE ALL MC ADDRESSES\n");
890*4882a593Smuzhiyun }
891*4882a593Smuzhiyun
892*4882a593Smuzhiyun /* Update adapter filters */
893*4882a593Smuzhiyun mac_update_multicast(smc);
894*4882a593Smuzhiyun }
895*4882a593Smuzhiyun } // skfp_ctl_set_multicast_list_wo_lock
896*4882a593Smuzhiyun
897*4882a593Smuzhiyun
898*4882a593Smuzhiyun /*
899*4882a593Smuzhiyun * ===========================
900*4882a593Smuzhiyun * = skfp_ctl_set_mac_address =
901*4882a593Smuzhiyun * ===========================
902*4882a593Smuzhiyun *
903*4882a593Smuzhiyun * Overview:
904*4882a593Smuzhiyun * set new mac address on adapter and update dev_addr field in device table.
905*4882a593Smuzhiyun *
906*4882a593Smuzhiyun * Returns:
907*4882a593Smuzhiyun * None
908*4882a593Smuzhiyun *
909*4882a593Smuzhiyun * Arguments:
910*4882a593Smuzhiyun * dev - pointer to device information
911*4882a593Smuzhiyun * addr - pointer to sockaddr structure containing unicast address to set
912*4882a593Smuzhiyun *
913*4882a593Smuzhiyun * Assumptions:
914*4882a593Smuzhiyun * The address pointed to by addr->sa_data is a valid unicast
915*4882a593Smuzhiyun * address and is presented in canonical (LSB) format.
916*4882a593Smuzhiyun */
skfp_ctl_set_mac_address(struct net_device * dev,void * addr)917*4882a593Smuzhiyun static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
918*4882a593Smuzhiyun {
919*4882a593Smuzhiyun struct s_smc *smc = netdev_priv(dev);
920*4882a593Smuzhiyun struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
921*4882a593Smuzhiyun skfddi_priv *bp = &smc->os;
922*4882a593Smuzhiyun unsigned long Flags;
923*4882a593Smuzhiyun
924*4882a593Smuzhiyun
925*4882a593Smuzhiyun memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
926*4882a593Smuzhiyun spin_lock_irqsave(&bp->DriverLock, Flags);
927*4882a593Smuzhiyun ResetAdapter(smc);
928*4882a593Smuzhiyun spin_unlock_irqrestore(&bp->DriverLock, Flags);
929*4882a593Smuzhiyun
930*4882a593Smuzhiyun return 0; /* always return zero */
931*4882a593Smuzhiyun } // skfp_ctl_set_mac_address
932*4882a593Smuzhiyun
933*4882a593Smuzhiyun
934*4882a593Smuzhiyun /*
935*4882a593Smuzhiyun * ==============
936*4882a593Smuzhiyun * = skfp_ioctl =
937*4882a593Smuzhiyun * ==============
938*4882a593Smuzhiyun *
939*4882a593Smuzhiyun * Overview:
940*4882a593Smuzhiyun *
941*4882a593Smuzhiyun * Perform IOCTL call functions here. Some are privileged operations and the
942*4882a593Smuzhiyun * effective uid is checked in those cases.
943*4882a593Smuzhiyun *
944*4882a593Smuzhiyun * Returns:
945*4882a593Smuzhiyun * status value
946*4882a593Smuzhiyun * 0 - success
947*4882a593Smuzhiyun * other - failure
948*4882a593Smuzhiyun *
949*4882a593Smuzhiyun * Arguments:
950*4882a593Smuzhiyun * dev - pointer to device information
951*4882a593Smuzhiyun * rq - pointer to ioctl request structure
 * cmd - ioctl command code; unused by this handler — the driver-specific
 *       command is read from the s_skfp_ioctl structure in rq->ifr_data
953*4882a593Smuzhiyun *
954*4882a593Smuzhiyun */
955*4882a593Smuzhiyun
956*4882a593Smuzhiyun
skfp_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)957*4882a593Smuzhiyun static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
958*4882a593Smuzhiyun {
959*4882a593Smuzhiyun struct s_smc *smc = netdev_priv(dev);
960*4882a593Smuzhiyun skfddi_priv *lp = &smc->os;
961*4882a593Smuzhiyun struct s_skfp_ioctl ioc;
962*4882a593Smuzhiyun int status = 0;
963*4882a593Smuzhiyun
964*4882a593Smuzhiyun if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
965*4882a593Smuzhiyun return -EFAULT;
966*4882a593Smuzhiyun
967*4882a593Smuzhiyun switch (ioc.cmd) {
968*4882a593Smuzhiyun case SKFP_GET_STATS: /* Get the driver statistics */
969*4882a593Smuzhiyun ioc.len = sizeof(lp->MacStat);
970*4882a593Smuzhiyun status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
971*4882a593Smuzhiyun ? -EFAULT : 0;
972*4882a593Smuzhiyun break;
973*4882a593Smuzhiyun case SKFP_CLR_STATS: /* Zero out the driver statistics */
974*4882a593Smuzhiyun if (!capable(CAP_NET_ADMIN)) {
975*4882a593Smuzhiyun status = -EPERM;
976*4882a593Smuzhiyun } else {
977*4882a593Smuzhiyun memset(&lp->MacStat, 0, sizeof(lp->MacStat));
978*4882a593Smuzhiyun }
979*4882a593Smuzhiyun break;
980*4882a593Smuzhiyun default:
981*4882a593Smuzhiyun printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
982*4882a593Smuzhiyun status = -EOPNOTSUPP;
983*4882a593Smuzhiyun
984*4882a593Smuzhiyun } // switch
985*4882a593Smuzhiyun
986*4882a593Smuzhiyun return status;
987*4882a593Smuzhiyun } // skfp_ioctl
988*4882a593Smuzhiyun
989*4882a593Smuzhiyun
990*4882a593Smuzhiyun /*
991*4882a593Smuzhiyun * =====================
992*4882a593Smuzhiyun * = skfp_send_pkt =
993*4882a593Smuzhiyun * =====================
994*4882a593Smuzhiyun *
995*4882a593Smuzhiyun * Overview:
996*4882a593Smuzhiyun * Queues a packet for transmission and try to transmit it.
997*4882a593Smuzhiyun *
998*4882a593Smuzhiyun * Returns:
999*4882a593Smuzhiyun * Condition code
1000*4882a593Smuzhiyun *
1001*4882a593Smuzhiyun * Arguments:
1002*4882a593Smuzhiyun * skb - pointer to sk_buff to queue for transmission
1003*4882a593Smuzhiyun * dev - pointer to device information
1004*4882a593Smuzhiyun *
1005*4882a593Smuzhiyun * Functional Description:
1006*4882a593Smuzhiyun * Here we assume that an incoming skb transmit request
1007*4882a593Smuzhiyun * is contained in a single physically contiguous buffer
1008*4882a593Smuzhiyun * in which the virtual address of the start of packet
1009*4882a593Smuzhiyun * (skb->data) can be converted to a physical address
1010*4882a593Smuzhiyun * by using pci_map_single().
1011*4882a593Smuzhiyun *
1012*4882a593Smuzhiyun * We have an internal queue for packets we can not send
1013*4882a593Smuzhiyun * immediately. Packets in this queue can be given to the
1014*4882a593Smuzhiyun * adapter if transmit buffers are freed.
1015*4882a593Smuzhiyun *
1016*4882a593Smuzhiyun * We can't free the skb until after it's been DMA'd
1017*4882a593Smuzhiyun * out by the adapter, so we'll keep it in the driver and
1018*4882a593Smuzhiyun * return it in mac_drv_tx_complete.
1019*4882a593Smuzhiyun *
1020*4882a593Smuzhiyun * Return Codes:
1021*4882a593Smuzhiyun * 0 - driver has queued and/or sent packet
1022*4882a593Smuzhiyun * 1 - caller should requeue the sk_buff for later transmission
1023*4882a593Smuzhiyun *
1024*4882a593Smuzhiyun * Assumptions:
1025*4882a593Smuzhiyun * The entire packet is stored in one physically
1026*4882a593Smuzhiyun * contiguous buffer which is not cached and whose
1027*4882a593Smuzhiyun * 32-bit physical address can be determined.
1028*4882a593Smuzhiyun *
1029*4882a593Smuzhiyun * It's vital that this routine is NOT reentered for the
1030*4882a593Smuzhiyun * same board and that the OS is not in another section of
1031*4882a593Smuzhiyun * code (eg. skfp_interrupt) for the same board on a
1032*4882a593Smuzhiyun * different thread.
1033*4882a593Smuzhiyun *
1034*4882a593Smuzhiyun * Side Effects:
1035*4882a593Smuzhiyun * None
1036*4882a593Smuzhiyun */
static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;

	pr_debug("skfp_send_pkt\n");

	/*
	 * Verify that incoming transmit request is OK
	 *
	 * Note: The packet size check is consistent with other
	 *	 Linux device drivers, although the correct packet
	 *	 size should be verified before calling the
	 *	 transmit routine.
	 */

	/* Undersized/oversized LLC frames are counted as tx errors and
	 * silently dropped; returning NETDEV_TX_OK stops the stack from
	 * requeueing them. */
	if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
		bp->MacStat.gen.tx_errors++;	/* bump error counter */
		// dequeue packets from xmt queue and send them
		netif_start_queue(dev);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;	/* return "success" */
	}
	/* QueueSkb counts remaining slots in the driver's internal queue
	 * (decremented below, incremented in send_queued_packets). */
	if (bp->QueueSkb == 0) {	// return with tbusy set: queue full

		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	bp->QueueSkb--;
	skb_queue_tail(&bp->SendSkbQueue, skb);
	/* Try to push queued frames to the adapter right away. */
	send_queued_packets(netdev_priv(dev));
	/* Queue just became full: stop the stack until slots free up. */
	if (bp->QueueSkb == 0) {
		netif_stop_queue(dev);
	}
	return NETDEV_TX_OK;

} // skfp_send_pkt
1075*4882a593Smuzhiyun
1076*4882a593Smuzhiyun
1077*4882a593Smuzhiyun /*
1078*4882a593Smuzhiyun * =======================
1079*4882a593Smuzhiyun * = send_queued_packets =
1080*4882a593Smuzhiyun * =======================
1081*4882a593Smuzhiyun *
1082*4882a593Smuzhiyun * Overview:
1083*4882a593Smuzhiyun * Send packets from the driver queue as long as there are some and
1084*4882a593Smuzhiyun * transmit resources are available.
1085*4882a593Smuzhiyun *
1086*4882a593Smuzhiyun * Returns:
1087*4882a593Smuzhiyun * None
1088*4882a593Smuzhiyun *
1089*4882a593Smuzhiyun * Arguments:
1090*4882a593Smuzhiyun * smc - pointer to smc (adapter) structure
1091*4882a593Smuzhiyun *
1092*4882a593Smuzhiyun * Functional Description:
1093*4882a593Smuzhiyun * Take a packet from queue if there is any. If not, then we are done.
1094*4882a593Smuzhiyun * Check if there are resources to send the packet. If not, requeue it
1095*4882a593Smuzhiyun * and exit.
1096*4882a593Smuzhiyun * Set packet descriptor flags and give packet to adapter.
1097*4882a593Smuzhiyun * Check if any send resources can be freed (we do not use the
1098*4882a593Smuzhiyun * transmit complete interrupt).
1099*4882a593Smuzhiyun */
static void send_queued_packets(struct s_smc *smc)
{
	skfddi_priv *bp = &smc->os;
	struct sk_buff *skb;
	unsigned char fc;	// FDDI frame-control byte of current frame.
	int queue;		// Transmit queue (sync or async).
	struct s_smt_fp_txd *txd;	// Current TxD.
	dma_addr_t dma_address;
	unsigned long Flags;

	int frame_status;	// HWM tx frame status.

	pr_debug("send queued packets\n");
	/* Drain the driver queue until it is empty or the adapter runs out
	 * of transmit resources. */
	for (;;) {
		// send first buffer from queue
		skb = skb_dequeue(&bp->SendSkbQueue);

		if (!skb) {
			pr_debug("queue empty\n");
			return;
		}		// queue empty !

		/* Lock is taken/released per frame so the interrupt handler
		 * can run between iterations. */
		spin_lock_irqsave(&bp->DriverLock, Flags);
		/* First data byte is the FDDI frame control field; its sync
		 * bit selects the transmit queue. */
		fc = skb->data[0];
		queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
#ifdef ESS
		// Check if the frame may/must be sent as a synchronous frame.

		if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
			// It's an LLC frame.
			if (!smc->ess.sync_bw_available)
				fc &= ~FC_SYNC_BIT; // No bandwidth available.

			else {	// Bandwidth is available.

				if (smc->mib.fddiESSSynchTxMode) {
					// Send as sync. frame.
					fc |= FC_SYNC_BIT;
				}
			}
		}
#endif				// ESS
		frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);

		/* Neither local loopback nor LAN transmit possible:
		 * requeue the frame and retry later. */
		if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
			// Unable to send the frame.

			if ((frame_status & RING_DOWN) != 0) {
				// Ring is down.
				pr_debug("Tx attempt while ring down.\n");
			} else if ((frame_status & OUT_OF_TXD) != 0) {
				pr_debug("%s: out of TXDs.\n", bp->dev->name);
			} else {
				pr_debug("%s: out of transmit resources",
					 bp->dev->name);
			}

			// Note: We will retry the operation as soon as
			// transmit resources become available.
			skb_queue_head(&bp->SendSkbQueue, skb);
			spin_unlock_irqrestore(&bp->DriverLock, Flags);
			return;	// Packet has been queued.

		}		// if (unable to send frame)

		bp->QueueSkb++;	// one packet less in local queue

		// source address in packet ?
		CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);

		txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);

		dma_address = pci_map_single(&bp->pdev, skb->data,
					     skb->len, PCI_DMA_TODEVICE);
		/* For a real LAN transmit the skb and its DMA mapping are
		 * kept; mac_drv_tx_complete releases them later. */
		if (frame_status & LAN_TX) {
			txd->txd_os.skb = skb;	// save skb
			txd->txd_os.dma_addr = dma_address;	// save dma mapping
		}
		hwm_tx_frag(smc, skb->data, dma_address, skb->len,
			    frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);

		/* Local-only frames are consumed immediately: unmap and
		 * free here instead of in the tx-complete path. */
		if (!(frame_status & LAN_TX)) {	// local only frame
			pci_unmap_single(&bp->pdev, dma_address,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
		}
		spin_unlock_irqrestore(&bp->DriverLock, Flags);
	}			// for

	return;			// never reached

} // send_queued_packets
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun
1194*4882a593Smuzhiyun /************************
1195*4882a593Smuzhiyun *
1196*4882a593Smuzhiyun * CheckSourceAddress
1197*4882a593Smuzhiyun *
1198*4882a593Smuzhiyun * Verify if the source address is set. Insert it if necessary.
1199*4882a593Smuzhiyun *
1200*4882a593Smuzhiyun ************************/
CheckSourceAddress(unsigned char * frame,unsigned char * hw_addr)1201*4882a593Smuzhiyun static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
1202*4882a593Smuzhiyun {
1203*4882a593Smuzhiyun unsigned char SRBit;
1204*4882a593Smuzhiyun
1205*4882a593Smuzhiyun if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0) // source routing bit
1206*4882a593Smuzhiyun
1207*4882a593Smuzhiyun return;
1208*4882a593Smuzhiyun if ((unsigned short) frame[1 + 10] != 0)
1209*4882a593Smuzhiyun return;
1210*4882a593Smuzhiyun SRBit = frame[1 + 6] & 0x01;
1211*4882a593Smuzhiyun memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
1212*4882a593Smuzhiyun frame[8] |= SRBit;
1213*4882a593Smuzhiyun } // CheckSourceAddress
1214*4882a593Smuzhiyun
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun /************************
1217*4882a593Smuzhiyun *
1218*4882a593Smuzhiyun * ResetAdapter
1219*4882a593Smuzhiyun *
1220*4882a593Smuzhiyun * Reset the adapter and bring it back to operational mode.
1221*4882a593Smuzhiyun * Args
1222*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1223*4882a593Smuzhiyun * Out
1224*4882a593Smuzhiyun * Nothing.
1225*4882a593Smuzhiyun *
1226*4882a593Smuzhiyun ************************/
/* Reset the adapter and bring it back to operational mode.
 * The sequence matters: stop the card, discard all pending descriptors,
 * re-initialize SMT and hardware, rejoin the ring, then restore the
 * receive filter settings that the reset wiped out. */
static void ResetAdapter(struct s_smc *smc)
{

	pr_debug("[fddi: ResetAdapter]\n");

	// Stop the adapter.

	card_stop(smc);		// Stop all activity.

	// Clear the transmit and receive descriptor queues.
	mac_drv_clear_tx_queue(smc);
	mac_drv_clear_rx_queue(smc);

	// Restart the adapter.

	smt_reset_defaults(smc, 1);	// Initialize the SMT module.

	init_smt(smc, (smc->os.dev)->dev_addr);	// Initialize the hardware.

	smt_online(smc, 1);	// Insert into the ring again.
	STI_FBI();

	// Restore original receive mode (multicasts, promiscuous, etc.).
	skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
} // ResetAdapter
1252*4882a593Smuzhiyun
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun //--------------- functions called by hardware module ----------------
1255*4882a593Smuzhiyun
1256*4882a593Smuzhiyun /************************
1257*4882a593Smuzhiyun *
1258*4882a593Smuzhiyun * llc_restart_tx
1259*4882a593Smuzhiyun *
1260*4882a593Smuzhiyun * The hardware driver calls this routine when the transmit complete
1261*4882a593Smuzhiyun * interrupt bits (end of frame) for the synchronous or asynchronous
1262*4882a593Smuzhiyun * queue is set.
1263*4882a593Smuzhiyun *
1264*4882a593Smuzhiyun * NOTE The hardware driver calls this function also if no packets are queued.
1265*4882a593Smuzhiyun * The routine must be able to handle this case.
1266*4882a593Smuzhiyun * Args
1267*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1268*4882a593Smuzhiyun * Out
1269*4882a593Smuzhiyun * Nothing.
1270*4882a593Smuzhiyun *
1271*4882a593Smuzhiyun ************************/
/* Called by the hardware module on transmit-complete; may also be called
 * with nothing queued.  Flushes any queued packets and re-enables the
 * network stack's transmit queue. */
void llc_restart_tx(struct s_smc *smc)
{
	skfddi_priv *bp = &smc->os;

	pr_debug("[llc_restart_tx]\n");

	// Try to send queued packets
	/* The caller holds DriverLock (we re-take it below), while
	 * send_queued_packets() acquires the lock itself -- so it must
	 * be dropped around the call to avoid self-deadlock. */
	spin_unlock(&bp->DriverLock);
	send_queued_packets(smc);
	spin_lock(&bp->DriverLock);
	netif_start_queue(bp->dev);// system may send again if it was blocked

} // llc_restart_tx
1285*4882a593Smuzhiyun
1286*4882a593Smuzhiyun
1287*4882a593Smuzhiyun /************************
1288*4882a593Smuzhiyun *
1289*4882a593Smuzhiyun * mac_drv_get_space
1290*4882a593Smuzhiyun *
1291*4882a593Smuzhiyun * The hardware module calls this function to allocate the memory
1292*4882a593Smuzhiyun * for the SMT MBufs if the define MB_OUTSIDE_SMC is specified.
1293*4882a593Smuzhiyun * Args
1294*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1295*4882a593Smuzhiyun *
1296*4882a593Smuzhiyun * size - Size of memory in bytes to allocate.
1297*4882a593Smuzhiyun * Out
1298*4882a593Smuzhiyun * != 0 A pointer to the virtual address of the allocated memory.
1299*4882a593Smuzhiyun * == 0 Allocation error.
1300*4882a593Smuzhiyun *
1301*4882a593Smuzhiyun ************************/
mac_drv_get_space(struct s_smc * smc,unsigned int size)1302*4882a593Smuzhiyun void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
1303*4882a593Smuzhiyun {
1304*4882a593Smuzhiyun void *virt;
1305*4882a593Smuzhiyun
1306*4882a593Smuzhiyun pr_debug("mac_drv_get_space (%d bytes), ", size);
1307*4882a593Smuzhiyun virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
1308*4882a593Smuzhiyun
1309*4882a593Smuzhiyun if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
1310*4882a593Smuzhiyun printk("Unexpected SMT memory size requested: %d\n", size);
1311*4882a593Smuzhiyun return NULL;
1312*4882a593Smuzhiyun }
1313*4882a593Smuzhiyun smc->os.SharedMemHeap += size; // Move heap pointer.
1314*4882a593Smuzhiyun
1315*4882a593Smuzhiyun pr_debug("mac_drv_get_space end\n");
1316*4882a593Smuzhiyun pr_debug("virt addr: %lx\n", (ulong) virt);
1317*4882a593Smuzhiyun pr_debug("bus addr: %lx\n", (ulong)
1318*4882a593Smuzhiyun (smc->os.SharedMemDMA +
1319*4882a593Smuzhiyun ((char *) virt - (char *)smc->os.SharedMemAddr)));
1320*4882a593Smuzhiyun return virt;
1321*4882a593Smuzhiyun } // mac_drv_get_space
1322*4882a593Smuzhiyun
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun /************************
1325*4882a593Smuzhiyun *
1326*4882a593Smuzhiyun * mac_drv_get_desc_mem
1327*4882a593Smuzhiyun *
1328*4882a593Smuzhiyun * This function is called by the hardware dependent module.
1329*4882a593Smuzhiyun * It allocates the memory for the RxD and TxD descriptors.
1330*4882a593Smuzhiyun *
1331*4882a593Smuzhiyun * This memory must be non-cached, non-movable and non-swappable.
1332*4882a593Smuzhiyun * This memory should start at a physical page boundary.
1333*4882a593Smuzhiyun * Args
1334*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1335*4882a593Smuzhiyun *
1336*4882a593Smuzhiyun * size - Size of memory in bytes to allocate.
1337*4882a593Smuzhiyun * Out
1338*4882a593Smuzhiyun * != 0 A pointer to the virtual address of the allocated memory.
1339*4882a593Smuzhiyun * == 0 Allocation error.
1340*4882a593Smuzhiyun *
1341*4882a593Smuzhiyun ************************/
mac_drv_get_desc_mem(struct s_smc * smc,unsigned int size)1342*4882a593Smuzhiyun void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1343*4882a593Smuzhiyun {
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun char *virt;
1346*4882a593Smuzhiyun
1347*4882a593Smuzhiyun pr_debug("mac_drv_get_desc_mem\n");
1348*4882a593Smuzhiyun
1349*4882a593Smuzhiyun // Descriptor memory must be aligned on 16-byte boundary.
1350*4882a593Smuzhiyun
1351*4882a593Smuzhiyun virt = mac_drv_get_space(smc, size);
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun size = (u_int) (16 - (((unsigned long) virt) & 15UL));
1354*4882a593Smuzhiyun size = size % 16;
1355*4882a593Smuzhiyun
1356*4882a593Smuzhiyun pr_debug("Allocate %u bytes alignment gap ", size);
1357*4882a593Smuzhiyun pr_debug("for descriptor memory.\n");
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun if (!mac_drv_get_space(smc, size)) {
1360*4882a593Smuzhiyun printk("fddi: Unable to align descriptor memory.\n");
1361*4882a593Smuzhiyun return NULL;
1362*4882a593Smuzhiyun }
1363*4882a593Smuzhiyun return virt + size;
1364*4882a593Smuzhiyun } // mac_drv_get_desc_mem
1365*4882a593Smuzhiyun
1366*4882a593Smuzhiyun
1367*4882a593Smuzhiyun /************************
1368*4882a593Smuzhiyun *
1369*4882a593Smuzhiyun * mac_drv_virt2phys
1370*4882a593Smuzhiyun *
1371*4882a593Smuzhiyun * Get the physical address of a given virtual address.
1372*4882a593Smuzhiyun * Args
1373*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1374*4882a593Smuzhiyun *
1375*4882a593Smuzhiyun * virt - A (virtual) pointer into our 'shared' memory area.
1376*4882a593Smuzhiyun * Out
1377*4882a593Smuzhiyun * Physical address of the given virtual address.
1378*4882a593Smuzhiyun *
1379*4882a593Smuzhiyun ************************/
mac_drv_virt2phys(struct s_smc * smc,void * virt)1380*4882a593Smuzhiyun unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1381*4882a593Smuzhiyun {
1382*4882a593Smuzhiyun return smc->os.SharedMemDMA +
1383*4882a593Smuzhiyun ((char *) virt - (char *)smc->os.SharedMemAddr);
1384*4882a593Smuzhiyun } // mac_drv_virt2phys
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun
1387*4882a593Smuzhiyun /************************
1388*4882a593Smuzhiyun *
1389*4882a593Smuzhiyun * dma_master
1390*4882a593Smuzhiyun *
1391*4882a593Smuzhiyun * The HWM calls this function, when the driver leads through a DMA
1392*4882a593Smuzhiyun * transfer. If the OS-specific module must prepare the system hardware
1393*4882a593Smuzhiyun * for the DMA transfer, it should do it in this function.
1394*4882a593Smuzhiyun *
1395*4882a593Smuzhiyun * The hardware module calls this dma_master if it wants to send an SMT
1396*4882a593Smuzhiyun * frame. This means that the virt address passed in here is part of
1397*4882a593Smuzhiyun * the 'shared' memory area.
1398*4882a593Smuzhiyun * Args
1399*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1400*4882a593Smuzhiyun *
1401*4882a593Smuzhiyun * virt - The virtual address of the data.
1402*4882a593Smuzhiyun *
1403*4882a593Smuzhiyun * len - The length in bytes of the data.
1404*4882a593Smuzhiyun *
1405*4882a593Smuzhiyun * flag - Indicates the transmit direction and the buffer type:
1406*4882a593Smuzhiyun * DMA_RD (0x01) system RAM ==> adapter buffer memory
1407*4882a593Smuzhiyun * DMA_WR (0x02) adapter buffer memory ==> system RAM
1408*4882a593Smuzhiyun * SMT_BUF (0x80) SMT buffer
1409*4882a593Smuzhiyun *
1410*4882a593Smuzhiyun * >> NOTE: SMT_BUF and DMA_RD are always set for PCI. <<
1411*4882a593Smuzhiyun * Out
1412*4882a593Smuzhiyun * Returns the pyhsical address for the DMA transfer.
1413*4882a593Smuzhiyun *
1414*4882a593Smuzhiyun ************************/
dma_master(struct s_smc * smc,void * virt,int len,int flag)1415*4882a593Smuzhiyun u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
1416*4882a593Smuzhiyun {
1417*4882a593Smuzhiyun return smc->os.SharedMemDMA +
1418*4882a593Smuzhiyun ((char *) virt - (char *)smc->os.SharedMemAddr);
1419*4882a593Smuzhiyun } // dma_master
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun /************************
1423*4882a593Smuzhiyun *
1424*4882a593Smuzhiyun * dma_complete
1425*4882a593Smuzhiyun *
1426*4882a593Smuzhiyun * The hardware module calls this routine when it has completed a DMA
1427*4882a593Smuzhiyun * transfer. If the operating system dependent module has set up the DMA
1428*4882a593Smuzhiyun * channel via dma_master() (e.g. Windows NT or AIX) it should clean up
1429*4882a593Smuzhiyun * the DMA channel.
1430*4882a593Smuzhiyun * Args
1431*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1432*4882a593Smuzhiyun *
1433*4882a593Smuzhiyun * descr - A pointer to a TxD or RxD, respectively.
1434*4882a593Smuzhiyun *
1435*4882a593Smuzhiyun * flag - Indicates the DMA transfer direction / SMT buffer:
1436*4882a593Smuzhiyun * DMA_RD (0x01) system RAM ==> adapter buffer memory
1437*4882a593Smuzhiyun * DMA_WR (0x02) adapter buffer memory ==> system RAM
1438*4882a593Smuzhiyun * SMT_BUF (0x80) SMT buffer (managed by HWM)
1439*4882a593Smuzhiyun * Out
1440*4882a593Smuzhiyun * Nothing.
1441*4882a593Smuzhiyun *
1442*4882a593Smuzhiyun ************************/
dma_complete(struct s_smc * smc,volatile union s_fp_descr * descr,int flag)1443*4882a593Smuzhiyun void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
1444*4882a593Smuzhiyun {
1445*4882a593Smuzhiyun /* For TX buffers, there are two cases. If it is an SMT transmit
1446*4882a593Smuzhiyun * buffer, there is nothing to do since we use consistent memory
1447*4882a593Smuzhiyun * for the 'shared' memory area. The other case is for normal
1448*4882a593Smuzhiyun * transmit packets given to us by the networking stack, and in
1449*4882a593Smuzhiyun * that case we cleanup the PCI DMA mapping in mac_drv_tx_complete
1450*4882a593Smuzhiyun * below.
1451*4882a593Smuzhiyun *
1452*4882a593Smuzhiyun * For RX buffers, we have to unmap dynamic PCI DMA mappings here
1453*4882a593Smuzhiyun * because the hardware module is about to potentially look at
1454*4882a593Smuzhiyun * the contents of the buffer. If we did not call the PCI DMA
1455*4882a593Smuzhiyun * unmap first, the hardware module could read inconsistent data.
1456*4882a593Smuzhiyun */
1457*4882a593Smuzhiyun if (flag & DMA_WR) {
1458*4882a593Smuzhiyun skfddi_priv *bp = &smc->os;
1459*4882a593Smuzhiyun volatile struct s_smt_fp_rxd *r = &descr->r;
1460*4882a593Smuzhiyun
1461*4882a593Smuzhiyun /* If SKB is NULL, we used the local buffer. */
1462*4882a593Smuzhiyun if (r->rxd_os.skb && r->rxd_os.dma_addr) {
1463*4882a593Smuzhiyun int MaxFrameSize = bp->MaxFrameSize;
1464*4882a593Smuzhiyun
1465*4882a593Smuzhiyun pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
1466*4882a593Smuzhiyun MaxFrameSize, PCI_DMA_FROMDEVICE);
1467*4882a593Smuzhiyun r->rxd_os.dma_addr = 0;
1468*4882a593Smuzhiyun }
1469*4882a593Smuzhiyun }
1470*4882a593Smuzhiyun } // dma_complete
1471*4882a593Smuzhiyun
1472*4882a593Smuzhiyun
1473*4882a593Smuzhiyun /************************
1474*4882a593Smuzhiyun *
1475*4882a593Smuzhiyun * mac_drv_tx_complete
1476*4882a593Smuzhiyun *
1477*4882a593Smuzhiyun * Transmit of a packet is complete. Release the tx staging buffer.
1478*4882a593Smuzhiyun *
1479*4882a593Smuzhiyun * Args
1480*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1481*4882a593Smuzhiyun *
1482*4882a593Smuzhiyun * txd - A pointer to the last TxD which is used by the frame.
1483*4882a593Smuzhiyun * Out
1484*4882a593Smuzhiyun * Returns nothing.
1485*4882a593Smuzhiyun *
1486*4882a593Smuzhiyun ************************/
mac_drv_tx_complete(struct s_smc * smc,volatile struct s_smt_fp_txd * txd)1487*4882a593Smuzhiyun void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
1488*4882a593Smuzhiyun {
1489*4882a593Smuzhiyun struct sk_buff *skb;
1490*4882a593Smuzhiyun
1491*4882a593Smuzhiyun pr_debug("entering mac_drv_tx_complete\n");
1492*4882a593Smuzhiyun // Check if this TxD points to a skb
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun if (!(skb = txd->txd_os.skb)) {
1495*4882a593Smuzhiyun pr_debug("TXD with no skb assigned.\n");
1496*4882a593Smuzhiyun return;
1497*4882a593Smuzhiyun }
1498*4882a593Smuzhiyun txd->txd_os.skb = NULL;
1499*4882a593Smuzhiyun
1500*4882a593Smuzhiyun // release the DMA mapping
1501*4882a593Smuzhiyun pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
1502*4882a593Smuzhiyun skb->len, PCI_DMA_TODEVICE);
1503*4882a593Smuzhiyun txd->txd_os.dma_addr = 0;
1504*4882a593Smuzhiyun
1505*4882a593Smuzhiyun smc->os.MacStat.gen.tx_packets++; // Count transmitted packets.
1506*4882a593Smuzhiyun smc->os.MacStat.gen.tx_bytes+=skb->len; // Count bytes
1507*4882a593Smuzhiyun
1508*4882a593Smuzhiyun // free the skb
1509*4882a593Smuzhiyun dev_kfree_skb_irq(skb);
1510*4882a593Smuzhiyun
1511*4882a593Smuzhiyun pr_debug("leaving mac_drv_tx_complete\n");
1512*4882a593Smuzhiyun } // mac_drv_tx_complete
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun
1515*4882a593Smuzhiyun /************************
1516*4882a593Smuzhiyun *
1517*4882a593Smuzhiyun * dump packets to logfile
1518*4882a593Smuzhiyun *
1519*4882a593Smuzhiyun ************************/
#ifdef DUMPPACKETS
/* Debug aid: log the first 64 bytes of a frame as a hex dump. */
void dump_data(unsigned char *Data, int length)
{
	printk(KERN_INFO "---Packet start---\n");
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, Data, min_t(size_t, length, 64), false);
	printk(KERN_INFO "------------------\n");
} // dump_data
#else
/* Compiles to nothing unless DUMPPACKETS is defined. */
#define dump_data(data,len)
#endif // DUMPPACKETS
1530*4882a593Smuzhiyun
1531*4882a593Smuzhiyun /************************
1532*4882a593Smuzhiyun *
1533*4882a593Smuzhiyun * mac_drv_rx_complete
1534*4882a593Smuzhiyun *
1535*4882a593Smuzhiyun * The hardware module calls this function if an LLC frame is received
1536*4882a593Smuzhiyun * in a receive buffer. Also the SMT, NSA, and directed beacon frames
1537*4882a593Smuzhiyun * from the network will be passed to the LLC layer by this function
1538*4882a593Smuzhiyun * if passing is enabled.
1539*4882a593Smuzhiyun *
1540*4882a593Smuzhiyun * mac_drv_rx_complete forwards the frame to the LLC layer if it should
1541*4882a593Smuzhiyun * be received. It also fills the RxD ring with new receive buffers if
1542*4882a593Smuzhiyun * some can be queued.
1543*4882a593Smuzhiyun * Args
1544*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1545*4882a593Smuzhiyun *
1546*4882a593Smuzhiyun * rxd - A pointer to the first RxD which is used by the receive frame.
1547*4882a593Smuzhiyun *
1548*4882a593Smuzhiyun * frag_count - Count of RxDs used by the received frame.
1549*4882a593Smuzhiyun *
1550*4882a593Smuzhiyun * len - Frame length.
1551*4882a593Smuzhiyun * Out
1552*4882a593Smuzhiyun * Nothing.
1553*4882a593Smuzhiyun *
1554*4882a593Smuzhiyun ************************/
void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count, int len)
{
	skfddi_priv *bp = &smc->os;
	struct sk_buff *skb;
	unsigned char *virt, *cp;
	unsigned short ri;
	u_int RifLength;

	pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
	/* This driver only ever queues single-fragment receive buffers
	 * (FIRST_FRAG | LAST_FRAG), so anything else is an internal error. */
	if (frag_count != 1) {	// This is not allowed to happen.

		printk("fddi: Multi-fragment receive!\n");
		goto RequeueRxd;	// Re-use the given RXD(s).

	}
	skb = rxd->rxd_os.skb;
	if (!skb) {
		/* No skb: presumably the shared local fallback buffer was
		 * queued (see mac_drv_fill_rxd); its data is invalid. */
		pr_debug("No skb in rxd\n");
		smc->os.MacStat.gen.rx_errors++;
		goto RequeueRxd;
	}
	virt = skb->data;

	// The DMA mapping was released in dma_complete above.

	dump_data(skb->data, len);

	/*
	 * FDDI Frame format:
	 * +-------+-------+-------+------------+--------+------------+
	 * | FC[1] | DA[6] | SA[6] | RIF[0..18] | LLC[3] | Data[0..n] |
	 * +-------+-------+-------+------------+--------+------------+
	 *
	 * FC = Frame Control
	 * DA = Destination Address
	 * SA = Source Address
	 * RIF = Routing Information Field
	 * LLC = Logical Link Control
	 */

	// Remove Routing Information Field (RIF), if present.

	if ((virt[1 + 6] & FDDI_RII) == 0)
		RifLength = 0;
	else {
		int n;
// goos: RIF removal has still to be tested
		pr_debug("RIF found\n");
		// Get RIF length from Routing Control (RC) field.
		cp = virt + FDDI_MAC_HDR_LEN;	// Point behind MAC header.

		ri = ntohs(*((__be16 *) cp));
		RifLength = ri & FDDI_RCF_LEN_MASK;
		if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
			printk("fddi: Invalid RIF.\n");
			goto RequeueRxd;	// Discard the frame.

		}
		virt[1 + 6] &= ~FDDI_RII;	// Clear RII bit.
		// regions overlap

		/* Slide the MAC header up against the LLC header.  The
		 * copy runs backwards (from the last byte) because source
		 * and destination regions overlap. */
		virt = cp + RifLength;
		for (n = FDDI_MAC_HDR_LEN; n; n--)
			*--virt = *--cp;
		// adjust sbd->data pointer
		skb_pull(skb, RifLength);
		len -= RifLength;
		RifLength = 0;
	}

	// Count statistics.
	smc->os.MacStat.gen.rx_packets++;	// Count indicated receive
						// packets.
	smc->os.MacStat.gen.rx_bytes+=len;	// Count bytes.

	// virt points to header again
	if (virt[1] & 0x01) {	// Check group (multicast) bit.

		smc->os.MacStat.gen.multicast++;
	}

	// deliver frame to system
	rxd->rxd_os.skb = NULL;	// ownership of skb passes to the stack
	skb_trim(skb, len);
	skb->protocol = fddi_type_trans(skb, bp->dev);

	netif_rx(skb);

	// Refill the RxD ring if it has dropped below the low watermark.
	HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
	return;

      RequeueRxd:
	pr_debug("Rx: re-queue RXD.\n");
	mac_drv_requeue_rxd(smc, rxd, frag_count);
	smc->os.MacStat.gen.rx_errors++;	// Count receive packets
						// not indicated.

} // mac_drv_rx_complete
1654*4882a593Smuzhiyun
1655*4882a593Smuzhiyun
1656*4882a593Smuzhiyun /************************
1657*4882a593Smuzhiyun *
1658*4882a593Smuzhiyun * mac_drv_requeue_rxd
1659*4882a593Smuzhiyun *
1660*4882a593Smuzhiyun * The hardware module calls this function to request the OS-specific
1661*4882a593Smuzhiyun * module to queue the receive buffer(s) represented by the pointer
1662*4882a593Smuzhiyun * to the RxD and the frag_count into the receive queue again. This
1663*4882a593Smuzhiyun * buffer was filled with an invalid frame or an SMT frame.
1664*4882a593Smuzhiyun * Args
1665*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1666*4882a593Smuzhiyun *
1667*4882a593Smuzhiyun * rxd - A pointer to the first RxD which is used by the receive frame.
1668*4882a593Smuzhiyun *
1669*4882a593Smuzhiyun * frag_count - Count of RxDs used by the received frame.
1670*4882a593Smuzhiyun * Out
1671*4882a593Smuzhiyun * Nothing.
1672*4882a593Smuzhiyun *
1673*4882a593Smuzhiyun ************************/
void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count)
{
	volatile struct s_smt_fp_rxd *next_rxd;
	volatile struct s_smt_fp_rxd *src_rxd;
	struct sk_buff *skb;
	int MaxFrameSize;
	unsigned char *v_addr;
	dma_addr_t b_addr;

	/* Buffers are always queued one fragment at a time in this
	 * driver; a larger count indicates an internal error. */
	if (frag_count != 1)	// This is not allowed to happen.

		printk("fddi: Multi-fragment requeue!\n");

	MaxFrameSize = smc->os.MaxFrameSize;
	src_rxd = rxd;
	for (; frag_count > 0; frag_count--) {
		next_rxd = src_rxd->rxd_next;
		rxd = HWM_GET_CURR_RXD(smc);

		skb = src_rxd->rxd_os.skb;
		if (skb == NULL) {	// this should not happen

			pr_debug("Requeue with no skb in rxd!\n");
			/* Old descriptor lost its buffer -- try to attach
			 * a fresh one (3 spare bytes for the skb_reserve
			 * below; presumably alignment -- TODO confirm). */
			skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
			if (skb) {
				// we got a skb
				rxd->rxd_os.skb = skb;
				skb_reserve(skb, 3);
				skb_put(skb, MaxFrameSize);
				v_addr = skb->data;
				b_addr = pci_map_single(&smc->os.pdev,
							v_addr,
							MaxFrameSize,
							PCI_DMA_FROMDEVICE);
				rxd->rxd_os.dma_addr = b_addr;
			} else {
				// no skb available, use local buffer
				pr_debug("Queueing invalid buffer!\n");
				rxd->rxd_os.skb = NULL;
				v_addr = smc->os.LocalRxBuffer;
				b_addr = smc->os.LocalRxBufferDMA;
			}
		} else {
			// we use skb from old rxd
			/* Re-map the buffer: its previous mapping was torn
			 * down in dma_complete(). */
			rxd->rxd_os.skb = skb;
			v_addr = skb->data;
			b_addr = pci_map_single(&smc->os.pdev,
						v_addr,
						MaxFrameSize,
						PCI_DMA_FROMDEVICE);
			rxd->rxd_os.dma_addr = b_addr;
		}
		// Hand the (re-)mapped buffer back to the hardware module.
		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
			    FIRST_FRAG | LAST_FRAG);

		src_rxd = next_rxd;
	}
} // mac_drv_requeue_rxd
1733*4882a593Smuzhiyun
1734*4882a593Smuzhiyun
1735*4882a593Smuzhiyun /************************
1736*4882a593Smuzhiyun *
1737*4882a593Smuzhiyun * mac_drv_fill_rxd
1738*4882a593Smuzhiyun *
1739*4882a593Smuzhiyun * The hardware module calls this function at initialization time
1740*4882a593Smuzhiyun * to fill the RxD ring with receive buffers. It is also called by
1741*4882a593Smuzhiyun * mac_drv_rx_complete if rx_free is large enough to queue some new
1742*4882a593Smuzhiyun * receive buffers into the RxD ring. mac_drv_fill_rxd queues new
1743*4882a593Smuzhiyun * receive buffers as long as enough RxDs and receive buffers are
1744*4882a593Smuzhiyun * available.
1745*4882a593Smuzhiyun * Args
1746*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1747*4882a593Smuzhiyun * Out
1748*4882a593Smuzhiyun * Nothing.
1749*4882a593Smuzhiyun *
1750*4882a593Smuzhiyun ************************/
void mac_drv_fill_rxd(struct s_smc *smc)
{
	int MaxFrameSize;
	unsigned char *v_addr;
	unsigned long b_addr;
	struct sk_buff *skb;
	volatile struct s_smt_fp_rxd *rxd;

	pr_debug("entering mac_drv_fill_rxd\n");

	// Walk through the list of free receive buffers, passing receive
	// buffers to the HWM as long as RXDs are available.

	MaxFrameSize = smc->os.MaxFrameSize;
	// Check if there is any RXD left.
	while (HWM_GET_RX_FREE(smc) > 0) {
		pr_debug(".\n");

		rxd = HWM_GET_CURR_RXD(smc);
		/* 3 spare bytes for the skb_reserve below; presumably
		 * payload alignment -- TODO confirm. */
		skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
		if (skb) {
			// we got a skb
			skb_reserve(skb, 3);
			skb_put(skb, MaxFrameSize);
			v_addr = skb->data;
			b_addr = pci_map_single(&smc->os.pdev,
						v_addr,
						MaxFrameSize,
						PCI_DMA_FROMDEVICE);
			rxd->rxd_os.dma_addr = b_addr;
		} else {
			// no skb available, use local buffer
			// System has run out of buffer memory, but we want to
			// keep the receiver running in hope of better times.
			// Multiple descriptors may point to this local buffer,
			// so data in it must be considered invalid.
			pr_debug("Queueing invalid buffer!\n");
			v_addr = smc->os.LocalRxBuffer;
			b_addr = smc->os.LocalRxBufferDMA;
		}

		// NULL skb marks the descriptor as using the local buffer.
		rxd->rxd_os.skb = skb;

		// Pass receive buffer to HWM.
		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
			    FIRST_FRAG | LAST_FRAG);
	}
	pr_debug("leaving mac_drv_fill_rxd\n");
} // mac_drv_fill_rxd
1800*4882a593Smuzhiyun
1801*4882a593Smuzhiyun
1802*4882a593Smuzhiyun /************************
1803*4882a593Smuzhiyun *
1804*4882a593Smuzhiyun * mac_drv_clear_rxd
1805*4882a593Smuzhiyun *
1806*4882a593Smuzhiyun * The hardware module calls this function to release unused
1807*4882a593Smuzhiyun * receive buffers.
1808*4882a593Smuzhiyun * Args
1809*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1810*4882a593Smuzhiyun *
1811*4882a593Smuzhiyun * rxd - A pointer to the first RxD which is used by the receive buffer.
1812*4882a593Smuzhiyun *
1813*4882a593Smuzhiyun * frag_count - Count of RxDs used by the receive buffer.
1814*4882a593Smuzhiyun * Out
1815*4882a593Smuzhiyun * Nothing.
1816*4882a593Smuzhiyun *
1817*4882a593Smuzhiyun ************************/
mac_drv_clear_rxd(struct s_smc * smc,volatile struct s_smt_fp_rxd * rxd,int frag_count)1818*4882a593Smuzhiyun void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1819*4882a593Smuzhiyun int frag_count)
1820*4882a593Smuzhiyun {
1821*4882a593Smuzhiyun
1822*4882a593Smuzhiyun struct sk_buff *skb;
1823*4882a593Smuzhiyun
1824*4882a593Smuzhiyun pr_debug("entering mac_drv_clear_rxd\n");
1825*4882a593Smuzhiyun
1826*4882a593Smuzhiyun if (frag_count != 1) // This is not allowed to happen.
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun printk("fddi: Multi-fragment clear!\n");
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyun for (; frag_count > 0; frag_count--) {
1831*4882a593Smuzhiyun skb = rxd->rxd_os.skb;
1832*4882a593Smuzhiyun if (skb != NULL) {
1833*4882a593Smuzhiyun skfddi_priv *bp = &smc->os;
1834*4882a593Smuzhiyun int MaxFrameSize = bp->MaxFrameSize;
1835*4882a593Smuzhiyun
1836*4882a593Smuzhiyun pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
1837*4882a593Smuzhiyun MaxFrameSize, PCI_DMA_FROMDEVICE);
1838*4882a593Smuzhiyun
1839*4882a593Smuzhiyun dev_kfree_skb(skb);
1840*4882a593Smuzhiyun rxd->rxd_os.skb = NULL;
1841*4882a593Smuzhiyun }
1842*4882a593Smuzhiyun rxd = rxd->rxd_next; // Next RXD.
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun }
1845*4882a593Smuzhiyun } // mac_drv_clear_rxd
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun
1848*4882a593Smuzhiyun /************************
1849*4882a593Smuzhiyun *
1850*4882a593Smuzhiyun * mac_drv_rx_init
1851*4882a593Smuzhiyun *
1852*4882a593Smuzhiyun * The hardware module calls this routine when an SMT or NSA frame of the
1853*4882a593Smuzhiyun * local SMT should be delivered to the LLC layer.
1854*4882a593Smuzhiyun *
1855*4882a593Smuzhiyun * It is necessary to have this function, because there is no other way to
1856*4882a593Smuzhiyun * copy the contents of SMT MBufs into receive buffers.
1857*4882a593Smuzhiyun *
1858*4882a593Smuzhiyun * mac_drv_rx_init allocates the required target memory for this frame,
1859*4882a593Smuzhiyun * and receives the frame fragment by fragment by calling mac_drv_rx_frag.
1860*4882a593Smuzhiyun * Args
1861*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1862*4882a593Smuzhiyun *
1863*4882a593Smuzhiyun * len - The length (in bytes) of the received frame (FC, DA, SA, Data).
1864*4882a593Smuzhiyun *
1865*4882a593Smuzhiyun * fc - The Frame Control field of the received frame.
1866*4882a593Smuzhiyun *
1867*4882a593Smuzhiyun * look_ahead - A pointer to the lookahead data buffer (may be NULL).
1868*4882a593Smuzhiyun *
1869*4882a593Smuzhiyun * la_len - The length of the lookahead data stored in the lookahead
1870*4882a593Smuzhiyun * buffer (may be zero).
1871*4882a593Smuzhiyun * Out
1872*4882a593Smuzhiyun * Always returns zero (0).
1873*4882a593Smuzhiyun *
1874*4882a593Smuzhiyun ************************/
mac_drv_rx_init(struct s_smc * smc,int len,int fc,char * look_ahead,int la_len)1875*4882a593Smuzhiyun int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1876*4882a593Smuzhiyun char *look_ahead, int la_len)
1877*4882a593Smuzhiyun {
1878*4882a593Smuzhiyun struct sk_buff *skb;
1879*4882a593Smuzhiyun
1880*4882a593Smuzhiyun pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
1881*4882a593Smuzhiyun
1882*4882a593Smuzhiyun // "Received" a SMT or NSA frame of the local SMT.
1883*4882a593Smuzhiyun
1884*4882a593Smuzhiyun if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
1885*4882a593Smuzhiyun pr_debug("fddi: Discard invalid local SMT frame\n");
1886*4882a593Smuzhiyun pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
1887*4882a593Smuzhiyun len, la_len, (unsigned long) look_ahead);
1888*4882a593Smuzhiyun return 0;
1889*4882a593Smuzhiyun }
1890*4882a593Smuzhiyun skb = alloc_skb(len + 3, GFP_ATOMIC);
1891*4882a593Smuzhiyun if (!skb) {
1892*4882a593Smuzhiyun pr_debug("fddi: Local SMT: skb memory exhausted.\n");
1893*4882a593Smuzhiyun return 0;
1894*4882a593Smuzhiyun }
1895*4882a593Smuzhiyun skb_reserve(skb, 3);
1896*4882a593Smuzhiyun skb_put(skb, len);
1897*4882a593Smuzhiyun skb_copy_to_linear_data(skb, look_ahead, len);
1898*4882a593Smuzhiyun
1899*4882a593Smuzhiyun // deliver frame to system
1900*4882a593Smuzhiyun skb->protocol = fddi_type_trans(skb, smc->os.dev);
1901*4882a593Smuzhiyun netif_rx(skb);
1902*4882a593Smuzhiyun
1903*4882a593Smuzhiyun return 0;
1904*4882a593Smuzhiyun } // mac_drv_rx_init
1905*4882a593Smuzhiyun
1906*4882a593Smuzhiyun
1907*4882a593Smuzhiyun /************************
1908*4882a593Smuzhiyun *
1909*4882a593Smuzhiyun * smt_timer_poll
1910*4882a593Smuzhiyun *
1911*4882a593Smuzhiyun * This routine is called periodically by the SMT module to clean up the
1912*4882a593Smuzhiyun * driver.
1913*4882a593Smuzhiyun *
1914*4882a593Smuzhiyun * Return any queued frames back to the upper protocol layers if the ring
1915*4882a593Smuzhiyun * is down.
1916*4882a593Smuzhiyun * Args
1917*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1918*4882a593Smuzhiyun * Out
1919*4882a593Smuzhiyun * Nothing.
1920*4882a593Smuzhiyun *
1921*4882a593Smuzhiyun ************************/
void smt_timer_poll(struct s_smc *smc)
{
	/* Intentionally empty: this driver has no periodic cleanup work
	 * to do on the SMT timer tick. */
}				// smt_timer_poll
1925*4882a593Smuzhiyun
1926*4882a593Smuzhiyun
1927*4882a593Smuzhiyun /************************
1928*4882a593Smuzhiyun *
1929*4882a593Smuzhiyun * ring_status_indication
1930*4882a593Smuzhiyun *
1931*4882a593Smuzhiyun * This function indicates a change of the ring state.
1932*4882a593Smuzhiyun * Args
1933*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
1934*4882a593Smuzhiyun *
1935*4882a593Smuzhiyun * status - The current ring status.
1936*4882a593Smuzhiyun * Out
1937*4882a593Smuzhiyun * Nothing.
1938*4882a593Smuzhiyun *
1939*4882a593Smuzhiyun ************************/
void ring_status_indication(struct s_smc *smc, u_long status)
{
	/*
	 * Dump the symbolic name of every ring-status bit that is set,
	 * wrapped in "( ... )".
	 * Fix: the list was previously closed with "]" although it is
	 * opened with "(", producing mismatched delimiters in the log.
	 */
	pr_debug("ring_status_indication( ");
	if (status & RS_RES15)
		pr_debug("RS_RES15 ");
	if (status & RS_HARDERROR)
		pr_debug("RS_HARDERROR ");
	if (status & RS_SOFTERROR)
		pr_debug("RS_SOFTERROR ");
	if (status & RS_BEACON)
		pr_debug("RS_BEACON ");
	if (status & RS_PATHTEST)
		pr_debug("RS_PATHTEST ");
	if (status & RS_SELFTEST)
		pr_debug("RS_SELFTEST ");
	if (status & RS_RES9)
		pr_debug("RS_RES9 ");
	if (status & RS_DISCONNECT)
		pr_debug("RS_DISCONNECT ");
	if (status & RS_RES7)
		pr_debug("RS_RES7 ");
	if (status & RS_DUPADDR)
		pr_debug("RS_DUPADDR ");
	if (status & RS_NORINGOP)
		pr_debug("RS_NORINGOP ");
	if (status & RS_VERSION)
		pr_debug("RS_VERSION ");
	if (status & RS_STUCKBYPASSS)
		pr_debug("RS_STUCKBYPASSS ");
	if (status & RS_EVENT)
		pr_debug("RS_EVENT ");
	if (status & RS_RINGOPCHANGE)
		pr_debug("RS_RINGOPCHANGE ");
	if (status & RS_RES0)
		pr_debug("RS_RES0 ");
	pr_debug(")\n");
}				// ring_status_indication
1977*4882a593Smuzhiyun
1978*4882a593Smuzhiyun
1979*4882a593Smuzhiyun /************************
1980*4882a593Smuzhiyun *
1981*4882a593Smuzhiyun * smt_get_time
1982*4882a593Smuzhiyun *
1983*4882a593Smuzhiyun * Gets the current time from the system.
1984*4882a593Smuzhiyun * Args
1985*4882a593Smuzhiyun * None.
1986*4882a593Smuzhiyun * Out
1987*4882a593Smuzhiyun * The current time in TICKS_PER_SECOND.
1988*4882a593Smuzhiyun *
1989*4882a593Smuzhiyun * TICKS_PER_SECOND has the unit 'count of timer ticks per second'. It is
1990*4882a593Smuzhiyun * defined in "targetos.h". The definition of TICKS_PER_SECOND must comply
1991*4882a593Smuzhiyun * to the time returned by smt_get_time().
1992*4882a593Smuzhiyun *
1993*4882a593Smuzhiyun ************************/
unsigned long smt_get_time(void)
{
	/* jiffies advances at HZ ticks per second, which is what
	 * TICKS_PER_SECOND in targetos.h must be defined to match. */
	return jiffies;
}				// smt_get_time
1998*4882a593Smuzhiyun
1999*4882a593Smuzhiyun
2000*4882a593Smuzhiyun /************************
2001*4882a593Smuzhiyun *
2002*4882a593Smuzhiyun * smt_stat_counter
2003*4882a593Smuzhiyun *
2004*4882a593Smuzhiyun * Status counter update (ring_op, fifo full).
2005*4882a593Smuzhiyun * Args
2006*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
2007*4882a593Smuzhiyun *
2008*4882a593Smuzhiyun * stat - = 0: A ring operational change occurred.
2009*4882a593Smuzhiyun * = 1: The FORMAC FIFO buffer is full / FIFO overflow.
2010*4882a593Smuzhiyun * Out
2011*4882a593Smuzhiyun * Nothing.
2012*4882a593Smuzhiyun *
2013*4882a593Smuzhiyun ************************/
smt_stat_counter(struct s_smc * smc,int stat)2014*4882a593Smuzhiyun void smt_stat_counter(struct s_smc *smc, int stat)
2015*4882a593Smuzhiyun {
2016*4882a593Smuzhiyun // BOOLEAN RingIsUp ;
2017*4882a593Smuzhiyun
2018*4882a593Smuzhiyun pr_debug("smt_stat_counter\n");
2019*4882a593Smuzhiyun switch (stat) {
2020*4882a593Smuzhiyun case 0:
2021*4882a593Smuzhiyun pr_debug("Ring operational change.\n");
2022*4882a593Smuzhiyun break;
2023*4882a593Smuzhiyun case 1:
2024*4882a593Smuzhiyun pr_debug("Receive fifo overflow.\n");
2025*4882a593Smuzhiyun smc->os.MacStat.gen.rx_errors++;
2026*4882a593Smuzhiyun break;
2027*4882a593Smuzhiyun default:
2028*4882a593Smuzhiyun pr_debug("Unknown status (%d).\n", stat);
2029*4882a593Smuzhiyun break;
2030*4882a593Smuzhiyun }
2031*4882a593Smuzhiyun } // smt_stat_counter
2032*4882a593Smuzhiyun
2033*4882a593Smuzhiyun
2034*4882a593Smuzhiyun /************************
2035*4882a593Smuzhiyun *
2036*4882a593Smuzhiyun * cfm_state_change
2037*4882a593Smuzhiyun *
2038*4882a593Smuzhiyun * Sets CFM state in custom statistics.
2039*4882a593Smuzhiyun * Args
2040*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
2041*4882a593Smuzhiyun *
2042*4882a593Smuzhiyun * c_state - Possible values are:
2043*4882a593Smuzhiyun *
2044*4882a593Smuzhiyun * EC0_OUT, EC1_IN, EC2_TRACE, EC3_LEAVE, EC4_PATH_TEST,
2045*4882a593Smuzhiyun * EC5_INSERT, EC6_CHECK, EC7_DEINSERT
2046*4882a593Smuzhiyun * Out
2047*4882a593Smuzhiyun * Nothing.
2048*4882a593Smuzhiyun *
2049*4882a593Smuzhiyun ************************/
void cfm_state_change(struct s_smc *smc, int c_state)
{
#ifdef DRIVERDEBUG
	/* Lookup table mapping CFM state codes to their symbolic names. */
	static const struct {
		int state;
		const char *name;
	} cf_names[] = {
		{ SC0_ISOLATED,  "SC0_ISOLATED" },
		{ SC1_WRAP_A,    "SC1_WRAP_A" },
		{ SC2_WRAP_B,    "SC2_WRAP_B" },
		{ SC4_THRU_A,    "SC4_THRU_A" },
		{ SC5_THRU_B,    "SC5_THRU_B" },
		{ SC7_WRAP_S,    "SC7_WRAP_S" },
		{ SC9_C_WRAP_A,  "SC9_C_WRAP_A" },
		{ SC10_C_WRAP_B, "SC10_C_WRAP_B" },
		{ SC11_C_WRAP_S, "SC11_C_WRAP_S" },
	};
	int i;

	for (i = 0; i < (int)(sizeof(cf_names) / sizeof(cf_names[0])); i++) {
		if (cf_names[i].state == c_state) {
			pr_debug("cfm_state_change: %s\n", cf_names[i].name);
			return;
		}
	}
	pr_debug("cfm_state_change: unknown %d\n", c_state);
#endif // DRIVERDEBUG
}				// cfm_state_change
2090*4882a593Smuzhiyun
2091*4882a593Smuzhiyun
2092*4882a593Smuzhiyun /************************
2093*4882a593Smuzhiyun *
2094*4882a593Smuzhiyun * ecm_state_change
2095*4882a593Smuzhiyun *
2096*4882a593Smuzhiyun * Sets ECM state in custom statistics.
2097*4882a593Smuzhiyun * Args
2098*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
2099*4882a593Smuzhiyun *
2100*4882a593Smuzhiyun * e_state - Possible values are:
2101*4882a593Smuzhiyun *
2102*4882a593Smuzhiyun * SC0_ISOLATED, SC1_WRAP_A (5), SC2_WRAP_B (6), SC4_THRU_A (12),
2103*4882a593Smuzhiyun * SC5_THRU_B (7), SC7_WRAP_S (8)
2104*4882a593Smuzhiyun * Out
2105*4882a593Smuzhiyun * Nothing.
2106*4882a593Smuzhiyun *
2107*4882a593Smuzhiyun ************************/
void ecm_state_change(struct s_smc *smc, int e_state)
{
#ifdef DRIVERDEBUG
	/* Lookup table mapping ECM state codes to their symbolic names. */
	static const struct {
		int state;
		const char *name;
	} ec_names[] = {
		{ EC0_OUT,       "EC0_OUT" },
		{ EC1_IN,        "EC1_IN" },
		{ EC2_TRACE,     "EC2_TRACE" },
		{ EC3_LEAVE,     "EC3_LEAVE" },
		{ EC4_PATH_TEST, "EC4_PATH_TEST" },
		{ EC5_INSERT,    "EC5_INSERT" },
		{ EC6_CHECK,     "EC6_CHECK" },
		{ EC7_DEINSERT,  "EC7_DEINSERT" },
	};
	const char *s = "unknown";
	int i;

	for (i = 0; i < (int)(sizeof(ec_names) / sizeof(ec_names[0])); i++) {
		if (ec_names[i].state == e_state) {
			s = ec_names[i].name;
			break;
		}
	}
	pr_debug("ecm_state_change: %s\n", s);
#endif //DRIVERDEBUG
}				// ecm_state_change
2145*4882a593Smuzhiyun
2146*4882a593Smuzhiyun
2147*4882a593Smuzhiyun /************************
2148*4882a593Smuzhiyun *
2149*4882a593Smuzhiyun * rmt_state_change
2150*4882a593Smuzhiyun *
2151*4882a593Smuzhiyun * Sets RMT state in custom statistics.
2152*4882a593Smuzhiyun * Args
2153*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
2154*4882a593Smuzhiyun *
2155*4882a593Smuzhiyun * r_state - Possible values are:
2156*4882a593Smuzhiyun *
2157*4882a593Smuzhiyun * RM0_ISOLATED, RM1_NON_OP, RM2_RING_OP, RM3_DETECT,
2158*4882a593Smuzhiyun * RM4_NON_OP_DUP, RM5_RING_OP_DUP, RM6_DIRECTED, RM7_TRACE
2159*4882a593Smuzhiyun * Out
2160*4882a593Smuzhiyun * Nothing.
2161*4882a593Smuzhiyun *
2162*4882a593Smuzhiyun ************************/
void rmt_state_change(struct s_smc *smc, int r_state)
{
#ifdef DRIVERDEBUG
	/* Lookup table mapping RMT state codes to descriptive strings. */
	static const struct {
		int state;
		const char *name;
	} rm_names[] = {
		{ RM0_ISOLATED,    "RM0_ISOLATED" },
		{ RM1_NON_OP,      "RM1_NON_OP - not operational" },
		{ RM2_RING_OP,     "RM2_RING_OP - ring operational" },
		{ RM3_DETECT,      "RM3_DETECT - detect dupl addresses" },
		{ RM4_NON_OP_DUP,  "RM4_NON_OP_DUP - dupl. addr detected" },
		{ RM5_RING_OP_DUP, "RM5_RING_OP_DUP - ring oper. with dupl. addr" },
		{ RM6_DIRECTED,    "RM6_DIRECTED - sending directed beacons" },
		{ RM7_TRACE,       "RM7_TRACE - trace initiated" },
	};
	const char *s = "unknown";
	int i;

	for (i = 0; i < (int)(sizeof(rm_names) / sizeof(rm_names[0])); i++) {
		if (rm_names[i].state == r_state) {
			s = rm_names[i].name;
			break;
		}
	}
	pr_debug("[rmt_state_change: %s]\n", s);
#endif // DRIVERDEBUG
}				// rmt_state_change
2200*4882a593Smuzhiyun
2201*4882a593Smuzhiyun
2202*4882a593Smuzhiyun /************************
2203*4882a593Smuzhiyun *
2204*4882a593Smuzhiyun * drv_reset_indication
2205*4882a593Smuzhiyun *
2206*4882a593Smuzhiyun * This function is called by the SMT when it has detected a severe
2207*4882a593Smuzhiyun * hardware problem. The driver should perform a reset on the adapter
2208*4882a593Smuzhiyun * as soon as possible, but not from within this function.
2209*4882a593Smuzhiyun * Args
2210*4882a593Smuzhiyun * smc - A pointer to the SMT context struct.
2211*4882a593Smuzhiyun * Out
2212*4882a593Smuzhiyun * Nothing.
2213*4882a593Smuzhiyun *
2214*4882a593Smuzhiyun ************************/
void drv_reset_indication(struct s_smc *smc)
{
	pr_debug("entering drv_reset_indication\n");

	/* Only record the request here; the actual adapter reset must be
	 * performed later, outside this callback (see contract above). */
	smc->os.ResetRequested = TRUE;	// Set flag.

}				// drv_reset_indication
2222*4882a593Smuzhiyun
2223*4882a593Smuzhiyun static struct pci_driver skfddi_pci_driver = {
2224*4882a593Smuzhiyun .name = "skfddi",
2225*4882a593Smuzhiyun .id_table = skfddi_pci_tbl,
2226*4882a593Smuzhiyun .probe = skfp_init_one,
2227*4882a593Smuzhiyun .remove = skfp_remove_one,
2228*4882a593Smuzhiyun };
2229*4882a593Smuzhiyun
2230*4882a593Smuzhiyun module_pci_driver(skfddi_pci_driver);
2231