Lines Matching +full:reg +full:- +full:spacing

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2006 Intel Corporation. */
26 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
27 * configurations. 8255x supports a 32-bit linear addressing
32 * Memory-mapped mode is used exclusively to access the device's
33 * shared-memory structure, the Control/Status Registers (CSR). All
39 * 8255x is highly MII-compliant and all access to the PHY goes
41 * driver leverages the mii.c library shared with other MII-compliant
44 * Big- and Little-Endian byte order as well as 32- and 64-bit
45 * archs are supported. Weak-ordered memory and non-cache-coherent
51 * together in a fixed-size ring (CBL) thus forming the flexible mode
52 * memory structure. A TCB marked with the suspend-bit indicates
58 * Non-Tx commands (config, multicast setup, etc) are linked
60 * used for both Tx and non-Tx commands is the Command Block (CB).
79 * protocol headers are u32-aligned. Since the RFD is part of the
87 * packet as end-of-list (EL). After updating the link, we remove EL
89 * previous-to-end RFD.
93 * replacement RFDs cannot be allocated, or the RU goes non-active,
95 * and Rx indication and re-allocation happen in the same context,
96 * therefore no locking is required. A software-generated interrupt
98 * scenario where all Rx resources have been indicated and none re-
104 * supported, but driver will accommodate the extra 4-byte VLAN tag
115 * o several entry points race with dev->close
116 * o check for tx-no-resources/stop Q races with tx clean/wake Q
119 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
120 * - Stratus87247: protect MDI control register manipulations
121 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
122 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
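
The theory-of-operation comment above notes that both big- and little-endian hosts (and 32/64-bit archs) are supported; every multi-byte field the 8255x reads from a shared-memory structure is stored little-endian, which is why descriptor stores throughout the file are wrapped in cpu_to_le16()/cpu_to_le32(). A minimal user-space sketch of the same idea, using hand-rolled packing helpers and a made-up 8-byte descriptor layout in place of the kernel's byteorder macros:

/* Illustrative sketch only: pack descriptor fields little-endian by hand,
 * independent of host byte order (the driver uses cpu_to_le16/cpu_to_le32
 * for the same purpose).  The 8-byte layout below is made up. */
#include <stdint.h>
#include <stdio.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint8_t desc[8];                 /* status(2), command(2), link(4)   */

	put_le16(desc + 0, 0x0000);      /* status                           */
	put_le16(desc + 2, 0x4000);      /* command, e.g. a suspend-type bit */
	put_le32(desc + 4, 0x1fe0f000);  /* bus address of the next block    */

	for (unsigned i = 0; i < sizeof(desc); i++)
		printf("%02x ", desc[i]);
	printf("\n");
	return 0;
}
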
138 #include <linux/dma-mapping.h>
154 #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
285 RU_UNINITIALIZED = -1,
388 * cb_command - Command Block flags
472 /* Important: keep total struct u32-aligned */
542 u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
610 (void)ioread8(&nic->csr->scb.status); in e100_write_flush()
617 spin_lock_irqsave(&nic->cmd_lock, flags); in e100_enable_irq()
618 iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); in e100_enable_irq()
620 spin_unlock_irqrestore(&nic->cmd_lock, flags); in e100_enable_irq()
627 spin_lock_irqsave(&nic->cmd_lock, flags); in e100_disable_irq()
628 iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); in e100_disable_irq()
630 spin_unlock_irqrestore(&nic->cmd_lock, flags); in e100_disable_irq()
637 iowrite32(selective_reset, &nic->csr->port); in e100_hw_reset()
641 iowrite32(software_reset, &nic->csr->port); in e100_hw_reset()
644 /* Mask off our interrupt line - it's unmasked after reset */ in e100_hw_reset()
650 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); in e100_self_test()
652 /* Passing the self-test is a pretty good indication in e100_self_test()
655 nic->mem->selftest.signature = 0; in e100_self_test()
656 nic->mem->selftest.result = 0xFFFFFFFF; in e100_self_test()
658 iowrite32(selftest | dma_addr, &nic->csr->port); in e100_self_test()
660 /* Wait 10 msec for self-test to complete */ in e100_self_test()
663 /* Interrupts are enabled after self-test */ in e100_self_test()
666 /* Check results of self-test */ in e100_self_test()
667 if (nic->mem->selftest.result != 0) { in e100_self_test()
668 netif_err(nic, hw, nic->netdev, in e100_self_test()
669 "Self-test failed: result=0x%08X\n", in e100_self_test()
670 nic->mem->selftest.result); in e100_self_test()
671 return -ETIMEDOUT; in e100_self_test()
673 if (nic->mem->selftest.signature == 0) { in e100_self_test()
674 netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); in e100_self_test()
675 return -ETIMEDOUT; in e100_self_test()
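
e100_self_test() above zeroes a signature word, sets the result word to all ones, kicks the port register, and after a delay distinguishes "never completed" (signature still zero) from "completed but failed" (result non-zero). A small sketch of that two-word completion check; the struct and field names here are invented for illustration:

/* Sketch of the self-test completion check; the layout is illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

struct selftest_words {            /* mirrors signature/result above      */
	volatile uint32_t signature;   /* device writes non-zero when done    */
	volatile uint32_t result;      /* device writes 0 on success          */
};

static int check_selftest(const struct selftest_words *st)
{
	if (st->signature == 0)        /* device never completed              */
		return -ETIMEDOUT;
	if (st->result != 0)           /* completed, but reported a failure   */
		return -ETIMEDOUT;         /* the driver reuses -ETIMEDOUT here   */
	return 0;
}

int main(void)
{
	struct selftest_words ok      = { 0xACED, 0 };
	struct selftest_words timeout = { 0, 0xFFFFFFFF };

	printf("ok=%d timeout=%d\n",
	       check_selftest(&ok), check_selftest(&timeout));
	return 0;
}
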
688 cmd_addr_data[0] = op_ewen << (addr_len - 2); in e100_eeprom_write()
691 cmd_addr_data[2] = op_ewds << (addr_len - 2); in e100_eeprom_write()
693 /* Bit-bang cmds to write word to eeprom */ in e100_eeprom_write()
697 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
700 for (i = 31; i >= 0; i--) { in e100_eeprom_write()
703 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
706 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
713 iowrite8(0, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
718 /* General technique stolen from the eepro100 driver - very clever */
729 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
732 /* Bit-bang to read word from eeprom */ in e100_eeprom_read()
733 for (i = 31; i >= 0; i--) { in e100_eeprom_read()
735 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
738 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
743 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
745 *addr_len -= (i - 16); in e100_eeprom_read()
753 iowrite8(0, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
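
The two loops above bit-bang a Microwire-style serial EEPROM: a start bit, opcode and address are shifted out MSB-first while toggling the clock, then data bits are sampled on the way back in. The sketch below simulates the same shifting discipline against a toy EEPROM model in user space; the pin/clock model, the 6-bit address width and the fake contents are all assumptions, and the dummy-zero bit the driver uses to discover the real address length is not modelled:

/* Illustrative bit-bang sketch (not the driver's register interface):
 * shift a read command out MSB-first, then clock 16 data bits back in.
 * clock_bit() stands in for one toggle of the EEPROM control register. */
#include <stdint.h>
#include <stdio.h>

static uint16_t fake_eeprom[64] = { [0] = 0x1100, [1] = 0x2233 };

/* crude device model: counts clocked-in bits, then serves the read data */
struct eeprom_model { uint32_t shift_in; int bits_in; uint16_t shift_out; int reading; };

static int clock_bit(struct eeprom_model *d, int di)
{
	int dout = 0;

	if (!d->reading) {
		d->shift_in = (d->shift_in << 1) | (di & 1);
		if (++d->bits_in == 1 + 2 + 6) {        /* start + opcode + 6 addr bits */
			d->shift_out = fake_eeprom[d->shift_in & 0x3f];
			d->reading = 1;
		}
	} else {
		dout = (d->shift_out & 0x8000) ? 1 : 0; /* data out, MSB first */
		d->shift_out <<= 1;
	}
	return dout;
}

static uint16_t eeprom_read(struct eeprom_model *d, unsigned addr)
{
	uint32_t cmd = (0x6 << 6) | (addr & 0x3f); /* start=1, opcode=10 (read) */
	uint16_t data = 0;

	for (int i = 8; i >= 0; i--)               /* 9 command bits, MSB first */
		clock_bit(d, (cmd >> i) & 1);
	for (int i = 0; i < 16; i++)               /* 16 data bits, MSB first   */
		data = (data << 1) | clock_bit(d, 0);
	return data;
}

int main(void)
{
	struct eeprom_model d = { 0 };

	printf("word 1 = 0x%04x\n", eeprom_read(&d, 1)); /* prints 0x2233 */
	return 0;
}
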
764 /* Try reading with an 8-bit addr len to discover actual addr len */ in e100_eeprom_load()
766 nic->eeprom_wc = 1 << addr_len; in e100_eeprom_load()
768 for (addr = 0; addr < nic->eeprom_wc; addr++) { in e100_eeprom_load()
769 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); in e100_eeprom_load()
770 if (addr < nic->eeprom_wc - 1) in e100_eeprom_load()
771 checksum += le16_to_cpu(nic->eeprom[addr]); in e100_eeprom_load()
776 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { in e100_eeprom_load()
777 netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); in e100_eeprom_load()
779 return -EAGAIN; in e100_eeprom_load()
790 /* Try reading with an 8-bit addr len to discover actual addr len */ in e100_eeprom_save()
792 nic->eeprom_wc = 1 << addr_len; in e100_eeprom_save()
794 if (start + count >= nic->eeprom_wc) in e100_eeprom_save()
795 return -EINVAL; in e100_eeprom_save()
798 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); in e100_eeprom_save()
802 for (addr = 0; addr < nic->eeprom_wc - 1; addr++) in e100_eeprom_save()
803 checksum += le16_to_cpu(nic->eeprom[addr]); in e100_eeprom_save()
804 nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); in e100_eeprom_save()
805 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, in e100_eeprom_save()
806 nic->eeprom[nic->eeprom_wc - 1]); in e100_eeprom_save()
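
e100_eeprom_load()/e100_eeprom_save() above enforce the rule that the last EEPROM word is a checksum chosen so the 16-bit sum of every word comes out to 0xBABA. A short sketch of computing and verifying that word over an example image:

/* Sketch of the EEPROM checksum rule: the last word is chosen so that the
 * 16-bit sum of all words equals 0xBABA. */
#include <stdint.h>
#include <stdio.h>

static uint16_t eeprom_checksum(const uint16_t *words, unsigned wc)
{
	uint16_t sum = 0;

	for (unsigned i = 0; i < wc - 1; i++)   /* every word except the last */
		sum += words[i];
	return 0xBABA - sum;                    /* value stored in words[wc-1] */
}

static int eeprom_valid(const uint16_t *words, unsigned wc)
{
	return words[wc - 1] == eeprom_checksum(words, wc);
}

int main(void)
{
	uint16_t img[4] = { 0x1234, 0xABCD, 0x00FF, 0 };

	img[3] = eeprom_checksum(img, 4);
	printf("checksum word 0x%04X, valid=%d\n", img[3], eeprom_valid(img, 4));
	return 0;
}
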
819 spin_lock_irqsave(&nic->cmd_lock, flags); in e100_exec_cmd()
823 if (likely(!ioread8(&nic->csr->scb.cmd_lo))) in e100_exec_cmd()
830 err = -EAGAIN; in e100_exec_cmd()
835 iowrite32(dma_addr, &nic->csr->scb.gen_ptr); in e100_exec_cmd()
836 iowrite8(cmd, &nic->csr->scb.cmd_lo); in e100_exec_cmd()
839 spin_unlock_irqrestore(&nic->cmd_lock, flags); in e100_exec_cmd()
851 spin_lock_irqsave(&nic->cb_lock, flags); in e100_exec_cb()
853 if (unlikely(!nic->cbs_avail)) { in e100_exec_cb()
854 err = -ENOMEM; in e100_exec_cb()
858 cb = nic->cb_to_use; in e100_exec_cb()
859 nic->cb_to_use = cb->next; in e100_exec_cb()
860 nic->cbs_avail--; in e100_exec_cb()
861 cb->skb = skb; in e100_exec_cb()
867 if (unlikely(!nic->cbs_avail)) in e100_exec_cb()
868 err = -ENOSPC; in e100_exec_cb()
872 * set S-bit in current first, then clear S-bit in previous. */ in e100_exec_cb()
873 cb->command |= cpu_to_le16(cb_s); in e100_exec_cb()
875 cb->prev->command &= cpu_to_le16(~cb_s); in e100_exec_cb()
877 while (nic->cb_to_send != nic->cb_to_use) { in e100_exec_cb()
878 if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, in e100_exec_cb()
879 nic->cb_to_send->dma_addr))) { in e100_exec_cb()
885 if (err == -ENOSPC) { in e100_exec_cb()
887 schedule_work(&nic->tx_timeout_task); in e100_exec_cb()
891 nic->cuc_cmd = cuc_resume; in e100_exec_cb()
892 nic->cb_to_send = nic->cb_to_send->next; in e100_exec_cb()
897 spin_unlock_irqrestore(&nic->cb_lock, flags); in e100_exec_cb()
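
e100_exec_cb() above queues work by marking the new tail command block with the suspend (S) bit and only then clearing S on the previous block, so the device runs through the old tail and parks on the new one. A user-space sketch of that handoff on a tiny ring; the structure, the 0x4000 bit value and the command code are illustrative:

/* Sketch of the suspend-bit handoff used when queuing a command block. */
#include <stdint.h>
#include <stdio.h>

#define CB_S 0x4000u                  /* suspend bit in the command word */

struct demo_cb {
	uint16_t command;
	struct demo_cb *prev, *next;
};

static void queue_cb(struct demo_cb **to_use, uint16_t work)
{
	struct demo_cb *cb = *to_use;

	cb->command = work | CB_S;        /* device suspends after this block */
	cb->prev->command &= ~CB_S;       /* let it run past the old tail     */
	*to_use = cb->next;               /* software tail advances           */
	/* a CU resume command would follow so a suspended unit restarts */
}

int main(void)
{
	struct demo_cb ring[3];
	struct demo_cb *to_use = &ring[0];

	for (int i = 0; i < 3; i++) {
		ring[i].command = CB_S;       /* idle ring: everything suspended */
		ring[i].next = &ring[(i + 1) % 3];
		ring[i].prev = &ring[(i + 2) % 3];
	}

	queue_cb(&to_use, 0x0004 /* e.g. a transmit command code */);
	for (int i = 0; i < 3; i++)
		printf("cb[%d].command = 0x%04X\n", i, ring[i].command);
	return 0;
}
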
902 static int mdio_read(struct net_device *netdev, int addr, int reg) in mdio_read() argument
905 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); in mdio_read()
908 static void mdio_write(struct net_device *netdev, int addr, int reg, int data) in mdio_write() argument
912 nic->mdio_ctrl(nic, addr, mdi_write, reg, data); in mdio_write()
915 /* the standard mdio_ctrl() function for usual MII-compliant hardware */
916 static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) in mdio_ctrl_hw() argument
926 * manipulation of the MDI control registers is a multi-step in mdio_ctrl_hw()
929 spin_lock_irqsave(&nic->mdio_lock, flags); in mdio_ctrl_hw()
930 for (i = 100; i; --i) { in mdio_ctrl_hw()
931 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) in mdio_ctrl_hw()
936 netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); in mdio_ctrl_hw()
937 spin_unlock_irqrestore(&nic->mdio_lock, flags); in mdio_ctrl_hw()
940 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); in mdio_ctrl_hw()
944 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) in mdio_ctrl_hw()
947 spin_unlock_irqrestore(&nic->mdio_lock, flags); in mdio_ctrl_hw()
948 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in mdio_ctrl_hw()
949 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", in mdio_ctrl_hw()
951 addr, reg, data, data_out); in mdio_ctrl_hw()
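
The iowrite32() in mdio_ctrl_hw() above packs an MDI transaction as data in bits 15:0, the register number at bit 16 and the PHY address at bit 21 (both masked to 5 bits), with the read/write opcode and the ready flag in higher bits. A sketch of that packing; the opcode bit position used below is an example, not a statement of the hardware's exact value:

/* Sketch of the MDI control word layout used by mdio_ctrl_hw(). */
#include <stdint.h>
#include <stdio.h>

#define MDI_DATA(x)   ((uint32_t)(x) & 0xFFFF)
#define MDI_REG(r)    (((uint32_t)(r) & 0x1F) << 16)
#define MDI_ADDR(a)   (((uint32_t)(a) & 0x1F) << 21)

int main(void)
{
	uint32_t dir  = 1u << 26;        /* read/write opcode bit; illustrative */
	uint32_t ctrl = MDI_REG(1) | MDI_ADDR(2) | dir | MDI_DATA(0x7809);

	printf("mdi_ctrl = 0x%08X (reg=%u addr=%u data=0x%04X)\n",
	       ctrl, (ctrl >> 16) & 0x1F, (ctrl >> 21) & 0x1F, ctrl & 0xFFFF);
	return 0;
}
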
959 u32 reg, in mdio_ctrl_phy_82552_v() argument
962 if ((reg == MII_BMCR) && (dir == mdi_write)) { in mdio_ctrl_phy_82552_v()
964 u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, in mdio_ctrl_phy_82552_v()
977 return mdio_ctrl_hw(nic, addr, dir, reg, data); in mdio_ctrl_phy_82552_v()
980 /* Fully software-emulated mdio_ctrl() function for cards without
981 * MII-compliant PHYs.
989 u32 reg, in mdio_ctrl_phy_mii_emulated() argument
997 switch (reg) { in mdio_ctrl_phy_mii_emulated()
999 /* Auto-negotiation, right? */ in mdio_ctrl_phy_mii_emulated()
1011 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in mdio_ctrl_phy_mii_emulated()
1012 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", in mdio_ctrl_phy_mii_emulated()
1014 addr, reg, data); in mdio_ctrl_phy_mii_emulated()
1018 switch (reg) { in mdio_ctrl_phy_mii_emulated()
1020 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in mdio_ctrl_phy_mii_emulated()
1021 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", in mdio_ctrl_phy_mii_emulated()
1023 addr, reg, data); in mdio_ctrl_phy_mii_emulated()
1033 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); in e100_phy_supports_mii()
1042 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; in e100_get_defaults()
1043 if (nic->mac == mac_unknown) in e100_get_defaults()
1044 nic->mac = mac_82557_D100_A; in e100_get_defaults()
1046 nic->params.rfds = rfds; in e100_get_defaults()
1047 nic->params.cbs = cbs; in e100_get_defaults()
1050 nic->tx_threshold = 0xE0; in e100_get_defaults()
1053 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | in e100_get_defaults()
1054 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); in e100_get_defaults()
1057 nic->blank_rfd.command = 0; in e100_get_defaults()
1058 nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); in e100_get_defaults()
1059 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); in e100_get_defaults()
1062 nic->mii.phy_id_mask = 0x1F; in e100_get_defaults()
1063 nic->mii.reg_num_mask = 0x1F; in e100_get_defaults()
1064 nic->mii.dev = nic->netdev; in e100_get_defaults()
1065 nic->mii.mdio_read = mdio_read; in e100_get_defaults()
1066 nic->mii.mdio_write = mdio_write; in e100_get_defaults()
1071 struct config *config = &cb->u.config; in e100_configure()
1073 struct net_device *netdev = nic->netdev; in e100_configure()
1075 cb->command = cpu_to_le16(cb_config); in e100_configure()
1079 config->byte_count = 0x16; /* bytes in this struct */ in e100_configure()
1080 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ in e100_configure()
1081 config->direct_rx_dma = 0x1; /* reserved */ in e100_configure()
1082 config->standard_tcb = 0x1; /* 1=standard, 0=extended */ in e100_configure()
1083 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ in e100_configure()
1084 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ in e100_configure()
1085 config->tx_underrun_retry = 0x3; /* # of underrun retries */ in e100_configure()
1087 config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ in e100_configure()
1088 config->pad10 = 0x6; in e100_configure()
1089 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ in e100_configure()
1090 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ in e100_configure()
1091 config->ifs = 0x6; /* x16 = inter frame spacing */ in e100_configure()
1092 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ in e100_configure()
1093 config->pad15_1 = 0x1; in e100_configure()
1094 config->pad15_2 = 0x1; in e100_configure()
1095 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ in e100_configure()
1096 config->fc_delay_hi = 0x40; /* time delay for fc frame */ in e100_configure()
1097 config->tx_padding = 0x1; /* 1=pad short frames */ in e100_configure()
1098 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ in e100_configure()
1099 config->pad18 = 0x1; in e100_configure()
1100 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ in e100_configure()
1101 config->pad20_1 = 0x1F; in e100_configure()
1102 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ in e100_configure()
1103 config->pad21_1 = 0x5; in e100_configure()
1105 config->adaptive_ifs = nic->adaptive_ifs; in e100_configure()
1106 config->loopback = nic->loopback; in e100_configure()
1108 if (nic->mii.force_media && nic->mii.full_duplex) in e100_configure()
1109 config->full_duplex_force = 0x1; /* 1=force, 0=auto */ in e100_configure()
1111 if (nic->flags & promiscuous || nic->loopback) { in e100_configure()
1112 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ in e100_configure()
1113 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ in e100_configure()
1114 config->promiscuous_mode = 0x1; /* 1=on, 0=off */ in e100_configure()
1117 if (unlikely(netdev->features & NETIF_F_RXFCS)) in e100_configure()
1118 config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ in e100_configure()
1120 if (nic->flags & multicast_all) in e100_configure()
1121 config->multicast_all = 0x1; /* 1=accept, 0=no */ in e100_configure()
1124 if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) in e100_configure()
1125 config->magic_packet_disable = 0x1; /* 1=off, 0=on */ in e100_configure()
1127 if (nic->mac >= mac_82558_D101_A4) { in e100_configure()
1128 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ in e100_configure()
1129 config->mwi_enable = 0x1; /* 1=enable, 0=disable */ in e100_configure()
1130 config->standard_tcb = 0x0; /* 1=standard, 0=extended */ in e100_configure()
1131 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ in e100_configure()
1132 if (nic->mac >= mac_82559_D101M) { in e100_configure()
1133 config->tno_intr = 0x1; /* TCO stats enable */ in e100_configure()
1135 if (nic->mac >= mac_82551_10) { in e100_configure()
1136 config->byte_count = 0x20; /* extended bytes */ in e100_configure()
1137 config->rx_d102_mode = 0x1; /* GMRC for TCO */ in e100_configure()
1140 config->standard_stat_counter = 0x0; in e100_configure()
1144 if (netdev->features & NETIF_F_RXALL) { in e100_configure()
1145 config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ in e100_configure()
1146 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ in e100_configure()
1147 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ in e100_configure()
1150 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", in e100_configure()
1152 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", in e100_configure()
1154 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", in e100_configure()
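
e100_configure() fills a 22-byte (0x16) parameter block, extended to 0x20 bytes on newer MACs, and the netif_printk() calls above dump it eight bytes per row ("[00-07]=%8ph" and so on). A user-space sketch that prints a hypothetical config buffer in the same layout:

/* Sketch: dump a configure block the way the driver logs it, 8 bytes/row. */
#include <stdint.h>
#include <stdio.h>

static void dump_config(const uint8_t *cfg, unsigned len)
{
	for (unsigned i = 0; i < len; i += 8) {
		printf("[%02u-%02u]=", i, i + 7);
		for (unsigned j = i; j < i + 8 && j < len; j++)
			printf(" %02x", cfg[j]);
		printf("\n");
	}
}

int main(void)
{
	/* only the first two bytes mirror the code above; the rest is zeroed */
	uint8_t cfg[24] = { 0x16, 0x08 /* byte_count, rx_fifo_limit, ... */ };

	dump_config(cfg, sizeof(cfg));
	return 0;
}
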
1162 * All CPUSaver parameters are 16-bit literals that are part of a
1167 * INTDELAY - This loads the dead-man timer with its initial value.
1172 * the value should probably stay within the 0x200 - 0x1000 range.
1174 * BUNDLEMAX -
1184 * BUNDLESMALL -
1185 * This contains a bit-mask describing the minimum size frame that
1222 const struct firmware *fw = nic->fw; in e100_request_firmware()
1227 /* do not load u-code for ICH devices */ in e100_request_firmware()
1228 if (nic->flags & ich) in e100_request_firmware()
1236 * "fixes for bugs in the B-step hardware (specifically, bugs in e100_request_firmware()
1244 if (nic->mac == mac_82559_D101M) { in e100_request_firmware()
1246 } else if (nic->mac == mac_82559_D101S) { in e100_request_firmware()
1248 } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { in e100_request_firmware()
1261 err = request_firmware(&fw, fw_name, &nic->pdev->dev); in e100_request_firmware()
1265 netif_err(nic, probe, nic->netdev, in e100_request_firmware()
1270 netif_info(nic, probe, nic->netdev, in e100_request_firmware()
1279 if (fw->size != UCODE_SIZE * 4 + 3) { in e100_request_firmware()
1280 netif_err(nic, probe, nic->netdev, in e100_request_firmware()
1282 fw_name, fw->size); in e100_request_firmware()
1284 return ERR_PTR(-EINVAL); in e100_request_firmware()
1288 timer = fw->data[UCODE_SIZE * 4]; in e100_request_firmware()
1289 bundle = fw->data[UCODE_SIZE * 4 + 1]; in e100_request_firmware()
1290 min_size = fw->data[UCODE_SIZE * 4 + 2]; in e100_request_firmware()
1294 netif_err(nic, probe, nic->netdev, in e100_request_firmware()
1298 return ERR_PTR(-EINVAL); in e100_request_firmware()
1303 nic->fw = fw; in e100_request_firmware()
1315 cb->skb = NULL; in e100_setup_ucode()
1318 memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); in e100_setup_ucode()
1321 timer = fw->data[UCODE_SIZE * 4]; in e100_setup_ucode()
1322 bundle = fw->data[UCODE_SIZE * 4 + 1]; in e100_setup_ucode()
1323 min_size = fw->data[UCODE_SIZE * 4 + 2]; in e100_setup_ucode()
1325 /* Insert user-tunable settings in cb->u.ucode */ in e100_setup_ucode()
1326 cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); in e100_setup_ucode()
1327 cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); in e100_setup_ucode()
1328 cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); in e100_setup_ucode()
1329 cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); in e100_setup_ucode()
1330 cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); in e100_setup_ucode()
1331 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); in e100_setup_ucode()
1333 cb->command = cpu_to_le16(cb_ucode | cb_el); in e100_setup_ucode()
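
Per the firmware handling above, the last three bytes of the microcode image give the word offsets of the timer, bundle and min-size parameters, and e100_setup_ucode() patches the low 16 bits of those words with INTDELAY, BUNDLEMAX and the BUNDLESMALL threshold. A sketch of the patch step; the offsets and values below are examples only:

/* Sketch of the CPUSaver parameter patch: keep the top half of the target
 * microcode word, replace the bottom half with the tunable. */
#include <stdint.h>
#include <stdio.h>

static void patch_param(uint32_t *ucode, unsigned idx, uint16_t value)
{
	ucode[idx] = (ucode[idx] & 0xFFFF0000u) | value;
}

int main(void)
{
	uint32_t ucode[8] = { 0x11223344, 0x55667788, 0x99AABBCC };
	unsigned timer = 0, bundle = 1, min_size = 2;  /* offsets from fw image */

	patch_param(ucode, timer,    0x0600);  /* interrupt delay (example)     */
	patch_param(ucode, bundle,   0x0006);  /* frames per interrupt (example) */
	patch_param(ucode, min_size, 0xFF80);  /* BUNDLESMALL disabled           */

	for (int i = 0; i < 3; i++)
		printf("ucode[%d] = 0x%08X\n", i, ucode[i]);
	return 0;
}
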
1341 struct cb *cb = nic->cb_to_clean; in e100_load_ucode_wait()
1349 netif_err(nic, probe, nic->netdev, in e100_load_ucode_wait()
1353 nic->cuc_cmd = cuc_start; in e100_load_ucode_wait()
1360 while (!(cb->status & cpu_to_le16(cb_complete))) { in e100_load_ucode_wait()
1362 if (!--counter) break; in e100_load_ucode_wait()
1366 iowrite8(~0, &nic->csr->scb.stat_ack); in e100_load_ucode_wait()
1369 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { in e100_load_ucode_wait()
1370 netif_err(nic, probe, nic->netdev, "ucode load failed\n"); in e100_load_ucode_wait()
1371 err = -EPERM; in e100_load_ucode_wait()
1380 cb->command = cpu_to_le16(cb_iaaddr); in e100_setup_iaaddr()
1381 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); in e100_setup_iaaddr()
1387 cb->command = cpu_to_le16(cb_dump); in e100_dump()
1388 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + in e100_dump()
1398 phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; in e100_phy_check_without_mii()
1401 case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ in e100_phy_check_without_mii()
1402 case I82503: /* Non-MII PHY; UNTESTED! */ in e100_phy_check_without_mii()
1403 case S80C24: /* Non-MII PHY; tested and working */ in e100_phy_check_without_mii()
1410 netif_info(nic, probe, nic->netdev, in e100_phy_check_without_mii()
1411 "found MII-less i82503 or 80c24 or other PHY\n"); in e100_phy_check_without_mii()
1413 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; in e100_phy_check_without_mii()
1414 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ in e100_phy_check_without_mii()
1416 /* these might be needed for certain MII-less cards... in e100_phy_check_without_mii()
1417 * nic->flags |= ich; in e100_phy_check_without_mii()
1418 * nic->flags |= ich_10h_workaround; */ in e100_phy_check_without_mii()
1436 struct net_device *netdev = nic->netdev; in e100_phy_init()
1442 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; in e100_phy_init()
1443 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); in e100_phy_init()
1444 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); in e100_phy_init()
1445 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); in e100_phy_init()
1458 netif_err(nic, hw, nic->netdev, in e100_phy_init()
1460 return -EAGAIN; in e100_phy_init()
1463 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in e100_phy_init()
1464 "phy_addr = %d\n", nic->mii.phy_id); in e100_phy_init()
1467 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); in e100_phy_init()
1468 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); in e100_phy_init()
1469 nic->phy = (u32)id_hi << 16 | (u32)id_lo; in e100_phy_init()
1470 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in e100_phy_init()
1471 "phy ID = 0x%08X\n", nic->phy); in e100_phy_init()
1475 if (addr != nic->mii.phy_id) { in e100_phy_init()
1477 } else if (nic->phy != phy_82552_v) { in e100_phy_init()
1488 if (nic->phy == phy_82552_v) in e100_phy_init()
1489 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, in e100_phy_init()
1494 if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { in e100_phy_init()
1496 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); in e100_phy_init()
1499 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); in e100_phy_init()
1502 if (nic->phy == phy_82552_v) { in e100_phy_init()
1503 u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); in e100_phy_init()
1506 nic->mdio_ctrl = mdio_ctrl_phy_82552_v; in e100_phy_init()
1508 /* Workaround Si not advertising flow-control during autoneg */ in e100_phy_init()
1510 mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); in e100_phy_init()
1513 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); in e100_phy_init()
1515 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); in e100_phy_init()
1516 } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && in e100_phy_init()
1517 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && in e100_phy_init()
1518 (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { in e100_phy_init()
1519 /* enable/disable MDI/MDI-X auto-switching. */ in e100_phy_init()
1520 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, in e100_phy_init()
1521 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); in e100_phy_init()
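
e100_phy_init() above reads MII_PHYSID1 into the low half and MII_PHYSID2 into the high half of the 32-bit PHY identifier it logs. A trivial sketch of that packing with made-up register values:

/* Sketch of the 32-bit PHY ID assembly shown above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t phy_id(uint16_t id_lo /* MII_PHYSID1 */,
		       uint16_t id_hi /* MII_PHYSID2 */)
{
	return ((uint32_t)id_hi << 16) | id_lo;
}

int main(void)
{
	/* hypothetical register values, just to show the packing */
	printf("phy ID = 0x%08X\n", phy_id(0x02A8, 0x0154));
	return 0;
}
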
1533 netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); in e100_hw_init()
1550 nic->dma_addr + offsetof(struct mem, stats)))) in e100_hw_init()
1562 struct net_device *netdev = nic->netdev; in e100_multi()
1566 cb->command = cpu_to_le16(cb_multi); in e100_multi()
1567 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); in e100_multi()
1572 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, in e100_multi()
1582 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in e100_set_multicast_list()
1584 netdev_mc_count(netdev), netdev->flags); in e100_set_multicast_list()
1586 if (netdev->flags & IFF_PROMISC) in e100_set_multicast_list()
1587 nic->flags |= promiscuous; in e100_set_multicast_list()
1589 nic->flags &= ~promiscuous; in e100_set_multicast_list()
1591 if (netdev->flags & IFF_ALLMULTI || in e100_set_multicast_list()
1593 nic->flags |= multicast_all; in e100_set_multicast_list()
1595 nic->flags &= ~multicast_all; in e100_set_multicast_list()
1603 struct net_device *dev = nic->netdev; in e100_update_stats()
1604 struct net_device_stats *ns = &dev->stats; in e100_update_stats()
1605 struct stats *s = &nic->mem->stats; in e100_update_stats()
1606 __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : in e100_update_stats()
1607 (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : in e100_update_stats()
1608 &s->complete; in e100_update_stats()
1616 nic->tx_frames = le32_to_cpu(s->tx_good_frames); in e100_update_stats()
1617 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); in e100_update_stats()
1618 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); in e100_update_stats()
1619 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); in e100_update_stats()
1620 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); in e100_update_stats()
1621 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); in e100_update_stats()
1622 ns->collisions += nic->tx_collisions; in e100_update_stats()
1623 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + in e100_update_stats()
1624 le32_to_cpu(s->tx_lost_crs); in e100_update_stats()
1625 nic->rx_short_frame_errors += in e100_update_stats()
1626 le32_to_cpu(s->rx_short_frame_errors); in e100_update_stats()
1627 ns->rx_length_errors = nic->rx_short_frame_errors + in e100_update_stats()
1628 nic->rx_over_length_errors; in e100_update_stats()
1629 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); in e100_update_stats()
1630 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); in e100_update_stats()
1631 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); in e100_update_stats()
1632 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); in e100_update_stats()
1633 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); in e100_update_stats()
1634 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + in e100_update_stats()
1635 le32_to_cpu(s->rx_alignment_errors) + in e100_update_stats()
1636 le32_to_cpu(s->rx_short_frame_errors) + in e100_update_stats()
1637 le32_to_cpu(s->rx_cdt_errors); in e100_update_stats()
1638 nic->tx_deferred += le32_to_cpu(s->tx_deferred); in e100_update_stats()
1639 nic->tx_single_collisions += in e100_update_stats()
1640 le32_to_cpu(s->tx_single_collisions); in e100_update_stats()
1641 nic->tx_multiple_collisions += in e100_update_stats()
1642 le32_to_cpu(s->tx_multiple_collisions); in e100_update_stats()
1643 if (nic->mac >= mac_82558_D101_A4) { in e100_update_stats()
1644 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); in e100_update_stats()
1645 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); in e100_update_stats()
1646 nic->rx_fc_unsupported += in e100_update_stats()
1647 le32_to_cpu(s->fc_rcv_unsupported); in e100_update_stats()
1648 if (nic->mac >= mac_82559_D101M) { in e100_update_stats()
1649 nic->tx_tco_frames += in e100_update_stats()
1650 le16_to_cpu(s->xmt_tco_frames); in e100_update_stats()
1651 nic->rx_tco_frames += in e100_update_stats()
1652 le16_to_cpu(s->rcv_tco_frames); in e100_update_stats()
1659 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_update_stats()
1665 /* Adjust inter-frame-spacing (IFS) between two transmits if in e100_adjust_adaptive_ifs()
1666 * we're getting collisions on a half-duplex connection. */ in e100_adjust_adaptive_ifs()
1669 u32 prev = nic->adaptive_ifs; in e100_adjust_adaptive_ifs()
1672 if ((nic->tx_frames / 32 < nic->tx_collisions) && in e100_adjust_adaptive_ifs()
1673 (nic->tx_frames > min_frames)) { in e100_adjust_adaptive_ifs()
1674 if (nic->adaptive_ifs < 60) in e100_adjust_adaptive_ifs()
1675 nic->adaptive_ifs += 5; in e100_adjust_adaptive_ifs()
1676 } else if (nic->tx_frames < min_frames) { in e100_adjust_adaptive_ifs()
1677 if (nic->adaptive_ifs >= 5) in e100_adjust_adaptive_ifs()
1678 nic->adaptive_ifs -= 5; in e100_adjust_adaptive_ifs()
1680 if (nic->adaptive_ifs != prev) in e100_adjust_adaptive_ifs()
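
e100_adjust_adaptive_ifs() implements the rule above: when more than roughly one collision per 32 transmitted frames is seen on a busy half-duplex link, back the inter-frame spacing off in steps of 5 (up to 60); when traffic drops below the threshold, relax it again. A pure-function sketch; the driver derives min_frames from the link speed, here it is just a constant:

/* Sketch of the adaptive IFS adjustment rule. */
#include <stdio.h>

static unsigned adjust_ifs(unsigned ifs, unsigned tx_frames,
			   unsigned tx_collisions)
{
	const unsigned min_frames = 1000;  /* example; driver picks per speed */

	if (tx_frames / 32 < tx_collisions && tx_frames > min_frames) {
		if (ifs < 60)
			ifs += 5;
	} else if (tx_frames < min_frames) {
		if (ifs >= 5)
			ifs -= 5;
	}
	return ifs;
}

int main(void)
{
	printf("%u\n", adjust_ifs(0, 5000, 400));  /* heavy collisions -> 5 */
	printf("%u\n", adjust_ifs(10, 100, 0));    /* light traffic     -> 5 */
	return 0;
}
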
1691 netif_printk(nic, timer, KERN_DEBUG, nic->netdev, in e100_watchdog()
1696 mii_ethtool_gset(&nic->mii, &cmd); in e100_watchdog()
1699 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { in e100_watchdog()
1700 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", in e100_watchdog()
1703 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { in e100_watchdog()
1704 netdev_info(nic->netdev, "NIC Link is Down\n"); in e100_watchdog()
1707 mii_check_link(&nic->mii); in e100_watchdog()
1711 * Unfortunately have to use a spinlock to not re-enable interrupts in e100_watchdog()
1714 spin_lock_irq(&nic->cmd_lock); in e100_watchdog()
1715 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi); in e100_watchdog()
1717 spin_unlock_irq(&nic->cmd_lock); in e100_watchdog()
1722 if (nic->mac <= mac_82557_D100_C) in e100_watchdog()
1724 e100_set_multicast_list(nic->netdev); in e100_watchdog()
1726 if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) in e100_watchdog()
1728 nic->flags |= ich_10h_workaround; in e100_watchdog()
1730 nic->flags &= ~ich_10h_workaround; in e100_watchdog()
1732 mod_timer(&nic->watchdog, in e100_watchdog()
1740 cb->command = nic->tx_command; in e100_xmit_prepare()
1742 dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, in e100_xmit_prepare()
1745 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) in e100_xmit_prepare()
1746 return -ENOMEM; in e100_xmit_prepare()
1752 if (unlikely(skb->no_fcs)) in e100_xmit_prepare()
1753 cb->command |= cpu_to_le16(cb_tx_nc); in e100_xmit_prepare()
1755 cb->command &= ~cpu_to_le16(cb_tx_nc); in e100_xmit_prepare()
1758 if ((nic->cbs_avail & ~15) == nic->cbs_avail) in e100_xmit_prepare()
1759 cb->command |= cpu_to_le16(cb_i); in e100_xmit_prepare()
1760 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); in e100_xmit_prepare()
1761 cb->u.tcb.tcb_byte_count = 0; in e100_xmit_prepare()
1762 cb->u.tcb.threshold = nic->tx_threshold; in e100_xmit_prepare()
1763 cb->u.tcb.tbd_count = 1; in e100_xmit_prepare()
1764 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); in e100_xmit_prepare()
1765 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); in e100_xmit_prepare()
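
The "(nic->cbs_avail & ~15) == nic->cbs_avail" test in e100_xmit_prepare() is simply "cbs_avail is a multiple of 16", so a Tx-complete interrupt is requested on roughly every sixteenth frame. A sketch showing that the test is equivalent to checking that the low four bits are clear:

/* Sketch: "(x & ~15) == x" is just "x is a multiple of 16". */
#include <stdio.h>

static int every_16th(unsigned cbs_avail)
{
	return (cbs_avail & ~15u) == cbs_avail;   /* low 4 bits clear */
}

int main(void)
{
	for (unsigned n = 14; n <= 17; n++)
		printf("cbs_avail=%u -> irq=%d\n", n, every_16th(n));
	return 0;
}
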
1776 if (nic->flags & ich_10h_workaround) { in e100_xmit_frame()
1781 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_xmit_frame()
1789 case -ENOSPC: in e100_xmit_frame()
1791 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_xmit_frame()
1795 case -ENOMEM: in e100_xmit_frame()
1796 /* This is a hard error - log it. */ in e100_xmit_frame()
1797 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_xmit_frame()
1808 struct net_device *dev = nic->netdev; in e100_tx_clean()
1812 spin_lock(&nic->cb_lock); in e100_tx_clean()
1815 for (cb = nic->cb_to_clean; in e100_tx_clean()
1816 cb->status & cpu_to_le16(cb_complete); in e100_tx_clean()
1817 cb = nic->cb_to_clean = cb->next) { in e100_tx_clean()
1819 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, in e100_tx_clean()
1820 "cb[%d]->status = 0x%04X\n", in e100_tx_clean()
1821 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), in e100_tx_clean()
1822 cb->status); in e100_tx_clean()
1824 if (likely(cb->skb != NULL)) { in e100_tx_clean()
1825 dev->stats.tx_packets++; in e100_tx_clean()
1826 dev->stats.tx_bytes += cb->skb->len; in e100_tx_clean()
1828 dma_unmap_single(&nic->pdev->dev, in e100_tx_clean()
1829 le32_to_cpu(cb->u.tcb.tbd.buf_addr), in e100_tx_clean()
1830 le16_to_cpu(cb->u.tcb.tbd.size), in e100_tx_clean()
1832 dev_kfree_skb_any(cb->skb); in e100_tx_clean()
1833 cb->skb = NULL; in e100_tx_clean()
1836 cb->status = 0; in e100_tx_clean()
1837 nic->cbs_avail++; in e100_tx_clean()
1840 spin_unlock(&nic->cb_lock); in e100_tx_clean()
1843 if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) in e100_tx_clean()
1844 netif_wake_queue(nic->netdev); in e100_tx_clean()
1851 if (nic->cbs) { in e100_clean_cbs()
1852 while (nic->cbs_avail != nic->params.cbs.count) { in e100_clean_cbs()
1853 struct cb *cb = nic->cb_to_clean; in e100_clean_cbs()
1854 if (cb->skb) { in e100_clean_cbs()
1855 dma_unmap_single(&nic->pdev->dev, in e100_clean_cbs()
1856 le32_to_cpu(cb->u.tcb.tbd.buf_addr), in e100_clean_cbs()
1857 le16_to_cpu(cb->u.tcb.tbd.size), in e100_clean_cbs()
1859 dev_kfree_skb(cb->skb); in e100_clean_cbs()
1861 nic->cb_to_clean = nic->cb_to_clean->next; in e100_clean_cbs()
1862 nic->cbs_avail++; in e100_clean_cbs()
1864 dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); in e100_clean_cbs()
1865 nic->cbs = NULL; in e100_clean_cbs()
1866 nic->cbs_avail = 0; in e100_clean_cbs()
1868 nic->cuc_cmd = cuc_start; in e100_clean_cbs()
1869 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = in e100_clean_cbs()
1870 nic->cbs; in e100_clean_cbs()
1876 unsigned int i, count = nic->params.cbs.count; in e100_alloc_cbs()
1878 nic->cuc_cmd = cuc_start; in e100_alloc_cbs()
1879 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; in e100_alloc_cbs()
1880 nic->cbs_avail = 0; in e100_alloc_cbs()
1882 nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, in e100_alloc_cbs()
1883 &nic->cbs_dma_addr); in e100_alloc_cbs()
1884 if (!nic->cbs) in e100_alloc_cbs()
1885 return -ENOMEM; in e100_alloc_cbs()
1887 for (cb = nic->cbs, i = 0; i < count; cb++, i++) { in e100_alloc_cbs()
1888 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; in e100_alloc_cbs()
1889 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; in e100_alloc_cbs()
1891 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); in e100_alloc_cbs()
1892 cb->link = cpu_to_le32(nic->cbs_dma_addr + in e100_alloc_cbs()
1896 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; in e100_alloc_cbs()
1897 nic->cbs_avail = count; in e100_alloc_cbs()
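
e100_alloc_cbs() above wires the array of command blocks into a ring twice: software next/prev pointers, plus a device-visible link field holding the bus address of the next block. A user-space sketch of the same wiring with a pretend DMA base address:

/* Sketch of the command-block ring wiring: CPU pointers plus a hardware
 * "link" carrying the bus address of the next block. */
#include <stdint.h>
#include <stdio.h>

struct demo_ring_cb {
	uint32_t link;                    /* bus address of the next block */
	struct demo_ring_cb *next, *prev;
	uint32_t dma_addr;
};

int main(void)
{
	enum { COUNT = 4 };
	struct demo_ring_cb cbs[COUNT];
	uint32_t base = 0x1FE00000;       /* pretend DMA address of cbs[0] */

	for (unsigned i = 0; i < COUNT; i++) {
		cbs[i].next = (i + 1 < COUNT) ? &cbs[i + 1] : &cbs[0];
		cbs[i].prev = (i == 0) ? &cbs[COUNT - 1] : &cbs[i - 1];
		cbs[i].dma_addr = base + i * sizeof(struct demo_ring_cb);
		cbs[i].link = base + ((i + 1) % COUNT) * sizeof(struct demo_ring_cb);
	}

	for (unsigned i = 0; i < COUNT; i++)
		printf("cb[%u]: dma=0x%08X link=0x%08X\n",
		       i, cbs[i].dma_addr, cbs[i].link);
	return 0;
}
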
1904 if (!nic->rxs) return; in e100_start_receiver()
1905 if (RU_SUSPENDED != nic->ru_running) return; in e100_start_receiver()
1908 if (!rx) rx = nic->rxs; in e100_start_receiver()
1910 /* (Re)start RU if suspended or idle and RFA is non-NULL */ in e100_start_receiver()
1911 if (rx->skb) { in e100_start_receiver()
1912 e100_exec_cmd(nic, ruc_start, rx->dma_addr); in e100_start_receiver()
1913 nic->ru_running = RU_RUNNING; in e100_start_receiver()
1920 if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) in e100_rx_alloc_skb()
1921 return -ENOMEM; in e100_rx_alloc_skb()
1924 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); in e100_rx_alloc_skb()
1925 rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, in e100_rx_alloc_skb()
1928 if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { in e100_rx_alloc_skb()
1929 dev_kfree_skb_any(rx->skb); in e100_rx_alloc_skb()
1930 rx->skb = NULL; in e100_rx_alloc_skb()
1931 rx->dma_addr = 0; in e100_rx_alloc_skb()
1932 return -ENOMEM; in e100_rx_alloc_skb()
1938 if (rx->prev->skb) { in e100_rx_alloc_skb()
1939 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; in e100_rx_alloc_skb()
1940 put_unaligned_le32(rx->dma_addr, &prev_rfd->link); in e100_rx_alloc_skb()
1941 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_alloc_skb()
1942 rx->prev->dma_addr, in e100_rx_alloc_skb()
1953 struct net_device *dev = nic->netdev; in e100_rx_indicate()
1954 struct sk_buff *skb = rx->skb; in e100_rx_indicate()
1955 struct rfd *rfd = (struct rfd *)skb->data; in e100_rx_indicate()
1960 return -EAGAIN; in e100_rx_indicate()
1963 dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, in e100_rx_indicate()
1965 rfd_status = le16_to_cpu(rfd->status); in e100_rx_indicate()
1967 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, in e100_rx_indicate()
1976 * This allows for a fast restart without re-enabling in e100_rx_indicate()
1978 if ((le16_to_cpu(rfd->command) & cb_el) && in e100_rx_indicate()
1979 (RU_RUNNING == nic->ru_running)) in e100_rx_indicate()
1981 if (ioread8(&nic->csr->scb.status) & rus_no_res) in e100_rx_indicate()
1982 nic->ru_running = RU_SUSPENDED; in e100_rx_indicate()
1983 dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, in e100_rx_indicate()
1986 return -ENODATA; in e100_rx_indicate()
1990 if (unlikely(dev->features & NETIF_F_RXFCS)) in e100_rx_indicate()
1992 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; in e100_rx_indicate()
1993 if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) in e100_rx_indicate()
1994 actual_size = RFD_BUF_LEN - sizeof(struct rfd); in e100_rx_indicate()
1997 dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, in e100_rx_indicate()
2003 * This allows for a fast restart without re-enabling interrupts. in e100_rx_indicate()
2006 if ((le16_to_cpu(rfd->command) & cb_el) && in e100_rx_indicate()
2007 (RU_RUNNING == nic->ru_running)) { in e100_rx_indicate()
2009 if (ioread8(&nic->csr->scb.status) & rus_no_res) in e100_rx_indicate()
2010 nic->ru_running = RU_SUSPENDED; in e100_rx_indicate()
2016 skb->protocol = eth_type_trans(skb, nic->netdev); in e100_rx_indicate()
2021 if (unlikely(dev->features & NETIF_F_RXALL)) { in e100_rx_indicate()
2024 nic->rx_over_length_errors++; in e100_rx_indicate()
2033 nic->rx_over_length_errors++; in e100_rx_indicate()
2037 dev->stats.rx_packets++; in e100_rx_indicate()
2038 dev->stats.rx_bytes += (actual_size - fcs_pad); in e100_rx_indicate()
2044 rx->skb = NULL; in e100_rx_indicate()
2058 for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { in e100_rx_clean()
2061 if (-EAGAIN == err || -ENODATA == err) in e100_rx_clean()
2072 if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) in e100_rx_clean()
2075 old_before_last_rx = nic->rx_to_use->prev->prev; in e100_rx_clean()
2076 old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; in e100_rx_clean()
2079 for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { in e100_rx_clean()
2084 new_before_last_rx = nic->rx_to_use->prev->prev; in e100_rx_clean()
2086 /* Set the el-bit on the buffer that is before the last buffer. in e100_rx_clean()
2091 * When the hardware hits the before last buffer with el-bit in e100_rx_clean()
2096 (struct rfd *)new_before_last_rx->skb->data; in e100_rx_clean()
2097 new_before_last_rfd->size = 0; in e100_rx_clean()
2098 new_before_last_rfd->command |= cpu_to_le16(cb_el); in e100_rx_clean()
2099 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_clean()
2100 new_before_last_rx->dma_addr, in e100_rx_clean()
2107 old_before_last_rfd->command &= ~cpu_to_le16(cb_el); in e100_rx_clean()
2108 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_clean()
2109 old_before_last_rx->dma_addr, in e100_rx_clean()
2112 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN in e100_rx_clean()
2114 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_clean()
2115 old_before_last_rx->dma_addr, in e100_rx_clean()
2122 iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); in e100_rx_clean()
2123 e100_start_receiver(nic, nic->rx_to_clean); in e100_rx_clean()
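
The receive path above keeps the EL (end-of-list) bit and a zero size on the descriptor before the last one, so the RU parks there instead of running off the list; after refilling, e100_rx_clean() sets the marker on the new before-last RFD first and only then releases the old one. A sketch of that marker move on a toy ring; the bit value and size are taken from the surrounding code:

/* Sketch of moving the EL marker forward on the receive list. */
#include <stdint.h>
#include <stdio.h>

#define CB_EL    0x8000u
#define RFD_SIZE 1522u      /* VLAN_ETH_FRAME_LEN + ETH_FCS_LEN */

struct demo_rfd { uint16_t command; uint16_t size; };

static void move_el(struct demo_rfd *old_before_last,
		    struct demo_rfd *new_before_last)
{
	new_before_last->size = 0;                 /* mark the new one first ... */
	new_before_last->command |= CB_EL;
	old_before_last->command &= ~CB_EL;        /* ... then release the old   */
	old_before_last->size = RFD_SIZE;
}

int main(void)
{
	struct demo_rfd ring[4] = { { 0, RFD_SIZE }, { 0, RFD_SIZE },
				    { CB_EL, 0 },   { 0, RFD_SIZE } };

	move_el(&ring[2], &ring[3]);               /* tail advanced by one */
	for (int i = 0; i < 4; i++)
		printf("rfd[%d]: cmd=0x%04X size=%u\n",
		       i, ring[i].command, ring[i].size);
	return 0;
}
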
2132 unsigned int i, count = nic->params.rfds.count; in e100_rx_clean_list()
2134 nic->ru_running = RU_UNINITIALIZED; in e100_rx_clean_list()
2136 if (nic->rxs) { in e100_rx_clean_list()
2137 for (rx = nic->rxs, i = 0; i < count; rx++, i++) { in e100_rx_clean_list()
2138 if (rx->skb) { in e100_rx_clean_list()
2139 dma_unmap_single(&nic->pdev->dev, in e100_rx_clean_list()
2140 rx->dma_addr, RFD_BUF_LEN, in e100_rx_clean_list()
2142 dev_kfree_skb(rx->skb); in e100_rx_clean_list()
2145 kfree(nic->rxs); in e100_rx_clean_list()
2146 nic->rxs = NULL; in e100_rx_clean_list()
2149 nic->rx_to_use = nic->rx_to_clean = NULL; in e100_rx_clean_list()
2155 unsigned int i, count = nic->params.rfds.count; in e100_rx_alloc_list()
2158 nic->rx_to_use = nic->rx_to_clean = NULL; in e100_rx_alloc_list()
2159 nic->ru_running = RU_UNINITIALIZED; in e100_rx_alloc_list()
2161 if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) in e100_rx_alloc_list()
2162 return -ENOMEM; in e100_rx_alloc_list()
2164 for (rx = nic->rxs, i = 0; i < count; rx++, i++) { in e100_rx_alloc_list()
2165 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; in e100_rx_alloc_list()
2166 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; in e100_rx_alloc_list()
2169 return -ENOMEM; in e100_rx_alloc_list()
2172 /* Set the el-bit on the buffer that is before the last buffer. in e100_rx_alloc_list()
2176 * When the hardware hits the before last buffer with el-bit and size in e100_rx_alloc_list()
2179 rx = nic->rxs->prev->prev; in e100_rx_alloc_list()
2180 before_last = (struct rfd *)rx->skb->data; in e100_rx_alloc_list()
2181 before_last->command |= cpu_to_le16(cb_el); in e100_rx_alloc_list()
2182 before_last->size = 0; in e100_rx_alloc_list()
2183 dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, in e100_rx_alloc_list()
2186 nic->rx_to_use = nic->rx_to_clean = nic->rxs; in e100_rx_alloc_list()
2187 nic->ru_running = RU_SUSPENDED; in e100_rx_alloc_list()
2196 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); in e100_intr()
2198 netif_printk(nic, intr, KERN_DEBUG, nic->netdev, in e100_intr()
2206 iowrite8(stat_ack, &nic->csr->scb.stat_ack); in e100_intr()
2210 nic->ru_running = RU_SUSPENDED; in e100_intr()
2212 if (likely(napi_schedule_prep(&nic->napi))) { in e100_intr()
2214 __napi_schedule(&nic->napi); in e100_intr()
2232 /* only re-enable interrupt if stack agrees polling is really done */ in e100_poll()
2245 e100_intr(nic->pdev->irq, netdev); in e100_netpoll()
2256 if (!is_valid_ether_addr(addr->sa_data)) in e100_set_mac_address()
2257 return -EADDRNOTAVAIL; in e100_set_mac_address()
2259 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); in e100_set_mac_address()
2268 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && in e100_asf()
2269 (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && in e100_asf()
2270 !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && in e100_asf()
2271 ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); in e100_asf()
2284 e100_set_multicast_list(nic->netdev); in e100_up()
2286 mod_timer(&nic->watchdog, jiffies); in e100_up()
2287 if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, in e100_up()
2288 nic->netdev->name, nic->netdev))) in e100_up()
2290 netif_wake_queue(nic->netdev); in e100_up()
2291 napi_enable(&nic->napi); in e100_up()
2298 del_timer_sync(&nic->watchdog); in e100_up()
2309 napi_disable(&nic->napi); in e100_down()
2310 netif_stop_queue(nic->netdev); in e100_down()
2312 free_irq(nic->pdev->irq, nic->netdev); in e100_down()
2313 del_timer_sync(&nic->watchdog); in e100_down()
2314 netif_carrier_off(nic->netdev); in e100_down()
2325 schedule_work(&nic->tx_timeout_task); in e100_tx_timeout()
2331 struct net_device *netdev = nic->netdev; in e100_tx_timeout_task()
2333 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_tx_timeout_task()
2334 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); in e100_tx_timeout_task()
2352 * packet compares byte-for-byte to the transmitted packet. */ in e100_loopback_test()
2360 if (nic->flags & ich && loopback_mode == lb_phy) in e100_loopback_test()
2363 nic->loopback = loopback_mode; in e100_loopback_test()
2368 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, in e100_loopback_test()
2373 if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { in e100_loopback_test()
2374 err = -ENOMEM; in e100_loopback_test()
2378 memset(skb->data, 0xFF, ETH_DATA_LEN); in e100_loopback_test()
2379 e100_xmit_frame(skb, nic->netdev); in e100_loopback_test()
2383 dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, in e100_loopback_test()
2386 if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), in e100_loopback_test()
2387 skb->data, ETH_DATA_LEN)) in e100_loopback_test()
2388 err = -EAGAIN; in e100_loopback_test()
2391 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); in e100_loopback_test()
2392 nic->loopback = lb_none; in e100_loopback_test()
2410 mii_ethtool_get_link_ksettings(&nic->mii, cmd); in e100_get_link_ksettings()
2421 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); in e100_set_link_ksettings()
2422 err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); in e100_set_link_ksettings()
2432 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); in e100_get_drvinfo()
2433 strlcpy(info->bus_info, pci_name(nic->pdev), in e100_get_drvinfo()
2434 sizeof(info->bus_info)); in e100_get_drvinfo()
2445 return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); in e100_get_regs_len()
2455 regs->version = (1 << 24) | nic->pdev->revision; in e100_get_regs()
2456 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | in e100_get_regs()
2457 ioread8(&nic->csr->scb.cmd_lo) << 16 | in e100_get_regs()
2458 ioread16(&nic->csr->scb.status); in e100_get_regs()
2464 buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, in e100_get_regs()
2465 E100_PHY_REGS - 1 - i); in e100_get_regs()
2466 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); in e100_get_regs()
2469 memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, in e100_get_regs()
2470 sizeof(nic->mem->dump_buf)); in e100_get_regs()
2476 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; in e100_get_wol()
2477 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; in e100_get_wol()
2484 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || in e100_set_wol()
2485 !device_can_wakeup(&nic->pdev->dev)) in e100_set_wol()
2486 return -EOPNOTSUPP; in e100_set_wol()
2488 if (wol->wolopts) in e100_set_wol()
2489 nic->flags |= wol_magic; in e100_set_wol()
2491 nic->flags &= ~wol_magic; in e100_set_wol()
2493 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); in e100_set_wol()
2503 return nic->msg_enable; in e100_get_msglevel()
2509 nic->msg_enable = value; in e100_set_msglevel()
2515 return mii_nway_restart(&nic->mii); in e100_nway_reset()
2521 return mii_link_ok(&nic->mii); in e100_get_link()
2527 return nic->eeprom_wc << 1; in e100_get_eeprom_len()
2536 eeprom->magic = E100_EEPROM_MAGIC; in e100_get_eeprom()
2537 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); in e100_get_eeprom()
2547 if (eeprom->magic != E100_EEPROM_MAGIC) in e100_set_eeprom()
2548 return -EINVAL; in e100_set_eeprom()
2550 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); in e100_set_eeprom()
2552 return e100_eeprom_save(nic, eeprom->offset >> 1, in e100_set_eeprom()
2553 (eeprom->len >> 1) + 1); in e100_set_eeprom()
2560 struct param_range *rfds = &nic->params.rfds; in e100_get_ringparam()
2561 struct param_range *cbs = &nic->params.cbs; in e100_get_ringparam()
2563 ring->rx_max_pending = rfds->max; in e100_get_ringparam()
2564 ring->tx_max_pending = cbs->max; in e100_get_ringparam()
2565 ring->rx_pending = rfds->count; in e100_get_ringparam()
2566 ring->tx_pending = cbs->count; in e100_get_ringparam()
2573 struct param_range *rfds = &nic->params.rfds; in e100_set_ringparam()
2574 struct param_range *cbs = &nic->params.cbs; in e100_set_ringparam()
2576 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in e100_set_ringparam()
2577 return -EINVAL; in e100_set_ringparam()
2581 rfds->count = max(ring->rx_pending, rfds->min); in e100_set_ringparam()
2582 rfds->count = min(rfds->count, rfds->max); in e100_set_ringparam()
2583 cbs->count = max(ring->tx_pending, cbs->min); in e100_set_ringparam()
2584 cbs->count = min(cbs->count, cbs->max); in e100_set_ringparam()
2585 netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", in e100_set_ringparam()
2586 rfds->count, cbs->count); in e100_set_ringparam()
2610 data[0] = !mii_link_ok(&nic->mii); in e100_diag_test()
2612 if (test->flags & ETH_TEST_FL_OFFLINE) { in e100_diag_test()
2615 mii_ethtool_gset(&nic->mii, &cmd); in e100_diag_test()
2624 mii_ethtool_sset(&nic->mii, &cmd); in e100_diag_test()
2630 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; in e100_diag_test()
2645 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : in e100_set_phys_id()
2654 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : in e100_set_phys_id()
2655 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; in e100_set_phys_id()
2659 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; in e100_set_phys_id()
2666 mdio_write(netdev, nic->mii.phy_id, led_reg, leds); in e100_set_phys_id()
2677 /* device-specific stats */
2694 return -EOPNOTSUPP; in e100_get_sset_count()
2705 data[i] = ((unsigned long *)&netdev->stats)[i]; in e100_get_ethtool_stats()
2707 data[i++] = nic->tx_deferred; in e100_get_ethtool_stats()
2708 data[i++] = nic->tx_single_collisions; in e100_get_ethtool_stats()
2709 data[i++] = nic->tx_multiple_collisions; in e100_get_ethtool_stats()
2710 data[i++] = nic->tx_fc_pause; in e100_get_ethtool_stats()
2711 data[i++] = nic->rx_fc_pause; in e100_get_ethtool_stats()
2712 data[i++] = nic->rx_fc_unsupported; in e100_get_ethtool_stats()
2713 data[i++] = nic->tx_tco_frames; in e100_get_ethtool_stats()
2714 data[i++] = nic->rx_tco_frames; in e100_get_ethtool_stats()
2715 data[i++] = nic->rx_short_frame_errors; in e100_get_ethtool_stats()
2716 data[i++] = nic->rx_over_length_errors; in e100_get_ethtool_stats()
2760 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); in e100_do_ioctl()
2765 nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), in e100_alloc()
2766 &nic->dma_addr, GFP_KERNEL); in e100_alloc()
2767 return nic->mem ? 0 : -ENOMEM; in e100_alloc()
2772 if (nic->mem) { in e100_free()
2773 dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), in e100_free()
2774 nic->mem, nic->dma_addr); in e100_free()
2775 nic->mem = NULL; in e100_free()
2786 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); in e100_open()
2800 netdev_features_t changed = features ^ netdev->features; in e100_set_features()
2805 netdev->features = features; in e100_set_features()
2832 return -ENOMEM; in e100_probe()
2834 netdev->hw_features |= NETIF_F_RXFCS; in e100_probe()
2835 netdev->priv_flags |= IFF_SUPP_NOFCS; in e100_probe()
2836 netdev->hw_features |= NETIF_F_RXALL; in e100_probe()
2838 netdev->netdev_ops = &e100_netdev_ops; in e100_probe()
2839 netdev->ethtool_ops = &e100_ethtool_ops; in e100_probe()
2840 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; in e100_probe()
2841 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); in e100_probe()
2844 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); in e100_probe()
2845 nic->netdev = netdev; in e100_probe()
2846 nic->pdev = pdev; in e100_probe()
2847 nic->msg_enable = (1 << debug) - 1; in e100_probe()
2848 nic->mdio_ctrl = mdio_ctrl_hw; in e100_probe()
2852 netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); in e100_probe()
2857 netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); in e100_probe()
2858 err = -ENODEV; in e100_probe()
2863 netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); in e100_probe()
2867 if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { in e100_probe()
2868 netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); in e100_probe()
2872 SET_NETDEV_DEV(netdev, &pdev->dev); in e100_probe()
2875 netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); in e100_probe()
2877 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); in e100_probe()
2878 if (!nic->csr) { in e100_probe()
2879 netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); in e100_probe()
2880 err = -ENOMEM; in e100_probe()
2884 if (ent->driver_data) in e100_probe()
2885 nic->flags |= ich; in e100_probe()
2887 nic->flags &= ~ich; in e100_probe()
2892 if (nic->mac < mac_82558_D101_A4) in e100_probe()
2893 netdev->features |= NETIF_F_VLAN_CHALLENGED; in e100_probe()
2896 spin_lock_init(&nic->cb_lock); in e100_probe()
2897 spin_lock_init(&nic->cmd_lock); in e100_probe()
2898 spin_lock_init(&nic->mdio_lock); in e100_probe()
2901 * funky state and has an interrupt pending - hint: we don't have the in e100_probe()
2907 timer_setup(&nic->watchdog, e100_watchdog, 0); in e100_probe()
2909 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); in e100_probe()
2912 netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); in e100_probe()
2921 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); in e100_probe()
2922 if (!is_valid_ether_addr(netdev->dev_addr)) { in e100_probe()
2924 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); in e100_probe()
2925 err = -EAGAIN; in e100_probe()
2928 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); in e100_probe()
2933 if ((nic->mac >= mac_82558_D101_A4) && in e100_probe()
2934 (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { in e100_probe()
2935 nic->flags |= wol_magic; in e100_probe()
2936 device_set_wakeup_enable(&pdev->dev, true); in e100_probe()
2942 strcpy(netdev->name, "eth%d"); in e100_probe()
2944 netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); in e100_probe()
2947 nic->cbs_pool = dma_pool_create(netdev->name, in e100_probe()
2948 &nic->pdev->dev, in e100_probe()
2949 nic->params.cbs.max * sizeof(struct cb), in e100_probe()
2952 if (!nic->cbs_pool) { in e100_probe()
2953 netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); in e100_probe()
2954 err = -ENOMEM; in e100_probe()
2957 netif_info(nic, probe, nic->netdev, in e100_probe()
2960 pdev->irq, netdev->dev_addr); in e100_probe()
2969 pci_iounmap(pdev, nic->csr); in e100_probe()
2987 pci_iounmap(pdev, nic->csr); in e100_remove()
2988 dma_pool_destroy(nic->cbs_pool); in e100_remove()
2996 #define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
2997 #define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
3008 if ((nic->flags & wol_magic) | e100_asf(nic)) { in __e100_shutdown()
3009 /* enable reverse auto-negotiation */ in __e100_shutdown()
3010 if (nic->phy == phy_82552_v) { in __e100_shutdown()
3011 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, in __e100_shutdown()
3014 mdio_write(netdev, nic->mii.phy_id, in __e100_shutdown()
3059 /* disable reverse auto-negotiation */ in e100_resume()
3060 if (nic->phy == phy_82552_v) { in e100_resume()
3061 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, in e100_resume()
3064 mdio_write(netdev, nic->mii.phy_id, in e100_resume()
3085 /* ------------------ PCI Error Recovery infrastructure -------------- */
3087 * e100_io_error_detected - called when PCI error is detected.
3110 * e100_io_slot_reset - called after the pci bus has been reset.
3121 pr_err("Cannot re-enable PCI device after reset\n"); in e100_io_slot_reset()
3127 if (0 != PCI_FUNC(pdev->devfn)) in e100_io_slot_reset()
3136 * e100_io_resume - resume normal operations
3153 mod_timer(&nic->watchdog, jiffies); in e100_io_resume()
3180 if (((1 << debug) - 1) & NETIF_MSG_DRV) { in e100_init_module()
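
Both nic->msg_enable at probe time and the banner check here turn the module's "debug" count into a bitmask with "(1 << debug) - 1", i.e. the lowest debug message-level bits set. A one-line sketch of what that expands to:

/* Sketch: "(1 << debug) - 1" sets the lowest <debug> message-level bits. */
#include <stdio.h>

int main(void)
{
	for (int debug = 0; debug <= 4; debug++)
		printf("debug=%d -> msg_enable=0x%04x\n", debug, (1 << debug) - 1);
	return 0;
}
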