xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/e1000/e1000.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /* Copyright(c) 1999 - 2006 Intel Corporation. */
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun /* Linux PRO/1000 Ethernet Driver main header file */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #ifndef _E1000_H_
7*4882a593Smuzhiyun #define _E1000_H_
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/stddef.h>
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun #include <linux/types.h>
12*4882a593Smuzhiyun #include <asm/byteorder.h>
13*4882a593Smuzhiyun #include <linux/mm.h>
14*4882a593Smuzhiyun #include <linux/errno.h>
15*4882a593Smuzhiyun #include <linux/ioport.h>
16*4882a593Smuzhiyun #include <linux/pci.h>
17*4882a593Smuzhiyun #include <linux/kernel.h>
18*4882a593Smuzhiyun #include <linux/netdevice.h>
19*4882a593Smuzhiyun #include <linux/etherdevice.h>
20*4882a593Smuzhiyun #include <linux/skbuff.h>
21*4882a593Smuzhiyun #include <linux/delay.h>
22*4882a593Smuzhiyun #include <linux/timer.h>
23*4882a593Smuzhiyun #include <linux/slab.h>
24*4882a593Smuzhiyun #include <linux/vmalloc.h>
25*4882a593Smuzhiyun #include <linux/interrupt.h>
26*4882a593Smuzhiyun #include <linux/string.h>
27*4882a593Smuzhiyun #include <linux/pagemap.h>
28*4882a593Smuzhiyun #include <linux/dma-mapping.h>
29*4882a593Smuzhiyun #include <linux/bitops.h>
30*4882a593Smuzhiyun #include <asm/io.h>
31*4882a593Smuzhiyun #include <asm/irq.h>
32*4882a593Smuzhiyun #include <linux/capability.h>
33*4882a593Smuzhiyun #include <linux/in.h>
34*4882a593Smuzhiyun #include <linux/ip.h>
35*4882a593Smuzhiyun #include <linux/ipv6.h>
36*4882a593Smuzhiyun #include <linux/tcp.h>
37*4882a593Smuzhiyun #include <linux/udp.h>
38*4882a593Smuzhiyun #include <net/pkt_sched.h>
39*4882a593Smuzhiyun #include <linux/list.h>
40*4882a593Smuzhiyun #include <linux/reboot.h>
41*4882a593Smuzhiyun #include <net/checksum.h>
42*4882a593Smuzhiyun #include <linux/mii.h>
43*4882a593Smuzhiyun #include <linux/ethtool.h>
44*4882a593Smuzhiyun #include <linux/if_vlan.h>
45*4882a593Smuzhiyun 
/* PCI BAR indices used when mapping device resources */
#define BAR_0		0
#define BAR_1		1

/* Shorthand for a pci_device_id table entry matching an Intel device id */
#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
	PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun struct e1000_adapter;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun #include "e1000_hw.h"
55*4882a593Smuzhiyun 
/* Max interrupt-cause iterations handled in one ISR invocation */
#define E1000_MAX_INTR			10

/*
 * Count for polling __E1000_RESET condition every 10-20msec.
 */
#define E1000_CHECK_RESET_COUNT	50

/* TX/RX descriptor defines */
#define E1000_DEFAULT_TXD		256
#define E1000_MAX_TXD			256
#define E1000_MIN_TXD			48
#define E1000_MAX_82544_TXD		4096

#define E1000_DEFAULT_RXD		256
#define E1000_MAX_RXD			256
#define E1000_MIN_RXD			48
#define E1000_MAX_82544_RXD		4096

/* Interrupt Throttle Rate bounds (microseconds between interrupts) */
#define E1000_MIN_ITR_USECS		10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS		10000 /* 100    irq/sec */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE	1522

/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128		128    /* Used for packet split */
#define E1000_RXBUFFER_256		256    /* Used for packet split */
#define E1000_RXBUFFER_512		512
#define E1000_RXBUFFER_1024		1024
#define E1000_RXBUFFER_2048		2048
#define E1000_RXBUFFER_4096		4096
#define E1000_RXBUFFER_8192		8192
#define E1000_RXBUFFER_16384		16384

/* SmartSpeed delimiters */
#define E1000_SMARTSPEED_DOWNSHIFT	3
#define E1000_SMARTSPEED_MAX		15

/* Packet Buffer allocations */
#define E1000_PBA_BYTES_SHIFT		0xA
#define E1000_TX_HEAD_ADDR_SHIFT	7
#define E1000_PBA_TX_MASK		0xFFFF0000

/* Flow Control Watermarks */
#define E1000_FC_HIGH_DIFF	0x1638 /* High: 5688 bytes below Rx FIFO size */
#define E1000_FC_LOW_DIFF	0x1640 /* Low:  5696 bytes below Rx FIFO size */

#define E1000_FC_PAUSE_TIME	0xFFFF /* pause for the max or until send xon */

/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define E1000_TX_QUEUE_WAKE	16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define E1000_RX_BUFFER_WRITE	16 /* Must be power of 2 */

/* 0 = autonegotiate all supported speed/duplex combinations */
#define AUTO_ALL_MODES		0
/* EEPROM word bits — presumably APM wake enable; verify against e1000_hw.h */
#define E1000_EEPROM_82544_APM	0x0004
#define E1000_EEPROM_APME	0x0400

#ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE	e1000_ms_hw_default
#endif

/* Sentinel: no manageability VLAN currently configured */
#define E1000_MNG_VLAN_NONE	(-1)
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct e1000_tx_buffer {
	struct sk_buff *skb;		/* skb being transmitted */
	dma_addr_t dma;			/* DMA handle for the mapped data */
	unsigned long time_stamp;	/* when queued — presumably jiffies; confirm in e1000_main.c */
	u16 length;			/* length of the DMA mapping */
	u16 next_to_watch;		/* descriptor index to check for completion */
	bool mapped_as_page;		/* true if dma maps a page fragment, not linear data */
	unsigned short segs;		/* number of segments (TSO) */
	unsigned int bytecount;		/* total bytes accounted to this buffer */
};
134*4882a593Smuzhiyun 
/* Rx buffer bookkeeping: the backing storage plus its DMA mapping */
struct e1000_rx_buffer {
	union {
		struct page *page; /* jumbo: alloc_page */
		u8 *data; /* else, netdev_alloc_frag */
	} rxbuf;
	dma_addr_t dma;		/* DMA handle for the mapped receive buffer */
};
142*4882a593Smuzhiyun 
/* One hardware transmit descriptor ring and its software bookkeeping */
struct e1000_tx_ring {
	/* pointer to the descriptor ring memory */
	void *desc;
	/* physical address of the descriptor ring */
	dma_addr_t dma;
	/* length of descriptor ring in bytes */
	unsigned int size;
	/* number of descriptors in the ring */
	unsigned int count;
	/* next descriptor to associate a buffer with */
	unsigned int next_to_use;
	/* next descriptor to check for DD status bit */
	unsigned int next_to_clean;
	/* array of buffer information structs */
	struct e1000_tx_buffer *buffer_info;

	u16 tdh;		/* Tx descriptor head (TDH) */
	u16 tdt;		/* Tx descriptor tail (TDT) */
	bool last_tx_tso;	/* last packet sent on this ring used TSO */
};
163*4882a593Smuzhiyun 
/* One hardware receive descriptor ring and its software bookkeeping */
struct e1000_rx_ring {
	/* pointer to the descriptor ring memory */
	void *desc;
	/* physical address of the descriptor ring */
	dma_addr_t dma;
	/* length of descriptor ring in bytes */
	unsigned int size;
	/* number of descriptors in the ring */
	unsigned int count;
	/* next descriptor to associate a buffer with */
	unsigned int next_to_use;
	/* next descriptor to check for DD status bit */
	unsigned int next_to_clean;
	/* array of buffer information structs */
	struct e1000_rx_buffer *buffer_info;
	/* partially assembled skb carried across polls (jumbo receive) — TODO confirm */
	struct sk_buff *rx_skb_top;

	/* cpu for rx queue */
	int cpu;

	u16 rdh;		/* Rx descriptor head (RDH) */
	u16 rdt;		/* Rx descriptor tail (RDT) */
};
187*4882a593Smuzhiyun 
/*
 * Number of free descriptors in ring R.  next_to_clean is read with
 * acquire semantics (pairing with a release store in the cleanup path,
 * not visible in this header); one slot is deliberately kept unused so
 * that head == tail unambiguously means "empty".
 */
#define E1000_DESC_UNUSED(R)						\
({									\
	unsigned int clean = smp_load_acquire(&(R)->next_to_clean);	\
	unsigned int use = READ_ONCE((R)->next_to_use);			\
	(clean > use ? 0 : (R)->count) + clean - use - 1;		\
})

/* Typed accessors for the i-th descriptor of ring R */
#define E1000_RX_DESC_EXT(R, i)						\
	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
#define E1000_RX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_rx_desc)
#define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
#define E1000_CONTEXT_DESC(R, i)	E1000_GET_DESC(R, i, e1000_context_desc)
201*4882a593Smuzhiyun 
/* board specific private data structure */

struct e1000_adapter {
	/* one bit per VLAN id currently active on the interface */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	u16 mng_vlan_id;		/* E1000_MNG_VLAN_NONE when unset */
	u32 bd_number;
	u32 rx_buffer_len;		/* current Rx buffer size in bytes */
	u32 wol;			/* Wake-on-LAN settings */
	u32 smartspeed;			/* SmartSpeed polling counter; see delimiters above */
	u32 en_mng_pt;
	u16 link_speed;
	u16 link_duplex;
	spinlock_t stats_lock;		/* protects statistics updates */
	unsigned int total_tx_bytes;
	unsigned int total_tx_packets;
	unsigned int total_rx_bytes;
	unsigned int total_rx_packets;
	/* Interrupt Throttle Rate */
	u32 itr;
	u32 itr_setting;
	u16 tx_itr;
	u16 rx_itr;

	u8 fc_autoneg;			/* flow control autonegotiation setting */

	/* TX */
	struct e1000_tx_ring *tx_ring;      /* One per active queue */
	unsigned int restart_queue;
	u32 txd_cmd;			/* command bits applied to Tx descriptors */
	u32 tx_int_delay;
	u32 tx_abs_int_delay;
	u32 gotcl;
	u64 gotcl_old;
	u64 tpt_old;
	u64 colc_old;
	u32 tx_timeout_count;
	u32 tx_fifo_head;
	u32 tx_head_addr;
	u32 tx_fifo_size;
	u8  tx_timeout_factor;
	atomic_t tx_fifo_stall;
	bool pcix_82544;		/* 82544-on-PCI-X workaround flag — confirm in e1000_main.c */
	bool detect_tx_hung;
	bool dump_buffers;

	/* RX */
	/* Rx processing hooks; implementations are selected elsewhere in the driver */
	bool (*clean_rx)(struct e1000_adapter *adapter,
			 struct e1000_rx_ring *rx_ring,
			 int *work_done, int work_to_do);
	void (*alloc_rx_buf)(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring,
			     int cleaned_count);
	struct e1000_rx_ring *rx_ring;      /* One per active queue */
	struct napi_struct napi;

	int num_tx_queues;
	int num_rx_queues;

	u64 hw_csum_err;		/* Rx checksum offload error count */
	u64 hw_csum_good;		/* Rx checksum offload success count */
	u32 alloc_rx_buff_failed;
	u32 rx_int_delay;
	u32 rx_abs_int_delay;
	bool rx_csum;			/* Rx checksum offload enabled */
	u32 gorcl;
	u64 gorcl_old;

	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;

	/* structs defined in e1000_hw.h */
	struct e1000_hw hw;
	struct e1000_hw_stats stats;
	struct e1000_phy_info phy_info;
	struct e1000_phy_stats phy_stats;

	u32 test_icr;			/* ICR value captured during self-test */
	struct e1000_tx_ring test_tx_ring;
	struct e1000_rx_ring test_rx_ring;

	int msg_enable;			/* netif message-level bitmask */

	/* to not mess up cache alignment, always add to the bottom */
	bool tso_force;
	bool smart_power_down;	/* phy smart power down */
	bool quad_port_a;
	unsigned long flags;
	u32 eeprom_wol;			/* WoL configuration read from the EEPROM */

	/* for ioport free */
	int bars;
	int need_ioport;

	bool discarding;		/* mid-discard of a bad/oversized packet — confirm */

	struct work_struct reset_task;
	struct delayed_work watchdog_task;
	struct delayed_work fifo_stall_task;
	struct delayed_work phy_info_task;
};
303*4882a593Smuzhiyun 
/* Driver state flags — presumably used as bit numbers (test_bit/set_bit)
 * in an adapter state word; confirm against e1000_main.c users.
 */
enum e1000_state_t {
	__E1000_TESTING,
	__E1000_RESETTING,
	__E1000_DOWN,
	__E1000_DISABLED
};
310*4882a593Smuzhiyun 
/* Prefix all pr_*() output from this driver with the module name */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
/* Logging helpers: e_dbg() expects a local 'hw' pointer in the caller's
 * scope; the remaining macros expect a local 'adapter' pointer.
 */
#define e_dbg(format, arg...) \
	netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
#define e_err(msglvl, format, arg...) \
	netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_info(msglvl, format, arg...) \
	netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_warn(msglvl, format, arg...) \
	netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_notice(msglvl, format, arg...) \
	netif_notice(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_dev_info(format, arg...) \
	dev_info(&adapter->pdev->dev, format, ## arg)
#define e_dev_warn(format, arg...) \
	dev_warn(&adapter->pdev->dev, format, ## arg)
#define e_dev_err(format, arg...) \
	dev_err(&adapter->pdev->dev, format, ## arg)
331*4882a593Smuzhiyun 
extern char e1000_driver_name[];

/* Driver entry points implemented in the e1000 .c files */
int e1000_open(struct net_device *netdev);
int e1000_close(struct net_device *netdev);
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_update_stats(struct e1000_adapter *adapter);
bool e1000_has_link(struct e1000_adapter *adapter);
void e1000_power_up_phy(struct e1000_adapter *);
void e1000_set_ethtool_ops(struct net_device *netdev);
void e1000_check_options(struct e1000_adapter *adapter);
char *e1000_get_hw_dev_name(struct e1000_hw *hw);
352*4882a593Smuzhiyun #endif /* _E1000_H_ */
353