// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation
 */

#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"

struct intel_priv_data {
	int mdio_adhoc_addr;	/* MDIO address for SerDes, etc. */
};

/* This struct is used to associate a PCI function of the MAC controller
 * on a board, discovered via DMI, with the address of the PHY connected
 * to that MAC. A negative address means the MAC controller is not
 * connected to a PHY.
 */
struct stmmac_pci_func_data {
	unsigned int func;
	int phy_addr;
};

struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;
};

struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};

static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
				    const struct dmi_system_id *dmi_list)
{
	const struct stmmac_pci_func_data *func_data;
	const struct stmmac_pci_dmi_data *dmi_data;
	const struct dmi_system_id *dmi_id;
	int func = PCI_FUNC(pdev->devfn);
	size_t n;

	dmi_id = dmi_first_match(dmi_list);
	if (!dmi_id)
		return -ENODEV;

	dmi_data = dmi_id->driver_data;
	func_data = dmi_data->func;

	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
		if (func_data->func == func)
			return func_data->phy_addr;

	return -ENODEV;
}

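/* Poll an ad-hoc MDIO register until (value & mask) matches (val & mask),
 * retrying up to 10 times with POLL_DELAY_US between reads; returns 0 on
 * a match or -ETIMEDOUT if the condition is never met.
 */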
static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
			      int phyreg, u32 mask, u32 val)
{
	unsigned int retries = 10;
	int val_rd;

	do {
		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
		if ((val_rd & mask) == (val & mask))
			return 0;
		udelay(POLL_DELAY_US);
	} while (--retries);

	return -ETIMEDOUT;
}

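/* Bring up the SerDes lane over the ad-hoc MDIO address: request the PLL
 * clock, assert the lane reset, then move the power state to P0, polling
 * GSR0 for the corresponding acknowledgement after each step.
 */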
static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/* move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	return 0;
}

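/* Power the SerDes lane back down in the reverse order: move the power
 * state to P3, then de-assert clk_req and the lane reset, polling GSR0
 * after each step.
 */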
static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}

static void common_default_data(struct plat_stmmacenet_data *plat)
{
	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
	plat->has_gmac = 1;
	plat->force_sf_dma_mode = 1;

	plat->mdio_bus_data->needs_reset = true;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	/* Set default number of RX and TX queues to use */
	plat->tx_queues_to_use = 1;
	plat->rx_queues_to_use = 1;

	/* Disable Priority config by default */
	plat->tx_queues_cfg[0].use_prio = false;
	plat->rx_queues_cfg[0].use_prio = false;

	/* Disable RX queues routing by default */
	plat->rx_queues_cfg[0].pkt_route = 0x0;
}

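/* Defaults shared by all Intel mGbE (dwmac4) instances: queue, DMA and AXI
 * configuration, plus a fixed-rate "stmmac" clock registered at the PTP
 * clock rate chosen by the per-SKU setup callbacks.
 */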
static int intel_mgbe_common_data(struct pci_dev *pdev,
				  struct plat_stmmacenet_data *plat)
{
	char clk_name[20];
	int ret;
	int i;

	plat->phy_addr = -1;
	plat->clk_csr = 5;
	plat->has_gmac = 0;
	plat->has_gmac4 = 1;
	plat->force_sf_dma_mode = 0;
	plat->tso_en = 1;
	plat->sph_disable = 1;

	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	for (i = 0; i < plat->rx_queues_to_use; i++) {
		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
		plat->rx_queues_cfg[i].chan = i;

		/* Disable Priority config by default */
		plat->rx_queues_cfg[i].use_prio = false;

		/* Disable RX queues routing by default */
		plat->rx_queues_cfg[i].pkt_route = 0x0;
	}

	for (i = 0; i < plat->tx_queues_to_use; i++) {
		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;

		/* Disable Priority config by default */
		plat->tx_queues_cfg[i].use_prio = false;
	}

	/* FIFO size is 4096 bytes for 1 tx/rx queue */
	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;

	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	plat->tx_queues_cfg[0].weight = 0x09;
	plat->tx_queues_cfg[1].weight = 0x0A;
	plat->tx_queues_cfg[2].weight = 0x0B;
	plat->tx_queues_cfg[3].weight = 0x0C;
	plat->tx_queues_cfg[4].weight = 0x0D;
	plat->tx_queues_cfg[5].weight = 0x0E;
	plat->tx_queues_cfg[6].weight = 0x0F;
	plat->tx_queues_cfg[7].weight = 0x10;

	plat->dma_cfg->pbl = 32;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 0;
	plat->dma_cfg->mixed_burst = 0;
	plat->dma_cfg->aal = 0;

	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
				 GFP_KERNEL);
	if (!plat->axi)
		return -ENOMEM;

	plat->axi->axi_lpi_en = 0;
	plat->axi->axi_xit_frm = 0;
	plat->axi->axi_wr_osr_lmt = 1;
	plat->axi->axi_rd_osr_lmt = 1;
	plat->axi->axi_blen[0] = 4;
	plat->axi->axi_blen[1] = 8;
	plat->axi->axi_blen[2] = 16;

	plat->ptp_max_adj = plat->clk_ptp_rate;
	plat->eee_usecs_rate = plat->clk_ptp_rate;

	/* Set system clock */
	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));

	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
						   clk_name, NULL, 0,
						   plat->clk_ptp_rate);

	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
		plat->stmmac_clk = NULL;
	}

	ret = clk_prepare_enable(plat->stmmac_clk);
	if (ret) {
		clk_unregister_fixed_rate(plat->stmmac_clk);
		return ret;
	}

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	plat->vlan_fail_q_en = true;

	/* Use the last Rx queue */
	plat->vlan_fail_q = plat->rx_queues_to_use - 1;

	return 0;
}

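/* Per-SKU setup callbacks: EHL integrated MACs use 8 RX/8 TX queues, the
 * PSE-attached MACs additionally limit DMA addressing to 32 bits, and the
 * SGMII variants hook up the SerDes power-up/power-down callbacks.
 */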
static int ehl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 8;
	plat->tx_queues_to_use = 8;
	plat->clk_ptp_rate = 200000000;

	return intel_mgbe_common_data(pdev, plat);
}

static int ehl_sgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_sgmii1g_info = {
	.setup = ehl_sgmii_data,
};

static int ehl_rgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_rgmii1g_info = {
	.setup = ehl_rgmii_data,
};

static int ehl_pse0_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->addr64 = 32;
	return ehl_common_data(pdev, plat);
}

static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
	.setup = ehl_pse0_rgmii1g_data,
};

static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
	.setup = ehl_pse0_sgmii1g_data,
};

static int ehl_pse1_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 3;
	plat->addr64 = 32;
	return ehl_common_data(pdev, plat);
}

static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
	.setup = ehl_pse1_rgmii1g_data,
};

static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
	.setup = ehl_pse1_sgmii1g_data,
};

static int tgl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 6;
	plat->tx_queues_to_use = 4;
	plat->clk_ptp_rate = 200000000;

	return intel_mgbe_common_data(pdev, plat);
}

static int tgl_sgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_info = {
	.setup = tgl_sgmii_data,
};

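/* Per-board PHY address tables for Quark-based designs, selected at probe
 * time by matching the DMI board name (see quark_pci_dmi below).
 */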
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};

static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};

static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020,
	 * which has only one PCI network device, while the other asset
	 * tags are for the IOT2040, which has two.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};

static int quark_default_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	int ret;

	/* Set common default data first */
	common_default_data(plat);

	/* Refuse to load the driver and register the net device if the MAC
	 * controller does not connect to any PHY interface.
	 */
	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
	if (ret < 0) {
		/* Return error to the caller on DMI enabled boards. */
		if (dmi_get_system_info(DMI_BOARD_NAME))
			return ret;

		/* Galileo boards with old firmware don't support DMI. We always
		 * use 1 here as the PHY address, so at least the first found MAC
		 * controller gets probed.
		 */
		ret = 1;
	}

	plat->bus_id = pci_dev_id(pdev);
	plat->phy_addr = ret;
	plat->phy_interface = PHY_INTERFACE_MODE_RMII;

	plat->dma_cfg->pbl = 16;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 1;
	/* AXI (TODO) */

	return 0;
}

static const struct stmmac_pci_info quark_info = {
	.setup = quark_default_data,
};

/**
 * intel_eth_pci_probe
 *
 * @pdev: pci device pointer
 * @id: pointer to table of device IDs
 *
 * Description: This probing function gets called for all PCI devices which
 * match the ID table and are not "owned" by another driver yet. This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe function returns zero when the driver chooses
 * to take "ownership" of the device, or a negative error code otherwise.
 */
static int intel_eth_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
	struct intel_priv_data *intel_priv;
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int ret;

	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
	if (!intel_priv)
		return -ENOMEM;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	/* Enable pci device */
	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	plat->bsp_priv = intel_priv;
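	/* SerDes and other ad-hoc registers are accessed through a dedicated
	 * MDIO address (0x15) on these controllers.
	 */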
	intel_priv->mdio_adhoc_addr = 0x15;

	ret = info->setup(pdev, plat);
	if (ret)
		return ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];
	res.wol_irq = pci_irq_vector(pdev, 0);
	res.irq = pci_irq_vector(pdev, 0);

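	/* Program GMAC_1US_TIC_COUNTER from the EEE reference clock rate:
	 * clock cycles per microsecond, minus one.
	 */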
	if (plat->eee_usecs_rate > 0) {
		u32 tx_lpi_usec;

		tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
	}

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret) {
		pci_free_irq_vectors(pdev);
		clk_disable_unprepare(plat->stmmac_clk);
		clk_unregister_fixed_rate(plat->stmmac_clk);
	}

	return ret;
}

/**
 * intel_eth_pci_remove
 *
 * @pdev: pci device pointer
 * Description: this function calls the main driver to free the net resources
 * and releases the PCI resources.
 */
static void intel_eth_pci_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	stmmac_dvr_remove(&pdev->dev);

	pci_free_irq_vectors(pdev);

	clk_disable_unprepare(priv->plat->stmmac_clk);
	clk_unregister_fixed_rate(priv->plat->stmmac_clk);

	pcim_iounmap_regions(pdev, BIT(0));

	pci_disable_device(pdev);
}

static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);
	return 0;
}

static int __maybe_unused intel_eth_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return stmmac_resume(dev);
}

static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
			 intel_eth_pci_resume);

#define PCI_DEVICE_ID_INTEL_QUARK_ID			0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G_ID		0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G_ID		0x4b31
#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5_ID		0x4b32
/* The Intel(R) Programmable Services Engine (Intel(R) PSE) consists of
 * two MACs, which are named PSE0 and PSE1.
 */
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G_ID		0x4ba0
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G_ID		0x4ba1
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5_ID	0x4ba2
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID		0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID		0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID	0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID		0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID		0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID		0xa0ac

static const struct pci_device_id intel_eth_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID, &ehl_pse0_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID, &ehl_pse1_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);

static struct pci_driver intel_eth_pci_driver = {
	.name = "intel-eth-pci",
	.id_table = intel_eth_pci_id_table,
	.probe = intel_eth_pci_probe,
	.remove = intel_eth_pci_remove,
	.driver         = {
		.pm     = &intel_eth_pm_ops,
	},
};

module_pci_driver(intel_eth_pci_driver);

MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
MODULE_LICENSE("GPL v2");