// SPDX-License-Identifier: GPL-2.0
/*
 * pci-j721e - PCIe controller driver for TI's J721E SoCs
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>

#include "../../pci.h"
#include "pcie-cadence.h"

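/*
 * Offsets into the "intd_cfg" register space: enable, status and
 * status-clear registers for the link-down (SYS_2) event.
 */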
#define ENABLE_REG_SYS_2	0x108
#define STATUS_REG_SYS_2	0x508
#define STATUS_CLR_REG_SYS_2	0x708
#define LINK_DOWN		BIT(1)
#define J7200_LINK_DOWN		BIT(10)

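/*
 * Offsets into the "user_cfg" register space: link-training control and
 * link status reported by the controller.
 */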
#define J721E_PCIE_USER_CMD_STATUS	0x4
#define LINK_TRAINING_ENABLE		BIT(0)

#define J721E_PCIE_USER_LINKSTATUS	0x14
#define LINK_STATUS			GENMASK(1, 0)

enum link_status {
	NO_RECEIVERS_DETECTED,
	LINK_TRAINING_IN_PROGRESS,
	LINK_UP_DL_IN_PROGRESS,
	LINK_UP_DL_COMPLETED,
};

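/*
 * Bitfields in the syscon register referenced by "ti,syscon-pcie-ctrl":
 * RC/EP mode select, lane count and link-generation select.
 */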
#define J721E_MODE_RC			BIT(7)
#define LANE_COUNT_MASK			BIT(8)
#define LANE_COUNT(n)			((n) << 8)

#define GENERATION_SEL_MASK		GENMASK(1, 0)

#define MAX_LANES			2

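/* Per-controller driver state; cdns_pcie points at the Cadence core data. */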
struct j721e_pcie {
	struct device		*dev;
	u32			mode;
	u32			num_lanes;
	struct cdns_pcie	*cdns_pcie;
	void __iomem		*user_cfg_base;
	void __iomem		*intd_cfg_base;
	u32			linkdown_irq_regfield;
};

enum j721e_pcie_mode {
	PCI_MODE_RC,
	PCI_MODE_EP,
};

struct j721e_pcie_data {
	enum j721e_pcie_mode	mode;
	unsigned int		quirk_retrain_flag:1;
	unsigned int		quirk_detect_quiet_flag:1;
	u32			linkdown_irq_regfield;
	unsigned int		byte_access_allowed:1;
};

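/* Accessors for the two extra register spaces ("user_cfg" and "intd_cfg") mapped in probe. */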
static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
{
	return readl(pcie->user_cfg_base + offset);
}

static inline void j721e_pcie_user_writel(struct j721e_pcie *pcie, u32 offset,
					  u32 value)
{
	writel(value, pcie->user_cfg_base + offset);
}

static inline u32 j721e_pcie_intd_readl(struct j721e_pcie *pcie, u32 offset)
{
	return readl(pcie->intd_cfg_base + offset);
}

static inline void j721e_pcie_intd_writel(struct j721e_pcie *pcie, u32 offset,
					  u32 value)
{
	writel(value, pcie->intd_cfg_base + offset);
}

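/* "link_state" IRQ handler: report the link-down event and clear its status bit. */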
static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
{
	struct j721e_pcie *pcie = priv;
	struct device *dev = pcie->dev;
	u32 reg;

	reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
	if (!(reg & pcie->linkdown_irq_regfield))
		return IRQ_NONE;

	dev_err(dev, "LINK DOWN!\n");

	j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, pcie->linkdown_irq_regfield);
	return IRQ_HANDLED;
}

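/* Unmask the link-down event in the INTD enable register. */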
static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
{
	u32 reg;

	reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
	reg |= pcie->linkdown_irq_regfield;
	j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
}

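/*
 * cdns_pcie_ops callbacks: link training is controlled through the
 * LINK_TRAINING_ENABLE bit in the user CMD_STATUS register, and the link is
 * considered up once LINKSTATUS reports LINK_UP_DL_COMPLETED.
 */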
static int j721e_pcie_start_link(struct cdns_pcie *cdns_pcie)
{
	struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
	u32 reg;

	reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS);
	reg |= LINK_TRAINING_ENABLE;
	j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg);

	return 0;
}

static void j721e_pcie_stop_link(struct cdns_pcie *cdns_pcie)
{
	struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
	u32 reg;

	reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS);
	reg &= ~LINK_TRAINING_ENABLE;
	j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg);
}

static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie)
{
	struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
	u32 reg;

	reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS);
	reg &= LINK_STATUS;
	if (reg == LINK_UP_DL_COMPLETED)
		return true;

	return false;
}

static const struct cdns_pcie_ops j721e_pcie_ops = {
	.start_link = j721e_pcie_start_link,
	.stop_link = j721e_pcie_stop_link,
	.link_up = j721e_pcie_link_up,
};

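/* Select RC or EP operation via the J721E_MODE_RC bit in the syscon register. */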
static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon)
{
	struct device *dev = pcie->dev;
	u32 mask = J721E_MODE_RC;
	u32 mode = pcie->mode;
	u32 val = 0;
	int ret = 0;

	if (mode == PCI_MODE_RC)
		val = J721E_MODE_RC;

	ret = regmap_update_bits(syscon, 0, mask, val);
	if (ret)
		dev_err(dev, "failed to set pcie mode\n");

	return ret;
}

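/*
 * Program the link generation from the DT "max-link-speed" property (falling
 * back to Gen2 when the property is absent or below 2); the generation-select
 * field stores (gen - 1).
 */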
static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
				     struct regmap *syscon)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;
	int link_speed;
	u32 val = 0;
	int ret;

	link_speed = of_pci_get_max_link_speed(np);
	if (link_speed < 2)
		link_speed = 2;

	val = link_speed - 1;
	ret = regmap_update_bits(syscon, 0, GENERATION_SEL_MASK, val);
	if (ret)
		dev_err(dev, "failed to set link speed\n");

	return ret;
}

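/* Program the lane count; the LANE_COUNT field stores (num_lanes - 1). */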
static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
				     struct regmap *syscon)
{
	struct device *dev = pcie->dev;
	u32 lanes = pcie->num_lanes;
	u32 val = 0;
	int ret;

	val = LANE_COUNT(lanes - 1);
	ret = regmap_update_bits(syscon, 0, LANE_COUNT_MASK, val);
	if (ret)
		dev_err(dev, "failed to set lane count\n");

	return ret;
}

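/*
 * Look up the control syscon via "ti,syscon-pcie-ctrl" and program the
 * operating mode, link speed and lane count.
 */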
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node;
	struct regmap *syscon;
	int ret;

	syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-pcie-ctrl");
	if (IS_ERR(syscon)) {
		dev_err(dev, "Unable to get ti,syscon-pcie-ctrl regmap\n");
		return PTR_ERR(syscon);
	}

	ret = j721e_pcie_set_mode(pcie, syscon);
	if (ret < 0) {
		dev_err(dev, "Failed to set pci mode\n");
		return ret;
	}

	ret = j721e_pcie_set_link_speed(pcie, syscon);
	if (ret < 0) {
		dev_err(dev, "Failed to set link speed\n");
		return ret;
	}

	ret = j721e_pcie_set_lane_count(pcie, syscon);
	if (ret < 0) {
		dev_err(dev, "Failed to set num-lanes\n");
		return ret;
	}

	return 0;
}

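/*
 * On SoCs where the match data does not set byte_access_allowed (J721E),
 * route root-bus config reads/writes through the generic 32-bit accessors;
 * sub-word root-bus accesses are assumed unsupported there. Devices below
 * the root bus use the normal generic accessors.
 */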
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 *value)
{
	if (pci_is_root_bus(bus))
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int cdns_ti_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				     int where, int size, u32 value)
{
	if (pci_is_root_bus(bus))
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops cdns_ti_pcie_host_ops = {
	.map_bus	= cdns_pci_map_bus,
	.read		= cdns_ti_pcie_config_read,
	.write		= cdns_ti_pcie_config_write,
};

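/*
 * Per-SoC match data: J721E needs the retrain quirk and 32-bit-only root-bus
 * config access, J7200 needs the detect-quiet quirk, and J7200/AM64 report
 * link-down on a different interrupt status bit.
 */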
static const struct j721e_pcie_data j721e_pcie_rc_data = {
	.mode = PCI_MODE_RC,
	.quirk_retrain_flag = true,
	.byte_access_allowed = false,
	.linkdown_irq_regfield = LINK_DOWN,
};

static const struct j721e_pcie_data j721e_pcie_ep_data = {
	.mode = PCI_MODE_EP,
	.linkdown_irq_regfield = LINK_DOWN,
};

static const struct j721e_pcie_data j7200_pcie_rc_data = {
	.mode = PCI_MODE_RC,
	.quirk_detect_quiet_flag = true,
	.linkdown_irq_regfield = J7200_LINK_DOWN,
	.byte_access_allowed = true,
};

static const struct j721e_pcie_data j7200_pcie_ep_data = {
	.mode = PCI_MODE_EP,
	.quirk_detect_quiet_flag = true,
};

static const struct j721e_pcie_data am64_pcie_rc_data = {
	.mode = PCI_MODE_RC,
	.linkdown_irq_regfield = J7200_LINK_DOWN,
	.byte_access_allowed = true,
};

static const struct j721e_pcie_data am64_pcie_ep_data = {
	.mode = PCI_MODE_EP,
	.linkdown_irq_regfield = J7200_LINK_DOWN,
};

static const struct of_device_id of_j721e_pcie_match[] = {
	{
		.compatible = "ti,j721e-pcie-host",
		.data = &j721e_pcie_rc_data,
	},
	{
		.compatible = "ti,j721e-pcie-ep",
		.data = &j721e_pcie_ep_data,
	},
	{
		.compatible = "ti,j7200-pcie-host",
		.data = &j7200_pcie_rc_data,
	},
	{
		.compatible = "ti,j7200-pcie-ep",
		.data = &j7200_pcie_ep_data,
	},
	{
		.compatible = "ti,am64-pcie-host",
		.data = &am64_pcie_rc_data,
	},
	{
		.compatible = "ti,am64-pcie-ep",
		.data = &am64_pcie_ep_data,
	},
	{},
};

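/*
 * Probe: map the "intd_cfg" and "user_cfg" regions, configure the control
 * syscon, request the "link_state" IRQ, then set up the Cadence core as a
 * host bridge or endpoint depending on the match data.
 */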
static int j721e_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct pci_host_bridge *bridge;
	const struct j721e_pcie_data *data;
	struct cdns_pcie *cdns_pcie;
	struct j721e_pcie *pcie;
	struct cdns_pcie_rc *rc;
	struct cdns_pcie_ep *ep;
	struct gpio_desc *gpiod;
	void __iomem *base;
	u32 num_lanes;
	u32 mode;
	int ret;
	int irq;

	data = of_device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	mode = (u32)data->mode;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = dev;
	pcie->mode = mode;
	pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;

	base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
	if (IS_ERR(base))
		return PTR_ERR(base);
	pcie->intd_cfg_base = base;

	base = devm_platform_ioremap_resource_byname(pdev, "user_cfg");
	if (IS_ERR(base))
		return PTR_ERR(base);
	pcie->user_cfg_base = base;

	ret = of_property_read_u32(node, "num-lanes", &num_lanes);
	if (ret || num_lanes > MAX_LANES)
		num_lanes = 1;
	pcie->num_lanes = num_lanes;

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
		return -EINVAL;

	irq = platform_get_irq_byname(pdev, "link_state");
	if (irq < 0)
		return irq;

	dev_set_drvdata(dev, pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	ret = j721e_pcie_ctrl_init(pcie);
	if (ret < 0) {
		dev_err(dev, "j721e_pcie_ctrl_init failed\n");
		goto err_get_sync;
	}

	ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
			       "j721e-pcie-link-down-irq", pcie);
	if (ret < 0) {
		dev_err(dev, "failed to request link state IRQ %d\n", irq);
		goto err_get_sync;
	}

	j721e_pcie_config_link_irq(pcie);

	switch (mode) {
	case PCI_MODE_RC:
		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
		if (!bridge) {
			ret = -ENOMEM;
			goto err_get_sync;
		}

		if (!data->byte_access_allowed)
			bridge->ops = &cdns_ti_pcie_host_ops;
		rc = pci_host_bridge_priv(bridge);
		rc->quirk_retrain_flag = data->quirk_retrain_flag;
		rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;

		cdns_pcie = &rc->pcie;
		cdns_pcie->dev = dev;
		cdns_pcie->ops = &j721e_pcie_ops;
		pcie->cdns_pcie = cdns_pcie;

		gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(gpiod)) {
			ret = PTR_ERR(gpiod);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get reset GPIO\n");
			goto err_get_sync;
		}

		ret = cdns_pcie_init_phy(dev, cdns_pcie);
		if (ret) {
			dev_err(dev, "Failed to init phy\n");
			goto err_get_sync;
		}

		/*
		 * "Power Sequencing and Reset Signal Timings" table in
		 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
		 * indicates PERST# should be deasserted after minimum of 100us
		 * once REFCLK is stable. The REFCLK to the connector in RC
		 * mode is selected while enabling the PHY. So deassert PERST#
		 * after 100 us.
		 */
		if (gpiod) {
			usleep_range(100, 200);
			gpiod_set_value_cansleep(gpiod, 1);
		}

		ret = cdns_pcie_host_setup(rc);
		if (ret < 0)
			goto err_pcie_setup;

		break;
	case PCI_MODE_EP:
		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
		if (!ep) {
			ret = -ENOMEM;
			goto err_get_sync;
		}
		ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;

		cdns_pcie = &ep->pcie;
		cdns_pcie->dev = dev;
		cdns_pcie->ops = &j721e_pcie_ops;
		pcie->cdns_pcie = cdns_pcie;

		ret = cdns_pcie_init_phy(dev, cdns_pcie);
		if (ret) {
			dev_err(dev, "Failed to init phy\n");
			goto err_get_sync;
		}

		ret = cdns_pcie_ep_setup(ep);
		if (ret < 0)
			goto err_pcie_setup;

		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}

	return 0;

err_pcie_setup:
	cdns_pcie_disable_phy(cdns_pcie);

err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

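/* Undo probe: disable the PHY and drop the runtime PM reference. */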
static int j721e_pcie_remove(struct platform_device *pdev)
{
	struct j721e_pcie *pcie = platform_get_drvdata(pdev);
	struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
	struct device *dev = &pdev->dev;

	cdns_pcie_disable_phy(cdns_pcie);
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return 0;
}

static struct platform_driver j721e_pcie_driver = {
	.probe  = j721e_pcie_probe,
	.remove = j721e_pcie_remove,
	.driver = {
		.name	= "j721e-pcie",
		.of_match_table = of_j721e_pcie_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(j721e_pcie_driver);