// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips
 * such as Graviton and Alpine)
 *
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Author: Jonathan Chocron <jonnyc@amazon.com>
 */

#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/pci-acpi.h>
#include "../../pci.h"

#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)

struct al_pcie_acpi {
	void __iomem *dbi_base;
};

static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				     int where)
{
	struct pci_config_window *cfg = bus->sysdata;
	struct al_pcie_acpi *pcie = cfg->priv;
	void __iomem *dbi_base = pcie->dbi_base;

	if (bus->number == cfg->busr.start) {
		/*
		 * The DW PCIe core doesn't filter out transactions to other
		 * devices/functions on the root bus num, so we do this here.
		 */
		if (PCI_SLOT(devfn) > 0)
			return NULL;
		else
			return dbi_base + where;
	}

	return pci_ecam_map_bus(bus, devfn, where);
}

static int al_pcie_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct acpi_device *adev = to_acpi_device(dev);
	struct acpi_pci_root *root = acpi_driver_data(adev);
	struct al_pcie_acpi *al_pcie;
	struct resource *res;
	int ret;

	al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
	if (!al_pcie)
		return -ENOMEM;

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res);
	if (ret) {
		dev_err(dev, "can't get rc dbi base address for SEG %d\n",
			root->segment);
		return ret;
	}

	dev_dbg(dev, "Root port dbi res: %pR\n", res);

	al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(al_pcie->dbi_base))
		return PTR_ERR(al_pcie->dbi_base);

	cfg->priv = al_pcie;

	return 0;
}

const struct pci_ecam_ops al_pcie_ops = {
	.bus_shift    = 20,
	.init         = al_pcie_init,
	.pci_ops      = {
		.map_bus    = al_pcie_map_bus,
		.read       = pci_generic_config_read,
		.write      = pci_generic_config_write,
	}
};
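
/*
 * Note: al_pcie_ops is not registered from this file; it is expected to be
 * referenced from the ACPI MCFG quirk table (drivers/acpi/pci_mcfg.c), which
 * selects these ECAM ops for the affected Amazon platforms.
 */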

#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */

#ifdef CONFIG_PCIE_AL

#include <linux/of_pci.h>
#include "pcie-designware.h"

#define AL_PCIE_REV_ID_2	2
#define AL_PCIE_REV_ID_3	3
#define AL_PCIE_REV_ID_4	4

#define AXI_BASE_OFFSET		0x0

#define DEVICE_ID_OFFSET	0x16c

#define DEVICE_REV_ID			0x0
#define DEVICE_REV_ID_DEV_ID_MASK	GENMASK(31, 16)

#define DEVICE_REV_ID_DEV_ID_X4		0
#define DEVICE_REV_ID_DEV_ID_X8		2
#define DEVICE_REV_ID_DEV_ID_X16	4

#define OB_CTRL_REV1_2_OFFSET	0x0040
#define OB_CTRL_REV3_5_OFFSET	0x0030

#define CFG_TARGET_BUS			0x0
#define CFG_TARGET_BUS_MASK_MASK	GENMASK(7, 0)
#define CFG_TARGET_BUS_BUSNUM_MASK	GENMASK(15, 8)

#define CFG_CONTROL			0x4
#define CFG_CONTROL_SUBBUS_MASK		GENMASK(15, 8)
#define CFG_CONTROL_SEC_BUS_MASK	GENMASK(23, 16)

struct al_pcie_reg_offsets {
	unsigned int ob_ctrl;
};

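/*
 * The target bus of a config TLP is assembled from two sources: the low bits
 * come from the ECAM window address (selected by ecam_mask) and the remaining
 * bits come from the CFG_TARGET_BUS register (reg_val under reg_mask),
 * programmed through al_pcie_target_bus_set().
 */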
struct al_pcie_target_bus_cfg {
	u8 reg_val;
	u8 reg_mask;
	u8 ecam_mask;
};

struct al_pcie {
	struct dw_pcie *pci;
	void __iomem *controller_base; /* base of PCIe unit (not DW core) */
	struct device *dev;
	resource_size_t ecam_size;
	unsigned int controller_rev_id;
	struct al_pcie_reg_offsets reg_offsets;
	struct al_pcie_target_bus_cfg target_bus_cfg;
};

#define PCIE_ECAM_DEVFN(x)		(((x) & 0xff) << 12)
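/*
 * ECAM address layout used below: bits [27:20] select the bus, bits [19:12]
 * the device/function (PCIE_ECAM_DEVFN above) and bits [11:0] the register
 * offset within the 4K config space.
 */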

#define to_al_pcie(x)		dev_get_drvdata((x)->dev)

static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset)
{
	return readl_relaxed(pcie->controller_base + offset);
}

static inline void al_pcie_controller_writel(struct al_pcie *pcie, u32 offset,
					     u32 val)
{
	writel_relaxed(val, pcie->controller_base + offset);
}

static int al_pcie_rev_id_get(struct al_pcie *pcie, unsigned int *rev_id)
{
	u32 dev_rev_id_val;
	u32 dev_id_val;

	dev_rev_id_val = al_pcie_controller_readl(pcie, AXI_BASE_OFFSET +
						  DEVICE_ID_OFFSET +
						  DEVICE_REV_ID);
	dev_id_val = FIELD_GET(DEVICE_REV_ID_DEV_ID_MASK, dev_rev_id_val);

	switch (dev_id_val) {
	case DEVICE_REV_ID_DEV_ID_X4:
		*rev_id = AL_PCIE_REV_ID_2;
		break;
	case DEVICE_REV_ID_DEV_ID_X8:
		*rev_id = AL_PCIE_REV_ID_3;
		break;
	case DEVICE_REV_ID_DEV_ID_X16:
		*rev_id = AL_PCIE_REV_ID_4;
		break;
	default:
		dev_err(pcie->dev, "Unsupported dev_id_val (0x%x)\n",
			dev_id_val);
		return -EINVAL;
	}

	dev_dbg(pcie->dev, "dev_id_val: 0x%x\n", dev_id_val);

	return 0;
}

static int al_pcie_reg_offsets_set(struct al_pcie *pcie)
{
	switch (pcie->controller_rev_id) {
	case AL_PCIE_REV_ID_2:
		pcie->reg_offsets.ob_ctrl = OB_CTRL_REV1_2_OFFSET;
		break;
	case AL_PCIE_REV_ID_3:
	case AL_PCIE_REV_ID_4:
		pcie->reg_offsets.ob_ctrl = OB_CTRL_REV3_5_OFFSET;
		break;
	default:
		dev_err(pcie->dev, "Unsupported controller rev_id: 0x%x\n",
			pcie->controller_rev_id);
		return -EINVAL;
	}

	return 0;
}

static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
					  u8 target_bus,
					  u8 mask_target_bus)
{
	u32 reg;

	reg = FIELD_PREP(CFG_TARGET_BUS_MASK_MASK, mask_target_bus) |
	      FIELD_PREP(CFG_TARGET_BUS_BUSNUM_MASK, target_bus);

	al_pcie_controller_writel(pcie, AXI_BASE_OFFSET +
				  pcie->reg_offsets.ob_ctrl + CFG_TARGET_BUS,
				  reg);
}

static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
					       unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;
	struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
	unsigned int busnr = bus->number;
	struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
	unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;
	unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask;
	void __iomem *pci_base_addr;

	pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base +
					 (busnr_ecam << 20) +
					 PCIE_ECAM_DEVFN(devfn));

	if (busnr_reg != target_bus_cfg->reg_val) {
		dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n",
			target_bus_cfg->reg_val, busnr_reg);
		target_bus_cfg->reg_val = busnr_reg;
		al_pcie_target_bus_set(pcie,
				       target_bus_cfg->reg_val,
				       target_bus_cfg->reg_mask);
	}

	return pci_base_addr + where;
}

static struct pci_ops al_child_pci_ops = {
	.map_bus = al_pcie_conf_addr_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static void al_pcie_config_prepare(struct al_pcie *pcie)
{
	struct al_pcie_target_bus_cfg *target_bus_cfg;
	struct pcie_port *pp = &pcie->pci->pp;
	unsigned int ecam_bus_mask;
	u32 cfg_control_offset;
	u8 subordinate_bus;
	u8 secondary_bus;
	u32 cfg_control;
	u32 reg;
	struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;

	target_bus_cfg = &pcie->target_bus_cfg;

	ecam_bus_mask = (pcie->ecam_size >> 20) - 1;
	if (ecam_bus_mask > 255) {
		dev_warn(pcie->dev, "ECAM window size is larger than 256MB. Cutting off at 256\n");
		ecam_bus_mask = 255;
	}
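	/*
	 * Each bus occupies 1MB of ECAM space, so ecam_size >> 20 is the
	 * number of buses the window addresses directly. For example, an 8MB
	 * "config" window yields ecam_bus_mask = 0x7 (eight buses per
	 * target-bus setting), while a 256MB window covers the full 0-255
	 * bus range.
	 */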

	/* This portion is taken from the transaction address */
	target_bus_cfg->ecam_mask = ecam_bus_mask;
	/* This portion is taken from the cfg_target_bus reg */
	target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask;
	target_bus_cfg->reg_val = bus->start & target_bus_cfg->reg_mask;

	al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val,
			       target_bus_cfg->reg_mask);

	secondary_bus = bus->start + 1;
	subordinate_bus = bus->end;

	/* Set the valid values of secondary and subordinate buses */
	cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl +
			     CFG_CONTROL;

	cfg_control = al_pcie_controller_readl(pcie, cfg_control_offset);

	reg = cfg_control &
	      ~(CFG_CONTROL_SEC_BUS_MASK | CFG_CONTROL_SUBBUS_MASK);

	reg |= FIELD_PREP(CFG_CONTROL_SUBBUS_MASK, subordinate_bus) |
	       FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);

	al_pcie_controller_writel(pcie, cfg_control_offset, reg);
}

static int al_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct al_pcie *pcie = to_al_pcie(pci);
	int rc;

	pp->bridge->child_ops = &al_child_pci_ops;

	rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id);
	if (rc)
		return rc;

	rc = al_pcie_reg_offsets_set(pcie);
	if (rc)
		return rc;

	al_pcie_config_prepare(pcie);

	return 0;
}

static const struct dw_pcie_host_ops al_pcie_host_ops = {
	.host_init = al_pcie_host_init,
};

static int al_add_pcie_port(struct pcie_port *pp,
			    struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	pp->ops = &al_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

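/*
 * No platform-specific low-level ops are needed; leaving this struct empty
 * makes the DesignWare core fall back to its default accessors.
 */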
static const struct dw_pcie_ops dw_pcie_ops = {
};

static int al_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *controller_res;
	struct resource *ecam_res;
	struct resource *dbi_res;
	struct al_pcie *al_pcie;
	struct dw_pcie *pci;

	al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
	if (!al_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	al_pcie->pci = pci;
	al_pcie->dev = dev;

	dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (!ecam_res) {
		dev_err(dev, "couldn't find 'config' reg in DT\n");
		return -ENOENT;
	}
	al_pcie->ecam_size = resource_size(ecam_res);

	controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						      "controller");
	al_pcie->controller_base = devm_ioremap_resource(dev, controller_res);
	if (IS_ERR(al_pcie->controller_base)) {
		dev_err(dev, "couldn't remap controller base %pR\n",
			controller_res);
		return PTR_ERR(al_pcie->controller_base);
	}

	dev_dbg(dev, "From DT: dbi_base: %pR, controller_base: %pR\n",
		dbi_res, controller_res);

	platform_set_drvdata(pdev, al_pcie);

	return al_add_pcie_port(&pci->pp, pdev);
}

static const struct of_device_id al_pcie_of_match[] = {
	{ .compatible = "amazon,al-alpine-v2-pcie",
	},
	{ .compatible = "amazon,al-alpine-v3-pcie",
	},
	{},
};
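
/*
 * Illustrative device tree node (addresses and sizes are placeholders, not
 * taken from any real platform); the driver only requires the three named
 * "reg" regions looked up in al_pcie_probe():
 *
 *	pcie@fb600000 {
 *		compatible = "amazon,al-alpine-v3-pcie";
 *		reg = <0x0 0xfb600000 0x0 0x10000>,
 *		      <0x0 0xfd800000 0x0 0x2000000>,
 *		      <0x0 0xfe000000 0x0 0x1000>;
 *		reg-names = "dbi", "config", "controller";
 *		...
 *	};
 */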

static struct platform_driver al_pcie_driver = {
	.driver = {
		.name	= "al-pcie",
		.of_match_table = al_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = al_pcie_probe,
};
builtin_platform_driver(al_pcie_driver);

#endif /* CONFIG_PCIE_AL */