xref: /OK3568_Linux_fs/kernel/drivers/pci/controller/dwc/pcie-dw-ep-rockchip.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * PCIe EP controller driver for Rockchip SoCs
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2021 Rockchip Electronics Co., Ltd.
6*4882a593Smuzhiyun  *		http://www.rock-chips.com
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Author: Simon Xue <xxm@rock-chips.com>
9*4882a593Smuzhiyun  */
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <linux/clk.h>
12*4882a593Smuzhiyun #include <linux/gpio/consumer.h>
13*4882a593Smuzhiyun #include <linux/miscdevice.h>
14*4882a593Smuzhiyun #include <linux/mfd/syscon.h>
15*4882a593Smuzhiyun #include <linux/module.h>
16*4882a593Smuzhiyun #include <linux/of_address.h>
17*4882a593Smuzhiyun #include <linux/of_device.h>
18*4882a593Smuzhiyun #include <linux/phy/phy.h>
19*4882a593Smuzhiyun #include <linux/platform_device.h>
20*4882a593Smuzhiyun #include <linux/regmap.h>
21*4882a593Smuzhiyun #include <linux/reset.h>
22*4882a593Smuzhiyun #include <linux/uaccess.h>
23*4882a593Smuzhiyun #include <uapi/linux/rk-pcie-ep.h>
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #include "../rockchip-pcie-dma.h"
26*4882a593Smuzhiyun #include "pcie-designware.h"
27*4882a593Smuzhiyun #include "pcie-dw-dmatest.h"
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun /*
30*4882a593Smuzhiyun  * The upper 16 bits of PCIE_CLIENT_CONFIG are a write
31*4882a593Smuzhiyun  * mask for the lower 16 bits.
32*4882a593Smuzhiyun  */
33*4882a593Smuzhiyun #define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
34*4882a593Smuzhiyun #define HIWORD_UPDATE_BIT(val)	HIWORD_UPDATE(val, val)
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun #define PCIE_DMA_OFFSET			0x380000
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #define PCIE_DMA_CTRL_OFF		0x8
41*4882a593Smuzhiyun #define PCIE_DMA_WR_ENB			0xc
42*4882a593Smuzhiyun #define PCIE_DMA_WR_CTRL_LO		0x200
43*4882a593Smuzhiyun #define PCIE_DMA_WR_CTRL_HI		0x204
44*4882a593Smuzhiyun #define PCIE_DMA_WR_XFERSIZE		0x208
45*4882a593Smuzhiyun #define PCIE_DMA_WR_SAR_PTR_LO		0x20c
46*4882a593Smuzhiyun #define PCIE_DMA_WR_SAR_PTR_HI		0x210
47*4882a593Smuzhiyun #define PCIE_DMA_WR_DAR_PTR_LO		0x214
48*4882a593Smuzhiyun #define PCIE_DMA_WR_DAR_PTR_HI		0x218
49*4882a593Smuzhiyun #define PCIE_DMA_WR_WEILO		0x18
50*4882a593Smuzhiyun #define PCIE_DMA_WR_WEIHI		0x1c
51*4882a593Smuzhiyun #define PCIE_DMA_WR_DOORBELL		0x10
52*4882a593Smuzhiyun #define PCIE_DMA_WR_INT_STATUS		0x4c
53*4882a593Smuzhiyun #define PCIE_DMA_WR_INT_MASK		0x54
54*4882a593Smuzhiyun #define PCIE_DMA_WR_INT_CLEAR		0x58
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun #define PCIE_DMA_RD_ENB			0x2c
57*4882a593Smuzhiyun #define PCIE_DMA_RD_CTRL_LO		0x300
58*4882a593Smuzhiyun #define PCIE_DMA_RD_CTRL_HI		0x304
59*4882a593Smuzhiyun #define PCIE_DMA_RD_XFERSIZE		0x308
60*4882a593Smuzhiyun #define PCIE_DMA_RD_SAR_PTR_LO		0x30c
61*4882a593Smuzhiyun #define PCIE_DMA_RD_SAR_PTR_HI		0x310
62*4882a593Smuzhiyun #define PCIE_DMA_RD_DAR_PTR_LO		0x314
63*4882a593Smuzhiyun #define PCIE_DMA_RD_DAR_PTR_HI		0x318
64*4882a593Smuzhiyun #define PCIE_DMA_RD_WEILO		0x38
65*4882a593Smuzhiyun #define PCIE_DMA_RD_WEIHI		0x3c
66*4882a593Smuzhiyun #define PCIE_DMA_RD_DOORBELL		0x30
67*4882a593Smuzhiyun #define PCIE_DMA_RD_INT_STATUS		0xa0
68*4882a593Smuzhiyun #define PCIE_DMA_RD_INT_MASK		0xa8
69*4882a593Smuzhiyun #define PCIE_DMA_RD_INT_CLEAR		0xac
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun #define PCIE_DMA_CHANEL_MAX_NUM		2
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun #define PCIE_CLIENT_RC_MODE		HIWORD_UPDATE_BIT(0x40)
74*4882a593Smuzhiyun #define PCIE_CLIENT_ENABLE_LTSSM	HIWORD_UPDATE_BIT(0xc)
75*4882a593Smuzhiyun #define PCIE_CLIENT_INTR_STATUS_MISC	0x10
76*4882a593Smuzhiyun #define PCIE_SMLH_LINKUP		BIT(16)
77*4882a593Smuzhiyun #define PCIE_RDLH_LINKUP		BIT(17)
78*4882a593Smuzhiyun #define PCIE_L0S_ENTRY			0x11
79*4882a593Smuzhiyun #define PCIE_CLIENT_GENERAL_CONTROL	0x0
80*4882a593Smuzhiyun #define PCIE_CLIENT_GENERAL_DEBUG	0x104
81*4882a593Smuzhiyun #define PCIE_CLIENT_HOT_RESET_CTRL      0x180
82*4882a593Smuzhiyun #define PCIE_CLIENT_LTSSM_STATUS	0x300
83*4882a593Smuzhiyun #define PCIE_CLIENT_INTR_MASK		0x24
84*4882a593Smuzhiyun #define PCIE_LTSSM_APP_DLY1_EN		BIT(0)
85*4882a593Smuzhiyun #define PCIE_LTSSM_APP_DLY2_EN		BIT(1)
86*4882a593Smuzhiyun #define PCIE_LTSSM_APP_DLY1_DONE	BIT(2)
87*4882a593Smuzhiyun #define PCIE_LTSSM_APP_DLY2_DONE	BIT(3)
88*4882a593Smuzhiyun #define PCIE_LTSSM_ENABLE_ENHANCE       BIT(4)
89*4882a593Smuzhiyun #define PCIE_CLIENT_MSI_GEN_CON		0x38
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun #define PCIe_CLIENT_MSI_OBJ_IRQ		0	/* rockchip ep object special irq */
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun #define PCIE_ELBI_REG_NUM		0x2
94*4882a593Smuzhiyun #define PCIE_ELBI_LOCAL_BASE		0x200e00
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun #define PCIE_ELBI_APP_ELBI_INT_GEN0		0x0
97*4882a593Smuzhiyun #define PCIE_ELBI_APP_ELBI_INT_GEN0_SIGIO	BIT(0)
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun #define PCIE_ELBI_APP_ELBI_INT_GEN1		0x4
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun #define PCIE_ELBI_LOCAL_ENABLE_OFF	0x8
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun #define PCIE_DIRECT_SPEED_CHANGE	BIT(17)
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun #define PCIE_TYPE0_STATUS_COMMAND_REG	0x4
106*4882a593Smuzhiyun #define PCIE_TYPE0_HDR_DBI2_OFFSET	0x100000
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun #define PCIE_DBI_SIZE			0x400000
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun #define PCIE_EP_OBJ_INFO_DRV_VERSION	0x00000001
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun #define PCIE_BAR_MAX_NUM		6
113*4882a593Smuzhiyun #define PCIE_HOTRESET_TMOUT_US		10000
114*4882a593Smuzhiyun 
/*
 * Per-controller state for the Rockchip DWC PCIe endpoint.
 *
 * The ib_target_* arrays hold one entry per BAR; index 0 (BAR0) backs
 * the shared pcie_ep_obj_info block exported to the remote host.
 */
struct rockchip_pcie {
	struct dw_pcie			pci;		/* DWC core state; must stay first for to_rockchip_pcie() */
	void __iomem			*apb_base;	/* Rockchip APB glue registers */
	struct phy			*phy;		/* "pcie-phy" from DT */
	struct clk_bulk_data		*clks;		/* all clocks from DT */
	unsigned int			clk_cnt;
	struct reset_control		*rst;		/* all reset lines from DT */
	struct gpio_desc		*rst_gpio;	/* optional "reset" GPIO */
	struct regulator                *vpcie3v3;
	unsigned long			*ib_window_map;	/* bitmap of used inbound iATU windows */
	unsigned long			*ob_window_map;	/* bitmap of used outbound iATU windows */
	u32				num_ib_windows;	/* DT "num-ib-windows" */
	u32				num_ob_windows;	/* DT "num-ob-windows" */
	phys_addr_t			*outbound_addr;	/* CPU address per outbound window */
	u8				bar_to_atu[PCIE_BAR_MAX_NUM];	/* BAR -> inbound window index */
	dma_addr_t			ib_target_address[PCIE_BAR_MAX_NUM];	/* backing mem phys addr per BAR */
	u32				ib_target_size[PCIE_BAR_MAX_NUM];	/* backing mem size per BAR (0 = none) */
	void				*ib_target_base[PCIE_BAR_MAX_NUM];	/* uncached kernel mapping per BAR */
	struct dma_trx_obj		*dma_obj;	/* rockchip-pcie-dma helper state */
	struct fasync_struct		*async;		/* SIGIO delivery to userspace */
	phys_addr_t			dbi_base_physical;	/* phys addr of the DBI region */
	struct pcie_ep_obj_info		*obj_info;	/* shared info block in BAR0 memory */
	enum pcie_ep_mmap_resource	cur_mmap_res;	/* resource selected for the next mmap() */
	struct workqueue_struct		*hot_rst_wq;	/* hot-reset handling context */
	struct work_struct		hot_rst_work;
};
141*4882a593Smuzhiyun 
/* Glue between the userspace misc char device and its owning controller. */
struct rockchip_pcie_misc_dev {
	struct miscdevice dev;
	struct rockchip_pcie *pcie;
};
146*4882a593Smuzhiyun 
/* SoCs supported in "standard EP" mode by this driver. */
static const struct of_device_id rockchip_pcie_ep_of_match[] = {
	{
		.compatible = "rockchip,rk3568-pcie-std-ep",
	},
	{
		.compatible = "rockchip,rk3588-pcie-std-ep",
	},
	{},
};
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, rockchip_pcie_ep_of_match);
158*4882a593Smuzhiyun 
/*
 * Publish the driver's current mode/submode in the shared object-info
 * block (BAR0 backing memory) so the remote host can observe EP state.
 */
static void rockchip_pcie_devmode_update(struct rockchip_pcie *rockchip, int mode, int submode)
{
	rockchip->obj_info->devmode.mode = mode;
	rockchip->obj_info->devmode.submode = submode;
}
164*4882a593Smuzhiyun 
/* Read a 32-bit APB glue register at offset @reg from apb_base. */
static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, u32 reg)
{
	return readl(rockchip->apb_base + reg);
}
169*4882a593Smuzhiyun 
/*
 * Write a 32-bit APB glue register. Note: several of these registers use
 * the HIWORD write-mask scheme described at the top of this file.
 */
static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, u32 val, u32 reg)
{
	writel(val, rockchip->apb_base + reg);
}
174*4882a593Smuzhiyun 
/*
 * Map @len bytes of physically contiguous memory starting at @start into
 * the kernel as an uncached virtual mapping.
 *
 * Returns the virtual address, or NULL on allocation/mapping failure.
 * The caller owns the mapping (release with vunmap()).
 */
static void *rockchip_pcie_map_kernel(phys_addr_t start, size_t len)
{
	int npages = PAGE_ALIGN(len) / PAGE_SIZE;
	struct page **pages;
	void *va;
	int n;

	pages = vmalloc(sizeof(*pages) * npages);
	if (!pages)
		return NULL;

	/* Build the page array covering [start, start + len). */
	for (n = 0; n < npages; n++)
		pages[n] = phys_to_page(start + (phys_addr_t)n * PAGE_SIZE);

	/* Uncached so both CPU and remote host see coherent contents. */
	va = vmap(pages, npages, VM_MAP, pgprot_noncached(PAGE_KERNEL));
	vfree(pages);

	return va;
}
200*4882a593Smuzhiyun 
rockchip_pcie_resource_get(struct platform_device * pdev,struct rockchip_pcie * rockchip)201*4882a593Smuzhiyun static int rockchip_pcie_resource_get(struct platform_device *pdev,
202*4882a593Smuzhiyun 				      struct rockchip_pcie *rockchip)
203*4882a593Smuzhiyun {
204*4882a593Smuzhiyun 	int ret;
205*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
206*4882a593Smuzhiyun 	struct device_node *np = dev->of_node;
207*4882a593Smuzhiyun 	void *addr;
208*4882a593Smuzhiyun 	struct resource *dbi_base;
209*4882a593Smuzhiyun 	struct device_node *mem;
210*4882a593Smuzhiyun 	struct resource reg;
211*4882a593Smuzhiyun 	char name[8];
212*4882a593Smuzhiyun 	int i, idx;
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
215*4882a593Smuzhiyun 						"pcie-dbi");
216*4882a593Smuzhiyun 	if (!dbi_base) {
217*4882a593Smuzhiyun 		dev_err(&pdev->dev, "get pcie-dbi failed\n");
218*4882a593Smuzhiyun 		return -ENODEV;
219*4882a593Smuzhiyun 	}
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	rockchip->pci.dbi_base = devm_ioremap_resource(dev, dbi_base);
222*4882a593Smuzhiyun 	if (IS_ERR(rockchip->pci.dbi_base))
223*4882a593Smuzhiyun 		return PTR_ERR(rockchip->pci.dbi_base);
224*4882a593Smuzhiyun 	rockchip->pci.atu_base = rockchip->pci.dbi_base + DEFAULT_DBI_ATU_OFFSET;
225*4882a593Smuzhiyun 	rockchip->dbi_base_physical = dbi_base->start;
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "pcie-apb");
228*4882a593Smuzhiyun 	if (!rockchip->apb_base) {
229*4882a593Smuzhiyun 		dev_err(dev, "get pcie-apb failed\n");
230*4882a593Smuzhiyun 		return -ENODEV;
231*4882a593Smuzhiyun 	}
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	rockchip->rst_gpio = devm_gpiod_get_optional(dev, "reset",
234*4882a593Smuzhiyun 						     GPIOD_OUT_HIGH);
235*4882a593Smuzhiyun 	if (IS_ERR(rockchip->rst_gpio))
236*4882a593Smuzhiyun 		return PTR_ERR(rockchip->rst_gpio);
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 	ret = device_property_read_u32(dev, "num-ib-windows", &rockchip->num_ib_windows);
239*4882a593Smuzhiyun 	if (ret < 0) {
240*4882a593Smuzhiyun 		dev_err(dev, "unable to read *num-ib-windows* property\n");
241*4882a593Smuzhiyun 		return ret;
242*4882a593Smuzhiyun 	}
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	if (rockchip->num_ib_windows > MAX_IATU_IN) {
245*4882a593Smuzhiyun 		dev_err(dev, "Invalid *num-ib-windows*\n");
246*4882a593Smuzhiyun 		return -EINVAL;
247*4882a593Smuzhiyun 	}
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 	ret = device_property_read_u32(dev, "num-ob-windows", &rockchip->num_ob_windows);
250*4882a593Smuzhiyun 	if (ret < 0) {
251*4882a593Smuzhiyun 		dev_err(dev, "Unable to read *num-ob-windows* property\n");
252*4882a593Smuzhiyun 		return ret;
253*4882a593Smuzhiyun 	}
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 	if (rockchip->num_ob_windows > MAX_IATU_OUT) {
256*4882a593Smuzhiyun 		dev_err(dev, "Invalid *num-ob-windows*\n");
257*4882a593Smuzhiyun 		return -EINVAL;
258*4882a593Smuzhiyun 	}
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun 	rockchip->ib_window_map = devm_kcalloc(dev,
261*4882a593Smuzhiyun 					BITS_TO_LONGS(rockchip->num_ib_windows),
262*4882a593Smuzhiyun 					sizeof(long), GFP_KERNEL);
263*4882a593Smuzhiyun 	if (!rockchip->ib_window_map)
264*4882a593Smuzhiyun 		return -ENOMEM;
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	rockchip->ob_window_map = devm_kcalloc(dev,
267*4882a593Smuzhiyun 					BITS_TO_LONGS(rockchip->num_ob_windows),
268*4882a593Smuzhiyun 					sizeof(long), GFP_KERNEL);
269*4882a593Smuzhiyun 	if (!rockchip->ob_window_map)
270*4882a593Smuzhiyun 		return -ENOMEM;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	addr = devm_kcalloc(dev, rockchip->num_ob_windows, sizeof(phys_addr_t),
273*4882a593Smuzhiyun 			    GFP_KERNEL);
274*4882a593Smuzhiyun 	if (!addr)
275*4882a593Smuzhiyun 		return -ENOMEM;
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	rockchip->outbound_addr = addr;
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 	for (i = 0; i < PCIE_BAR_MAX_NUM; i++) {
280*4882a593Smuzhiyun 		snprintf(name, sizeof(name), "bar%d", i);
281*4882a593Smuzhiyun 		idx = of_property_match_string(np, "memory-region-names", name);
282*4882a593Smuzhiyun 		if (idx < 0)
283*4882a593Smuzhiyun 			continue;
284*4882a593Smuzhiyun 
285*4882a593Smuzhiyun 		mem = of_parse_phandle(np, "memory-region", idx);
286*4882a593Smuzhiyun 		if (!mem) {
287*4882a593Smuzhiyun 			dev_err(dev, "missing \"memory-region\" %s property\n", name);
288*4882a593Smuzhiyun 			return -ENODEV;
289*4882a593Smuzhiyun 		}
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun 		ret = of_address_to_resource(mem, 0, &reg);
292*4882a593Smuzhiyun 		if (ret < 0) {
293*4882a593Smuzhiyun 			dev_err(dev, "missing \"reg\" %s property\n", name);
294*4882a593Smuzhiyun 			return -ENODEV;
295*4882a593Smuzhiyun 		}
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 		rockchip->ib_target_address[i] = reg.start;
298*4882a593Smuzhiyun 		rockchip->ib_target_size[i] = resource_size(&reg);
299*4882a593Smuzhiyun 		rockchip->ib_target_base[i] = rockchip_pcie_map_kernel(reg.start,
300*4882a593Smuzhiyun 							resource_size(&reg));
301*4882a593Smuzhiyun 		dev_info(dev, "%s: assigned [0x%llx-%llx]\n", name, rockchip->ib_target_address[i],
302*4882a593Smuzhiyun 			rockchip->ib_target_address[i] + rockchip->ib_target_size[i] - 1);
303*4882a593Smuzhiyun 	}
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	if (rockchip->ib_target_size[0]) {
306*4882a593Smuzhiyun 		rockchip->obj_info = (struct pcie_ep_obj_info *)rockchip->ib_target_base[0];
307*4882a593Smuzhiyun 		memset_io(rockchip->obj_info, 0, sizeof(struct pcie_ep_obj_info));
308*4882a593Smuzhiyun 		rockchip->obj_info->magic = PCIE_EP_OBJ_INFO_MAGIC;
309*4882a593Smuzhiyun 		rockchip->obj_info->version = PCIE_EP_OBJ_INFO_DRV_VERSION;
310*4882a593Smuzhiyun 		rockchip_pcie_devmode_update(rockchip, RKEP_MODE_KERNEL, RKEP_SMODE_INIT);
311*4882a593Smuzhiyun 	} else {
312*4882a593Smuzhiyun 		dev_err(dev, "missing bar0 memory region\n");
313*4882a593Smuzhiyun 		return -ENODEV;
314*4882a593Smuzhiyun 	}
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	return 0;
317*4882a593Smuzhiyun }
318*4882a593Smuzhiyun 
/*
 * Select EP mode and enable LTSSM so link training can start. Both
 * writes go to PCIE_CLIENT_GENERAL_CONTROL, which uses the HIWORD
 * write-mask scheme (upper 16 bits select which lower bits are written).
 */
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
	/* Set ep mode */
	/* HIWORD mask 0xf0, value 0: clears the mode bits (incl. the RC-mode bit 0x40) */
	rockchip_pcie_writel_apb(rockchip, 0xf00000, 0x0);
	rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
				 PCIE_CLIENT_GENERAL_CONTROL);
}
326*4882a593Smuzhiyun 
rockchip_pcie_link_up(struct dw_pcie * pci)327*4882a593Smuzhiyun static int rockchip_pcie_link_up(struct dw_pcie *pci)
328*4882a593Smuzhiyun {
329*4882a593Smuzhiyun 	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
330*4882a593Smuzhiyun 	u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	if ((val & (PCIE_RDLH_LINKUP | PCIE_SMLH_LINKUP)) == 0x30000)
333*4882a593Smuzhiyun 		return 1;
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	return 0;
336*4882a593Smuzhiyun }
337*4882a593Smuzhiyun 
/*
 * dw_pcie_ops.start_link callback: pulse the "reset" GPIO low for 100 ms,
 * release it, then enable LTSSM so training can begin.
 *
 * NOTE(review): the GPIO is requested GPIOD_OUT_HIGH at probe; presumably
 * it drives/signals a reset towards the link partner — confirm board wiring.
 * Always returns 0.
 */
static int rockchip_pcie_start_link(struct dw_pcie *pci)
{
	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);

	/* Reset device */
	gpiod_set_value_cansleep(rockchip->rst_gpio, 0);
	msleep(100);
	gpiod_set_value_cansleep(rockchip->rst_gpio, 1);

	rockchip_pcie_enable_ltssm(rockchip);

	return 0;
}
351*4882a593Smuzhiyun 
rockchip_pcie_phy_init(struct rockchip_pcie * rockchip)352*4882a593Smuzhiyun static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
353*4882a593Smuzhiyun {
354*4882a593Smuzhiyun 	int ret;
355*4882a593Smuzhiyun 	struct device *dev = rockchip->pci.dev;
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 	rockchip->phy = devm_phy_get(dev, "pcie-phy");
358*4882a593Smuzhiyun 	if (IS_ERR(rockchip->phy)) {
359*4882a593Smuzhiyun 		dev_err(dev, "missing phy\n");
360*4882a593Smuzhiyun 		return PTR_ERR(rockchip->phy);
361*4882a593Smuzhiyun 	}
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	ret = phy_init(rockchip->phy);
364*4882a593Smuzhiyun 	if (ret < 0)
365*4882a593Smuzhiyun 		return ret;
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	phy_power_on(rockchip->phy);
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun 	return 0;
370*4882a593Smuzhiyun }
371*4882a593Smuzhiyun 
rockchip_pcie_phy_deinit(struct rockchip_pcie * rockchip)372*4882a593Smuzhiyun static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
373*4882a593Smuzhiyun {
374*4882a593Smuzhiyun 	phy_exit(rockchip->phy);
375*4882a593Smuzhiyun 	phy_power_off(rockchip->phy);
376*4882a593Smuzhiyun }
377*4882a593Smuzhiyun 
rockchip_pcie_reset_control_release(struct rockchip_pcie * rockchip)378*4882a593Smuzhiyun static int rockchip_pcie_reset_control_release(struct rockchip_pcie *rockchip)
379*4882a593Smuzhiyun {
380*4882a593Smuzhiyun 	struct device *dev = rockchip->pci.dev;
381*4882a593Smuzhiyun 	int ret;
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	rockchip->rst = devm_reset_control_array_get_exclusive(dev);
384*4882a593Smuzhiyun 	if (IS_ERR(rockchip->rst)) {
385*4882a593Smuzhiyun 		dev_err(dev, "failed to get reset lines\n");
386*4882a593Smuzhiyun 		return PTR_ERR(rockchip->rst);
387*4882a593Smuzhiyun 	}
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 	ret = reset_control_deassert(rockchip->rst);
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	return ret;
392*4882a593Smuzhiyun }
393*4882a593Smuzhiyun 
rockchip_pcie_clk_init(struct rockchip_pcie * rockchip)394*4882a593Smuzhiyun static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
395*4882a593Smuzhiyun {
396*4882a593Smuzhiyun 	struct device *dev = rockchip->pci.dev;
397*4882a593Smuzhiyun 	int ret;
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun 	ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
400*4882a593Smuzhiyun 	if (ret < 0)
401*4882a593Smuzhiyun 		return ret;
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 	rockchip->clk_cnt = ret;
404*4882a593Smuzhiyun 
405*4882a593Smuzhiyun 	ret = clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
406*4882a593Smuzhiyun 	if (ret)
407*4882a593Smuzhiyun 		return ret;
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	return 0;
410*4882a593Smuzhiyun }
411*4882a593Smuzhiyun 
rockchip_pci_find_resbar_capability(struct rockchip_pcie * rockchip)412*4882a593Smuzhiyun static int rockchip_pci_find_resbar_capability(struct rockchip_pcie *rockchip)
413*4882a593Smuzhiyun {
414*4882a593Smuzhiyun 	u32 header;
415*4882a593Smuzhiyun 	int ttl;
416*4882a593Smuzhiyun 	int start = 0;
417*4882a593Smuzhiyun 	int pos = PCI_CFG_SPACE_SIZE;
418*4882a593Smuzhiyun 	int cap = PCI_EXT_CAP_ID_REBAR;
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun 	/* minimum 8 bytes per capability */
421*4882a593Smuzhiyun 	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun 	header = dw_pcie_readl_dbi(&rockchip->pci, pos);
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	/*
426*4882a593Smuzhiyun 	 * If we have no capabilities, this is indicated by cap ID,
427*4882a593Smuzhiyun 	 * cap version and next pointer all being 0.
428*4882a593Smuzhiyun 	 */
429*4882a593Smuzhiyun 	if (header == 0)
430*4882a593Smuzhiyun 		return 0;
431*4882a593Smuzhiyun 
432*4882a593Smuzhiyun 	while (ttl-- > 0) {
433*4882a593Smuzhiyun 		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
434*4882a593Smuzhiyun 			return pos;
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 		pos = PCI_EXT_CAP_NEXT(header);
437*4882a593Smuzhiyun 		if (pos < PCI_CFG_SPACE_SIZE)
438*4882a593Smuzhiyun 			break;
439*4882a593Smuzhiyun 
440*4882a593Smuzhiyun 		header = dw_pcie_readl_dbi(&rockchip->pci, pos);
441*4882a593Smuzhiyun 		if (!header)
442*4882a593Smuzhiyun 			break;
443*4882a593Smuzhiyun 	}
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	return 0;
446*4882a593Smuzhiyun }
447*4882a593Smuzhiyun 
rockchip_pcie_ep_set_bar_flag(struct rockchip_pcie * rockchip,enum pci_barno barno,int flags)448*4882a593Smuzhiyun static int rockchip_pcie_ep_set_bar_flag(struct rockchip_pcie *rockchip, enum pci_barno barno,
449*4882a593Smuzhiyun 					 int flags)
450*4882a593Smuzhiyun {
451*4882a593Smuzhiyun 	enum pci_barno bar = barno;
452*4882a593Smuzhiyun 	u32 reg;
453*4882a593Smuzhiyun 
454*4882a593Smuzhiyun 	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun 	/* Disabled the upper 32bits BAR to make a 64bits bar pair */
457*4882a593Smuzhiyun 	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
458*4882a593Smuzhiyun 		dw_pcie_writel_dbi(&rockchip->pci, reg + PCIE_TYPE0_HDR_DBI2_OFFSET + 4, 0);
459*4882a593Smuzhiyun 
460*4882a593Smuzhiyun 	dw_pcie_writel_dbi(&rockchip->pci, reg, flags);
461*4882a593Smuzhiyun 	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
462*4882a593Smuzhiyun 		dw_pcie_writel_dbi(&rockchip->pci, reg + 4, 0);
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun 	return 0;
465*4882a593Smuzhiyun }
466*4882a593Smuzhiyun 
/*
 * One-time link and BAR configuration via DBI:
 *  - program the lane count from DT "num-lanes"
 *  - request a direct link-speed change
 *  - enable bus mastering and memory decode
 *  - size BAR0/2/4 through the Resizable BAR capability and disable BAR1/5
 *
 * The register write order below is intentional; do not reorder.
 */
static void rockchip_pcie_resize_bar(struct rockchip_pcie *rockchip)
{
	struct dw_pcie *pci = &rockchip->pci;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	int bar, ret;
	u32 resbar_base, lanes, val;

	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret)
		lanes = 0;

	/* Set the number of lanes */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		/* An unsupported lane count aborts all setup, including BARs. */
		dev_err(dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}

	val |= PCIE_DIRECT_SPEED_CHANGE;

	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	/* Enable bus master and memory space */
	dw_pcie_writel_dbi(pci, PCIE_TYPE0_STATUS_COMMAND_REG, 0x6);

	/* NOTE(review): a return of 0 (capability absent) is not checked here. */
	resbar_base = rockchip_pci_find_resbar_capability(rockchip);

	/* Resize BAR0 4M 32bits, BAR2 64M 64bits-pref, BAR4 1MB 32bits */
	/* Per BAR: +0x4 is the capability register, +0x8 the control register. */
	bar = BAR_0;
	dw_pcie_writel_dbi(pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
	dw_pcie_writel_dbi(pci, resbar_base + 0x8 + bar * 0x8, 0x2c0);
	rockchip_pcie_ep_set_bar_flag(rockchip, bar, PCI_BASE_ADDRESS_MEM_TYPE_32);

	bar = BAR_2;
	dw_pcie_writel_dbi(pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
	dw_pcie_writel_dbi(pci, resbar_base + 0x8 + bar * 0x8, 0x6c0);
	rockchip_pcie_ep_set_bar_flag(rockchip, bar,
				      PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);

	bar = BAR_4;
	dw_pcie_writel_dbi(pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
	dw_pcie_writel_dbi(pci, resbar_base + 0x8 + bar * 0x8, 0xc0);
	rockchip_pcie_ep_set_bar_flag(rockchip, bar, PCI_BASE_ADDRESS_MEM_TYPE_32);

	/* Disable BAR1 BAR5*/
	/* Writing 0 through the DBI2 shadow disables the BAR. */
	bar = BAR_1;
	dw_pcie_writel_dbi(pci, PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + bar * 4, 0);
	bar = BAR_5;
	dw_pcie_writel_dbi(pci, PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + bar * 4, 0);
}
552*4882a593Smuzhiyun 
/*
 * Program the EP's config-space identity: device ID 0x356a and class
 * code 0x0580 (base class 0x05 memory controller, subclass 0x80 other).
 */
static void rockchip_pcie_init_id(struct rockchip_pcie *rockchip)
{
	struct dw_pcie *pci = &rockchip->pci;

	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0x356a);
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, 0x0580);
}
560*4882a593Smuzhiyun 
/*
 * Back BAR @bar with local memory at @cpu_addr by claiming a free
 * inbound iATU window and programming it in BAR-match mode.
 *
 * Returns 0 on success, -EINVAL when no window is free, or the error
 * from dw_pcie_prog_inbound_atu().
 */
static int rockchip_pcie_ep_set_bar(struct rockchip_pcie *rockchip, enum pci_barno bar,
				    dma_addr_t cpu_addr)
{
	struct dw_pcie *pci = &rockchip->pci;
	u32 win;
	int ret;

	win = find_first_zero_bit(rockchip->ib_window_map,
				  rockchip->num_ib_windows);
	if (win >= rockchip->num_ib_windows) {
		dev_err(pci->dev, "No free inbound window\n");
		return -EINVAL;
	}

	ret = dw_pcie_prog_inbound_atu(pci, 0, win, bar, cpu_addr,
				       DW_PCIE_AS_MEM);
	if (ret < 0) {
		dev_err(pci->dev, "Failed to program IB window\n");
		return ret;
	}

	/* Remember the mapping and mark the window busy. */
	rockchip->bar_to_atu[bar] = win;
	set_bit(win, rockchip->ib_window_map);

	return 0;
}
589*4882a593Smuzhiyun 
rockchip_pcie_fast_link_setup(struct rockchip_pcie * rockchip)590*4882a593Smuzhiyun static void rockchip_pcie_fast_link_setup(struct rockchip_pcie *rockchip)
591*4882a593Smuzhiyun {
592*4882a593Smuzhiyun 	u32 val;
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun 	/* LTSSM EN ctrl mode */
595*4882a593Smuzhiyun 	val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_HOT_RESET_CTRL);
596*4882a593Smuzhiyun 	val |= (PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN) |
597*4882a593Smuzhiyun 		((PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN) << 16);
598*4882a593Smuzhiyun 	rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
599*4882a593Smuzhiyun }
600*4882a593Smuzhiyun 
rockchip_pcie_iatu_unroll_enabled(struct dw_pcie * pci)601*4882a593Smuzhiyun static u8 rockchip_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
602*4882a593Smuzhiyun {
603*4882a593Smuzhiyun 	u32 val;
604*4882a593Smuzhiyun 
605*4882a593Smuzhiyun 	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
606*4882a593Smuzhiyun 	if (val == 0xffffffff)
607*4882a593Smuzhiyun 		return 1;
608*4882a593Smuzhiyun 
609*4882a593Smuzhiyun 	return 0;
610*4882a593Smuzhiyun }
611*4882a593Smuzhiyun 
rockchip_pcie_local_elbi_enable(struct rockchip_pcie * rockchip)612*4882a593Smuzhiyun static void rockchip_pcie_local_elbi_enable(struct rockchip_pcie *rockchip)
613*4882a593Smuzhiyun {
614*4882a593Smuzhiyun 	int i;
615*4882a593Smuzhiyun 	u32 elbi_reg;
616*4882a593Smuzhiyun 	struct dw_pcie *pci = &rockchip->pci;
617*4882a593Smuzhiyun 
618*4882a593Smuzhiyun 	for (i = 0; i < PCIE_ELBI_REG_NUM; i++) {
619*4882a593Smuzhiyun 		elbi_reg = PCIE_ELBI_LOCAL_BASE + PCIE_ELBI_LOCAL_ENABLE_OFF +
620*4882a593Smuzhiyun 			   i * 4;
621*4882a593Smuzhiyun 		dw_pcie_writel_dbi(pci, elbi_reg, 0xffff0000);
622*4882a593Smuzhiyun 	}
623*4882a593Smuzhiyun }
624*4882a593Smuzhiyun 
rockchip_pcie_elbi_clear(struct rockchip_pcie * rockchip)625*4882a593Smuzhiyun static void rockchip_pcie_elbi_clear(struct rockchip_pcie *rockchip)
626*4882a593Smuzhiyun {
627*4882a593Smuzhiyun 	int i;
628*4882a593Smuzhiyun 	u32 elbi_reg;
629*4882a593Smuzhiyun 	struct dw_pcie *pci = &rockchip->pci;
630*4882a593Smuzhiyun 	u32 val;
631*4882a593Smuzhiyun 
632*4882a593Smuzhiyun 	for (i = 0; i < PCIE_ELBI_REG_NUM; i++) {
633*4882a593Smuzhiyun 		elbi_reg = PCIE_ELBI_LOCAL_BASE + i * 4;
634*4882a593Smuzhiyun 		val = dw_pcie_readl_dbi(pci, elbi_reg);
635*4882a593Smuzhiyun 		val <<= 16;
636*4882a593Smuzhiyun 		dw_pcie_writel_dbi(pci, elbi_reg, val);
637*4882a593Smuzhiyun 	}
638*4882a593Smuzhiyun }
639*4882a593Smuzhiyun 
/* Write BIT(interrupt_num) to the MSI generation control register to
 * raise the corresponding MSI vector toward the remote side.
 */
static void rockchip_pcie_raise_msi_irq(struct rockchip_pcie *rockchip, u8 interrupt_num)
{
	rockchip_pcie_writel_apb(rockchip, BIT(interrupt_num), PCIE_CLIENT_MSI_GEN_CON);
}
644*4882a593Smuzhiyun 
/*
 * Shared "sys" interrupt handler.  Demultiplexes three event sources:
 *   1. an ELBI doorbell from the remote side (notifies user space via
 *      SIGIO and returns early),
 *   2. eDMA write/read channel done/abort status (acks the channel,
 *      invokes the dmatest callback, mirrors status to obj_info and
 *      raises an MSI plus SIGIO),
 *   3. misc client events -- BIT(2) queues the hot-reset work.
 */
static irqreturn_t rockchip_pcie_sys_irq_handler(int irq, void *arg)
{
	struct rockchip_pcie *rockchip = arg;
	struct dw_pcie *pci = &rockchip->pci;
	u32 elbi_reg;
	u32 chn;
	union int_status wr_status, rd_status;
	/*
	 * NOTE(review): 'clears' is never zero-initialized; each ack path
	 * assigns a single bitfield and then writes clears.asdword to the
	 * hardware.  If doneclr and abortclr occupy the same word, stale
	 * stack bits could be written too -- verify the union layout.
	 */
	union int_clear clears;
	u32 reg, mask;
	bool sigio = false;

	/* ELBI helper, only check the valid bits, and discard the rest interrupts */
	elbi_reg = dw_pcie_readl_dbi(pci, PCIE_ELBI_LOCAL_BASE + PCIE_ELBI_APP_ELBI_INT_GEN0);
	if (elbi_reg & PCIE_ELBI_APP_ELBI_INT_GEN0_SIGIO) {
		sigio = true;
		rockchip->obj_info->irq_type_ep = OBJ_IRQ_ELBI;
		rockchip_pcie_elbi_clear(rockchip);
		goto out;
	}

	/* DMA helper */
	/* Only consider interrupts that are not masked off */
	mask = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK);
	wr_status.asdword = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_STATUS) & (~mask);
	mask = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK);
	rd_status.asdword = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_STATUS) & (~mask);

	/* Ack each pending write channel; run completion callback on done */
	for (chn = 0; chn < PCIE_DMA_CHANEL_MAX_NUM; chn++) {
		if (wr_status.donesta & BIT(chn)) {
			clears.doneclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					PCIE_DMA_WR_INT_CLEAR, clears.asdword);
			if (rockchip->dma_obj && rockchip->dma_obj->cb)
				rockchip->dma_obj->cb(rockchip->dma_obj, chn, DMA_TO_BUS);
		}

		if (wr_status.abortsta & BIT(chn)) {
			dev_err(pci->dev, "%s, abort\n", __func__);
			clears.abortclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					PCIE_DMA_WR_INT_CLEAR, clears.asdword);
		}
	}

	/* Same for the read channels */
	for (chn = 0; chn < PCIE_DMA_CHANEL_MAX_NUM; chn++) {
		if (rd_status.donesta & BIT(chn)) {
			clears.doneclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					PCIE_DMA_RD_INT_CLEAR, clears.asdword);
			if (rockchip->dma_obj && rockchip->dma_obj->cb)
				rockchip->dma_obj->cb(rockchip->dma_obj, chn, DMA_FROM_BUS);
		}

		if (rd_status.abortsta & BIT(chn)) {
			dev_err(pci->dev, "%s, abort\n", __func__);
			clears.abortclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					PCIE_DMA_RD_INT_CLEAR, clears.asdword);
		}
	}

	/* Mirror DMA status to the shared object info for both sides and
	 * signal them: MSI toward the RC, SIGIO toward the local user.
	 */
	if (wr_status.asdword || rd_status.asdword) {
		rockchip->obj_info->irq_type_rc = OBJ_IRQ_DMA;
		rockchip->obj_info->dma_status_rc.wr |= wr_status.asdword;
		rockchip->obj_info->dma_status_rc.rd |= rd_status.asdword;
		rockchip_pcie_raise_msi_irq(rockchip, PCIe_CLIENT_MSI_OBJ_IRQ);

		rockchip->obj_info->irq_type_ep = OBJ_IRQ_DMA;
		rockchip->obj_info->dma_status_ep.wr |= wr_status.asdword;
		rockchip->obj_info->dma_status_ep.rd |= rd_status.asdword;
		sigio = true;
	}

out:
	if (sigio) {
		dev_dbg(rockchip->pci.dev, "SIGIO\n");
		kill_fasync(&rockchip->async, SIGIO, POLL_IN);
	}

	/* Misc client status: BIT(2) defers hot-reset handling to a work
	 * item; the full status word is then written back to ack it.
	 */
	reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
	if (reg & BIT(2))
		queue_work(rockchip->hot_rst_wq, &rockchip->hot_rst_work);

	rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);

	return IRQ_HANDLED;
}
731*4882a593Smuzhiyun 
rockchip_pcie_request_sys_irq(struct rockchip_pcie * rockchip,struct platform_device * pdev)732*4882a593Smuzhiyun static int rockchip_pcie_request_sys_irq(struct rockchip_pcie *rockchip,
733*4882a593Smuzhiyun 					struct platform_device *pdev)
734*4882a593Smuzhiyun {
735*4882a593Smuzhiyun 	int irq;
736*4882a593Smuzhiyun 	int ret;
737*4882a593Smuzhiyun 	struct device *dev = rockchip->pci.dev;
738*4882a593Smuzhiyun 
739*4882a593Smuzhiyun 	irq = platform_get_irq_byname(pdev, "sys");
740*4882a593Smuzhiyun 	if (irq < 0) {
741*4882a593Smuzhiyun 		dev_err(dev, "missing sys IRQ resource\n");
742*4882a593Smuzhiyun 		return -EINVAL;
743*4882a593Smuzhiyun 	}
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun 	ret = devm_request_irq(dev, irq, rockchip_pcie_sys_irq_handler,
746*4882a593Smuzhiyun 			       IRQF_SHARED, "pcie-sys", rockchip);
747*4882a593Smuzhiyun 	if (ret) {
748*4882a593Smuzhiyun 		dev_err(dev, "failed to request PCIe subsystem IRQ\n");
749*4882a593Smuzhiyun 		return ret;
750*4882a593Smuzhiyun 	}
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun 	return 0;
753*4882a593Smuzhiyun }
754*4882a593Smuzhiyun 
rockchip_pcie_udma_enabled(struct rockchip_pcie * rockchip)755*4882a593Smuzhiyun static bool rockchip_pcie_udma_enabled(struct rockchip_pcie *rockchip)
756*4882a593Smuzhiyun {
757*4882a593Smuzhiyun 	struct dw_pcie *pci = &rockchip->pci;
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 	return dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_CTRL_OFF);
760*4882a593Smuzhiyun }
761*4882a593Smuzhiyun 
/*
 * Register the DMA test helper and unmask the eDMA interrupts.
 * Succeeds as a no-op when the embedded DMA block is absent.
 *
 * Returns 0 on success, -EINVAL if the dmatest helper cannot be set up.
 */
static int rockchip_pcie_init_dma_trx(struct rockchip_pcie *rockchip)
{
	struct dw_pcie *pci = &rockchip->pci;

	if (!rockchip_pcie_udma_enabled(rockchip))
		return 0;

	rockchip->dma_obj = pcie_dw_dmatest_register(pci->dev, true);
	if (IS_ERR(rockchip->dma_obj)) {
		dev_err(rockchip->pci.dev, "failed to prepare dmatest\n");
		return -EINVAL;
	}

	/* Enable client write and read interrupt */
	/* 0xc000000 sets bits 26/27 -- presumably the HIWORD write-mask for
	 * the DMA status bits with the mask value 0 (unmask); confirm in TRM.
	 */
	rockchip_pcie_writel_apb(rockchip, 0xc000000, PCIE_CLIENT_INTR_MASK);

	/* Enable core write interrupt */
	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK, 0x0);
	/* Enable core read interrupt */
	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK, 0x0);

	return 0;
}
785*4882a593Smuzhiyun 
/*
 * Program one read-channel descriptor into the controller and start it.
 * Register write order is deliberate: channel enable first, then the
 * context registers, and the doorbell last (the doorbell write is what
 * kicks off the transfer).
 */
static void rockchip_pcie_start_dma_rd(struct dma_trx_obj *obj, struct dma_table *cur, int ctr_off)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(obj->dev);
	struct dw_pcie *pci = &rockchip->pci;

	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_ENB,
			   cur->enb.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_CTRL_LO,
			   cur->ctx_reg.ctrllo.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_CTRL_HI,
			   cur->ctx_reg.ctrlhi.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_XFERSIZE,
			   cur->ctx_reg.xfersize);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_SAR_PTR_LO,
			   cur->ctx_reg.sarptrlo);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_SAR_PTR_HI,
			   cur->ctx_reg.sarptrhi);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_DAR_PTR_LO,
			   cur->ctx_reg.darptrlo);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_DAR_PTR_HI,
			   cur->ctx_reg.darptrhi);
	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL,
			   cur->start.asdword);
}
810*4882a593Smuzhiyun 
/*
 * Program one write-channel descriptor into the controller and start it.
 * Mirrors rockchip_pcie_start_dma_rd(), plus the channel weight register;
 * the doorbell write comes last and starts the transfer.
 */
static void rockchip_pcie_start_dma_wr(struct dma_trx_obj *obj, struct dma_table *cur, int ctr_off)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(obj->dev);
	struct dw_pcie *pci = &rockchip->pci;

	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_ENB,
			   cur->enb.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_CTRL_LO,
			   cur->ctx_reg.ctrllo.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_CTRL_HI,
			   cur->ctx_reg.ctrlhi.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_XFERSIZE,
			   cur->ctx_reg.xfersize);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_SAR_PTR_LO,
			   cur->ctx_reg.sarptrlo);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_SAR_PTR_HI,
			   cur->ctx_reg.sarptrhi);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_DAR_PTR_LO,
			   cur->ctx_reg.darptrlo);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_DAR_PTR_HI,
			   cur->ctx_reg.darptrhi);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_WEILO,
			   cur->weilo.asdword);
	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL,
			   cur->start.asdword);
}
837*4882a593Smuzhiyun 
rockchip_pcie_start_dma_dwc(struct dma_trx_obj * obj,struct dma_table * table)838*4882a593Smuzhiyun static void rockchip_pcie_start_dma_dwc(struct dma_trx_obj *obj, struct dma_table *table)
839*4882a593Smuzhiyun {
840*4882a593Smuzhiyun 	int dir = table->dir;
841*4882a593Smuzhiyun 	int chn = table->chn;
842*4882a593Smuzhiyun 
843*4882a593Smuzhiyun 	int ctr_off = PCIE_DMA_OFFSET + chn * 0x200;
844*4882a593Smuzhiyun 
845*4882a593Smuzhiyun 	if (dir == DMA_FROM_BUS)
846*4882a593Smuzhiyun 		rockchip_pcie_start_dma_rd(obj, table, ctr_off);
847*4882a593Smuzhiyun 	else if (dir == DMA_TO_BUS)
848*4882a593Smuzhiyun 		rockchip_pcie_start_dma_wr(obj, table, ctr_off);
849*4882a593Smuzhiyun }
850*4882a593Smuzhiyun 
rockchip_pcie_config_dma_dwc(struct dma_table * table)851*4882a593Smuzhiyun static void rockchip_pcie_config_dma_dwc(struct dma_table *table)
852*4882a593Smuzhiyun {
853*4882a593Smuzhiyun 	table->enb.enb = 0x1;
854*4882a593Smuzhiyun 	table->ctx_reg.ctrllo.lie = 0x1;
855*4882a593Smuzhiyun 	table->ctx_reg.ctrllo.rie = 0x0;
856*4882a593Smuzhiyun 	table->ctx_reg.ctrllo.td = 0x1;
857*4882a593Smuzhiyun 	table->ctx_reg.ctrlhi.asdword = 0x0;
858*4882a593Smuzhiyun 	table->ctx_reg.xfersize = table->buf_size;
859*4882a593Smuzhiyun 	if (table->dir == DMA_FROM_BUS) {
860*4882a593Smuzhiyun 		table->ctx_reg.sarptrlo = (u32)(table->bus & 0xffffffff);
861*4882a593Smuzhiyun 		table->ctx_reg.sarptrhi = (u32)(table->bus >> 32);
862*4882a593Smuzhiyun 		table->ctx_reg.darptrlo = (u32)(table->local & 0xffffffff);
863*4882a593Smuzhiyun 		table->ctx_reg.darptrhi = (u32)(table->local >> 32);
864*4882a593Smuzhiyun 	} else if (table->dir == DMA_TO_BUS) {
865*4882a593Smuzhiyun 		table->ctx_reg.sarptrlo = (u32)(table->local & 0xffffffff);
866*4882a593Smuzhiyun 		table->ctx_reg.sarptrhi = (u32)(table->local >> 32);
867*4882a593Smuzhiyun 		table->ctx_reg.darptrlo = (u32)(table->bus & 0xffffffff);
868*4882a593Smuzhiyun 		table->ctx_reg.darptrhi = (u32)(table->bus >> 32);
869*4882a593Smuzhiyun 	}
870*4882a593Smuzhiyun 	table->weilo.weight0 = 0x0;
871*4882a593Smuzhiyun 	table->start.stop = 0x0;
872*4882a593Smuzhiyun 	table->start.chnl = table->chn;
873*4882a593Smuzhiyun }
874*4882a593Smuzhiyun 
/*
 * Deferred hot-reset handler, queued from the sys IRQ when BIT(2) of
 * the misc status fires.  If the delayed-ack mode is enabled, wait for
 * the LTSSM to return to its quiet/detect state (low 6 bits == 0), then
 * write the DLY2_DONE bit (with its HIWORD write-mask) to let the
 * controller complete the hot reset.
 */
static void rockchip_pcie_hot_rst_work(struct work_struct *work)
{
	struct rockchip_pcie *rockchip = container_of(work, struct rockchip_pcie, hot_rst_work);
	u32 status;
	int ret;

	if (rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_HOT_RESET_CTRL) & PCIE_LTSSM_APP_DLY2_EN) {
		ret = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_LTSSM_STATUS,
			 status, ((status & 0x3F) == 0), 100, PCIE_HOTRESET_TMOUT_US);
		if (ret)
			dev_err(rockchip->pci.dev, "wait for detect quiet failed!\n");

		/* Ack the delayed hot reset even if the poll timed out */
		rockchip_pcie_writel_apb(rockchip, (PCIE_LTSSM_APP_DLY2_DONE) | ((PCIE_LTSSM_APP_DLY2_DONE) << 16),
					PCIE_CLIENT_HOT_RESET_CTRL);
	}
}
891*4882a593Smuzhiyun 
/*
 * Poll the done/abort status of one eDMA channel and acknowledge any
 * bit found set.
 *
 * Returns 1 when the channel completed, -1 when it aborted, 0 when
 * neither bit is pending.
 */
static int rockchip_pcie_get_dma_status(struct dma_trx_obj *obj, u8 chn, enum dma_dir dir)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(obj->dev);
	struct dw_pcie *pci = &rockchip->pci;
	union int_status status;
	/*
	 * NOTE(review): 'clears' is not zero-initialized before single
	 * bitfields are assigned and clears.asdword is written to hardware;
	 * verify the union layout rules out writing stale stack bits.
	 */
	union int_clear clears;
	int ret = 0;

	dev_dbg(pci->dev, "%s %x %x\n", __func__,
		dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_STATUS),
		dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_STATUS));

	if (dir == DMA_TO_BUS) {
		status.asdword = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_STATUS);
		if (status.donesta & BIT(chn)) {
			clears.doneclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_WR_INT_CLEAR, clears.asdword);
			ret = 1;
		}

		if (status.abortsta & BIT(chn)) {
			dev_err(pci->dev, "%s, write abort\n", __func__);
			clears.abortclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_WR_INT_CLEAR, clears.asdword);
			ret = -1;
		}
	} else {
		status.asdword = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_STATUS);

		if (status.donesta & BIT(chn)) {
			clears.doneclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_RD_INT_CLEAR, clears.asdword);
			ret = 1;
		}

		if (status.abortsta & BIT(chn)) {
			dev_err(pci->dev, "%s, read abort %x\n", __func__, status.asdword);
			clears.abortclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_RD_INT_CLEAR, clears.asdword);
			ret = -1;
		}
	}

	return ret;
}
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun static const struct dw_pcie_ops dw_pcie_ops = {
943*4882a593Smuzhiyun 	.start_link = rockchip_pcie_start_link,
944*4882a593Smuzhiyun 	.link_up = rockchip_pcie_link_up,
945*4882a593Smuzhiyun };
946*4882a593Smuzhiyun 
pcie_ep_fasync(int fd,struct file * file,int mode)947*4882a593Smuzhiyun static int pcie_ep_fasync(int fd, struct file *file, int mode)
948*4882a593Smuzhiyun {
949*4882a593Smuzhiyun 	struct rockchip_pcie *rockchip = (struct rockchip_pcie *)file->private_data;
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun 	return fasync_helper(fd, file, mode, &rockchip->async);
952*4882a593Smuzhiyun }
953*4882a593Smuzhiyun 
pcie_ep_open(struct inode * inode,struct file * file)954*4882a593Smuzhiyun static int pcie_ep_open(struct inode *inode, struct file *file)
955*4882a593Smuzhiyun {
956*4882a593Smuzhiyun 	struct miscdevice *miscdev = file->private_data;
957*4882a593Smuzhiyun 	struct rockchip_pcie_misc_dev *pcie_misc_dev;
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 	pcie_misc_dev = container_of(miscdev, struct rockchip_pcie_misc_dev, dev);
960*4882a593Smuzhiyun 	file->private_data = pcie_misc_dev->pcie;
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	return 0;
963*4882a593Smuzhiyun }
964*4882a593Smuzhiyun 
/* release() hook: drop this file's SIGIO registration on close. */
static int pcie_ep_release(struct inode *inode, struct file *file)
{
	return pcie_ep_fasync(-1, file, 0);
}
969*4882a593Smuzhiyun 
pcie_ep_ioctl(struct file * file,unsigned int cmd,unsigned long arg)970*4882a593Smuzhiyun static long pcie_ep_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
971*4882a593Smuzhiyun {
972*4882a593Smuzhiyun 	struct rockchip_pcie *rockchip = (struct rockchip_pcie *)file->private_data;
973*4882a593Smuzhiyun 	struct pcie_ep_user_data msg;
974*4882a593Smuzhiyun 	struct pcie_ep_dma_cache_cfg cfg;
975*4882a593Smuzhiyun 	void __user *uarg = (void __user *)arg;
976*4882a593Smuzhiyun 	int i, ret;
977*4882a593Smuzhiyun 	enum pcie_ep_mmap_resource mmap_res;
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun 	switch (cmd) {
980*4882a593Smuzhiyun 	case PCIE_DMA_GET_ELBI_DATA:
981*4882a593Smuzhiyun 		for (i = 4; i <= 6; i++)
982*4882a593Smuzhiyun 			msg.elbi_app_user[i - 4] = dw_pcie_readl_dbi(&rockchip->pci,
983*4882a593Smuzhiyun 								     PCIE_ELBI_LOCAL_BASE + i * 4);
984*4882a593Smuzhiyun 		for (i = 8; i <= 15; i++)
985*4882a593Smuzhiyun 			msg.elbi_app_user[i - 5] = dw_pcie_readl_dbi(&rockchip->pci,
986*4882a593Smuzhiyun 								     PCIE_ELBI_LOCAL_BASE + i * 4);
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 		ret = copy_to_user(uarg, &msg, sizeof(msg));
989*4882a593Smuzhiyun 		if (ret) {
990*4882a593Smuzhiyun 			dev_err(rockchip->pci.dev, "failed to get elbi data\n");
991*4882a593Smuzhiyun 			return -EFAULT;
992*4882a593Smuzhiyun 		}
993*4882a593Smuzhiyun 		break;
994*4882a593Smuzhiyun 	case PCIE_DMA_CACHE_INVALIDE:
995*4882a593Smuzhiyun 		ret = copy_from_user(&cfg, uarg, sizeof(cfg));
996*4882a593Smuzhiyun 		if (ret) {
997*4882a593Smuzhiyun 			dev_err(rockchip->pci.dev, "failed to get copy from\n");
998*4882a593Smuzhiyun 			return -EFAULT;
999*4882a593Smuzhiyun 		}
1000*4882a593Smuzhiyun 		dma_sync_single_for_cpu(rockchip->pci.dev, cfg.addr, cfg.size, DMA_FROM_DEVICE);
1001*4882a593Smuzhiyun 		break;
1002*4882a593Smuzhiyun 	case PCIE_DMA_CACHE_FLUSH:
1003*4882a593Smuzhiyun 		ret = copy_from_user(&cfg, uarg, sizeof(cfg));
1004*4882a593Smuzhiyun 		if (ret) {
1005*4882a593Smuzhiyun 			dev_err(rockchip->pci.dev, "failed to get copy from\n");
1006*4882a593Smuzhiyun 			return -EFAULT;
1007*4882a593Smuzhiyun 		}
1008*4882a593Smuzhiyun 		dma_sync_single_for_device(rockchip->pci.dev, cfg.addr, cfg.size, DMA_TO_DEVICE);
1009*4882a593Smuzhiyun 		break;
1010*4882a593Smuzhiyun 	case PCIE_DMA_IRQ_MASK_ALL:
1011*4882a593Smuzhiyun 		dw_pcie_writel_dbi(&rockchip->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK,
1012*4882a593Smuzhiyun 				   0xffffffff);
1013*4882a593Smuzhiyun 		dw_pcie_writel_dbi(&rockchip->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK,
1014*4882a593Smuzhiyun 				   0xffffffff);
1015*4882a593Smuzhiyun 		break;
1016*4882a593Smuzhiyun 	case PCIE_DMA_RAISE_MSI_OBJ_IRQ_USER:
1017*4882a593Smuzhiyun 		rockchip->obj_info->irq_type_rc = OBJ_IRQ_USER;
1018*4882a593Smuzhiyun 		rockchip_pcie_raise_msi_irq(rockchip, PCIe_CLIENT_MSI_OBJ_IRQ);
1019*4882a593Smuzhiyun 		break;
1020*4882a593Smuzhiyun 	case PCIE_EP_GET_USER_INFO:
1021*4882a593Smuzhiyun 		msg.bar0_phys_addr = rockchip->ib_target_address[0];
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun 		ret = copy_to_user(uarg, &msg, sizeof(msg));
1024*4882a593Smuzhiyun 		if (ret) {
1025*4882a593Smuzhiyun 			dev_err(rockchip->pci.dev, "failed to get elbi data\n");
1026*4882a593Smuzhiyun 			return -EFAULT;
1027*4882a593Smuzhiyun 		}
1028*4882a593Smuzhiyun 		break;
1029*4882a593Smuzhiyun 	case PCIE_EP_SET_MMAP_RESOURCE:
1030*4882a593Smuzhiyun 		ret = copy_from_user(&mmap_res, uarg, sizeof(mmap_res));
1031*4882a593Smuzhiyun 		if (ret) {
1032*4882a593Smuzhiyun 			dev_err(rockchip->pci.dev, "failed to get copy from\n");
1033*4882a593Smuzhiyun 			return -EFAULT;
1034*4882a593Smuzhiyun 		}
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 		if (mmap_res >= PCIE_EP_MMAP_RESOURCE_MAX) {
1037*4882a593Smuzhiyun 			dev_err(rockchip->pci.dev, "mmap index %d is out of number\n", mmap_res);
1038*4882a593Smuzhiyun 			return -EINVAL;
1039*4882a593Smuzhiyun 		}
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun 		rockchip->cur_mmap_res = mmap_res;
1042*4882a593Smuzhiyun 		break;
1043*4882a593Smuzhiyun 	default:
1044*4882a593Smuzhiyun 		break;
1045*4882a593Smuzhiyun 	}
1046*4882a593Smuzhiyun 	return 0;
1047*4882a593Smuzhiyun }
1048*4882a593Smuzhiyun 
pcie_ep_mmap(struct file * file,struct vm_area_struct * vma)1049*4882a593Smuzhiyun static int pcie_ep_mmap(struct file *file, struct vm_area_struct *vma)
1050*4882a593Smuzhiyun {
1051*4882a593Smuzhiyun 	struct rockchip_pcie *rockchip = (struct rockchip_pcie *)file->private_data;
1052*4882a593Smuzhiyun 	size_t size = vma->vm_end - vma->vm_start;
1053*4882a593Smuzhiyun 	int err;
1054*4882a593Smuzhiyun 	unsigned long addr;
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 	switch (rockchip->cur_mmap_res) {
1057*4882a593Smuzhiyun 	case PCIE_EP_MMAP_RESOURCE_DBI:
1058*4882a593Smuzhiyun 		if (size > PCIE_DBI_SIZE) {
1059*4882a593Smuzhiyun 			dev_warn(rockchip->pci.dev, "dbi mmap size is out of limitation\n");
1060*4882a593Smuzhiyun 			return -EINVAL;
1061*4882a593Smuzhiyun 		}
1062*4882a593Smuzhiyun 		addr = rockchip->dbi_base_physical;
1063*4882a593Smuzhiyun 		break;
1064*4882a593Smuzhiyun 	case PCIE_EP_MMAP_RESOURCE_BAR0:
1065*4882a593Smuzhiyun 		if (size > rockchip->ib_target_size[0]) {
1066*4882a593Smuzhiyun 			dev_warn(rockchip->pci.dev, "bar0 mmap size is out of limitation\n");
1067*4882a593Smuzhiyun 			return -EINVAL;
1068*4882a593Smuzhiyun 		}
1069*4882a593Smuzhiyun 		addr = rockchip->ib_target_address[0];
1070*4882a593Smuzhiyun 		break;
1071*4882a593Smuzhiyun 	case PCIE_EP_MMAP_RESOURCE_BAR2:
1072*4882a593Smuzhiyun 		if (size > rockchip->ib_target_size[2]) {
1073*4882a593Smuzhiyun 			dev_warn(rockchip->pci.dev, "bar2 mmap size is out of limitation\n");
1074*4882a593Smuzhiyun 			return -EINVAL;
1075*4882a593Smuzhiyun 		}
1076*4882a593Smuzhiyun 		addr = rockchip->ib_target_address[2];
1077*4882a593Smuzhiyun 		break;
1078*4882a593Smuzhiyun 	default:
1079*4882a593Smuzhiyun 		dev_err(rockchip->pci.dev, "cur mmap_res %d is unsurreport\n", rockchip->cur_mmap_res);
1080*4882a593Smuzhiyun 		return -EINVAL;
1081*4882a593Smuzhiyun 	}
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 	vma->vm_flags |= VM_IO;
1084*4882a593Smuzhiyun 	vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	if (rockchip->cur_mmap_res == PCIE_EP_MMAP_RESOURCE_BAR2)
1087*4882a593Smuzhiyun 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1088*4882a593Smuzhiyun 	else
1089*4882a593Smuzhiyun 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	err = remap_pfn_range(vma, vma->vm_start,
1092*4882a593Smuzhiyun 			      __phys_to_pfn(addr),
1093*4882a593Smuzhiyun 			      size, vma->vm_page_prot);
1094*4882a593Smuzhiyun 	if (err)
1095*4882a593Smuzhiyun 		return -EAGAIN;
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 	return 0;
1098*4882a593Smuzhiyun }
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun static const struct file_operations pcie_ep_ops = {
1101*4882a593Smuzhiyun 	.owner = THIS_MODULE,
1102*4882a593Smuzhiyun 	.open = pcie_ep_open,
1103*4882a593Smuzhiyun 	.release = pcie_ep_release,
1104*4882a593Smuzhiyun 	.unlocked_ioctl = pcie_ep_ioctl,
1105*4882a593Smuzhiyun 	.fasync = pcie_ep_fasync,
1106*4882a593Smuzhiyun 	.mmap = pcie_ep_mmap,
1107*4882a593Smuzhiyun };
1108*4882a593Smuzhiyun 
rockchip_pcie_add_misc(struct rockchip_pcie * rockchip)1109*4882a593Smuzhiyun static int rockchip_pcie_add_misc(struct rockchip_pcie *rockchip)
1110*4882a593Smuzhiyun {
1111*4882a593Smuzhiyun 	int ret;
1112*4882a593Smuzhiyun 	struct rockchip_pcie_misc_dev *pcie_dev;
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 	pcie_dev = devm_kzalloc(rockchip->pci.dev, sizeof(struct rockchip_pcie_misc_dev),
1115*4882a593Smuzhiyun 				GFP_KERNEL);
1116*4882a593Smuzhiyun 	if (!pcie_dev)
1117*4882a593Smuzhiyun 		return -ENOMEM;
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	pcie_dev->dev.minor = MISC_DYNAMIC_MINOR;
1120*4882a593Smuzhiyun 	pcie_dev->dev.name = "pcie_ep";
1121*4882a593Smuzhiyun 	pcie_dev->dev.fops = &pcie_ep_ops;
1122*4882a593Smuzhiyun 	pcie_dev->dev.parent = rockchip->pci.dev;
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	ret = misc_register(&pcie_dev->dev);
1125*4882a593Smuzhiyun 	if (ret) {
1126*4882a593Smuzhiyun 		dev_err(rockchip->pci.dev, "pcie: failed to register misc device.\n");
1127*4882a593Smuzhiyun 		return ret;
1128*4882a593Smuzhiyun 	}
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	pcie_dev->pcie = rockchip;
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 	dev_info(rockchip->pci.dev, "register misc device pcie_ep\n");
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 	return 0;
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun 
rockchip_pcie_ep_probe(struct platform_device * pdev)1137*4882a593Smuzhiyun static int rockchip_pcie_ep_probe(struct platform_device *pdev)
1138*4882a593Smuzhiyun {
1139*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
1140*4882a593Smuzhiyun 	struct rockchip_pcie *rockchip;
1141*4882a593Smuzhiyun 	int ret;
1142*4882a593Smuzhiyun 	int retry, i;
1143*4882a593Smuzhiyun 	u32 reg;
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
1146*4882a593Smuzhiyun 	if (!rockchip)
1147*4882a593Smuzhiyun 		return -ENOMEM;
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	platform_set_drvdata(pdev, rockchip);
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	rockchip->pci.dev = dev;
1152*4882a593Smuzhiyun 	rockchip->pci.ops = &dw_pcie_ops;
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 	ret = rockchip_pcie_resource_get(pdev, rockchip);
1155*4882a593Smuzhiyun 	if (ret)
1156*4882a593Smuzhiyun 		return ret;
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun 	/* DON'T MOVE ME: must be enable before phy init */
1159*4882a593Smuzhiyun 	rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
1160*4882a593Smuzhiyun 	if (IS_ERR(rockchip->vpcie3v3)) {
1161*4882a593Smuzhiyun 		if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
1162*4882a593Smuzhiyun 			return PTR_ERR(rockchip->vpcie3v3);
1163*4882a593Smuzhiyun 		dev_info(dev, "no vpcie3v3 regulator found\n");
1164*4882a593Smuzhiyun 	}
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 	if (!IS_ERR(rockchip->vpcie3v3)) {
1167*4882a593Smuzhiyun 		ret = regulator_enable(rockchip->vpcie3v3);
1168*4882a593Smuzhiyun 		if (ret) {
1169*4882a593Smuzhiyun 			dev_err(dev, "fail to enable vpcie3v3 regulator\n");
1170*4882a593Smuzhiyun 			return ret;
1171*4882a593Smuzhiyun 		}
1172*4882a593Smuzhiyun 	}
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	ret = rockchip_pcie_clk_init(rockchip);
1175*4882a593Smuzhiyun 	if (ret)
1176*4882a593Smuzhiyun 		goto disable_regulator;
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	if (dw_pcie_link_up(&rockchip->pci)) {
1179*4882a593Smuzhiyun 		dev_info(dev, "already linkup\n");
1180*4882a593Smuzhiyun 		goto already_linkup;
1181*4882a593Smuzhiyun 	} else {
1182*4882a593Smuzhiyun 		dev_info(dev, "initial\n");
1183*4882a593Smuzhiyun 	}
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	ret = rockchip_pcie_phy_init(rockchip);
1186*4882a593Smuzhiyun 	if (ret)
1187*4882a593Smuzhiyun 		goto deinit_clk;
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	ret = rockchip_pcie_reset_control_release(rockchip);
1190*4882a593Smuzhiyun 	if (ret)
1191*4882a593Smuzhiyun 		goto deinit_phy;
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	dw_pcie_setup(&rockchip->pci);
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	dw_pcie_dbi_ro_wr_en(&rockchip->pci);
1196*4882a593Smuzhiyun 	rockchip_pcie_resize_bar(rockchip);
1197*4882a593Smuzhiyun 	rockchip_pcie_init_id(rockchip);
1198*4882a593Smuzhiyun 	dw_pcie_dbi_ro_wr_dis(&rockchip->pci);
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	rockchip_pcie_fast_link_setup(rockchip);
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	rockchip_pcie_start_link(&rockchip->pci);
1203*4882a593Smuzhiyun 	rockchip_pcie_devmode_update(rockchip, RKEP_MODE_KERNEL, RKEP_SMODE_LNKRDY);
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	rockchip->hot_rst_wq = create_singlethread_workqueue("rkep_hot_rst_wq");
1206*4882a593Smuzhiyun 	if (!rockchip->hot_rst_wq) {
1207*4882a593Smuzhiyun 		dev_err(dev, "failed to create hot_rst workqueue\n");
1208*4882a593Smuzhiyun 		ret = -ENOMEM;
1209*4882a593Smuzhiyun 		goto deinit_phy;
1210*4882a593Smuzhiyun 	}
1211*4882a593Smuzhiyun 	INIT_WORK(&rockchip->hot_rst_work, rockchip_pcie_hot_rst_work);
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
1214*4882a593Smuzhiyun 	if ((reg & BIT(2)) &&
1215*4882a593Smuzhiyun 	    (rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_HOT_RESET_CTRL) & PCIE_LTSSM_APP_DLY2_EN)) {
1216*4882a593Smuzhiyun 		rockchip_pcie_writel_apb(rockchip, PCIE_LTSSM_APP_DLY2_DONE | (PCIE_LTSSM_APP_DLY2_DONE << 16),
1217*4882a593Smuzhiyun 					 PCIE_CLIENT_HOT_RESET_CTRL);
1218*4882a593Smuzhiyun 		dev_info(dev, "hot reset ever\n");
1219*4882a593Smuzhiyun 	}
1220*4882a593Smuzhiyun 	rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	/* Enable client reset or link down interrupt */
1223*4882a593Smuzhiyun 	rockchip_pcie_writel_apb(rockchip, 0x40000, PCIE_CLIENT_INTR_MASK);
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	for (retry = 0; retry < 10000; retry++) {
1226*4882a593Smuzhiyun 		if (dw_pcie_link_up(&rockchip->pci)) {
1227*4882a593Smuzhiyun 			/*
1228*4882a593Smuzhiyun 			 * We may be here in case of L0 in Gen1. But if EP is capable
1229*4882a593Smuzhiyun 			 * of Gen2 or Gen3, Gen switch may happen just in this time, but
1230*4882a593Smuzhiyun 			 * we keep on accessing devices in unstable link status. Given
1231*4882a593Smuzhiyun 			 * that LTSSM max timeout is 24ms per period, we can wait a bit
1232*4882a593Smuzhiyun 			 * more for Gen switch.
1233*4882a593Smuzhiyun 			 */
1234*4882a593Smuzhiyun 			msleep(2000);
1235*4882a593Smuzhiyun 			dev_info(dev, "PCIe Link up, LTSSM is 0x%x\n",
1236*4882a593Smuzhiyun 				 rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS));
1237*4882a593Smuzhiyun 			break;
1238*4882a593Smuzhiyun 		}
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 		dev_info_ratelimited(dev, "PCIe Linking... LTSSM is 0x%x\n",
1241*4882a593Smuzhiyun 				     rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS));
1242*4882a593Smuzhiyun 		msleep(20);
1243*4882a593Smuzhiyun 	}
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 	if (retry >= 10000) {
1246*4882a593Smuzhiyun 		ret = -ENODEV;
1247*4882a593Smuzhiyun 		goto deinit_phy;
1248*4882a593Smuzhiyun 	}
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun already_linkup:
1251*4882a593Smuzhiyun 	rockchip_pcie_devmode_update(rockchip, RKEP_MODE_KERNEL, RKEP_SMODE_LNKUP);
1252*4882a593Smuzhiyun 	rockchip->pci.iatu_unroll_enabled = rockchip_pcie_iatu_unroll_enabled(&rockchip->pci);
1253*4882a593Smuzhiyun 	for (i = 0; i < PCIE_BAR_MAX_NUM; i++)
1254*4882a593Smuzhiyun 		if (rockchip->ib_target_size[i])
1255*4882a593Smuzhiyun 			rockchip_pcie_ep_set_bar(rockchip, i, rockchip->ib_target_address[i]);
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 	ret = rockchip_pcie_init_dma_trx(rockchip);
1258*4882a593Smuzhiyun 	if (ret) {
1259*4882a593Smuzhiyun 		dev_err(dev, "failed to add dma extension\n");
1260*4882a593Smuzhiyun 		return ret;
1261*4882a593Smuzhiyun 	}
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	if (rockchip->dma_obj) {
1264*4882a593Smuzhiyun 		rockchip->dma_obj->start_dma_func = rockchip_pcie_start_dma_dwc;
1265*4882a593Smuzhiyun 		rockchip->dma_obj->config_dma_func = rockchip_pcie_config_dma_dwc;
1266*4882a593Smuzhiyun 		rockchip->dma_obj->get_dma_status = rockchip_pcie_get_dma_status;
1267*4882a593Smuzhiyun 	}
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun 	/* Enable client ELBI interrupt */
1270*4882a593Smuzhiyun 	rockchip_pcie_writel_apb(rockchip, 0x80000000, PCIE_CLIENT_INTR_MASK);
1271*4882a593Smuzhiyun 	/* Enable ELBI interrupt */
1272*4882a593Smuzhiyun 	rockchip_pcie_local_elbi_enable(rockchip);
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 	ret = rockchip_pcie_request_sys_irq(rockchip, pdev);
1275*4882a593Smuzhiyun 	if (ret)
1276*4882a593Smuzhiyun 		goto deinit_phy;
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 	rockchip_pcie_add_misc(rockchip);
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun 	return 0;
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun deinit_phy:
1283*4882a593Smuzhiyun 	rockchip_pcie_phy_deinit(rockchip);
1284*4882a593Smuzhiyun deinit_clk:
1285*4882a593Smuzhiyun 	clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
1286*4882a593Smuzhiyun disable_regulator:
1287*4882a593Smuzhiyun 	if (!IS_ERR(rockchip->vpcie3v3))
1288*4882a593Smuzhiyun 		regulator_disable(rockchip->vpcie3v3);
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	return ret;
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun 
/*
 * Platform driver glue for the Rockchip DWC PCIe endpoint controller.
 *
 * - .of_match_table binds the driver to the DT compatible entries listed
 *   in rockchip_pcie_ep_of_match (defined earlier in this file).
 * - .suppress_bind_attrs disables manual bind/unbind through sysfs; note
 *   that no .remove callback is provided, so the device is expected to
 *   stay bound for the lifetime of the system once probed.
 */
1294*4882a593Smuzhiyun static struct platform_driver rk_plat_pcie_driver = {
1295*4882a593Smuzhiyun 	.driver = {
1296*4882a593Smuzhiyun 		.name	= "rk-pcie-ep",
1297*4882a593Smuzhiyun 		.of_match_table = rockchip_pcie_ep_of_match,
1298*4882a593Smuzhiyun 		.suppress_bind_attrs = true,
1299*4882a593Smuzhiyun 	},
1300*4882a593Smuzhiyun 	.probe = rockchip_pcie_ep_probe,
1301*4882a593Smuzhiyun };
1302*4882a593Smuzhiyun 
/* Registers the driver at module init and unregisters it at module exit. */
1303*4882a593Smuzhiyun module_platform_driver(rk_plat_pcie_driver);
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com>");
1306*4882a593Smuzhiyun MODULE_DESCRIPTION("RockChip PCIe Controller EP driver");
1307*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1307