xref: /OK3568_Linux_fs/kernel/drivers/pci/controller/cadence/pcie-cadence.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun // Copyright (c) 2017 Cadence
3*4882a593Smuzhiyun // Cadence PCIe controller driver.
4*4882a593Smuzhiyun // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/kernel.h>
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include "pcie-cadence.h"
9*4882a593Smuzhiyun 
cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie * pcie)10*4882a593Smuzhiyun void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
11*4882a593Smuzhiyun {
12*4882a593Smuzhiyun 	u32 delay = 0x3;
13*4882a593Smuzhiyun 	u32 ltssm_control_cap;
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun 	/*
16*4882a593Smuzhiyun 	 * Set the LTSSM Detect Quiet state min. delay to 2ms.
17*4882a593Smuzhiyun 	 */
18*4882a593Smuzhiyun 	ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
19*4882a593Smuzhiyun 	ltssm_control_cap = ((ltssm_control_cap &
20*4882a593Smuzhiyun 			    ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
21*4882a593Smuzhiyun 			    CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun 	cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
24*4882a593Smuzhiyun }
25*4882a593Smuzhiyun 
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size)
{
	/*
	 * Compute the number of address bits the region must cover.
	 * roundup_pow_of_two() returns an unsigned long, which is not
	 * suited for 64-bit values, so derive the power-of-two size from
	 * fls64() instead.  The hardware needs at least 8 bits.
	 */
	u64 region_sz = 1ULL << fls64(size - 1);
	int nbits = ilog2(region_sz);
	u32 addr0, addr1, desc0, desc1;

	if (nbits < 8)
		nbits = 8;

	/* Program the PCI (bus) address of the region. */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
		(lower_32_bits(pci_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(pci_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);

	/* Build the PCIe TLP header descriptor. */
	desc0 = is_io ? CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO :
			CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
	desc1 = 0;

	/*
	 * Whether or not Bit [23] (HARDCODED_RID) is set in DESC0, the PCI
	 * function number must always go into Bits [26:24] of DESC0.
	 *
	 * In Root Complex mode the function number is always 0, but in
	 * Endpoint mode the controller may expose several functions, so
	 * the caller-supplied function number is programmed instead.
	 *
	 * Setting Bit [23] is mandatory in Root Complex mode: the driver
	 * then supplies the bus number in Bits [7:0] of DESC1 and the
	 * device number in Bits [31:27] of DESC0 (the device number, like
	 * the function number, is always 0 for a Root Complex).
	 *
	 * In Endpoint mode Bit [23] is left clear so the controller uses
	 * the bus and device numbers it captured during enumeration.
	 */
	if (pcie->is_rc) {
		/* Device and function numbers are hardcoded to 0. */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	} else {
		/* Captured bus/device numbers; only the function is set. */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);

	/* Program the CPU address, after any platform-specific remapping. */
	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
102*4882a593Smuzhiyun 
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr)
{
	u32 addr0, addr1, desc0, desc1;

	/* Normal-message regions carry no PCI address, only a descriptor. */
	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
	desc1 = 0;

	/*
	 * Requester ID handling mirrors cdns_pcie_set_outbound_region():
	 * hardcode bus/devfn in Root Complex mode, otherwise program only
	 * the function number and use the captured bus/device numbers.
	 */
	if (pcie->is_rc)
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	desc0 |= pcie->is_rc ?
		 (CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		  CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0)) :
		 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);

	/* Program the CPU address, after any platform-specific remapping. */
	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
136*4882a593Smuzhiyun 
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
	/* Clear the PCI address of outbound region r. */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
	/* Clear its TLP header descriptor. */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);
	/* Clear its CPU address. */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
148*4882a593Smuzhiyun 
cdns_pcie_disable_phy(struct cdns_pcie * pcie)149*4882a593Smuzhiyun void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun 	int i = pcie->phy_count;
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	while (i--) {
154*4882a593Smuzhiyun 		phy_power_off(pcie->phy[i]);
155*4882a593Smuzhiyun 		phy_exit(pcie->phy[i]);
156*4882a593Smuzhiyun 	}
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun 
cdns_pcie_enable_phy(struct cdns_pcie * pcie)159*4882a593Smuzhiyun int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
160*4882a593Smuzhiyun {
161*4882a593Smuzhiyun 	int ret;
162*4882a593Smuzhiyun 	int i;
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	for (i = 0; i < pcie->phy_count; i++) {
165*4882a593Smuzhiyun 		ret = phy_init(pcie->phy[i]);
166*4882a593Smuzhiyun 		if (ret < 0)
167*4882a593Smuzhiyun 			goto err_phy;
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 		ret = phy_power_on(pcie->phy[i]);
170*4882a593Smuzhiyun 		if (ret < 0) {
171*4882a593Smuzhiyun 			phy_exit(pcie->phy[i]);
172*4882a593Smuzhiyun 			goto err_phy;
173*4882a593Smuzhiyun 		}
174*4882a593Smuzhiyun 	}
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun 	return 0;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun err_phy:
179*4882a593Smuzhiyun 	while (--i >= 0) {
180*4882a593Smuzhiyun 		phy_power_off(pcie->phy[i]);
181*4882a593Smuzhiyun 		phy_exit(pcie->phy[i]);
182*4882a593Smuzhiyun 	}
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 	return ret;
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun 
/*
 * cdns_pcie_init_phy() - look up, link and power on the PHYs named by the
 * controller's "phy-names" DT property.
 *
 * Returns 0 on success (including when no PHY is described in DT, which is
 * reported but not fatal) or a negative errno.  On failure, every device
 * link and PHY reference acquired so far is released.
 */
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
	struct device_node *np = dev->of_node;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	int i;
	int ret;
	const char *name;

	/* No "phy-names" property: run without driver-managed PHYs. */
	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 1) {
		dev_err(dev, "no phy-names.  PHY will not be initialized\n");
		pcie->phy_count = 0;
		return 0;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		of_property_read_string_index(np, "phy-names", i, &name);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_phy;
		}
		/*
		 * Stateless device link to the PHY provider so PM ordering
		 * follows the supplier/consumer relationship.
		 */
		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			devm_phy_put(dev, phy[i]);
			ret = -EINVAL;
			goto err_phy;
		}
	}

	pcie->phy_count = phy_count;
	pcie->phy = phy;
	pcie->link = link;

	ret =  cdns_pcie_enable_phy(pcie);
	if (ret)
		goto err_phy;

	return 0;

err_phy:
	/*
	 * Unwind only the entries set up so far: on a mid-loop failure, i
	 * indexes the failed slot (already cleaned above), while after a
	 * cdns_pcie_enable_phy() failure i == phy_count, so all links and
	 * PHY references are released.
	 */
	while (--i >= 0) {
		device_link_del(link[i]);
		devm_phy_put(dev, phy[i]);
	}

	return ret;
}
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
static int cdns_pcie_suspend_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);

	/* Power down the controller's PHYs for system sleep. */
	cdns_pcie_disable_phy(pcie);
	return 0;
}
255*4882a593Smuzhiyun 
static int cdns_pcie_resume_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int err;

	/* Re-enable the PHYs that were powered off during suspend. */
	err = cdns_pcie_enable_phy(pcie);
	if (err)
		dev_err(dev, "failed to enable phy\n");

	return err;
}
269*4882a593Smuzhiyun #endif
270*4882a593Smuzhiyun 
/*
 * PM callbacks shared by the Cadence PCIe host and endpoint drivers:
 * PHYs are disabled in the noirq suspend phase and re-enabled on resume
 * (no-ops when CONFIG_PM_SLEEP is not set).
 */
const struct dev_pm_ops cdns_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
				      cdns_pcie_resume_noirq)
};
275