// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

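/*
 * Write the standard configuration space header of physical function @fn.
 * The vendor and subsystem vendor IDs live in the Local Management ID
 * register and can only be programmed from function 0.
 */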
static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

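/*
 * Configure the inbound address translation for BAR @epf_bar->barno of
 * function @fn. The controller encodes the BAR size as an aperture of
 * 2^(aperture + 7) bytes, e.g. a 1 MiB BAR uses aperture 13.
 */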
static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
	cdns_pcie_writel(pcie, reg, cfg);

	epf->epf_bar[bar] = epf_bar;

	return 0;
}

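/*
 * Disable BAR @epf_bar->barno of function @fn and clear its inbound
 * address translation registers.
 */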
static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
	cdns_pcie_writel(pcie, reg, cfg);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	epf->epf_bar[bar] = NULL;
}

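/*
 * Map a CPU address from the endpoint's memory window to a PCI bus
 * address, using the first free outbound region (region 0 is reserved
 * for IRQ writes).
 */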
static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
				 u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

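/*
 * Tear down the outbound region previously mapped to @addr, if any, and
 * mark it free again.
 */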
static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

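/*
 * Advertise the number of MSI vectors the function may request by
 * programming the Multiple Message Capable field (log2 encoded) of the
 * MSI Message Control register.
 */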
static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

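/*
 * Return the number of MSI vectors granted by the host as a log2 value
 * (Multiple Message Enable), or -EINVAL if MSI is disabled.
 */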
static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

	return mme;
}

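/*
 * Return the MSI-X table size encoded in the Message Control register
 * (number of vectors minus one), or -EINVAL if MSI-X is disabled.
 */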
static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}

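/*
 * Program the MSI-X capability: table size, plus the BAR indicator and
 * offsets of the MSI-X table and the Pending Bit Array (PBA).
 */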
static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
				 enum pci_barno bir, u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set MSIX BAR and offset */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set PBA BAR and offset.  BAR must match MSIX BAR */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}

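/*
 * Assert or deassert an INTx line by sending the corresponding message
 * TLP through the outbound region reserved for IRQ writes, keeping the
 * Interrupt Status bit of the Status register in sync.
 */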
static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
				     u8 intx, bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}

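/* Pulse an INTx interrupt, unless INTx generation is disabled. */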
static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

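/*
 * Raise an MSI by writing the message data to the address the host
 * programmed in the MSI capability, through the outbound region
 * reserved for IRQ writes.
 */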
static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address where to write the data into. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

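/*
 * Raise an MSI-X interrupt by reading the message address and data from
 * the function's MSI-X table and writing the data to that address.
 */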
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
				      u16 interrupt_num)
{
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u16 flags;
	u8 bir;

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = tbl_offset & PCI_MSIX_TABLE_BIR;
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	epf = &ep->epf[fn];
	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

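/* Dispatch an IRQ request to the legacy, MSI or MSI-X handler. */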
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

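/*
 * Enable the configured physical functions (function 0 is hardwired to
 * enabled) and bring up the PCIe link.
 */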
static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	struct pci_epf *epf;
	u32 cfg;
	int ret;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cfg = BIT(0);
	list_for_each_entry(epf, &epc->pci_epf, list)
		cfg |= BIT(epf->func_no);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
{
	return &cdns_pcie_epc_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.set_msix	= cdns_pcie_ep_set_msix,
	.get_msix	= cdns_pcie_ep_get_msix,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.start		= cdns_pcie_ep_start,
	.get_features	= cdns_pcie_ep_get_features,
};

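/*
 * Probe-time setup: map the controller registers, parse the DT
 * properties, create the endpoint controller device and reserve
 * outbound region 0 for IRQ writes.
 */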
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct resource *res;
	struct pci_epc *epc;
	int ret;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ret = of_property_read_u32(np, "cdns,max-outbound-regions",
				   &ep->max_regions);
	if (ret < 0) {
		dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
		return ret;
	}
	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);

	if (ep->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);

	spin_lock_init(&ep->lock);

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}