/*
 * Copyright 2017 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 * Layerscape PCIe driver
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <common.h>
10*4882a593Smuzhiyun #include <asm/arch/fsl_serdes.h>
11*4882a593Smuzhiyun #include <pci.h>
12*4882a593Smuzhiyun #include <asm/io.h>
13*4882a593Smuzhiyun #include <errno.h>
14*4882a593Smuzhiyun #include <malloc.h>
15*4882a593Smuzhiyun #include <dm.h>
16*4882a593Smuzhiyun #if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
17*4882a593Smuzhiyun defined(CONFIG_ARM)
18*4882a593Smuzhiyun #include <asm/arch/clock.h>
19*4882a593Smuzhiyun #endif
20*4882a593Smuzhiyun #include "pcie_layerscape.h"
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun DECLARE_GLOBAL_DATA_PTR;
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun LIST_HEAD(ls_pcie_list);
25*4882a593Smuzhiyun
/* Read a 32-bit little-endian register from the controller's DBI space */
static unsigned int dbi_readl(struct ls_pcie *pcie, unsigned int offset)
{
	return in_le32(pcie->dbi + offset);
}
30*4882a593Smuzhiyun
/* Write a 32-bit little-endian register in the controller's DBI space */
static void dbi_writel(struct ls_pcie *pcie, unsigned int value,
		       unsigned int offset)
{
	out_le32(pcie->dbi + offset, value);
}
36*4882a593Smuzhiyun
ctrl_readl(struct ls_pcie * pcie,unsigned int offset)37*4882a593Smuzhiyun static unsigned int ctrl_readl(struct ls_pcie *pcie, unsigned int offset)
38*4882a593Smuzhiyun {
39*4882a593Smuzhiyun if (pcie->big_endian)
40*4882a593Smuzhiyun return in_be32(pcie->ctrl + offset);
41*4882a593Smuzhiyun else
42*4882a593Smuzhiyun return in_le32(pcie->ctrl + offset);
43*4882a593Smuzhiyun }
44*4882a593Smuzhiyun
ctrl_writel(struct ls_pcie * pcie,unsigned int value,unsigned int offset)45*4882a593Smuzhiyun static void ctrl_writel(struct ls_pcie *pcie, unsigned int value,
46*4882a593Smuzhiyun unsigned int offset)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun if (pcie->big_endian)
49*4882a593Smuzhiyun out_be32(pcie->ctrl + offset, value);
50*4882a593Smuzhiyun else
51*4882a593Smuzhiyun out_le32(pcie->ctrl + offset, value);
52*4882a593Smuzhiyun }
53*4882a593Smuzhiyun
ls_pcie_ltssm(struct ls_pcie * pcie)54*4882a593Smuzhiyun static int ls_pcie_ltssm(struct ls_pcie *pcie)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun u32 state;
57*4882a593Smuzhiyun uint svr;
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun svr = get_svr();
60*4882a593Smuzhiyun if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
61*4882a593Smuzhiyun state = ctrl_readl(pcie, LS1021_PEXMSCPORTSR(pcie->idx));
62*4882a593Smuzhiyun state = (state >> LS1021_LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
63*4882a593Smuzhiyun } else {
64*4882a593Smuzhiyun state = ctrl_readl(pcie, PCIE_PF_DBG) & LTSSM_STATE_MASK;
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun return state;
68*4882a593Smuzhiyun }
69*4882a593Smuzhiyun
ls_pcie_link_up(struct ls_pcie * pcie)70*4882a593Smuzhiyun static int ls_pcie_link_up(struct ls_pcie *pcie)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun int ltssm;
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun ltssm = ls_pcie_ltssm(pcie);
75*4882a593Smuzhiyun if (ltssm < LTSSM_PCIE_L0)
76*4882a593Smuzhiyun return 0;
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun return 1;
79*4882a593Smuzhiyun }
80*4882a593Smuzhiyun
/*
 * Retarget outbound iATU window 0 (the CFG0 window) at the bus/dev/func
 * encoded in @busdev.  The viewport write selects the window; the target
 * write that follows applies to the selected window.
 */
static void ls_pcie_cfg0_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
		   PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
}
87*4882a593Smuzhiyun
/*
 * Retarget outbound iATU window 1 (the CFG1 window) at the bus/dev/func
 * encoded in @busdev.  Same viewport-then-target sequence as CFG0.
 */
static void ls_pcie_cfg1_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
		   PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
}
94*4882a593Smuzhiyun
/*
 * Program outbound iATU window @idx to translate CPU addresses
 * [@phys, @phys + @size - 1] to bus address @bus_addr, generating TLPs
 * of @type.  The viewport write selects the window; the window is
 * enabled via CR2 as the final step.
 */
static void ls_pcie_atu_outbound_set(struct ls_pcie *pcie, int idx, int type,
				     u64 phys, u64 bus_addr, pci_size_t size)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_BASE);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_BASE);
	/* The limit register holds only the low 32 bits of the end address */
	dbi_writel(pcie, (u32)phys + size - 1, PCIE_ATU_LIMIT);
	dbi_writel(pcie, (u32)bus_addr, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, bus_addr >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, type, PCIE_ATU_CR1);
	dbi_writel(pcie, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun /* Use bar match mode and MEM type as default */
static void ls_pcie_atu_inbound_set(struct ls_pcie *pcie, int idx,
				    int bar, u64 phys)
{
	/* Select inbound window @idx, then point its target at @phys */
	dbi_writel(pcie, PCIE_ATU_REGION_INBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
	/* BAR-match mode: accesses hitting BAR @bar are routed to @phys */
	dbi_writel(pcie, PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE |
		   PCIE_ATU_BAR_NUM(bar), PCIE_ATU_CR2);
}
119*4882a593Smuzhiyun
ls_pcie_dump_atu(struct ls_pcie * pcie)120*4882a593Smuzhiyun static void ls_pcie_dump_atu(struct ls_pcie *pcie)
121*4882a593Smuzhiyun {
122*4882a593Smuzhiyun int i;
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun for (i = 0; i < PCIE_ATU_REGION_NUM; i++) {
125*4882a593Smuzhiyun dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | i,
126*4882a593Smuzhiyun PCIE_ATU_VIEWPORT);
127*4882a593Smuzhiyun debug("iATU%d:\n", i);
128*4882a593Smuzhiyun debug("\tLOWER PHYS 0x%08x\n",
129*4882a593Smuzhiyun dbi_readl(pcie, PCIE_ATU_LOWER_BASE));
130*4882a593Smuzhiyun debug("\tUPPER PHYS 0x%08x\n",
131*4882a593Smuzhiyun dbi_readl(pcie, PCIE_ATU_UPPER_BASE));
132*4882a593Smuzhiyun debug("\tLOWER BUS 0x%08x\n",
133*4882a593Smuzhiyun dbi_readl(pcie, PCIE_ATU_LOWER_TARGET));
134*4882a593Smuzhiyun debug("\tUPPER BUS 0x%08x\n",
135*4882a593Smuzhiyun dbi_readl(pcie, PCIE_ATU_UPPER_TARGET));
136*4882a593Smuzhiyun debug("\tLIMIT 0x%08x\n",
137*4882a593Smuzhiyun readl(pcie->dbi + PCIE_ATU_LIMIT));
138*4882a593Smuzhiyun debug("\tCR1 0x%08x\n",
139*4882a593Smuzhiyun dbi_readl(pcie, PCIE_ATU_CR1));
140*4882a593Smuzhiyun debug("\tCR2 0x%08x\n",
141*4882a593Smuzhiyun dbi_readl(pcie, PCIE_ATU_CR2));
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun
/*
 * Program the outbound iATU windows for RC mode: the "config" resource
 * is split in half into CFG0 (directly attached bus) and CFG1 (buses
 * behind bridges) windows, followed by one window each for the IO, MEM
 * and prefetchable MEM regions reported by the PCI uclass.
 */
static void ls_pcie_setup_atu(struct ls_pcie *pcie)
{
	struct pci_region *io, *mem, *pref;
	unsigned long long offset = 0;
	int idx = 0;
	uint svr;

	/* LS1021A places each controller's space at a fixed CPU offset */
	svr = get_svr();
	if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
		offset = LS1021_PCIE_SPACE_OFFSET +
			 LS1021_PCIE_SPACE_SIZE * pcie->idx;
	}

	/* ATU 0 : OUTBOUND : CFG0 (first half of the config resource) */
	ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX0,
				 PCIE_ATU_TYPE_CFG0,
				 pcie->cfg_res.start + offset,
				 0,
				 fdt_resource_size(&pcie->cfg_res) / 2);
	/* ATU 1 : OUTBOUND : CFG1 (second half of the config resource) */
	ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX1,
				 PCIE_ATU_TYPE_CFG1,
				 pcie->cfg_res.start + offset +
				 fdt_resource_size(&pcie->cfg_res) / 2,
				 0,
				 fdt_resource_size(&pcie->cfg_res) / 2);

	pci_get_regions(pcie->bus, &io, &mem, &pref);
	/* Remaining windows are allocated after the two config windows */
	idx = PCIE_ATU_REGION_INDEX1 + 1;

	/* Fix the pcie memory map for LS2088A series SoCs */
	svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
	if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
	    svr == SVR_LS2048A || svr == SVR_LS2044A ||
	    svr == SVR_LS2081A || svr == SVR_LS2041A) {
		/*
		 * Keep only the offset within a controller's window and
		 * rebase it onto the LS2088A-specific physical layout.
		 */
		if (io)
			io->phys_start = (io->phys_start &
					 (PCIE_PHYS_SIZE - 1)) +
					 LS2088A_PCIE1_PHYS_ADDR +
					 LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		if (mem)
			mem->phys_start = (mem->phys_start &
					  (PCIE_PHYS_SIZE - 1)) +
					  LS2088A_PCIE1_PHYS_ADDR +
					  LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		if (pref)
			pref->phys_start = (pref->phys_start &
					   (PCIE_PHYS_SIZE - 1)) +
					   LS2088A_PCIE1_PHYS_ADDR +
					   LS2088A_PCIE_PHYS_SIZE * pcie->idx;
	}

	if (io)
		/* ATU : OUTBOUND : IO */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_IO,
					 io->phys_start + offset,
					 io->bus_start,
					 io->size);

	if (mem)
		/* ATU : OUTBOUND : MEM */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_MEM,
					 mem->phys_start + offset,
					 mem->bus_start,
					 mem->size);

	if (pref)
		/* ATU : OUTBOUND : pref (also MEM-type TLPs) */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_MEM,
					 pref->phys_start + offset,
					 pref->bus_start,
					 pref->size);

	ls_pcie_dump_atu(pcie);
}
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun /* Return 0 if the address is valid, -errno if not valid */
static int ls_pcie_addr_valid(struct ls_pcie *pcie, pci_dev_t bdf)
{
	struct udevice *bus = pcie->bus;

	/* Controller's SerDes lanes are not configured for PCIe */
	if (!pcie->enabled)
		return -ENXIO;

	/* No bus numbers exist below this controller's root bus */
	if (PCI_BUS(bdf) < bus->seq)
		return -EINVAL;

	/* Buses past the root are only reachable once the link is up */
	if ((PCI_BUS(bdf) > bus->seq) && (!ls_pcie_link_up(pcie)))
		return -EINVAL;

	/* Root bus and the bus directly below carry only device 0 */
	if (PCI_BUS(bdf) <= (bus->seq + 1) && (PCI_DEV(bdf) > 0))
		return -EINVAL;

	return 0;
}
243*4882a593Smuzhiyun
/*
 * Return a CPU pointer for accessing config space of @bdf at @offset.
 *
 * The root bus maps straight onto the DBI space.  Other buses go through
 * the CFG0/CFG1 iATU windows; as a side effect the relevant window is
 * retargeted at @bdf before the pointer is returned, so the caller must
 * perform the access before this function is called again.
 */
void *ls_pcie_conf_address(struct ls_pcie *pcie, pci_dev_t bdf,
			   int offset)
{
	struct udevice *bus = pcie->bus;
	u32 busdev;

	if (PCI_BUS(bdf) == bus->seq)
		return pcie->dbi + offset;

	busdev = PCIE_ATU_BUS(PCI_BUS(bdf)) |
		 PCIE_ATU_DEV(PCI_DEV(bdf)) |
		 PCIE_ATU_FUNC(PCI_FUNC(bdf));

	if (PCI_BUS(bdf) == bus->seq + 1) {
		/* Directly attached device: CFG0-type window */
		ls_pcie_cfg0_set_busdev(pcie, busdev);
		return pcie->cfg0 + offset;
	} else {
		/* Device behind a bridge: CFG1-type window */
		ls_pcie_cfg1_set_busdev(pcie, busdev);
		return pcie->cfg1 + offset;
	}
}
265*4882a593Smuzhiyun
/*
 * DM PCI read_config hook: read @size bytes of config space for @bdf.
 * Unreachable devices read back as all-ones, matching PCI convention.
 */
static int ls_pcie_read_config(struct udevice *bus, pci_dev_t bdf,
			       uint offset, ulong *valuep,
			       enum pci_size_t size)
{
	struct ls_pcie *pcie = dev_get_priv(bus);
	void *addr;

	if (ls_pcie_addr_valid(pcie, bdf)) {
		*valuep = pci_get_ff(size);
		return 0;
	}

	addr = ls_pcie_conf_address(pcie, bdf, offset);

	switch (size) {
	case PCI_SIZE_8:
		*valuep = readb(addr);
		break;
	case PCI_SIZE_16:
		*valuep = readw(addr);
		break;
	case PCI_SIZE_32:
		*valuep = readl(addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
294*4882a593Smuzhiyun
/*
 * DM PCI write_config hook: write @size bytes of config space for @bdf.
 * Writes to unreachable devices are silently dropped.
 */
static int ls_pcie_write_config(struct udevice *bus, pci_dev_t bdf,
				uint offset, ulong value,
				enum pci_size_t size)
{
	struct ls_pcie *pcie = dev_get_priv(bus);
	void *addr;

	if (ls_pcie_addr_valid(pcie, bdf))
		return 0;

	addr = ls_pcie_conf_address(pcie, bdf, offset);

	switch (size) {
	case PCI_SIZE_8:
		writeb(value, addr);
		break;
	case PCI_SIZE_16:
		writew(value, addr);
		break;
	case PCI_SIZE_32:
		writel(value, addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
321*4882a593Smuzhiyun
322*4882a593Smuzhiyun /* Clear multi-function bit */
static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
{
	/* Write a plain bridge header type, dropping the multi-function bit */
	writeb(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
}
327*4882a593Smuzhiyun
328*4882a593Smuzhiyun /* Fix class value */
static void ls_pcie_fix_class(struct ls_pcie *pcie)
{
	/* Force the class code to PCI-to-PCI bridge for RC operation */
	writew(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
}
333*4882a593Smuzhiyun
334*4882a593Smuzhiyun /* Drop MSG TLP except for Vendor MSG */
static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
	u32 val;

	/* Clear bit 29 of STRFMR1 (mask 0xDFFFFFFF) via read-modify-write */
	val = dbi_readl(pcie, PCIE_STRFMR1);
	val &= 0xDFFFFFFF;
	dbi_writel(pcie, val, PCIE_STRFMR1);
}
343*4882a593Smuzhiyun
344*4882a593Smuzhiyun /* Disable all bars in RC mode */
static void ls_pcie_disable_bars(struct ls_pcie *pcie)
{
	u32 sriov;

	sriov = in_le32(pcie->dbi + PCIE_SRIOV);

	/*
	 * TODO: For PCIe controller with SRIOV, the method to disable bars
	 * is different and more complex, so will add later.
	 */
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV)
		return;

	/*
	 * Writing zero through the CS2 offset — presumably the shadow/mask
	 * view of the BAR registers (TODO confirm against controller docs) —
	 * disables BAR0, BAR1 and the expansion ROM.
	 */
	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0);
	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1);
	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1);
}
362*4882a593Smuzhiyun
/* One-time RC-mode setup: iATU windows plus config-header fixups */
static void ls_pcie_setup_ctrl(struct ls_pcie *pcie)
{
	ls_pcie_setup_atu(pcie);

	/* The fixed-up header fields are writable only while RO_WR_EN is 1 */
	dbi_writel(pcie, 1, PCIE_DBI_RO_WR_EN);
	ls_pcie_fix_class(pcie);
	ls_pcie_clear_multifunction(pcie);
	ls_pcie_drop_msg_tlp(pcie);
	dbi_writel(pcie, 0, PCIE_DBI_RO_WR_EN);

	ls_pcie_disable_bars(pcie);
}
375*4882a593Smuzhiyun
/*
 * EP mode: map BAR0/1/2/4 inbound onto memory starting at
 * CONFIG_SYS_PCI_EP_MEMORY_BASE, plus one outbound MEM window.
 */
static void ls_pcie_ep_setup_atu(struct ls_pcie *pcie)
{
	u64 phys = CONFIG_SYS_PCI_EP_MEMORY_BASE;

	/* ATU 0 : INBOUND : map BAR0 */
	ls_pcie_atu_inbound_set(pcie, 0, 0, phys);
	/* ATU 1 : INBOUND : map BAR1 */
	phys += PCIE_BAR1_SIZE;
	ls_pcie_atu_inbound_set(pcie, 1, 1, phys);
	/* ATU 2 : INBOUND : map BAR2 */
	phys += PCIE_BAR2_SIZE;
	ls_pcie_atu_inbound_set(pcie, 2, 2, phys);
	/*
	 * ATU 3 : INBOUND : map BAR4
	 * NOTE(review): this restarts from base + PCIE_BAR4_SIZE instead of
	 * continuing past BAR2's target, so the BAR4 target may overlap the
	 * BAR1/BAR2 ranges — confirm this is intentional.
	 */
	phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR4_SIZE;
	ls_pcie_atu_inbound_set(pcie, 3, 4, phys);

	/* ATU 0 : OUTBOUND : map MEM */
	ls_pcie_atu_outbound_set(pcie, 0,
				 PCIE_ATU_TYPE_MEM,
				 pcie->cfg_res.start,
				 0,
				 CONFIG_SYS_PCI_MEMORY_SIZE);
}
399*4882a593Smuzhiyun
400*4882a593Smuzhiyun /* BAR0 and BAR1 are 32bit BAR2 and BAR4 are 64bit */
static void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
{
	/* The least inbound window is 4KiB */
	if (size < 4 * 1024)
		return;

	/*
	 * A value of (size - 1) is written into each BAR register;
	 * presumably this programs the BAR size mask — TODO confirm.
	 * BAR0/BAR1 are 32-bit; BAR2 and BAR4 are 64-bit, so their upper
	 * halves (BAR3/BAR5 registers) are cleared.
	 */
	switch (bar) {
	case 0:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_0);
		break;
	case 1:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_1);
		break;
	case 2:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_2);
		writel(0, bar_base + PCI_BASE_ADDRESS_3);
		break;
	case 4:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_4);
		writel(0, bar_base + PCI_BASE_ADDRESS_5);
		break;
	default:
		break;
	}
}
426*4882a593Smuzhiyun
ls_pcie_ep_setup_bars(void * bar_base)427*4882a593Smuzhiyun static void ls_pcie_ep_setup_bars(void *bar_base)
428*4882a593Smuzhiyun {
429*4882a593Smuzhiyun /* BAR0 - 32bit - 4K configuration */
430*4882a593Smuzhiyun ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE);
431*4882a593Smuzhiyun /* BAR1 - 32bit - 8K MSIX*/
432*4882a593Smuzhiyun ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE);
433*4882a593Smuzhiyun /* BAR2 - 64bit - 4K MEM desciptor */
434*4882a593Smuzhiyun ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE);
435*4882a593Smuzhiyun /* BAR4 - 64bit - 1M MEM*/
436*4882a593Smuzhiyun ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE);
437*4882a593Smuzhiyun }
438*4882a593Smuzhiyun
static void ls_pcie_ep_enable_cfg(struct ls_pcie *pcie)
{
	/* Raise CONFIG_READY so the host may enumerate this endpoint */
	ctrl_writel(pcie, PCIE_CONFIG_READY, PCIE_PF_CONFIG);
}
443*4882a593Smuzhiyun
/* Configure the controller for endpoint operation */
static void ls_pcie_setup_ep(struct ls_pcie *pcie)
{
	u32 sriov;

	sriov = readl(pcie->dbi + PCIE_SRIOV);
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV) {
		int pf, vf;

		/*
		 * With SR-IOV, BARs and ATU windows are programmed once per
		 * function, selected via the PF_VF_CTRL register.
		 * NOTE(review): the inner loop runs PCIE_VF_NUM + 1 times;
		 * vf == 0 presumably addresses the PF itself — confirm the
		 * <= bound is intentional.
		 */
		for (pf = 0; pf < PCIE_PF_NUM; pf++) {
			for (vf = 0; vf <= PCIE_VF_NUM; vf++) {
				ctrl_writel(pcie, PCIE_LCTRL0_VAL(pf, vf),
					    PCIE_PF_VF_CTRL);

				ls_pcie_ep_setup_bars(pcie->dbi);
				ls_pcie_ep_setup_atu(pcie);
			}
		}
		/* Disable CFG2 */
		ctrl_writel(pcie, 0, PCIE_PF_VF_CTRL);
	} else {
		ls_pcie_ep_setup_bars(pcie->dbi + PCIE_NO_SRIOV_BAR_BASE);
		ls_pcie_ep_setup_atu(pcie);
	}

	ls_pcie_ep_enable_cfg(pcie);
}
470*4882a593Smuzhiyun
/*
 * Probe one PCIe controller: map its register blocks from the device
 * tree "reg"/"reg-names" resources, detect RC vs EP mode from the
 * config header type, program the iATU windows and report link status.
 * Returns 0 on success (including "disabled" and "no link" outcomes).
 */
static int ls_pcie_probe(struct udevice *dev)
{
	struct ls_pcie *pcie = dev_get_priv(dev);
	const void *fdt = gd->fdt_blob;
	int node = dev_of_offset(dev);
	u8 header_type;
	u16 link_sta;
	bool ep_mode;
	uint svr;
	int ret;
	fdt_size_t cfg_size;

	pcie->bus = dev;

	/* "dbi" (the controller's internal register space) is mandatory */
	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "dbi", &pcie->dbi_res);
	if (ret) {
		printf("ls-pcie: resource \"dbi\" not found\n");
		return ret;
	}

	/* Controller index derived from its position in the CCSR map */
	pcie->idx = (pcie->dbi_res.start - PCIE_SYS_BASE_ADDR) / PCIE_CCSR_SIZE;

	list_add(&pcie->list, &ls_pcie_list);

	/* Skip controllers whose SerDes lanes are not muxed to PCIe */
	pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx));
	if (!pcie->enabled) {
		printf("PCIe%d: %s disabled\n", pcie->idx, dev->name);
		return 0;
	}

	pcie->dbi = map_physmem(pcie->dbi_res.start,
				fdt_resource_size(&pcie->dbi_res),
				MAP_NOCACHE);

	/* "lut" and "ctrl" are optional; ctrl falls back to lut below */
	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "lut", &pcie->lut_res);
	if (!ret)
		pcie->lut = map_physmem(pcie->lut_res.start,
					fdt_resource_size(&pcie->lut_res),
					MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "ctrl", &pcie->ctrl_res);
	if (!ret)
		pcie->ctrl = map_physmem(pcie->ctrl_res.start,
					 fdt_resource_size(&pcie->ctrl_res),
					 MAP_NOCACHE);
	if (!pcie->ctrl)
		pcie->ctrl = pcie->lut;

	if (!pcie->ctrl) {
		printf("%s: NOT find CTRL\n", dev->name);
		return -1;
	}

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "config", &pcie->cfg_res);
	if (ret) {
		printf("%s: resource \"config\" not found\n", dev->name);
		return ret;
	}

	/*
	 * Fix the pcie memory map address and PF control registers address
	 * for LS2088A series SoCs
	 */
	svr = get_svr();
	svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
	if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
	    svr == SVR_LS2048A || svr == SVR_LS2044A ||
	    svr == SVR_LS2081A || svr == SVR_LS2041A) {
		cfg_size = fdt_resource_size(&pcie->cfg_res);
		pcie->cfg_res.start = LS2088A_PCIE1_PHYS_ADDR +
				      LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		pcie->cfg_res.end = pcie->cfg_res.start + cfg_size;
		/* PF control registers sit at a fixed offset above the LUT */
		pcie->ctrl = pcie->lut + 0x40000;
	}

	/* First half of "config" becomes CFG0 space, second half CFG1 */
	pcie->cfg0 = map_physmem(pcie->cfg_res.start,
				 fdt_resource_size(&pcie->cfg_res),
				 MAP_NOCACHE);
	pcie->cfg1 = pcie->cfg0 + fdt_resource_size(&pcie->cfg_res) / 2;

	pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian");

	debug("%s dbi:%lx lut:%lx ctrl:0x%lx cfg0:0x%lx, big-endian:%d\n",
	      dev->name, (unsigned long)pcie->dbi, (unsigned long)pcie->lut,
	      (unsigned long)pcie->ctrl, (unsigned long)pcie->cfg0,
	      pcie->big_endian);

	/* A type-0 (normal) header means the controller is strapped as EP */
	header_type = readb(pcie->dbi + PCI_HEADER_TYPE);
	ep_mode = (header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL;
	printf("PCIe%u: %s %s", pcie->idx, dev->name,
	       ep_mode ? "Endpoint" : "Root Complex");

	if (ep_mode)
		ls_pcie_setup_ep(pcie);
	else
		ls_pcie_setup_ctrl(pcie);

	if (!ls_pcie_link_up(pcie)) {
		/* Let the user know there's no PCIe link */
		printf(": no link\n");
		return 0;
	}

	/* Print the negotiated PCIe link width */
	link_sta = readw(pcie->dbi + PCIE_LINK_STA);
	printf(": x%d gen%d\n", (link_sta & PCIE_LINK_WIDTH_MASK) >> 4,
	       link_sta & PCIE_LINK_SPEED_MASK);

	return 0;
}
585*4882a593Smuzhiyun
/* Config-space accessors exposed to the DM PCI uclass */
static const struct dm_pci_ops ls_pcie_ops = {
	.read_config = ls_pcie_read_config,
	.write_config = ls_pcie_write_config,
};
590*4882a593Smuzhiyun
/* Device tree match table */
static const struct udevice_id ls_pcie_ids[] = {
	{ .compatible = "fsl,ls-pcie" },
	{ }
};
595*4882a593Smuzhiyun
U_BOOT_DRIVER(pci_layerscape) = {
	.name = "pci_layerscape",
	.id = UCLASS_PCI,
	.of_match = ls_pcie_ids,
	.ops = &ls_pcie_ops,
	.probe = ls_pcie_probe,
	/* The uclass allocates one struct ls_pcie per device as priv data */
	.priv_auto_alloc_size = sizeof(struct ls_pcie),
};
604