// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *	   Honghui Zhang <honghui.zhang@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "../pci.h"

/* PCIe shared registers */
#define PCIE_SYS_CFG		0x00
#define PCIE_INT_ENABLE		0x0c
#define PCIE_CFG_ADDR		0x20
#define PCIE_CFG_DATA		0x24

/* PCIe per port registers */
#define PCIE_BAR0_SETUP		0x10
#define PCIE_CLASS		0x34
#define PCIE_LINK_STATUS	0x50

#define PCIE_PORT_INT_EN(x)	BIT(20 + (x))
#define PCIE_PORT_PERST(x)	BIT(1 + (x))
#define PCIE_PORT_LINKUP	BIT(0)
#define PCIE_BAR_MAP_MAX	GENMASK(31, 16)

#define PCIE_BAR_ENABLE		BIT(0)
#define PCIE_REVISION_ID	BIT(0)
#define PCIE_CLASS_CODE		(0x60400 << 8)
#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
				((((regn) >> 8) & GENMASK(3, 0)) << 24))
#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
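
/*
 * Example: PCIE_CONF_ADDR(0x10, 0, 1, 0) evaluates to 0x810, i.e. register
 * 0x10 (BAR0) of function 0, device 1, bus 0. Extended register bits [11:8]
 * are placed in bits [27:24] of the encoded address.
 */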

/* MediaTek-specific configuration registers */
#define PCIE_FTS_NUM		0x70c
#define PCIE_FTS_NUM_MASK	GENMASK(15, 8)
#define PCIE_FTS_NUM_L0(x)	((x) & 0xff << 8)

#define PCIE_FC_CREDIT		0x73c
#define PCIE_FC_CREDIT_MASK	(GENMASK(31, 31) | GENMASK(28, 16))
#define PCIE_FC_CREDIT_VAL(x)	((x) << 16)

/* PCIe V2 shared registers */
#define PCIE_SYS_CFG_V2		0x0
#define PCIE_CSR_LTSSM_EN(x)	BIT(0 + (x) * 8)
#define PCIE_CSR_ASPM_L1_EN(x)	BIT(1 + (x) * 8)

/* PCIe V2 per-port registers */
#define PCIE_MSI_VECTOR		0x0c0

#define PCIE_CONF_VEND_ID	0x100
#define PCIE_CONF_DEVICE_ID	0x102
#define PCIE_CONF_CLASS_ID	0x106

#define PCIE_INT_MASK		0x420
#define INTX_MASK		GENMASK(19, 16)
#define INTX_SHIFT		16
#define PCIE_INT_STATUS		0x424
#define MSI_STATUS		BIT(23)
#define PCIE_IMSI_STATUS	0x42c
#define PCIE_IMSI_ADDR		0x430
#define MSI_MASK		BIT(23)
#define MTK_MSI_IRQS_NUM	32

#define PCIE_AHB_TRANS_BASE0_L	0x438
#define PCIE_AHB_TRANS_BASE0_H	0x43c
#define AHB2PCIE_SIZE(x)	((x) & GENMASK(4, 0))
#define PCIE_AXI_WINDOW0	0x448
#define WIN_ENABLE		BIT(7)
/*
 * Define the PCIe-to-AHB window size as 2^33 to support up to 8GB of
 * translated address space; this guarantees at least 4GB of DRAM is
 * reachable by EP DMA (physical DRAM starts at 0x40000000).
 */
#define PCIE2AHB_SIZE	0x21

/* PCIe V2 configuration transaction header */
#define PCIE_CFG_HEADER0	0x460
#define PCIE_CFG_HEADER1	0x464
#define PCIE_CFG_HEADER2	0x468
#define PCIE_CFG_WDATA		0x470
#define PCIE_APP_TLP_REQ	0x488
#define PCIE_CFG_RDATA		0x48c
#define APP_CFG_REQ		BIT(0)
#define APP_CPL_STATUS		GENMASK(7, 5)

#define CFG_WRRD_TYPE_0		4
#define CFG_WR_FMT		2
#define CFG_RD_FMT		0

#define CFG_DW0_LENGTH(length)	((length) & GENMASK(9, 0))
#define CFG_DW0_TYPE(type)	(((type) << 24) & GENMASK(28, 24))
#define CFG_DW0_FMT(fmt)	(((fmt) << 29) & GENMASK(31, 29))
#define CFG_DW2_REGN(regn)	((regn) & GENMASK(11, 2))
#define CFG_DW2_FUN(fun)	(((fun) << 16) & GENMASK(18, 16))
#define CFG_DW2_DEV(dev)	(((dev) << 19) & GENMASK(23, 19))
#define CFG_DW2_BUS(bus)	(((bus) << 24) & GENMASK(31, 24))
#define CFG_HEADER_DW0(type, fmt) \
	(CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
#define CFG_HEADER_DW1(where, size) \
	(GENMASK(((size) - 1), 0) << ((where) & 0x3))
#define CFG_HEADER_DW2(regn, fun, dev, bus) \
	(CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
	CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))
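
/*
 * CFG_HEADER_DW1() encodes the TLP first-DW byte enables: e.g. a 2-byte
 * access at config offset 0x102 yields GENMASK(1, 0) << 2 == 0xc, enabling
 * bytes 2 and 3 of the addressed dword.
 */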

#define PCIE_RST_CTRL		0x510
#define PCIE_PHY_RSTB		BIT(0)
#define PCIE_PIPE_SRSTB		BIT(1)
#define PCIE_MAC_SRSTB		BIT(2)
#define PCIE_CRSTB		BIT(3)
#define PCIE_PERSTB		BIT(8)
#define PCIE_LINKDOWN_RST_EN	GENMASK(15, 13)
#define PCIE_LINK_STATUS_V2	0x804
#define PCIE_PORT_LINKUP_V2	BIT(10)

struct mtk_pcie_port;
/**
 * struct mtk_pcie_soc - differentiate between host generations
 * @need_fix_class_id: whether this host's class ID needs to be fixed
 * @need_fix_device_id: whether this host's device ID needs to be fixed
 * @device_id: device ID to program when @need_fix_device_id is set
 * @ops: pointer to configuration access functions
 * @startup: pointer to controller setup functions
 * @setup_irq: pointer to IRQ initialization functions
 */
struct mtk_pcie_soc {
	bool need_fix_class_id;
	bool need_fix_device_id;
	unsigned int device_id;
	struct pci_ops *ops;
	int (*startup)(struct mtk_pcie_port *port);
	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
};

/**
 * struct mtk_pcie_port - PCIe port information
 * @base: IO mapped register base
 * @list: port list
 * @pcie: pointer to PCIe host info
 * @reset: pointer to port reset control
 * @sys_ck: pointer to transaction/data link layer clock
 * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
 *          and RC initiated MMIO access
 * @axi_ck: pointer to application layer MMIO channel operating clock
 * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
 *          when pcie_mac_ck/pcie_pipe_ck is turned off
 * @obff_ck: pointer to OBFF functional block operating clock
 * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
 * @phy: pointer to PHY control block
 * @slot: port slot
 * @irq: GIC irq
 * @irq_domain: legacy INTx IRQ domain
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @lock: protect the msi_irq_in_use bitmap
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	void __iomem *base;
	struct list_head list;
	struct mtk_pcie *pcie;
	struct reset_control *reset;
	struct clk *sys_ck;
	struct clk *ahb_ck;
	struct clk *axi_ck;
	struct clk *aux_ck;
	struct clk *obff_ck;
	struct clk *pipe_ck;
	struct phy *phy;
	u32 slot;
	int irq;
	struct irq_domain *irq_domain;
	struct irq_domain *inner_domain;
	struct irq_domain *msi_domain;
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};

/**
 * struct mtk_pcie - PCIe host information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @free_ck: free-run reference clock
 * @ports: pointer to PCIe port information
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_pcie {
	struct device *dev;
	void __iomem *base;
	struct clk *free_ck;

	struct list_head ports;
	const struct mtk_pcie_soc *soc;
};

static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;

	clk_disable_unprepare(pcie->free_ck);

	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
}

static void mtk_pcie_port_free(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	list_del(&port->list);
	devm_kfree(dev, port);
}

static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_power_off(port->phy);
		phy_exit(port->phy);
		clk_disable_unprepare(port->pipe_ck);
		clk_disable_unprepare(port->obff_ck);
		clk_disable_unprepare(port->axi_ck);
		clk_disable_unprepare(port->aux_ck);
		clk_disable_unprepare(port->ahb_ck);
		clk_disable_unprepare(port->sys_ck);
		mtk_pcie_port_free(port);
	}

	mtk_pcie_subsys_powerdown(pcie);
}

static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
{
	u32 val;
	int err;

	err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
					!(val & APP_CFG_REQ), 10,
					100 * USEC_PER_MSEC);
	if (err)
		return PCIBIOS_SET_FAILED;

	if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
		return PCIBIOS_SET_FAILED;

	return PCIBIOS_SUCCESSFUL;
}

static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 *val)
{
	u32 tmp;

	/* Write PCIe configuration transaction header for Cfgrd */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Trigger h/w to transmit Cfgrd TLP */
	tmp = readl(port->base + PCIE_APP_TLP_REQ);
	tmp |= APP_CFG_REQ;
	writel(tmp, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	if (mtk_pcie_check_cfg_cpld(port))
		return PCIBIOS_SET_FAILED;

	/* Read cpld payload of Cfgrd */
	*val = readl(port->base + PCIE_CFG_RDATA);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 val)
{
	/* Write PCIe configuration transaction header for Cfgwr */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Write Cfgwr data */
	val = val << 8 * (where & 3);
	writel(val, port->base + PCIE_CFG_WDATA);

	/* Trigger h/w to transmit Cfgwr TLP */
	val = readl(port->base + PCIE_APP_TLP_REQ);
	val |= APP_CFG_REQ;
	writel(val, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	return mtk_pcie_check_cfg_cpld(port);
}

static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
						unsigned int devfn)
{
	struct mtk_pcie *pcie = bus->sysdata;
	struct mtk_pcie_port *port;
	struct pci_dev *dev = NULL;

	/*
	 * Walk the bus hierarchy to get the devfn value
	 * of the port in the root bus.
	 */
	while (bus && bus->number) {
		dev = bus->self;
		bus = dev->bus;
		devfn = dev->devfn;
	}

	list_for_each_entry(port, &pcie->ports, list)
		if (port->slot == PCI_SLOT(devfn))
			return port;

	return NULL;
}

static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	struct mtk_pcie_port *port;
	u32 bn = bus->number;
	int ret;

	port = mtk_pcie_find_port(bus, devfn);
	if (!port) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
	if (ret)
		*val = ~0;

	return ret;
}

static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct mtk_pcie_port *port;
	u32 bn = bus->number;

	port = mtk_pcie_find_port(bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
}

static struct pci_ops mtk_pcie_ops_v2 = {
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};

static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr;

	/* MT2712/MT7622 only support 32-bit MSI addresses */
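	/*
	 * The MSI "doorbell" is the physical address of the port's
	 * PCIE_MSI_VECTOR register: a memory write from the endpoint to this
	 * address is captured by the host and raised as an MSI (see
	 * mtk_pcie_enable_msi(), which programs the same address into
	 * PCIE_IMSI_ADDR).
	 */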
	addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	msg->address_hi = 0;
	msg->address_lo = lower_32_bits(addr);

	msg->data = data->hwirq;

	dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int mtk_msi_set_affinity(struct irq_data *irq_data,
				const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void mtk_msi_ack_irq(struct irq_data *data)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	u32 hwirq = data->hwirq;

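	/* The per-vector IMSI status bits are write-one-to-clear */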
	writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
}

static struct irq_chip mtk_msi_bottom_irq_chip = {
	.name			= "MTK MSI",
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.irq_set_affinity	= mtk_msi_set_affinity,
	.irq_ack		= mtk_msi_ack_irq,
};

static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				     unsigned int nr_irqs, void *args)
{
	struct mtk_pcie_port *port = domain->host_data;
	unsigned long bit;

	WARN_ON(nr_irqs != 1);
	mutex_lock(&port->lock);

	bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
	if (bit >= MTK_MSI_IRQS_NUM) {
		mutex_unlock(&port->lock);
		return -ENOSPC;
	}

	__set_bit(bit, port->msi_irq_in_use);

	mutex_unlock(&port->lock);

	irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq,
			    NULL, NULL);

	return 0;
}

static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);

	mutex_lock(&port->lock);

	if (!test_bit(d->hwirq, port->msi_irq_in_use))
		dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
			d->hwirq);
	else
		__clear_bit(d->hwirq, port->msi_irq_in_use);

	mutex_unlock(&port->lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= mtk_pcie_irq_domain_alloc,
	.free	= mtk_pcie_irq_domain_free,
};

static struct irq_chip mtk_msi_irq_chip = {
	.name		= "MTK PCIe MSI",
	.irq_ack	= irq_chip_ack_parent,
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info mtk_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &mtk_msi_irq_chip,
};

static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
	struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);

	mutex_init(&port->lock);

	port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
						      &msi_domain_ops, port);
	if (!port->inner_domain) {
		dev_err(port->pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
						     port->inner_domain);
	if (!port->msi_domain) {
		dev_err(port->pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(port->inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
	u32 val;
	phys_addr_t msg_addr;

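	/*
	 * Program the capture address for inbound MSI writes; this must match
	 * the doorbell address reported to endpoints in mtk_compose_msi_msg().
	 */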
	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	val = lower_32_bits(msg_addr);
	writel(val, port->base + PCIE_IMSI_ADDR);

	val = readl(port->base + PCIE_INT_MASK);
	val &= ~MSI_MASK;
	writel(val, port->base + PCIE_INT_MASK);
}

static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		irq_set_chained_handler_and_data(port->irq, NULL, NULL);

		if (port->irq_domain)
			irq_domain_remove(port->irq_domain);

		if (IS_ENABLED(CONFIG_PCI_MSI)) {
			if (port->msi_domain)
				irq_domain_remove(port->msi_domain);
			if (port->inner_domain)
				irq_domain_remove(port->inner_domain);
		}

		irq_dispose_mapping(port->irq);
	}
}

static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};

static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
				    struct device_node *node)
{
	struct device *dev = port->pcie->dev;
	struct device_node *pcie_intc_node;
	int ret;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "no PCIe Intc node found\n");
		return -ENODEV;
	}

	port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						 &intx_domain_ops, port);
	of_node_put(pcie_intc_node);
	if (!port->irq_domain) {
		dev_err(dev, "failed to get INTx IRQ domain\n");
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		ret = mtk_pcie_allocate_msi_domains(port);
		if (ret)
			return ret;
	}

	return 0;
}

static void mtk_pcie_intr_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	u32 virq;
	u32 bit = INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	status = readl(port->base + PCIE_INT_STATUS);
	if (status & INTX_MASK) {
		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
			/* Clear the INTx */
			writel(1 << bit, port->base + PCIE_INT_STATUS);
			virq = irq_find_mapping(port->irq_domain,
						bit - INTX_SHIFT);
			generic_handle_irq(virq);
		}
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (status & MSI_STATUS) {
			unsigned long imsi_status;

			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
					virq = irq_find_mapping(port->inner_domain, bit);
					generic_handle_irq(virq);
				}
			}
			/* Clear MSI interrupt status */
			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
		}
	}

	chained_irq_exit(irqchip, desc);
}

static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
			      struct device_node *node)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err;

	err = mtk_pcie_init_irq_domain(port, node);
	if (err) {
		dev_err(dev, "failed to init PCIe IRQ domain\n");
		return err;
	}

	port->irq = platform_get_irq(pdev, port->slot);
	if (port->irq < 0)
		return port->irq;

	irq_set_chained_handler_and_data(port->irq,
					 mtk_pcie_intr_handler, port);

	return 0;
}

static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct resource *mem = NULL;
	struct resource_entry *entry;
	const struct mtk_pcie_soc *soc = port->pcie->soc;
	u32 val;
	int err;

	entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
	if (entry)
		mem = entry->res;
	if (!mem)
		return -EINVAL;

	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
	if (pcie->base) {
		val = readl(pcie->base + PCIE_SYS_CFG_V2);
		val |= PCIE_CSR_LTSSM_EN(port->slot) |
		       PCIE_CSR_ASPM_L1_EN(port->slot);
		writel(val, pcie->base + PCIE_SYS_CFG_V2);
	}

	/* Assert all reset signals */
	writel(0, port->base + PCIE_RST_CTRL);

	/*
	 * Enable PCIe link down reset: if the link status changes from link
	 * up to link down, this resets the MAC control registers and
	 * configuration space.
	 */
	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);

	/* De-assert PHY, PE, PIPE, MAC and configuration reset */
	val = readl(port->base + PCIE_RST_CTRL);
	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
	       PCIE_MAC_SRSTB | PCIE_CRSTB;
	writel(val, port->base + PCIE_RST_CTRL);

	/* Set up vendor ID and class code */
	if (soc->need_fix_class_id) {
		val = PCI_VENDOR_ID_MEDIATEK;
		writew(val, port->base + PCIE_CONF_VEND_ID);

		val = PCI_CLASS_BRIDGE_PCI;
		writew(val, port->base + PCIE_CONF_CLASS_ID);
	}

	if (soc->need_fix_device_id)
		writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
				 !!(val & PCIE_PORT_LINKUP_V2), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* Set INTx mask */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~INTX_MASK;
	writel(val, port->base + PCIE_INT_MASK);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		mtk_pcie_enable_msi(port);

	/* Set AHB to PCIe translation windows */
	val = lower_32_bits(mem->start) |
	      AHB2PCIE_SIZE(fls(resource_size(mem)));
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);

	val = upper_32_bits(mem->start);
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);

	/* Set PCIe to AXI translation memory space */
	val = PCIE2AHB_SIZE | WIN_ENABLE;
	writel(val, port->base + PCIE_AXI_WINDOW0);

	return 0;
}

static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
				      unsigned int devfn, int where)
{
	struct mtk_pcie *pcie = bus->sysdata;

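	/*
	 * v1 hosts use an indirect register pair: the target bus/device/
	 * function and register number are latched into PCIE_CFG_ADDR, and
	 * the access is then performed through the PCIE_CFG_DATA window at
	 * the byte offset within the addressed dword.
	 */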
	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
			      bus->number), pcie->base + PCIE_CFG_ADDR);

	return pcie->base + PCIE_CFG_DATA + (where & 3);
}

static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	u32 func = PCI_FUNC(port->slot << 3);
	u32 slot = PCI_SLOT(port->slot << 3);
	u32 val;
	int err;

	/* assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val |= PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* de-assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val &= ~PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* enable interrupt */
	val = readl(pcie->base + PCIE_INT_ENABLE);
	val |= PCIE_PORT_INT_EN(port->slot);
	writel(val, pcie->base + PCIE_INT_ENABLE);

	/* map to all DDR region. We need to set it before cfg operation. */
	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
	       port->base + PCIE_BAR0_SETUP);

	/* configure class code and revision ID */
	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);

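	/*
	 * The port configuration registers below are reached through the same
	 * indirect PCIE_CFG_ADDR/PCIE_CFG_DATA pair: latch the address, read
	 * the data window, then latch the address again before writing the
	 * modified value back.
	 */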
	/* configure FC credit */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FC_CREDIT_MASK;
	val |= PCIE_FC_CREDIT_VAL(0x806c);
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	/* configure RC FTS number to 250 when it leaves L0s */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FTS_NUM_MASK;
	val |= PCIE_FTS_NUM_L0(0x50);
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	return 0;
}

static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	int err;

	err = clk_prepare_enable(port->sys_ck);
	if (err) {
		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
		goto err_sys_clk;
	}

	err = clk_prepare_enable(port->ahb_ck);
	if (err) {
		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
		goto err_ahb_clk;
	}

	err = clk_prepare_enable(port->aux_ck);
	if (err) {
		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
		goto err_aux_clk;
	}

	err = clk_prepare_enable(port->axi_ck);
	if (err) {
		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
		goto err_axi_clk;
	}

	err = clk_prepare_enable(port->obff_ck);
	if (err) {
		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
		goto err_obff_clk;
	}

	err = clk_prepare_enable(port->pipe_ck);
	if (err) {
		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
		goto err_pipe_clk;
	}

	reset_control_assert(port->reset);
	reset_control_deassert(port->reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", port->slot);
		goto err_phy_on;
	}

	if (!pcie->soc->startup(port))
		return;

	dev_info(dev, "Port%d link down\n", port->slot);

	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	clk_disable_unprepare(port->pipe_ck);
err_pipe_clk:
	clk_disable_unprepare(port->obff_ck);
err_obff_clk:
	clk_disable_unprepare(port->axi_ck);
err_axi_clk:
	clk_disable_unprepare(port->aux_ck);
err_aux_clk:
	clk_disable_unprepare(port->ahb_ck);
err_ahb_clk:
	clk_disable_unprepare(port->sys_ck);
err_sys_clk:
	mtk_pcie_port_free(port);
}

static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
			       struct device_node *node,
			       int slot)
{
	struct mtk_pcie_port *port;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	char name[10];
	int err;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	snprintf(name, sizeof(name), "port%d", slot);
	port->base = devm_platform_ioremap_resource_byname(pdev, name);
	if (IS_ERR(port->base)) {
		dev_err(dev, "failed to map port%d base\n", slot);
		return PTR_ERR(port->base);
	}

	snprintf(name, sizeof(name), "sys_ck%d", slot);
	port->sys_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->sys_ck)) {
		dev_err(dev, "failed to get sys_ck%d clock\n", slot);
		return PTR_ERR(port->sys_ck);
	}

	/* sys_ck might be divided into the following parts in some chips */
	snprintf(name, sizeof(name), "ahb_ck%d", slot);
	port->ahb_ck = devm_clk_get_optional(dev, name);
	if (IS_ERR(port->ahb_ck))
		return PTR_ERR(port->ahb_ck);

	snprintf(name, sizeof(name), "axi_ck%d", slot);
	port->axi_ck = devm_clk_get_optional(dev, name);
	if (IS_ERR(port->axi_ck))
		return PTR_ERR(port->axi_ck);

	snprintf(name, sizeof(name), "aux_ck%d", slot);
	port->aux_ck = devm_clk_get_optional(dev, name);
	if (IS_ERR(port->aux_ck))
		return PTR_ERR(port->aux_ck);

	snprintf(name, sizeof(name), "obff_ck%d", slot);
	port->obff_ck = devm_clk_get_optional(dev, name);
	if (IS_ERR(port->obff_ck))
		return PTR_ERR(port->obff_ck);

	snprintf(name, sizeof(name), "pipe_ck%d", slot);
	port->pipe_ck = devm_clk_get_optional(dev, name);
	if (IS_ERR(port->pipe_ck))
		return PTR_ERR(port->pipe_ck);

	snprintf(name, sizeof(name), "pcie-rst%d", slot);
	port->reset = devm_reset_control_get_optional_exclusive(dev, name);
	if (PTR_ERR(port->reset) == -EPROBE_DEFER)
		return PTR_ERR(port->reset);

	/* some platforms may use default PHY setting */
	snprintf(name, sizeof(name), "pcie-phy%d", slot);
	port->phy = devm_phy_optional_get(dev, name);
	if (IS_ERR(port->phy))
		return PTR_ERR(port->phy);

	port->slot = slot;
	port->pcie = pcie;

	if (pcie->soc->setup_irq) {
		err = pcie->soc->setup_irq(port, node);
		if (err)
			return err;
	}

	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	int err;

	/* get shared registers, which are optional */
	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
	if (regs) {
		pcie->base = devm_ioremap_resource(dev, regs);
		if (IS_ERR(pcie->base)) {
			dev_err(dev, "failed to map shared register\n");
			return PTR_ERR(pcie->base);
		}
	}

	pcie->free_ck = devm_clk_get(dev, "free_ck");
	if (IS_ERR(pcie->free_ck)) {
		if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		pcie->free_ck = NULL;
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/* enable top level clock */
	err = clk_prepare_enable(pcie->free_ck);
	if (err) {
		dev_err(dev, "failed to enable free_ck\n");
		goto err_free_ck;
	}

	return 0;

err_free_ck:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return err;
}

static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node, *child;
	struct mtk_pcie_port *port, *tmp;
	int err;

	for_each_available_child_of_node(node, child) {
		int slot;

		err = of_pci_get_devfn(child);
		if (err < 0) {
			dev_err(dev, "failed to parse devfn: %d\n", err);
			goto error_put_node;
		}

		slot = PCI_SLOT(err);

		err = mtk_pcie_parse_port(pcie, child, slot);
		if (err)
			goto error_put_node;
	}

	err = mtk_pcie_subsys_powerup(pcie);
	if (err)
		return err;

	/* enable each port, and then check link status */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		mtk_pcie_enable_port(port);

	/* power down PCIe subsys if slots are all empty (link down) */
	if (list_empty(&pcie->ports))
		mtk_pcie_subsys_powerdown(pcie);

	return 0;
error_put_node:
	of_node_put(child);
	return err;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_pcie *pcie;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	pcie->dev = dev;
	pcie->soc = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, pcie);
	INIT_LIST_HEAD(&pcie->ports);

	err = mtk_pcie_setup(pcie);
	if (err)
		return err;

	host->ops = pcie->soc->ops;
	host->sysdata = pcie;

	err = pci_host_probe(host);
	if (err)
		goto put_resources;

	return 0;

put_resources:
	if (!list_empty(&pcie->ports))
		mtk_pcie_put_resources(pcie);

	return err;
}

static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;

	pci_free_resource_list(windows);
}

static int mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	mtk_pcie_free_resources(pcie);

	mtk_pcie_irq_teardown(pcie);

	mtk_pcie_put_resources(pcie);

	return 0;
}

static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
{
	struct mtk_pcie *pcie = dev_get_drvdata(dev);
	struct mtk_pcie_port *port;

	if (list_empty(&pcie->ports))
		return 0;

	list_for_each_entry(port, &pcie->ports, list) {
		clk_disable_unprepare(port->pipe_ck);
		clk_disable_unprepare(port->obff_ck);
		clk_disable_unprepare(port->axi_ck);
		clk_disable_unprepare(port->aux_ck);
		clk_disable_unprepare(port->ahb_ck);
		clk_disable_unprepare(port->sys_ck);
		phy_power_off(port->phy);
		phy_exit(port->phy);
	}

	clk_disable_unprepare(pcie->free_ck);

	return 0;
}

static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
{
	struct mtk_pcie *pcie = dev_get_drvdata(dev);
	struct mtk_pcie_port *port, *tmp;

	if (list_empty(&pcie->ports))
		return 0;

	clk_prepare_enable(pcie->free_ck);

	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		mtk_pcie_enable_port(port);

	/* In case an EP was removed while the system was suspended */
	if (list_empty(&pcie->ports))
		clk_disable_unprepare(pcie->free_ck);

	return 0;
}

static const struct dev_pm_ops mtk_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				      mtk_pcie_resume_noirq)
};

static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
	.ops = &mtk_pcie_ops,
	.startup = mtk_pcie_startup_port,
};

static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
	.need_fix_class_id = true,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
	.need_fix_class_id = true,
	.need_fix_device_id = true,
	.device_id = PCI_DEVICE_ID_MEDIATEK_7629,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

static const struct of_device_id mtk_pcie_ids[] = {
	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
	{ .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
	{},
};

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_ids,
		.suppress_bind_attrs = true,
		.pm = &mtk_pcie_pm_ops,
	},
};
module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");