// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Mobiveil PCIe Host controller
 *
 * Copyright (c) 2018 Mobiveil Inc.
 * Copyright 2019 NXP
 *
 * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
 *	   Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
 */

12*4882a593Smuzhiyun #include <linux/delay.h>
13*4882a593Smuzhiyun #include <linux/init.h>
14*4882a593Smuzhiyun #include <linux/kernel.h>
15*4882a593Smuzhiyun #include <linux/pci.h>
16*4882a593Smuzhiyun #include <linux/platform_device.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include "pcie-mobiveil.h"
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun /*
21*4882a593Smuzhiyun * mobiveil_pcie_sel_page - routine to access paged register
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged,
24*4882a593Smuzhiyun * for this scheme to work extracted higher 6 bits of the offset will be
25*4882a593Smuzhiyun * written to pg_sel field of PAB_CTRL register and rest of the lower 10
26*4882a593Smuzhiyun * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register.
27*4882a593Smuzhiyun */
mobiveil_pcie_sel_page(struct mobiveil_pcie * pcie,u8 pg_idx)28*4882a593Smuzhiyun static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
29*4882a593Smuzhiyun {
30*4882a593Smuzhiyun u32 val;
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
33*4882a593Smuzhiyun val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
34*4882a593Smuzhiyun val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun
mobiveil_pcie_comp_addr(struct mobiveil_pcie * pcie,u32 off)39*4882a593Smuzhiyun static void __iomem *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie,
40*4882a593Smuzhiyun u32 off)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun if (off < PAGED_ADDR_BNDRY) {
43*4882a593Smuzhiyun /* For directly accessed registers, clear the pg_sel field */
44*4882a593Smuzhiyun mobiveil_pcie_sel_page(pcie, 0);
45*4882a593Smuzhiyun return pcie->csr_axi_slave_base + off;
46*4882a593Smuzhiyun }
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
49*4882a593Smuzhiyun return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
50*4882a593Smuzhiyun }
51*4882a593Smuzhiyun
/*
 * Width-dispatched MMIO read. Returns PCIBIOS_SUCCESSFUL on success;
 * on a misaligned address or unsupported width, *val is 0 and
 * PCIBIOS_BAD_REGISTER_NUMBER is returned.
 */
static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
{
	*val = 0;

	/* The access must be naturally aligned for its width. */
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	switch (size) {
	case 1:
		*val = readb(addr);
		break;
	case 2:
		*val = readw(addr);
		break;
	case 4:
		*val = readl(addr);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
76*4882a593Smuzhiyun
/*
 * Width-dispatched MMIO write. Returns PCIBIOS_SUCCESSFUL on success,
 * PCIBIOS_BAD_REGISTER_NUMBER on a misaligned address or unsupported width.
 */
static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
{
	/* The access must be naturally aligned for its width. */
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	switch (size) {
	case 1:
		writeb(val, addr);
		break;
	case 2:
		writew(val, addr);
		break;
	case 4:
		writel(val, addr);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
98*4882a593Smuzhiyun
/*
 * Read a CSR of the given width, resolving paged offsets first.
 * Failures are logged; the (zeroed) value is returned regardless.
 */
u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
{
	void __iomem *addr = mobiveil_pcie_comp_addr(pcie, off);
	u32 val;

	if (mobiveil_pcie_read(addr, size, &val))
		dev_err(&pcie->pdev->dev, "read CSR address failed\n");

	return val;
}
113*4882a593Smuzhiyun
/*
 * Write a CSR of the given width, resolving paged offsets first.
 * Failures are logged but not propagated to the caller.
 */
void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
			size_t size)
{
	void __iomem *addr = mobiveil_pcie_comp_addr(pcie, off);

	if (mobiveil_pcie_write(addr, size, val))
		dev_err(&pcie->pdev->dev, "write CSR address failed\n");
}
126*4882a593Smuzhiyun
mobiveil_pcie_link_up(struct mobiveil_pcie * pcie)127*4882a593Smuzhiyun bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
128*4882a593Smuzhiyun {
129*4882a593Smuzhiyun if (pcie->ops->link_up)
130*4882a593Smuzhiyun return pcie->ops->link_up(pcie);
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
133*4882a593Smuzhiyun LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun
/*
 * Program inbound (PEX->AXI) address translation window @win_num to map
 * @size bytes at @pci_addr onto @cpu_addr with the given window @type.
 */
void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
			u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
{
	u64 win_size = ~(size - 1);
	u32 ctrl;

	if (win_num >= pcie->ppio_wins) {
		dev_err(&pcie->pdev->dev,
			"ERROR: max inbound windows reached !\n");
		return;
	}

	/* Set the window type, enable bit and the low size bits. */
	ctrl = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
	ctrl &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
	ctrl |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
		(lower_32_bits(win_size) & WIN_SIZE_MASK);
	mobiveil_csr_writel(pcie, ctrl, PAB_PEX_AMAP_CTRL(win_num));

	/* Upper half of the window size. */
	mobiveil_csr_writel(pcie, upper_32_bits(win_size),
			    PAB_EXT_PEX_AMAP_SIZEN(win_num));

	/* AXI-side (CPU) base address, low then high. */
	mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
			    PAB_PEX_AMAP_AXI_WIN(win_num));
	mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
			    PAB_EXT_PEX_AMAP_AXI_WIN(win_num));

	/* PEX-side (PCI) base address, low then high. */
	mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
			    PAB_PEX_AMAP_PEX_WIN_L(win_num));
	mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
			    PAB_PEX_AMAP_PEX_WIN_H(win_num));

	pcie->ib_wins_configured++;
}
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun /*
171*4882a593Smuzhiyun * routine to program the outbound windows
172*4882a593Smuzhiyun */
program_ob_windows(struct mobiveil_pcie * pcie,int win_num,u64 cpu_addr,u64 pci_addr,u32 type,u64 size)173*4882a593Smuzhiyun void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
174*4882a593Smuzhiyun u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun u32 value;
177*4882a593Smuzhiyun u64 size64 = ~(size - 1);
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun if (win_num >= pcie->apio_wins) {
180*4882a593Smuzhiyun dev_err(&pcie->pdev->dev,
181*4882a593Smuzhiyun "ERROR: max outbound windows reached !\n");
182*4882a593Smuzhiyun return;
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun /*
186*4882a593Smuzhiyun * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
187*4882a593Smuzhiyun * to 4 KB in PAB_AXI_AMAP_CTRL register
188*4882a593Smuzhiyun */
189*4882a593Smuzhiyun value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
190*4882a593Smuzhiyun value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
191*4882a593Smuzhiyun value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
192*4882a593Smuzhiyun (lower_32_bits(size64) & WIN_SIZE_MASK);
193*4882a593Smuzhiyun mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun mobiveil_csr_writel(pcie, upper_32_bits(size64),
196*4882a593Smuzhiyun PAB_EXT_AXI_AMAP_SIZE(win_num));
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun /*
199*4882a593Smuzhiyun * program AXI window base with appropriate value in
200*4882a593Smuzhiyun * PAB_AXI_AMAP_AXI_WIN0 register
201*4882a593Smuzhiyun */
202*4882a593Smuzhiyun mobiveil_csr_writel(pcie,
203*4882a593Smuzhiyun lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
204*4882a593Smuzhiyun PAB_AXI_AMAP_AXI_WIN(win_num));
205*4882a593Smuzhiyun mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
206*4882a593Smuzhiyun PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
209*4882a593Smuzhiyun PAB_AXI_AMAP_PEX_WIN_L(win_num));
210*4882a593Smuzhiyun mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
211*4882a593Smuzhiyun PAB_AXI_AMAP_PEX_WIN_H(win_num));
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun pcie->ob_wins_configured++;
214*4882a593Smuzhiyun }
215*4882a593Smuzhiyun
mobiveil_bringup_link(struct mobiveil_pcie * pcie)216*4882a593Smuzhiyun int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
217*4882a593Smuzhiyun {
218*4882a593Smuzhiyun int retries;
219*4882a593Smuzhiyun
220*4882a593Smuzhiyun /* check if the link is up or not */
221*4882a593Smuzhiyun for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
222*4882a593Smuzhiyun if (mobiveil_pcie_link_up(pcie))
223*4882a593Smuzhiyun return 0;
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
226*4882a593Smuzhiyun }
227*4882a593Smuzhiyun
228*4882a593Smuzhiyun dev_err(&pcie->pdev->dev, "link never came up\n");
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun return -ETIMEDOUT;
231*4882a593Smuzhiyun }
232