1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Rockchip PCIe PHY driver
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2016 Shawn Lin <shawn.lin@rock-chips.com>
6*4882a593Smuzhiyun * Copyright (C) 2016 ROCKCHIP, Inc.
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/clk.h>
10*4882a593Smuzhiyun #include <linux/delay.h>
11*4882a593Smuzhiyun #include <linux/io.h>
12*4882a593Smuzhiyun #include <linux/mfd/syscon.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/of.h>
15*4882a593Smuzhiyun #include <linux/of_address.h>
16*4882a593Smuzhiyun #include <linux/of_platform.h>
17*4882a593Smuzhiyun #include <linux/phy/phy.h>
18*4882a593Smuzhiyun #include <linux/platform_device.h>
19*4882a593Smuzhiyun #include <linux/regmap.h>
20*4882a593Smuzhiyun #include <linux/reset.h>
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun /*
23*4882a593Smuzhiyun * The higher 16-bit of this register is used for write protection
24*4882a593Smuzhiyun * only if BIT(x + 16) set to 1 the BIT(x) can be written.
25*4882a593Smuzhiyun */
26*4882a593Smuzhiyun #define HIWORD_UPDATE(val, mask, shift) \
27*4882a593Smuzhiyun ((val) << (shift) | (mask) << ((shift) + 16))
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #define PHY_MAX_LANE_NUM 4
30*4882a593Smuzhiyun #define PHY_CFG_DATA_SHIFT 7
31*4882a593Smuzhiyun #define PHY_CFG_ADDR_SHIFT 1
32*4882a593Smuzhiyun #define PHY_CFG_DATA_MASK 0xf
33*4882a593Smuzhiyun #define PHY_CFG_ADDR_MASK 0x3f
34*4882a593Smuzhiyun #define PHY_CFG_RD_MASK 0x3ff
35*4882a593Smuzhiyun #define PHY_CFG_WR_ENABLE 1
36*4882a593Smuzhiyun #define PHY_CFG_WR_DISABLE 1
37*4882a593Smuzhiyun #define PHY_CFG_WR_SHIFT 0
38*4882a593Smuzhiyun #define PHY_CFG_WR_MASK 1
39*4882a593Smuzhiyun #define PHY_CFG_PLL_LOCK 0x10
40*4882a593Smuzhiyun #define PHY_CFG_CLK_TEST 0x10
41*4882a593Smuzhiyun #define PHY_CFG_CLK_SCC 0x12
42*4882a593Smuzhiyun #define PHY_CFG_SEPE_RATE BIT(3)
43*4882a593Smuzhiyun #define PHY_CFG_PLL_100M BIT(3)
44*4882a593Smuzhiyun #define PHY_PLL_LOCKED BIT(9)
45*4882a593Smuzhiyun #define PHY_PLL_OUTPUT BIT(10)
46*4882a593Smuzhiyun #define PHY_LANE_A_STATUS 0x30
47*4882a593Smuzhiyun #define PHY_LANE_B_STATUS 0x31
48*4882a593Smuzhiyun #define PHY_LANE_C_STATUS 0x32
49*4882a593Smuzhiyun #define PHY_LANE_D_STATUS 0x33
50*4882a593Smuzhiyun #define PHY_LANE_RX_DET_SHIFT 11
51*4882a593Smuzhiyun #define PHY_LANE_RX_DET_TH 0x1
52*4882a593Smuzhiyun #define PHY_LANE_IDLE_OFF 0x1
53*4882a593Smuzhiyun #define PHY_LANE_IDLE_MASK 0x1
54*4882a593Smuzhiyun #define PHY_LANE_IDLE_A_SHIFT 3
55*4882a593Smuzhiyun #define PHY_LANE_IDLE_B_SHIFT 4
56*4882a593Smuzhiyun #define PHY_LANE_IDLE_C_SHIFT 5
57*4882a593Smuzhiyun #define PHY_LANE_IDLE_D_SHIFT 6
58*4882a593Smuzhiyun
/*
 * Per-SoC GRF register offsets for the PCIe PHY control/status windows.
 */
struct rockchip_pcie_data {
	unsigned int pcie_conf;		/* PHY config (addr/data/strobe) register */
	unsigned int pcie_status;	/* PHY status readback register */
	unsigned int pcie_laneoff;	/* per-lane idle control register */
};
64*4882a593Smuzhiyun
/*
 * Driver state: one shared PHY block exposing up to PHY_MAX_LANE_NUM
 * per-lane phy instances. pwr_cnt/init_cnt refcount the shared hardware
 * so only the first power_on/init and the last power_off/exit touch it;
 * both counters are protected by pcie_mutex.
 */
struct rockchip_pcie_phy {
	struct rockchip_pcie_data *phy_data;	/* SoC-specific GRF offsets */
	struct regmap *reg_base;		/* GRF syscon regmap */
	struct phy_pcie_instance {
		struct phy *phy;
		u32 index;			/* lane number, used by to_pcie_phy() */
	} phys[PHY_MAX_LANE_NUM];
	struct mutex pcie_mutex;		/* guards counters and HW sequences */
	struct reset_control *phy_rst;
	struct clk *clk_pciephy_ref;
	int pwr_cnt;				/* power_on/power_off refcount */
	int init_cnt;				/* init/exit refcount */
};
78*4882a593Smuzhiyun
to_pcie_phy(struct phy_pcie_instance * inst)79*4882a593Smuzhiyun static struct rockchip_pcie_phy *to_pcie_phy(struct phy_pcie_instance *inst)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun return container_of(inst, struct rockchip_pcie_phy,
82*4882a593Smuzhiyun phys[inst->index]);
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun
rockchip_pcie_phy_of_xlate(struct device * dev,struct of_phandle_args * args)85*4882a593Smuzhiyun static struct phy *rockchip_pcie_phy_of_xlate(struct device *dev,
86*4882a593Smuzhiyun struct of_phandle_args *args)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun struct rockchip_pcie_phy *rk_phy = dev_get_drvdata(dev);
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun if (args->args_count == 0)
91*4882a593Smuzhiyun return rk_phy->phys[0].phy;
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun if (WARN_ON(args->args[0] >= PHY_MAX_LANE_NUM))
94*4882a593Smuzhiyun return ERR_PTR(-ENODEV);
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun return rk_phy->phys[args->args[0]].phy;
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun
99*4882a593Smuzhiyun
/*
 * Write one internal PHY config register through the GRF window.
 *
 * Sequence (order matters): latch address+data into pcie_conf, settle,
 * pulse the write strobe, settle, then release the strobe. The 1 us
 * delays give the PHY time to sample each step.
 */
static inline void phy_wr_cfg(struct rockchip_pcie_phy *rk_phy,
			      u32 addr, u32 data)
{
	/* Present data and address in one protected write. */
	regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
		     HIWORD_UPDATE(data,
				   PHY_CFG_DATA_MASK,
				   PHY_CFG_DATA_SHIFT) |
		     HIWORD_UPDATE(addr,
				   PHY_CFG_ADDR_MASK,
				   PHY_CFG_ADDR_SHIFT));
	udelay(1);
	/* Raise the write strobe to commit the value... */
	regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
		     HIWORD_UPDATE(PHY_CFG_WR_ENABLE,
				   PHY_CFG_WR_MASK,
				   PHY_CFG_WR_SHIFT));
	udelay(1);
	/* ...then drop it again (disable value per SoC definition). */
	regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
		     HIWORD_UPDATE(PHY_CFG_WR_DISABLE,
				   PHY_CFG_WR_MASK,
				   PHY_CFG_WR_SHIFT));
}
121*4882a593Smuzhiyun
/*
 * Read back PHY state through the GRF window: latch the config address
 * into pcie_conf, then sample pcie_status.
 *
 * Fix: @val was returned uninitialized when regmap_read() failed
 * (e.g. regmap I/O error); zero-initialize so the caller never sees
 * stack garbage.
 */
static inline u32 phy_rd_cfg(struct rockchip_pcie_phy *rk_phy,
			     u32 addr)
{
	u32 val = 0;

	regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
		     HIWORD_UPDATE(addr,
				   PHY_CFG_RD_MASK,
				   PHY_CFG_ADDR_SHIFT));
	regmap_read(rk_phy->reg_base,
		    rk_phy->phy_data->pcie_status,
		    &val);
	return val;
}
136*4882a593Smuzhiyun
/*
 * Power off one lane instance.
 *
 * The caller's lane is idled unconditionally; the shared PHY reset is
 * only asserted when the last user powers off (pwr_cnt reaches zero).
 * On reset failure the refcount and the lane-idle bit are rolled back
 * so a retry starts from a consistent state.
 *
 * Returns 0 on success or the reset_control_assert() error.
 */
static int rockchip_pcie_phy_power_off(struct phy *phy)
{
	struct phy_pcie_instance *inst = phy_get_drvdata(phy);
	struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
	int err = 0;

	mutex_lock(&rk_phy->pcie_mutex);

	/* Idle this instance's lane regardless of the shared refcount. */
	regmap_write(rk_phy->reg_base,
		     rk_phy->phy_data->pcie_laneoff,
		     HIWORD_UPDATE(PHY_LANE_IDLE_OFF,
				   PHY_LANE_IDLE_MASK,
				   PHY_LANE_IDLE_A_SHIFT + inst->index));

	/* Other lanes still active: skip the shared shutdown. */
	if (--rk_phy->pwr_cnt)
		goto err_out;

	err = reset_control_assert(rk_phy->phy_rst);
	if (err) {
		dev_err(&phy->dev, "assert phy_rst err %d\n", err);
		goto err_restore;
	}

err_out:
	mutex_unlock(&rk_phy->pcie_mutex);
	return 0;

err_restore:
	/* Undo the refcount drop and re-enable the lane we idled above. */
	rk_phy->pwr_cnt++;
	regmap_write(rk_phy->reg_base,
		     rk_phy->phy_data->pcie_laneoff,
		     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
				   PHY_LANE_IDLE_MASK,
				   PHY_LANE_IDLE_A_SHIFT + inst->index));
	mutex_unlock(&rk_phy->pcie_mutex);
	return err;
}
174*4882a593Smuzhiyun
/*
 * Power on one lane instance.
 *
 * The caller's lane is always taken out of idle; the shared bring-up
 * (reset deassert, PLL lock, reference-clock switch, relock) only runs
 * for the first user. The three polling loops below are order-sensitive:
 * lock -> switch clocks -> relock.
 *
 * Returns 0 on success, or a negative errno; on failure the shared
 * reset is re-asserted and the refcount rolled back.
 */
static int rockchip_pcie_phy_power_on(struct phy *phy)
{
	struct phy_pcie_instance *inst = phy_get_drvdata(phy);
	struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
	int err = 0;
	u32 status;
	unsigned long timeout;

	mutex_lock(&rk_phy->pcie_mutex);

	/* Un-idle this instance's lane regardless of the shared refcount. */
	regmap_write(rk_phy->reg_base,
		     rk_phy->phy_data->pcie_laneoff,
		     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
				   PHY_LANE_IDLE_MASK,
				   PHY_LANE_IDLE_A_SHIFT + inst->index));

	/* Shared PLL bring-up already done by an earlier caller. */
	if (rk_phy->pwr_cnt++)
		goto err_out;

	err = reset_control_deassert(rk_phy->phy_rst);
	if (err) {
		dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
		goto err_pwr_cnt;
	}

	/* Select the PLL-lock word so pcie_status reflects lock state. */
	regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
		     HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
				   PHY_CFG_ADDR_MASK,
				   PHY_CFG_ADDR_SHIFT));

	/*
	 * No documented timeout value for phy operation below,
	 * so we make it large enough here. And we use loop-break
	 * method which should not be harmful.
	 *
	 * NOTE(review): the same 1 s deadline is shared by all three
	 * polling loops; later loops only get whatever budget remains.
	 * Presumably intentional — confirm against the PHY datasheet.
	 */
	timeout = jiffies + msecs_to_jiffies(1000);

	/* 1) Wait for initial PLL lock. */
	err = -EINVAL;
	while (time_before(jiffies, timeout)) {
		regmap_read(rk_phy->reg_base,
			    rk_phy->phy_data->pcie_status,
			    &status);
		if (status & PHY_PLL_LOCKED) {
			dev_dbg(&phy->dev, "pll locked!\n");
			err = 0;
			break;
		}
		msleep(20);
	}

	if (err) {
		dev_err(&phy->dev, "pll lock timeout!\n");
		goto err_pll_lock;
	}

	/* 2) Switch to separate rate / 100M PLL reference. */
	phy_wr_cfg(rk_phy, PHY_CFG_CLK_TEST, PHY_CFG_SEPE_RATE);
	phy_wr_cfg(rk_phy, PHY_CFG_CLK_SCC, PHY_CFG_PLL_100M);

	/* Wait for the PLL_OUTPUT bit to clear (output switch done). */
	err = -ETIMEDOUT;
	while (time_before(jiffies, timeout)) {
		regmap_read(rk_phy->reg_base,
			    rk_phy->phy_data->pcie_status,
			    &status);
		if (!(status & PHY_PLL_OUTPUT)) {
			dev_dbg(&phy->dev, "pll output enable done!\n");
			err = 0;
			break;
		}
		msleep(20);
	}

	if (err) {
		dev_err(&phy->dev, "pll output enable timeout!\n");
		goto err_pll_lock;
	}

	/* 3) Re-select the lock word and wait for relock on the new clock. */
	regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
		     HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
				   PHY_CFG_ADDR_MASK,
				   PHY_CFG_ADDR_SHIFT));
	err = -EINVAL;
	while (time_before(jiffies, timeout)) {
		regmap_read(rk_phy->reg_base,
			    rk_phy->phy_data->pcie_status,
			    &status);
		if (status & PHY_PLL_LOCKED) {
			dev_dbg(&phy->dev, "pll relocked!\n");
			err = 0;
			break;
		}
		msleep(20);
	}

	if (err) {
		dev_err(&phy->dev, "pll relock timeout!\n");
		goto err_pll_lock;
	}

err_out:
	mutex_unlock(&rk_phy->pcie_mutex);
	return 0;

err_pll_lock:
	reset_control_assert(rk_phy->phy_rst);
err_pwr_cnt:
	rk_phy->pwr_cnt--;
	mutex_unlock(&rk_phy->pcie_mutex);
	return err;
}
284*4882a593Smuzhiyun
rockchip_pcie_phy_init(struct phy * phy)285*4882a593Smuzhiyun static int rockchip_pcie_phy_init(struct phy *phy)
286*4882a593Smuzhiyun {
287*4882a593Smuzhiyun struct phy_pcie_instance *inst = phy_get_drvdata(phy);
288*4882a593Smuzhiyun struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
289*4882a593Smuzhiyun int err = 0;
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun mutex_lock(&rk_phy->pcie_mutex);
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun if (rk_phy->init_cnt++)
294*4882a593Smuzhiyun goto err_out;
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun err = clk_prepare_enable(rk_phy->clk_pciephy_ref);
297*4882a593Smuzhiyun if (err) {
298*4882a593Smuzhiyun dev_err(&phy->dev, "Fail to enable pcie ref clock.\n");
299*4882a593Smuzhiyun goto err_refclk;
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun err = reset_control_assert(rk_phy->phy_rst);
303*4882a593Smuzhiyun if (err) {
304*4882a593Smuzhiyun dev_err(&phy->dev, "assert phy_rst err %d\n", err);
305*4882a593Smuzhiyun goto err_reset;
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun err_out:
309*4882a593Smuzhiyun mutex_unlock(&rk_phy->pcie_mutex);
310*4882a593Smuzhiyun return 0;
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun err_reset:
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun clk_disable_unprepare(rk_phy->clk_pciephy_ref);
315*4882a593Smuzhiyun err_refclk:
316*4882a593Smuzhiyun rk_phy->init_cnt--;
317*4882a593Smuzhiyun mutex_unlock(&rk_phy->pcie_mutex);
318*4882a593Smuzhiyun return err;
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun
rockchip_pcie_phy_exit(struct phy * phy)321*4882a593Smuzhiyun static int rockchip_pcie_phy_exit(struct phy *phy)
322*4882a593Smuzhiyun {
323*4882a593Smuzhiyun struct phy_pcie_instance *inst = phy_get_drvdata(phy);
324*4882a593Smuzhiyun struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun mutex_lock(&rk_phy->pcie_mutex);
327*4882a593Smuzhiyun
328*4882a593Smuzhiyun if (--rk_phy->init_cnt)
329*4882a593Smuzhiyun goto err_init_cnt;
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun clk_disable_unprepare(rk_phy->clk_pciephy_ref);
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun err_init_cnt:
334*4882a593Smuzhiyun mutex_unlock(&rk_phy->pcie_mutex);
335*4882a593Smuzhiyun return 0;
336*4882a593Smuzhiyun }
337*4882a593Smuzhiyun
/* Generic PHY framework callbacks for every per-lane phy instance. */
static const struct phy_ops ops = {
	.init = rockchip_pcie_phy_init,
	.exit = rockchip_pcie_phy_exit,
	.power_on = rockchip_pcie_phy_power_on,
	.power_off = rockchip_pcie_phy_power_off,
	.owner = THIS_MODULE,
};
345*4882a593Smuzhiyun
/* RK3399 GRF offsets of the PCIe PHY control/status registers. */
static const struct rockchip_pcie_data rk3399_pcie_data = {
	.pcie_conf = 0xe220,
	.pcie_status = 0xe2a4,
	.pcie_laneoff = 0xe214,
};
351*4882a593Smuzhiyun
/* Devicetree match table; .data carries the per-SoC GRF offsets. */
static const struct of_device_id rockchip_pcie_phy_dt_ids[] = {
	{
		.compatible = "rockchip,rk3399-pcie-phy",
		.data = &rk3399_pcie_data,
	},
	{}
};

MODULE_DEVICE_TABLE(of, rockchip_pcie_phy_dt_ids);
361*4882a593Smuzhiyun
/*
 * Probe: the PHY node sits under the GRF syscon, so the parent node
 * provides the regmap. Creates either one phy (legacy #phy-cells = 0
 * binding) or one phy per lane, then registers the provider. All
 * resources are devm-managed; no explicit remove is needed.
 */
static int rockchip_pcie_phy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_pcie_phy *rk_phy;
	struct phy_provider *phy_provider;
	struct regmap *grf;
	const struct of_device_id *of_id;
	int i;
	u32 phy_num;

	/* The GRF regmap comes from our parent syscon node. */
	grf = syscon_node_to_regmap(dev->parent->of_node);
	if (IS_ERR(grf)) {
		dev_err(dev, "Cannot find GRF syscon\n");
		return PTR_ERR(grf);
	}

	rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL);
	if (!rk_phy)
		return -ENOMEM;

	of_id = of_match_device(rockchip_pcie_phy_dt_ids, &pdev->dev);
	if (!of_id)
		return -EINVAL;

	/* Cast drops const because the phy_data member is non-const. */
	rk_phy->phy_data = (struct rockchip_pcie_data *)of_id->data;
	rk_phy->reg_base = grf;

	mutex_init(&rk_phy->pcie_mutex);

	rk_phy->phy_rst = devm_reset_control_get(dev, "phy");
	if (IS_ERR(rk_phy->phy_rst)) {
		/* Stay quiet on probe deferral; complain otherwise. */
		if (PTR_ERR(rk_phy->phy_rst) != -EPROBE_DEFER)
			dev_err(dev,
				"missing phy property for reset controller\n");
		return PTR_ERR(rk_phy->phy_rst);
	}

	rk_phy->clk_pciephy_ref = devm_clk_get(dev, "refclk");
	if (IS_ERR(rk_phy->clk_pciephy_ref)) {
		dev_err(dev, "refclk not found.\n");
		return PTR_ERR(rk_phy->clk_pciephy_ref);
	}

	/* parse #phy-cells to see if it's legacy PHY model */
	if (of_property_read_u32(dev->of_node, "#phy-cells", &phy_num))
		return -ENOENT;

	/* 0 cells = legacy single-phy binding; otherwise one phy per lane. */
	phy_num = (phy_num == 0) ? 1 : PHY_MAX_LANE_NUM;
	dev_dbg(dev, "phy number is %d\n", phy_num);

	for (i = 0; i < phy_num; i++) {
		rk_phy->phys[i].phy = devm_phy_create(dev, dev->of_node, &ops);
		if (IS_ERR(rk_phy->phys[i].phy)) {
			dev_err(dev, "failed to create PHY%d\n", i);
			return PTR_ERR(rk_phy->phys[i].phy);
		}
		rk_phy->phys[i].index = i;
		phy_set_drvdata(rk_phy->phys[i].phy, &rk_phy->phys[i]);
	}

	platform_set_drvdata(pdev, rk_phy);
	phy_provider = devm_of_phy_provider_register(dev,
					rockchip_pcie_phy_of_xlate);

	return PTR_ERR_OR_ZERO(phy_provider);
}
428*4882a593Smuzhiyun
/* Platform driver glue; matched via the OF table above. */
static struct platform_driver rockchip_pcie_driver = {
	.probe = rockchip_pcie_phy_probe,
	.driver = {
		.name = "rockchip-pcie-phy",
		.of_match_table = rockchip_pcie_phy_dt_ids,
	},
};

module_platform_driver(rockchip_pcie_driver);

MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip PCIe PHY driver");
MODULE_LICENSE("GPL v2");
442