// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Combo-PHY driver
 *
 * Copyright (C) 2019-2020 Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <dt-bindings/phy/phy.h>

#define PCIE_PHY_GEN_CTRL	0x00
#define PCIE_PHY_CLK_PAD	BIT(17)

#define PAD_DIS_CFG		0x174

#define PCS_XF_ATE_OVRD_IN_2	0x3008
#define ADAPT_REQ_MSK		GENMASK(5, 4)

#define PCS_XF_RX_ADAPT_ACK	0x3010
#define RX_ADAPT_ACK_BIT	BIT(0)

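/*
 * The PCS/CR registers of each lane are 0x100 register words apart;
 * CR_ADDR() turns a per-lane register-word address into a byte offset.
 * The per-ComboPhy-bank mode and clock-disable registers in the HSIO
 * configuration block are spaced 0x200 bytes apart.
 */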
#define CR_ADDR(addr, lane)	(((addr) + (lane) * 0x100) << 2)
#define REG_COMBO_MODE(x)	((x) * 0x200)
#define REG_CLK_DISABLE(x)	((x) * 0x200 + 0x124)

#define COMBO_PHY_ID(x)		((x)->parent->id)
#define PHY_ID(x)		((x)->id)

#define CLK_100MHZ		100000000
#define CLK_156_25MHZ		156250000

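/* Core clock rate for each mode, indexed by enum intel_phy_mode */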
static const unsigned long intel_iphy_clk_rates[] = {
	CLK_100MHZ, CLK_156_25MHZ, CLK_100MHZ,
};

enum {
	PHY_0,
	PHY_1,
	PHY_MAX_NUM
};

/*
 * Clock Register bit fields to enable clocks
 * for ComboPhy according to the mode.
 */
enum intel_phy_mode {
	PHY_PCIE_MODE = 0,
	PHY_XPCS_MODE,
	PHY_SATA_MODE,
};

/* ComboPhy mode Register values */
enum intel_combo_mode {
	PCIE0_PCIE1_MODE = 0,
	PCIE_DL_MODE,
	RXAUI_MODE,
	XPCS0_XPCS1_MODE,
	SATA0_SATA1_MODE,
};

enum aggregated_mode {
	PHY_SL_MODE,
	PHY_DL_MODE,
};

struct intel_combo_phy;

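/* One PHY lane instance within a ComboPhy pair */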
struct intel_cbphy_iphy {
	struct phy		*phy;
	struct intel_combo_phy	*parent;
	struct reset_control	*app_rst;
	u32			id;
};

struct intel_combo_phy {
	struct device		*dev;
	struct clk		*core_clk;
	unsigned long		clk_rate;
	void __iomem		*app_base;
	void __iomem		*cr_base;
	struct regmap		*syscfg;
	struct regmap		*hsiocfg;
	u32			id;
	u32			bid;
	struct reset_control	*phy_rst;
	struct reset_control	*core_rst;
	struct intel_cbphy_iphy	iphy[PHY_MAX_NUM];
	enum intel_phy_mode	phy_mode;
	enum aggregated_mode	aggr_mode;
	u32			init_cnt;
	struct mutex		lock;
};

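/*
 * Gate or ungate the per-lane clock: the clock-disable register has one
 * bit per (mode, lane) pair, and a set bit disables that lane's clock.
 */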
static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
	u32 val;

	/* Register: 0 is enable, 1 is disable */
	val = set ? 0 : mask;

	return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
				  mask, val);
}

static int intel_cbphy_pcie_refclk_cfg(struct intel_cbphy_iphy *iphy, bool set)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	u32 mask = BIT(cbphy->id * 2 + iphy->id);
	u32 val;

	/* Register: 0 is enable, 1 is disable */
	val = set ? 0 : mask;

	return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
}

static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
					  u32 mask, u32 val)
{
	u32 reg_val;

	reg_val = readl(base + reg);
	reg_val &= ~mask;
	reg_val |= val;
	writel(reg_val, base + reg);
}

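/*
 * Run @phy_cfg on @iphy and, when the ComboPhy is aggregated in
 * dual-lane mode, on the second lane as well.
 */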
static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
				int (*phy_cfg)(struct intel_cbphy_iphy *))
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = phy_cfg(iphy);
	if (ret)
		return ret;

	if (cbphy->aggr_mode != PHY_DL_MODE)
		return 0;

	return phy_cfg(&cbphy->iphy[PHY_1]);
}

static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = intel_cbphy_pcie_refclk_cfg(iphy, true);
	if (ret) {
		dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
		return ret;
	}

	if (cbphy->init_cnt)
		return 0;

	combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
			       PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0));

	/* Delay for stable clock PLL */
	usleep_range(50, 100);

	return 0;
}

static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = intel_cbphy_pcie_refclk_cfg(iphy, false);
	if (ret) {
		dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
		return ret;
	}

	if (cbphy->init_cnt)
		return 0;

	combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
			       PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1));

	return 0;
}

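/*
 * Translate the (PHY mode, aggregation) pair into the ComboPhy mode
 * register value; SATA does not support dual-lane aggregation.
 */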
static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
{
	enum intel_combo_mode cb_mode;
	enum aggregated_mode aggr = cbphy->aggr_mode;
	struct device *dev = cbphy->dev;
	enum intel_phy_mode mode;
	int ret;

	mode = cbphy->phy_mode;

	switch (mode) {
	case PHY_PCIE_MODE:
		cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
		break;

	case PHY_XPCS_MODE:
		cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
		break;

	case PHY_SATA_MODE:
		if (aggr == PHY_DL_MODE) {
			dev_err(dev, "Mode:%u not support dual lane!\n", mode);
			return -EINVAL;
		}

		cb_mode = SATA0_SATA1_MODE;
		break;
	default:
		return -EINVAL;
	}

	ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
	if (ret)
		dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);

	return ret;
}

static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
{
	reset_control_assert(cbphy->core_rst);
	reset_control_assert(cbphy->phy_rst);
}

static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
{
	reset_control_deassert(cbphy->core_rst);
	reset_control_deassert(cbphy->phy_rst);
	/* Delay to ensure reset process is done */
	usleep_range(10, 20);
}

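/*
 * On the first init (cbphy->init_cnt == 0) do the one-time setup shared
 * by both lanes: enable the core clock, cycle the resets and program the
 * ComboPhy mode. Every caller then ungates its own lane clock and
 * releases its per-lane application reset.
 */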
static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	if (!cbphy->init_cnt) {
		ret = clk_prepare_enable(cbphy->core_clk);
		if (ret) {
			dev_err(cbphy->dev, "Clock enable failed!\n");
			return ret;
		}

		ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
		if (ret) {
			dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
				cbphy->clk_rate);
			goto clk_err;
		}

		intel_cbphy_rst_assert(cbphy);
		intel_cbphy_rst_deassert(cbphy);
		ret = intel_cbphy_set_mode(cbphy);
		if (ret)
			goto clk_err;
	}

	ret = intel_cbphy_iphy_enable(iphy, true);
	if (ret) {
		dev_err(cbphy->dev, "Failed enabling PHY core\n");
		goto clk_err;
	}

	ret = reset_control_deassert(iphy->app_rst);
	if (ret) {
		dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
			COMBO_PHY_ID(iphy), PHY_ID(iphy));
		goto clk_err;
	}

	/* Delay to ensure reset process is done */
	udelay(1);

	return 0;

clk_err:
	clk_disable_unprepare(cbphy->core_clk);

	return ret;
}

static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = reset_control_assert(iphy->app_rst);
	if (ret) {
		dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
			COMBO_PHY_ID(iphy), PHY_ID(iphy));
		return ret;
	}

	ret = intel_cbphy_iphy_enable(iphy, false);
	if (ret) {
		dev_err(cbphy->dev, "Failed disabling PHY core\n");
		return ret;
	}

	if (cbphy->init_cnt)
		return 0;

	clk_disable_unprepare(cbphy->core_clk);
	intel_cbphy_rst_assert(cbphy);

	return 0;
}

static int intel_cbphy_init(struct phy *phy)
{
	struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	mutex_lock(&cbphy->lock);
	ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
	if (ret)
		goto err;

	if (cbphy->phy_mode == PHY_PCIE_MODE) {
		ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
		if (ret)
			goto err;
	}

	cbphy->init_cnt++;

err:
	mutex_unlock(&cbphy->lock);

	return ret;
}

static int intel_cbphy_exit(struct phy *phy)
{
	struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	mutex_lock(&cbphy->lock);
	cbphy->init_cnt--;
	if (cbphy->phy_mode == PHY_PCIE_MODE) {
		ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_dis_pad_refclk);
		if (ret)
			goto err;
	}

	ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_off);

err:
	mutex_unlock(&cbphy->lock);

	return ret;
}

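/*
 * XPCS mode only: request automatic RX adaptation for this lane and poll
 * for the acknowledge bit before clearing the request again.
 */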
static int intel_cbphy_calibrate(struct phy *phy)
{
	struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
	struct intel_combo_phy *cbphy = iphy->parent;
	void __iomem *cr_base = cbphy->cr_base;
	int val, ret, id;

	if (cbphy->phy_mode != PHY_XPCS_MODE)
		return 0;

	id = PHY_ID(iphy);

	/* Trigger auto RX adaptation */
	combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
			       ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3));
	/* Wait for RX adaptation to finish */
	ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
				 val, val & RX_ADAPT_ACK_BIT, 10, 5000);
	if (ret)
		dev_err(cbphy->dev, "RX Adaptation failed!\n");
	else
		dev_dbg(cbphy->dev, "RX Adaptation success!\n");

	/* Stop RX adaptation */
	combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
			       ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0));

	return ret;
}

static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
{
	struct device *dev = cbphy->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	struct fwnode_reference_args ref;
	int ret;
	u32 val;

	cbphy->core_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cbphy->core_clk)) {
		ret = PTR_ERR(cbphy->core_clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Get clk failed:%d!\n", ret);
		return ret;
	}

	cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
	if (IS_ERR(cbphy->core_rst)) {
		ret = PTR_ERR(cbphy->core_rst);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Get core reset control err: %d!\n", ret);
		return ret;
	}

	cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
	if (IS_ERR(cbphy->phy_rst)) {
		ret = PTR_ERR(cbphy->phy_rst);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Get PHY reset control err: %d!\n", ret);
		return ret;
	}

	cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
	if (IS_ERR(cbphy->iphy[0].app_rst)) {
		ret = PTR_ERR(cbphy->iphy[0].app_rst);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
		return ret;
	}

	cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
	if (IS_ERR(cbphy->iphy[1].app_rst)) {
		ret = PTR_ERR(cbphy->iphy[1].app_rst);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
		return ret;
	}

	cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
	if (IS_ERR(cbphy->app_base))
		return PTR_ERR(cbphy->app_base);

	cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
	if (IS_ERR(cbphy->cr_base))
		return PTR_ERR(cbphy->cr_base);

	/*
	 * The syscfg and hsiocfg variables store handles to the register
	 * sets of which the ComboPhy subsystem specific registers are a
	 * subset; the regmap framework is used to access them.
	 */
	ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
						 1, 0, &ref);
	if (ret < 0)
		return ret;

	cbphy->id = ref.args[0];
	cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
	fwnode_handle_put(ref.fwnode);

	ret = fwnode_property_get_reference_args(fwnode, "intel,hsio", NULL, 1,
						 0, &ref);
	if (ret < 0)
		return ret;

	cbphy->bid = ref.args[0];
	cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
	fwnode_handle_put(ref.fwnode);

	ret = fwnode_property_read_u32_array(fwnode, "intel,phy-mode", &val, 1);
	if (ret)
		return ret;

	switch (val) {
	case PHY_TYPE_PCIE:
		cbphy->phy_mode = PHY_PCIE_MODE;
		break;

	case PHY_TYPE_SATA:
		cbphy->phy_mode = PHY_SATA_MODE;
		break;

	case PHY_TYPE_XPCS:
		cbphy->phy_mode = PHY_XPCS_MODE;
		break;

	default:
		dev_err(dev, "Invalid PHY mode: %u\n", val);
		return -EINVAL;
	}

	cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];

	if (fwnode_property_present(fwnode, "intel,aggregation"))
		cbphy->aggr_mode = PHY_DL_MODE;
	else
		cbphy->aggr_mode = PHY_SL_MODE;

	return 0;
}

static const struct phy_ops intel_cbphy_ops = {
	.init		= intel_cbphy_init,
	.exit		= intel_cbphy_exit,
	.calibrate	= intel_cbphy_calibrate,
	.owner		= THIS_MODULE,
};

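/*
 * #phy-cells translation: args[0] selects the lane; in dual-lane mode
 * only lane 0 is exposed, as lane 1 has no PHY instance of its own.
 */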
static struct phy *intel_cbphy_xlate(struct device *dev,
				     struct of_phandle_args *args)
{
	struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
	u32 iphy_id;

	if (args->args_count < 1) {
		dev_err(dev, "Invalid number of arguments\n");
		return ERR_PTR(-EINVAL);
	}

	iphy_id = args->args[0];
	if (iphy_id >= PHY_MAX_NUM) {
		dev_err(dev, "Invalid phy instance %d\n", iphy_id);
		return ERR_PTR(-EINVAL);
	}

	if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
		dev_err(dev, "Invalid. ComboPhy is in Dual lane mode %d\n", iphy_id);
		return ERR_PTR(-EINVAL);
	}

	return cbphy->iphy[iphy_id].phy;
}

static int intel_cbphy_create(struct intel_combo_phy *cbphy)
{
	struct phy_provider *phy_provider;
	struct device *dev = cbphy->dev;
	struct intel_cbphy_iphy *iphy;
	int i;

	for (i = 0; i < PHY_MAX_NUM; i++) {
		iphy = &cbphy->iphy[i];
		iphy->parent = cbphy;
		iphy->id = i;

		/* In dual lane mode skip phy creation for the second phy */
		if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
			continue;

		iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
		if (IS_ERR(iphy->phy)) {
			dev_err(dev, "PHY[%u:%u]: create PHY instance failed!\n",
				COMBO_PHY_ID(iphy), PHY_ID(iphy));

			return PTR_ERR(iphy->phy);
		}

		phy_set_drvdata(iphy->phy, iphy);
	}

	dev_set_drvdata(dev, cbphy);
	phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);
	if (IS_ERR(phy_provider))
		dev_err(dev, "Register PHY provider failed!\n");

	return PTR_ERR_OR_ZERO(phy_provider);
}

static int intel_cbphy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct intel_combo_phy *cbphy;
	int ret;

	cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
	if (!cbphy)
		return -ENOMEM;

	cbphy->dev = dev;
	cbphy->init_cnt = 0;
	mutex_init(&cbphy->lock);
	ret = intel_cbphy_fwnode_parse(cbphy);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, cbphy);

	return intel_cbphy_create(cbphy);
}

static int intel_cbphy_remove(struct platform_device *pdev)
{
	struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);

	intel_cbphy_rst_assert(cbphy);
	clk_disable_unprepare(cbphy->core_clk);
	return 0;
}

static const struct of_device_id of_intel_cbphy_match[] = {
	{ .compatible = "intel,combo-phy" },
	{ .compatible = "intel,combophy-lgm" },
	{}
};

static struct platform_driver intel_cbphy_driver = {
	.probe = intel_cbphy_probe,
	.remove = intel_cbphy_remove,
	.driver = {
		.name = "intel-combo-phy",
		.of_match_table = of_intel_cbphy_match,
	}
};

module_platform_driver(intel_cbphy_driver);

MODULE_DESCRIPTION("Intel Combo-phy driver");
MODULE_LICENSE("GPL v2");