1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * UFS Host Controller driver for Exynos specific extensions
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
6*4882a593Smuzhiyun * Author: Seungwon Jeon <essuuj@gmail.com>
7*4882a593Smuzhiyun * Author: Alim Akhtar <alim.akhtar@samsung.com>
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/clk.h>
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/of.h>
14*4882a593Smuzhiyun #include <linux/of_address.h>
15*4882a593Smuzhiyun #include <linux/phy/phy.h>
16*4882a593Smuzhiyun #include <linux/platform_device.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include "ufshcd.h"
19*4882a593Smuzhiyun #include "ufshcd-pltfrm.h"
20*4882a593Smuzhiyun #include "ufshci.h"
21*4882a593Smuzhiyun #include "unipro.h"
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun #include "ufs-exynos.h"
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun /*
26*4882a593Smuzhiyun * Exynos's Vendor specific registers for UFSHCI
27*4882a593Smuzhiyun */
28*4882a593Smuzhiyun #define HCI_TXPRDT_ENTRY_SIZE 0x00
29*4882a593Smuzhiyun #define PRDT_PREFECT_EN BIT(31)
30*4882a593Smuzhiyun #define PRDT_SET_SIZE(x) ((x) & 0x1F)
31*4882a593Smuzhiyun #define HCI_RXPRDT_ENTRY_SIZE 0x04
32*4882a593Smuzhiyun #define HCI_1US_TO_CNT_VAL 0x0C
33*4882a593Smuzhiyun #define CNT_VAL_1US_MASK 0x3FF
34*4882a593Smuzhiyun #define HCI_UTRL_NEXUS_TYPE 0x40
35*4882a593Smuzhiyun #define HCI_UTMRL_NEXUS_TYPE 0x44
36*4882a593Smuzhiyun #define HCI_SW_RST 0x50
37*4882a593Smuzhiyun #define UFS_LINK_SW_RST BIT(0)
38*4882a593Smuzhiyun #define UFS_UNIPRO_SW_RST BIT(1)
39*4882a593Smuzhiyun #define UFS_SW_RST_MASK (UFS_UNIPRO_SW_RST | UFS_LINK_SW_RST)
40*4882a593Smuzhiyun #define HCI_DATA_REORDER 0x60
41*4882a593Smuzhiyun #define HCI_UNIPRO_APB_CLK_CTRL 0x68
42*4882a593Smuzhiyun #define UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF))
43*4882a593Smuzhiyun #define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C
44*4882a593Smuzhiyun #define HCI_GPIO_OUT 0x70
45*4882a593Smuzhiyun #define HCI_ERR_EN_PA_LAYER 0x78
46*4882a593Smuzhiyun #define HCI_ERR_EN_DL_LAYER 0x7C
47*4882a593Smuzhiyun #define HCI_ERR_EN_N_LAYER 0x80
48*4882a593Smuzhiyun #define HCI_ERR_EN_T_LAYER 0x84
49*4882a593Smuzhiyun #define HCI_ERR_EN_DME_LAYER 0x88
50*4882a593Smuzhiyun #define HCI_CLKSTOP_CTRL 0xB0
51*4882a593Smuzhiyun #define REFCLK_STOP BIT(2)
52*4882a593Smuzhiyun #define UNIPRO_MCLK_STOP BIT(1)
53*4882a593Smuzhiyun #define UNIPRO_PCLK_STOP BIT(0)
54*4882a593Smuzhiyun #define CLK_STOP_MASK (REFCLK_STOP |\
55*4882a593Smuzhiyun UNIPRO_MCLK_STOP |\
56*4882a593Smuzhiyun UNIPRO_PCLK_STOP)
57*4882a593Smuzhiyun #define HCI_MISC 0xB4
58*4882a593Smuzhiyun #define REFCLK_CTRL_EN BIT(7)
59*4882a593Smuzhiyun #define UNIPRO_PCLK_CTRL_EN BIT(6)
60*4882a593Smuzhiyun #define UNIPRO_MCLK_CTRL_EN BIT(5)
61*4882a593Smuzhiyun #define HCI_CORECLK_CTRL_EN BIT(4)
62*4882a593Smuzhiyun #define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\
63*4882a593Smuzhiyun UNIPRO_PCLK_CTRL_EN |\
64*4882a593Smuzhiyun UNIPRO_MCLK_CTRL_EN)
65*4882a593Smuzhiyun /* Device fatal error */
66*4882a593Smuzhiyun #define DFES_ERR_EN BIT(31)
67*4882a593Smuzhiyun #define DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\
68*4882a593Smuzhiyun UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
69*4882a593Smuzhiyun #define DFES_DEF_L3_ERRS (UIC_NETWORK_UNSUPPORTED_HEADER_TYPE |\
70*4882a593Smuzhiyun UIC_NETWORK_BAD_DEVICEID_ENC |\
71*4882a593Smuzhiyun UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING)
72*4882a593Smuzhiyun #define DFES_DEF_L4_ERRS (UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE |\
73*4882a593Smuzhiyun UIC_TRANSPORT_UNKNOWN_CPORTID |\
74*4882a593Smuzhiyun UIC_TRANSPORT_NO_CONNECTION_RX |\
75*4882a593Smuzhiyun UIC_TRANSPORT_BAD_TC)
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun enum {
78*4882a593Smuzhiyun UNIPRO_L1_5 = 0,/* PHY Adapter */
79*4882a593Smuzhiyun UNIPRO_L2, /* Data Link */
80*4882a593Smuzhiyun UNIPRO_L3, /* Network */
81*4882a593Smuzhiyun UNIPRO_L4, /* Transport */
82*4882a593Smuzhiyun UNIPRO_DME, /* DME */
83*4882a593Smuzhiyun };
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun /*
86*4882a593Smuzhiyun * UNIPRO registers
87*4882a593Smuzhiyun */
88*4882a593Smuzhiyun #define UNIPRO_COMP_VERSION 0x000
89*4882a593Smuzhiyun #define UNIPRO_DME_PWR_REQ 0x090
90*4882a593Smuzhiyun #define UNIPRO_DME_PWR_REQ_POWERMODE 0x094
91*4882a593Smuzhiyun #define UNIPRO_DME_PWR_REQ_LOCALL2TIMER0 0x098
92*4882a593Smuzhiyun #define UNIPRO_DME_PWR_REQ_LOCALL2TIMER1 0x09C
93*4882a593Smuzhiyun #define UNIPRO_DME_PWR_REQ_LOCALL2TIMER2 0x0A0
94*4882a593Smuzhiyun #define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER0 0x0A4
95*4882a593Smuzhiyun #define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER1 0x0A8
96*4882a593Smuzhiyun #define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER2 0x0AC
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun /*
99*4882a593Smuzhiyun * UFS Protector registers
100*4882a593Smuzhiyun */
101*4882a593Smuzhiyun #define UFSPRSECURITY 0x010
102*4882a593Smuzhiyun #define NSSMU BIT(14)
103*4882a593Smuzhiyun #define UFSPSBEGIN0 0x200
104*4882a593Smuzhiyun #define UFSPSEND0 0x204
105*4882a593Smuzhiyun #define UFSPSLUN0 0x208
106*4882a593Smuzhiyun #define UFSPSCTRL0 0x20C
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun #define CNTR_DIV_VAL 40
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
111*4882a593Smuzhiyun static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);
112*4882a593Smuzhiyun
exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs * ufs)113*4882a593Smuzhiyun static inline void exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs *ufs)
114*4882a593Smuzhiyun {
115*4882a593Smuzhiyun exynos_ufs_auto_ctrl_hcc(ufs, true);
116*4882a593Smuzhiyun }
117*4882a593Smuzhiyun
exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs * ufs)118*4882a593Smuzhiyun static inline void exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs *ufs)
119*4882a593Smuzhiyun {
120*4882a593Smuzhiyun exynos_ufs_auto_ctrl_hcc(ufs, false);
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun
/*
 * Snapshot HCI_MISC into *val, then disable automatic core clock control.
 * Pair with exynos_ufs_auto_ctrl_hcc_restore() to put the register back.
 */
static inline void exynos_ufs_disable_auto_ctrl_hcc_save(
					struct exynos_ufs *ufs, u32 *val)
{
	*val = hci_readl(ufs, HCI_MISC);
	exynos_ufs_auto_ctrl_hcc(ufs, false);
}
129*4882a593Smuzhiyun
/* Restore HCI_MISC from the value saved by the _save() counterpart. */
static inline void exynos_ufs_auto_ctrl_hcc_restore(
					struct exynos_ufs *ufs, u32 *val)
{
	hci_writel(ufs, *val, HCI_MISC);
}
135*4882a593Smuzhiyun
exynos_ufs_gate_clks(struct exynos_ufs * ufs)136*4882a593Smuzhiyun static inline void exynos_ufs_gate_clks(struct exynos_ufs *ufs)
137*4882a593Smuzhiyun {
138*4882a593Smuzhiyun exynos_ufs_ctrl_clkstop(ufs, true);
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun
exynos_ufs_ungate_clks(struct exynos_ufs * ufs)141*4882a593Smuzhiyun static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs)
142*4882a593Smuzhiyun {
143*4882a593Smuzhiyun exynos_ufs_ctrl_clkstop(ufs, false);
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun
/* Exynos7 needs no extra driver-specific initialization; always succeeds. */
static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
{
	return 0;
}
150*4882a593Smuzhiyun
/*
 * Exynos7-specific tuning that must run before UniPro link startup.
 *
 * Programs per-lane vendor PHY attributes (the raw MIB addresses 0x297,
 * 0x362, 0x363 are undocumented Samsung M-PHY registers -- values come
 * from vendor settings; TODO confirm against Samsung documentation), then
 * issues a line-reset request with PHY reset skipped.  Always returns 0.
 */
static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	/* saved so PA_DBG_OPTION_SUITE can be restored at the end */
	u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite;
	int i;

	/* vendor PHY attributes are written inside override (O/V TM) mode */
	exynos_ufs_enable_ov_tm(hba);
	for_each_ufs_tx_lane(ufs, i)
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x297, i), 0x17);
	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x362, i), 0xff);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x363, i), 0x00);
	}
	exynos_ufs_disable_ov_tm(hba);

	for_each_ufs_tx_lane(ufs, i)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1);
	udelay(1);
	/*
	 * Bit 12 of the debug option suite is set only while the skip-reset
	 * line-reset request below is issued; its exact meaning is
	 * vendor-private -- TODO confirm.
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12));
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1);
	udelay(1600);	/* let the requested line reset complete */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val);

	return 0;
}
180*4882a593Smuzhiyun
/*
 * Exynos7-specific tuning after the link is established: per-TX-lane
 * vendor PHY attributes (raw MIB addresses 0x28b/0x29a/0x277 are
 * undocumented Samsung M-PHY registers) and a PA_SAVECONFIGTIME of
 * 0xbb8, which is only writable in debug mode.  Always returns 0.
 */
static int exynos7_ufs_post_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	int i;

	exynos_ufs_enable_ov_tm(hba);
	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, i), 0x83);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x29a, i), 0x07);
		/* TX line-reset duration derived from a 200us interval,
		 * converted to pclk cycles */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x277, i),
			TX_LINERESET_N(exynos_ufs_calc_time_cntr(ufs, 200000)));
	}
	exynos_ufs_disable_ov_tm(hba);

	exynos_ufs_enable_dbg_mode(hba);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xbb8);
	exynos_ufs_disable_dbg_mode(hba);

	return 0;
}
201*4882a593Smuzhiyun
exynos7_ufs_pre_pwr_change(struct exynos_ufs * ufs,struct ufs_pa_layer_attr * pwr)202*4882a593Smuzhiyun static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
203*4882a593Smuzhiyun struct ufs_pa_layer_attr *pwr)
204*4882a593Smuzhiyun {
205*4882a593Smuzhiyun unipro_writel(ufs, 0x22, UNIPRO_DBG_FORCE_DME_CTRL_STATE);
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun return 0;
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun
/*
 * After a power mode change on Exynos7: latch the new RX PHY
 * configuration, and when only a single lane is active, set
 * PA_CONNECTEDTXDATALANES to 1.  Always returns 0.
 */
static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
				struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	/* active lane count = max of negotiated RX and TX lanes */
	int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_RXPHY_CFGUPDT), 0x1);

	if (lanes == 1) {
		/* written inside debug mode -- presumably because the
		 * attribute is not normally writable; confirm */
		exynos_ufs_enable_dbg_mode(hba);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 0x1);
		exynos_ufs_disable_dbg_mode(hba);
	}

	return 0;
}
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun /*
228*4882a593Smuzhiyun * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w
229*4882a593Smuzhiyun * Control should be disabled in the below cases
230*4882a593Smuzhiyun * - Before host controller S/W reset
231*4882a593Smuzhiyun * - Access to UFS protector's register
232*4882a593Smuzhiyun */
/*
 * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w
 * Control should be disabled in the below cases
 * - Before host controller S/W reset
 * - Access to UFS protector's register
 */
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en)
{
	u32 misc = hci_readl(ufs, HCI_MISC);

	if (en)
		misc |= HCI_CORECLK_CTRL_EN;
	else
		misc &= ~HCI_CORECLK_CTRL_EN;

	hci_writel(ufs, misc, HCI_MISC);
}
242*4882a593Smuzhiyun
/*
 * Stop (en == true) or restart the ref/mclk/pclk clocks.
 *
 * Note the register writes happen in opposite order on the two paths:
 * when gating, the CLK_CTRL_EN bits in HCI_MISC are set before the stop
 * bits; when ungating, the stop bits are cleared first.
 */
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en)
{
	u32 ctrl = hci_readl(ufs, HCI_CLKSTOP_CTRL);
	u32 misc = hci_readl(ufs, HCI_MISC);

	if (en) {
		hci_writel(ufs, misc | CLK_CTRL_EN_MASK, HCI_MISC);
		hci_writel(ufs, ctrl | CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
	} else {
		hci_writel(ufs, ctrl & ~CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
		hci_writel(ufs, misc & ~CLK_CTRL_EN_MASK, HCI_MISC);
	}
}
256*4882a593Smuzhiyun
exynos_ufs_get_clk_info(struct exynos_ufs * ufs)257*4882a593Smuzhiyun static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
258*4882a593Smuzhiyun {
259*4882a593Smuzhiyun struct ufs_hba *hba = ufs->hba;
260*4882a593Smuzhiyun struct list_head *head = &hba->clk_list_head;
261*4882a593Smuzhiyun struct ufs_clk_info *clki;
262*4882a593Smuzhiyun unsigned long pclk_rate;
263*4882a593Smuzhiyun u32 f_min, f_max;
264*4882a593Smuzhiyun u8 div = 0;
265*4882a593Smuzhiyun int ret = 0;
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun if (list_empty(head))
268*4882a593Smuzhiyun goto out;
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun list_for_each_entry(clki, head, list) {
271*4882a593Smuzhiyun if (!IS_ERR(clki->clk)) {
272*4882a593Smuzhiyun if (!strcmp(clki->name, "core_clk"))
273*4882a593Smuzhiyun ufs->clk_hci_core = clki->clk;
274*4882a593Smuzhiyun else if (!strcmp(clki->name, "sclk_unipro_main"))
275*4882a593Smuzhiyun ufs->clk_unipro_main = clki->clk;
276*4882a593Smuzhiyun }
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun if (!ufs->clk_hci_core || !ufs->clk_unipro_main) {
280*4882a593Smuzhiyun dev_err(hba->dev, "failed to get clk info\n");
281*4882a593Smuzhiyun ret = -EINVAL;
282*4882a593Smuzhiyun goto out;
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main);
286*4882a593Smuzhiyun pclk_rate = clk_get_rate(ufs->clk_hci_core);
287*4882a593Smuzhiyun f_min = ufs->pclk_avail_min;
288*4882a593Smuzhiyun f_max = ufs->pclk_avail_max;
289*4882a593Smuzhiyun
290*4882a593Smuzhiyun if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
291*4882a593Smuzhiyun do {
292*4882a593Smuzhiyun pclk_rate /= (div + 1);
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun if (pclk_rate <= f_max)
295*4882a593Smuzhiyun break;
296*4882a593Smuzhiyun div++;
297*4882a593Smuzhiyun } while (pclk_rate >= f_min);
298*4882a593Smuzhiyun }
299*4882a593Smuzhiyun
300*4882a593Smuzhiyun if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
301*4882a593Smuzhiyun dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
302*4882a593Smuzhiyun ret = -EINVAL;
303*4882a593Smuzhiyun goto out;
304*4882a593Smuzhiyun }
305*4882a593Smuzhiyun
306*4882a593Smuzhiyun ufs->pclk_rate = pclk_rate;
307*4882a593Smuzhiyun ufs->pclk_div = div;
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun out:
310*4882a593Smuzhiyun return ret;
311*4882a593Smuzhiyun }
312*4882a593Smuzhiyun
exynos_ufs_set_unipro_pclk_div(struct exynos_ufs * ufs)313*4882a593Smuzhiyun static void exynos_ufs_set_unipro_pclk_div(struct exynos_ufs *ufs)
314*4882a593Smuzhiyun {
315*4882a593Smuzhiyun if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
316*4882a593Smuzhiyun u32 val;
317*4882a593Smuzhiyun
318*4882a593Smuzhiyun val = hci_readl(ufs, HCI_UNIPRO_APB_CLK_CTRL);
319*4882a593Smuzhiyun hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div),
320*4882a593Smuzhiyun HCI_UNIPRO_APB_CLK_CTRL);
321*4882a593Smuzhiyun }
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun
exynos_ufs_set_pwm_clk_div(struct exynos_ufs * ufs)324*4882a593Smuzhiyun static void exynos_ufs_set_pwm_clk_div(struct exynos_ufs *ufs)
325*4882a593Smuzhiyun {
326*4882a593Smuzhiyun struct ufs_hba *hba = ufs->hba;
327*4882a593Smuzhiyun struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun ufshcd_dme_set(hba,
330*4882a593Smuzhiyun UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl);
331*4882a593Smuzhiyun }
332*4882a593Smuzhiyun
exynos_ufs_calc_pwm_clk_div(struct exynos_ufs * ufs)333*4882a593Smuzhiyun static void exynos_ufs_calc_pwm_clk_div(struct exynos_ufs *ufs)
334*4882a593Smuzhiyun {
335*4882a593Smuzhiyun struct ufs_hba *hba = ufs->hba;
336*4882a593Smuzhiyun struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
337*4882a593Smuzhiyun const unsigned int div = 30, mult = 20;
338*4882a593Smuzhiyun const unsigned long pwm_min = 3 * 1000 * 1000;
339*4882a593Smuzhiyun const unsigned long pwm_max = 9 * 1000 * 1000;
340*4882a593Smuzhiyun const int divs[] = {32, 16, 8, 4};
341*4882a593Smuzhiyun unsigned long clk = 0, _clk, clk_period;
342*4882a593Smuzhiyun int i = 0, clk_idx = -1;
343*4882a593Smuzhiyun
344*4882a593Smuzhiyun clk_period = UNIPRO_PCLK_PERIOD(ufs);
345*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(divs); i++) {
346*4882a593Smuzhiyun _clk = NSEC_PER_SEC * mult / (clk_period * divs[i] * div);
347*4882a593Smuzhiyun if (_clk >= pwm_min && _clk <= pwm_max) {
348*4882a593Smuzhiyun if (_clk > clk) {
349*4882a593Smuzhiyun clk_idx = i;
350*4882a593Smuzhiyun clk = _clk;
351*4882a593Smuzhiyun }
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun
355*4882a593Smuzhiyun if (clk_idx == -1) {
356*4882a593Smuzhiyun ufshcd_dme_get(hba, UIC_ARG_MIB(CMN_PWM_CLK_CTRL), &clk_idx);
357*4882a593Smuzhiyun dev_err(hba->dev,
358*4882a593Smuzhiyun "failed to decide pwm clock divider, will not change\n");
359*4882a593Smuzhiyun }
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK;
362*4882a593Smuzhiyun }
363*4882a593Smuzhiyun
exynos_ufs_calc_time_cntr(struct exynos_ufs * ufs,long period)364*4882a593Smuzhiyun long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period)
365*4882a593Smuzhiyun {
366*4882a593Smuzhiyun const int precise = 10;
367*4882a593Smuzhiyun long pclk_rate = ufs->pclk_rate;
368*4882a593Smuzhiyun long clk_period, fraction;
369*4882a593Smuzhiyun
370*4882a593Smuzhiyun clk_period = UNIPRO_PCLK_PERIOD(ufs);
371*4882a593Smuzhiyun fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate;
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun return (period * precise) / ((clk_period * precise) + fraction);
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun
/*
 * Convert the driver-data PHY timing attributes (given in nanoseconds or
 * raw counts) into pclk cycle counts via exynos_ufs_calc_time_cntr() and
 * cache them in ufs->t_cfg for exynos_ufs_config_phy_time_attr().
 */
static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs)
{
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;

	/* TX-side timers */
	t_cfg->tx_linereset_p =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec);
	t_cfg->tx_linereset_n =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec);
	t_cfg->tx_high_z_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec);
	t_cfg->tx_base_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec);
	t_cfg->tx_gran_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec);
	t_cfg->tx_sleep_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt);

	/* RX-side timers */
	t_cfg->rx_linereset =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec);
	t_cfg->rx_hibern8_wait =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec);
	t_cfg->rx_base_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec);
	t_cfg->rx_gran_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec);
	t_cfg->rx_sleep_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt);
	t_cfg->rx_stall_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt);
}
407*4882a593Smuzhiyun
/*
 * Program the pre-computed timing values from ufs->t_cfg (see
 * exynos_ufs_specify_phy_time_attr()) into the per-lane RX and TX PHY
 * attributes.  All writes happen inside override (O/V TM) mode.
 */
static void exynos_ufs_config_phy_time_attr(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
	int i;

	exynos_ufs_set_pwm_clk_div(ufs);

	exynos_ufs_enable_ov_tm(hba);

	/* RX lanes: line reset, base/granularity N values, sleep/stall
	 * counters -- multi-byte values are split across _L/_H registers */
	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_FILLER_ENABLE, i),
				ufs->drv_data->uic_attr->rx_filler_enable);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_LINERESET_VAL, i),
				RX_LINERESET(t_cfg->rx_linereset));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_07_00, i),
				RX_BASE_NVAL_L(t_cfg->rx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_15_08, i),
				RX_BASE_NVAL_H(t_cfg->rx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_07_00, i),
				RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_10_08, i),
				RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_SLEEP_CNT_TIMER, i),
				RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_STALL_CNT_TIMER, i),
				RX_OV_STALL_CNT(t_cfg->rx_stall_cnt));
	}

	/* TX lanes: line reset, high-Z count, base/granularity N values,
	 * sleep counter (with hibern8-enter override enabled) */
	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_LINERESET_P_VAL, i),
				TX_LINERESET_P(t_cfg->tx_linereset_p));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_07_00, i),
				TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_11_08, i),
				TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_07_00, i),
				TX_BASE_NVAL_L(t_cfg->tx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_15_08, i),
				TX_BASE_NVAL_H(t_cfg->tx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_07_00, i),
				TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_10_08, i),
				TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_OV_SLEEP_CNT_TIMER, i),
				TX_OV_H8_ENTER_EN |
				TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_MIN_ACTIVATETIME, i),
				ufs->drv_data->uic_attr->tx_min_activatetime);
	}

	exynos_ufs_disable_ov_tm(hba);
}
461*4882a593Smuzhiyun
/*
 * Program per-lane RX capability attributes from the driver data:
 * HS G1..G3 sync/prepare length caps for every RX lane, then either the
 * legacy granularity attributes (rx_adv_fine_gran_sup_en == 0) or the
 * "advanced fine granularity" variants (== 1).  Any other value of
 * rx_adv_fine_gran_sup_en programs neither set.  All writes happen
 * inside override (O/V TM) mode.
 */
static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	int i;

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G1_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g1_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G2_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g2_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G3_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g3_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G1_PREP_LENGTH_CAP, i),
				attr->rx_hs_g1_prep_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G2_PREP_LENGTH_CAP, i),
				attr->rx_hs_g2_prep_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G3_PREP_LENGTH_CAP, i),
				attr->rx_hs_g3_prep_sync_len_cap);
	}

	if (attr->rx_adv_fine_gran_sup_en == 0) {
		/* legacy granularity: zero the granularity cap, then program
		 * activate time / hibern8 time only if non-zero */
		for_each_ufs_rx_lane(ufs, i) {
			ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, i), 0);

			if (attr->rx_min_actv_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAP,
						i), attr->rx_min_actv_time_cap);

			if (attr->rx_hibern8_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAP, i),
						attr->rx_hibern8_time_cap);
		}
	} else if (attr->rx_adv_fine_gran_sup_en == 1) {
		/* advanced fine granularity: _ADV_ attribute variants,
		 * each programmed only if its value is non-zero */
		for_each_ufs_rx_lane(ufs, i) {
			if (attr->rx_adv_fine_gran_step)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP,
						i), RX_ADV_FINE_GRAN_STEP(
						attr->rx_adv_fine_gran_step));

			if (attr->rx_adv_min_actv_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(
						RX_ADV_MIN_ACTIVATETIME_CAP, i),
						attr->rx_adv_min_actv_time_cap);

			if (attr->rx_adv_hibern8_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_ADV_HIBERN8TIME_CAP,
						i),
						attr->rx_adv_hibern8_time_cap);
		}
	}

	exynos_ufs_disable_ov_tm(hba);
}
530*4882a593Smuzhiyun
/*
 * Establish the UniPro CPort connection to the device: local device id 0,
 * peer device id 1, peer CPort 0, traffic class 0.  The connection state
 * is dropped to CPORT_IDLE first (CPort attributes are only writable
 * while the connection is idle) and set to CPORT_CONNECTED last.
 */
static void exynos_ufs_establish_connt(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	enum {
		DEV_ID		= 0x00,
		PEER_DEV_ID	= 0x01,
		PEER_CPORT_ID	= 0x00,
		TRAFFIC_CLASS	= 0x00,
	};

	/* allow cport attributes to be set */
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_IDLE);

	/* local unipro attributes */
	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), TRUE);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), TRAFFIC_CLASS);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED);
}
553*4882a593Smuzhiyun
/*
 * Configure the UFS protector (SMU): select non-secure SMU mode and open
 * protection region 0 to the whole address range (0x0 - 0xffffffff) for
 * all LUNs (0xff), with encryption disabled.
 *
 * Automatic HCI core clock control must be off while the protector
 * registers are accessed (see exynos_ufs_auto_ctrl_hcc()), so it is
 * saved/disabled on entry and restored on exit.
 */
static void exynos_ufs_config_smu(struct exynos_ufs *ufs)
{
	u32 reg, val;

	exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);

	/* make encryption disabled by default */
	reg = ufsp_readl(ufs, UFSPRSECURITY);
	ufsp_writel(ufs, reg | NSSMU, UFSPRSECURITY);
	ufsp_writel(ufs, 0x0, UFSPSBEGIN0);
	ufsp_writel(ufs, 0xffffffff, UFSPSEND0);
	ufsp_writel(ufs, 0xff, UFSPSLUN0);
	/* 0xf1: vendor control value for region 0 -- meaning of the
	 * individual bits is undocumented here; confirm against TRM */
	ufsp_writel(ufs, 0xf1, UFSPSCTRL0);

	exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
}
570*4882a593Smuzhiyun
/*
 * Program RX_SYNC_MASK_LENGTH for HS gears 1-3.  The per-gear sync
 * pattern duration (in ns) is converted to pclk cycles and bits [15:8]
 * of that count are written to every RX lane under O/V TM mode.  Gears
 * above 3 leave the mask untouched.
 */
static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
					struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx);
	u32 mask, sync_len;
	enum {
		SYNC_LEN_G1 = 80 * 1000, /* 80us */
		SYNC_LEN_G2 = 40 * 1000, /* 40us (stale comment said 44us) */
		SYNC_LEN_G3 = 20 * 1000, /* 20us */
	};
	int i;

	if (g == 1)
		sync_len = SYNC_LEN_G1;
	else if (g == 2)
		sync_len = SYNC_LEN_G2;
	else if (g == 3)
		sync_len = SYNC_LEN_G3;
	else
		return;

	mask = exynos_ufs_calc_time_cntr(ufs, sync_len);
	mask = (mask >> 8) & 0xff;	/* keep bits [15:8] of the count */

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(RX_SYNC_MASK_LENGTH, i), mask);

	exynos_ufs_disable_ov_tm(hba);
}
604*4882a593Smuzhiyun
exynos_ufs_pre_pwr_mode(struct ufs_hba * hba,struct ufs_pa_layer_attr * dev_max_params,struct ufs_pa_layer_attr * dev_req_params)605*4882a593Smuzhiyun static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
606*4882a593Smuzhiyun struct ufs_pa_layer_attr *dev_max_params,
607*4882a593Smuzhiyun struct ufs_pa_layer_attr *dev_req_params)
608*4882a593Smuzhiyun {
609*4882a593Smuzhiyun struct exynos_ufs *ufs = ufshcd_get_variant(hba);
610*4882a593Smuzhiyun struct phy *generic_phy = ufs->phy;
611*4882a593Smuzhiyun struct ufs_dev_params ufs_exynos_cap;
612*4882a593Smuzhiyun int ret;
613*4882a593Smuzhiyun
614*4882a593Smuzhiyun if (!dev_req_params) {
615*4882a593Smuzhiyun pr_err("%s: incoming dev_req_params is NULL\n", __func__);
616*4882a593Smuzhiyun ret = -EINVAL;
617*4882a593Smuzhiyun goto out;
618*4882a593Smuzhiyun }
619*4882a593Smuzhiyun
620*4882a593Smuzhiyun
621*4882a593Smuzhiyun ufs_exynos_cap.tx_lanes = UFS_EXYNOS_LIMIT_NUM_LANES_TX;
622*4882a593Smuzhiyun ufs_exynos_cap.rx_lanes = UFS_EXYNOS_LIMIT_NUM_LANES_RX;
623*4882a593Smuzhiyun ufs_exynos_cap.hs_rx_gear = UFS_EXYNOS_LIMIT_HSGEAR_RX;
624*4882a593Smuzhiyun ufs_exynos_cap.hs_tx_gear = UFS_EXYNOS_LIMIT_HSGEAR_TX;
625*4882a593Smuzhiyun ufs_exynos_cap.pwm_rx_gear = UFS_EXYNOS_LIMIT_PWMGEAR_RX;
626*4882a593Smuzhiyun ufs_exynos_cap.pwm_tx_gear = UFS_EXYNOS_LIMIT_PWMGEAR_TX;
627*4882a593Smuzhiyun ufs_exynos_cap.rx_pwr_pwm = UFS_EXYNOS_LIMIT_RX_PWR_PWM;
628*4882a593Smuzhiyun ufs_exynos_cap.tx_pwr_pwm = UFS_EXYNOS_LIMIT_TX_PWR_PWM;
629*4882a593Smuzhiyun ufs_exynos_cap.rx_pwr_hs = UFS_EXYNOS_LIMIT_RX_PWR_HS;
630*4882a593Smuzhiyun ufs_exynos_cap.tx_pwr_hs = UFS_EXYNOS_LIMIT_TX_PWR_HS;
631*4882a593Smuzhiyun ufs_exynos_cap.hs_rate = UFS_EXYNOS_LIMIT_HS_RATE;
632*4882a593Smuzhiyun ufs_exynos_cap.desired_working_mode =
633*4882a593Smuzhiyun UFS_EXYNOS_LIMIT_DESIRED_MODE;
634*4882a593Smuzhiyun
635*4882a593Smuzhiyun ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap,
636*4882a593Smuzhiyun dev_max_params, dev_req_params);
637*4882a593Smuzhiyun if (ret) {
638*4882a593Smuzhiyun pr_err("%s: failed to determine capabilities\n", __func__);
639*4882a593Smuzhiyun goto out;
640*4882a593Smuzhiyun }
641*4882a593Smuzhiyun
642*4882a593Smuzhiyun if (ufs->drv_data->pre_pwr_change)
643*4882a593Smuzhiyun ufs->drv_data->pre_pwr_change(ufs, dev_req_params);
644*4882a593Smuzhiyun
645*4882a593Smuzhiyun if (ufshcd_is_hs_mode(dev_req_params)) {
646*4882a593Smuzhiyun exynos_ufs_config_sync_pattern_mask(ufs, dev_req_params);
647*4882a593Smuzhiyun
648*4882a593Smuzhiyun switch (dev_req_params->hs_rate) {
649*4882a593Smuzhiyun case PA_HS_MODE_A:
650*4882a593Smuzhiyun case PA_HS_MODE_B:
651*4882a593Smuzhiyun phy_calibrate(generic_phy);
652*4882a593Smuzhiyun break;
653*4882a593Smuzhiyun }
654*4882a593Smuzhiyun }
655*4882a593Smuzhiyun
656*4882a593Smuzhiyun /* setting for three timeout values for traffic class #0 */
657*4882a593Smuzhiyun ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
658*4882a593Smuzhiyun ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
659*4882a593Smuzhiyun ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);
660*4882a593Smuzhiyun
661*4882a593Smuzhiyun return 0;
662*4882a593Smuzhiyun out:
663*4882a593Smuzhiyun return ret;
664*4882a593Smuzhiyun }
665*4882a593Smuzhiyun
666*4882a593Smuzhiyun #define PWR_MODE_STR_LEN 64
exynos_ufs_post_pwr_mode(struct ufs_hba * hba,struct ufs_pa_layer_attr * pwr_max,struct ufs_pa_layer_attr * pwr_req)667*4882a593Smuzhiyun static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
668*4882a593Smuzhiyun struct ufs_pa_layer_attr *pwr_max,
669*4882a593Smuzhiyun struct ufs_pa_layer_attr *pwr_req)
670*4882a593Smuzhiyun {
671*4882a593Smuzhiyun struct exynos_ufs *ufs = ufshcd_get_variant(hba);
672*4882a593Smuzhiyun struct phy *generic_phy = ufs->phy;
673*4882a593Smuzhiyun int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx);
674*4882a593Smuzhiyun int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx);
675*4882a593Smuzhiyun char pwr_str[PWR_MODE_STR_LEN] = "";
676*4882a593Smuzhiyun
677*4882a593Smuzhiyun /* let default be PWM Gear 1, Lane 1 */
678*4882a593Smuzhiyun if (!gear)
679*4882a593Smuzhiyun gear = 1;
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun if (!lanes)
682*4882a593Smuzhiyun lanes = 1;
683*4882a593Smuzhiyun
684*4882a593Smuzhiyun if (ufs->drv_data->post_pwr_change)
685*4882a593Smuzhiyun ufs->drv_data->post_pwr_change(ufs, pwr_req);
686*4882a593Smuzhiyun
687*4882a593Smuzhiyun if ((ufshcd_is_hs_mode(pwr_req))) {
688*4882a593Smuzhiyun switch (pwr_req->hs_rate) {
689*4882a593Smuzhiyun case PA_HS_MODE_A:
690*4882a593Smuzhiyun case PA_HS_MODE_B:
691*4882a593Smuzhiyun phy_calibrate(generic_phy);
692*4882a593Smuzhiyun break;
693*4882a593Smuzhiyun }
694*4882a593Smuzhiyun
695*4882a593Smuzhiyun snprintf(pwr_str, PWR_MODE_STR_LEN, "%s series_%s G_%d L_%d",
696*4882a593Smuzhiyun "FAST", pwr_req->hs_rate == PA_HS_MODE_A ? "A" : "B",
697*4882a593Smuzhiyun gear, lanes);
698*4882a593Smuzhiyun } else {
699*4882a593Smuzhiyun snprintf(pwr_str, PWR_MODE_STR_LEN, "%s G_%d L_%d",
700*4882a593Smuzhiyun "SLOW", gear, lanes);
701*4882a593Smuzhiyun }
702*4882a593Smuzhiyun
703*4882a593Smuzhiyun dev_info(hba->dev, "Power mode changed to : %s\n", pwr_str);
704*4882a593Smuzhiyun
705*4882a593Smuzhiyun return 0;
706*4882a593Smuzhiyun }
707*4882a593Smuzhiyun
/*
 * exynos_ufs_specify_nexus_t_xfer_req - mark/unmark a transfer request slot
 * as a nexus type request in HCI_UTRL_NEXUS_TYPE.
 * @tag: transfer request slot number (one bit per slot).
 * @op:  true to set the slot's bit, false to clear it.
 */
static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba,
						int tag, bool op)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	u32 nexus;

	nexus = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);
	if (op)
		nexus |= BIT(tag);
	else
		nexus &= ~BIT(tag);
	hci_writel(ufs, nexus, HCI_UTRL_NEXUS_TYPE);
}
721*4882a593Smuzhiyun
/*
 * exynos_ufs_specify_nexus_t_tm_req - update HCI_UTMRL_NEXUS_TYPE for a
 * task management request slot, depending on the TM function.
 *
 * ABORT_TASK / QUERY_TASK set the slot's bit; the task-set / reset
 * functions clear it. Unrecognized functions leave the register unchanged.
 */
static void exynos_ufs_specify_nexus_t_tm_req(struct ufs_hba *hba,
						int tag, u8 func)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	u32 nexus = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE);
	bool set_bit;

	switch (func) {
	case UFS_ABORT_TASK:
	case UFS_QUERY_TASK:
		set_bit = true;
		break;
	case UFS_ABORT_TASK_SET:
	case UFS_CLEAR_TASK_SET:
	case UFS_LOGICAL_RESET:
	case UFS_QUERY_TASK_SET:
		set_bit = false;
		break;
	default:
		return;	/* unknown function: don't touch the register */
	}

	if (set_bit)
		nexus |= BIT(tag);
	else
		nexus &= ~BIT(tag);
	hci_writel(ufs, nexus, HCI_UTMRL_NEXUS_TYPE);
}
743*4882a593Smuzhiyun
exynos_ufs_phy_init(struct exynos_ufs * ufs)744*4882a593Smuzhiyun static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
745*4882a593Smuzhiyun {
746*4882a593Smuzhiyun struct ufs_hba *hba = ufs->hba;
747*4882a593Smuzhiyun struct phy *generic_phy = ufs->phy;
748*4882a593Smuzhiyun int ret = 0;
749*4882a593Smuzhiyun
750*4882a593Smuzhiyun if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) {
751*4882a593Smuzhiyun ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
752*4882a593Smuzhiyun &ufs->avail_ln_rx);
753*4882a593Smuzhiyun ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
754*4882a593Smuzhiyun &ufs->avail_ln_tx);
755*4882a593Smuzhiyun WARN(ufs->avail_ln_rx != ufs->avail_ln_tx,
756*4882a593Smuzhiyun "available data lane is not equal(rx:%d, tx:%d)\n",
757*4882a593Smuzhiyun ufs->avail_ln_rx, ufs->avail_ln_tx);
758*4882a593Smuzhiyun }
759*4882a593Smuzhiyun
760*4882a593Smuzhiyun phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
761*4882a593Smuzhiyun ret = phy_init(generic_phy);
762*4882a593Smuzhiyun if (ret) {
763*4882a593Smuzhiyun dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
764*4882a593Smuzhiyun __func__, ret);
765*4882a593Smuzhiyun goto out_exit_phy;
766*4882a593Smuzhiyun }
767*4882a593Smuzhiyun
768*4882a593Smuzhiyun return 0;
769*4882a593Smuzhiyun
770*4882a593Smuzhiyun out_exit_phy:
771*4882a593Smuzhiyun phy_exit(generic_phy);
772*4882a593Smuzhiyun
773*4882a593Smuzhiyun return ret;
774*4882a593Smuzhiyun }
775*4882a593Smuzhiyun
exynos_ufs_config_unipro(struct exynos_ufs * ufs)776*4882a593Smuzhiyun static void exynos_ufs_config_unipro(struct exynos_ufs *ufs)
777*4882a593Smuzhiyun {
778*4882a593Smuzhiyun struct ufs_hba *hba = ufs->hba;
779*4882a593Smuzhiyun
780*4882a593Smuzhiyun ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
781*4882a593Smuzhiyun DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
782*4882a593Smuzhiyun ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS),
783*4882a593Smuzhiyun ufs->drv_data->uic_attr->tx_trailingclks);
784*4882a593Smuzhiyun ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE),
785*4882a593Smuzhiyun ufs->drv_data->uic_attr->pa_dbg_option_suite);
786*4882a593Smuzhiyun }
787*4882a593Smuzhiyun
/*
 * exynos_ufs_config_intr - enable data-fatal-error reporting (@errs plus
 * DFES_ERR_EN) for the given UniPro layer's HCI error-enable register.
 * Unknown layer indexes are ignored.
 */
static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
{
	u32 reg;

	switch (index) {
	case UNIPRO_L1_5:
		reg = HCI_ERR_EN_PA_LAYER;
		break;
	case UNIPRO_L2:
		reg = HCI_ERR_EN_DL_LAYER;
		break;
	case UNIPRO_L3:
		reg = HCI_ERR_EN_N_LAYER;
		break;
	case UNIPRO_L4:
		reg = HCI_ERR_EN_T_LAYER;
		break;
	case UNIPRO_DME:
		reg = HCI_ERR_EN_DME_LAYER;
		break;
	default:
		return;
	}

	hci_writel(ufs, DFES_ERR_EN | errs, reg);
}
808*4882a593Smuzhiyun
/*
 * exynos_ufs_pre_link - host setup performed before UniPro link startup.
 *
 * Ordering matters: HCI error interrupts and the UniPro pclk divider are
 * configured first, then UniPro timing attributes, then the M-PHY is
 * initialized and its timing/capability attributes programmed, and finally
 * the SoC-specific pre_link hook runs. Always returns 0.
 *
 * NOTE(review): exynos_ufs_phy_init() returns an error code that is
 * ignored here — presumably link startup is allowed to proceed and fail
 * later; confirm whether the error should be propagated.
 */
static int exynos_ufs_pre_link(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	/* hci */
	exynos_ufs_config_intr(ufs, DFES_DEF_L2_ERRS, UNIPRO_L2);
	exynos_ufs_config_intr(ufs, DFES_DEF_L3_ERRS, UNIPRO_L3);
	exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
	exynos_ufs_set_unipro_pclk_div(ufs);

	/* unipro */
	exynos_ufs_config_unipro(ufs);

	/* m-phy */
	exynos_ufs_phy_init(ufs);
	exynos_ufs_config_phy_time_attr(ufs);
	exynos_ufs_config_phy_cap_attr(ufs);

	if (ufs->drv_data->pre_link)
		ufs->drv_data->pre_link(ufs);

	return 0;
}
832*4882a593Smuzhiyun
exynos_ufs_fit_aggr_timeout(struct exynos_ufs * ufs)833*4882a593Smuzhiyun static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs)
834*4882a593Smuzhiyun {
835*4882a593Smuzhiyun u32 val;
836*4882a593Smuzhiyun
837*4882a593Smuzhiyun val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL);
838*4882a593Smuzhiyun hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL);
839*4882a593Smuzhiyun }
840*4882a593Smuzhiyun
/*
 * exynos_ufs_post_link - host setup performed after UniPro link startup.
 *
 * Establishes the device connection, programs HCI transfer/TM registers,
 * applies PA_GRANULARITY / PA_TACTIVATE / PA_HIBERN8TIME from the driver
 * data, handles the software hibern8-timer option, calibrates the PHY,
 * then runs the SoC-specific post_link hook. Statement order follows the
 * required hardware bring-up sequence. Always returns 0.
 */
static int exynos_ufs_post_link(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	exynos_ufs_establish_connt(ufs);
	exynos_ufs_fit_aggr_timeout(ufs);

	/* one nexus-type bit per transfer / task-management slot */
	hci_writel(ufs, 0xa, HCI_DATA_REORDER);
	hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE);
	hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE);
	hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
	hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
	hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);

	if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), TRUE);

	/* PA_GRANULARITY is writable only while debug mode is enabled */
	if (attr->pa_granularity) {
		exynos_ufs_enable_dbg_mode(hba);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_GRANULARITY),
				attr->pa_granularity);
		exynos_ufs_disable_dbg_mode(hba);

		if (attr->pa_tactivate)
			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					attr->pa_tactivate);
		if (attr->pa_hibern8time &&
		    !(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER))
			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
					attr->pa_hibern8time);
	}

	if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
		/* cache current values so exynos_ufs_pre_hibern8 can use them */
		if (!attr->pa_granularity)
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
					&attr->pa_granularity);
		if (!attr->pa_hibern8time)
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
					&attr->pa_hibern8time);
		/*
		 * not wait for HIBERN8 time to exit hibernation
		 */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 0);

		if (attr->pa_granularity < 1 || attr->pa_granularity > 6) {
			/* Valid range for granularity: 1 ~ 6 */
			dev_warn(hba->dev,
				"%s: pa_granularity %d is invalid, assuming backwards compatibility\n",
				__func__,
				attr->pa_granularity);
			attr->pa_granularity = 6;
		}
	}

	phy_calibrate(generic_phy);

	if (ufs->drv_data->post_link)
		ufs->drv_data->post_link(ufs);

	return 0;
}
905*4882a593Smuzhiyun
exynos_ufs_parse_dt(struct device * dev,struct exynos_ufs * ufs)906*4882a593Smuzhiyun static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
907*4882a593Smuzhiyun {
908*4882a593Smuzhiyun struct device_node *np = dev->of_node;
909*4882a593Smuzhiyun struct exynos_ufs_drv_data *drv_data = &exynos_ufs_drvs;
910*4882a593Smuzhiyun struct exynos_ufs_uic_attr *attr;
911*4882a593Smuzhiyun int ret = 0;
912*4882a593Smuzhiyun
913*4882a593Smuzhiyun while (drv_data->compatible) {
914*4882a593Smuzhiyun if (of_device_is_compatible(np, drv_data->compatible)) {
915*4882a593Smuzhiyun ufs->drv_data = drv_data;
916*4882a593Smuzhiyun break;
917*4882a593Smuzhiyun }
918*4882a593Smuzhiyun drv_data++;
919*4882a593Smuzhiyun }
920*4882a593Smuzhiyun
921*4882a593Smuzhiyun if (ufs->drv_data && ufs->drv_data->uic_attr) {
922*4882a593Smuzhiyun attr = ufs->drv_data->uic_attr;
923*4882a593Smuzhiyun } else {
924*4882a593Smuzhiyun dev_err(dev, "failed to get uic attributes\n");
925*4882a593Smuzhiyun ret = -EINVAL;
926*4882a593Smuzhiyun goto out;
927*4882a593Smuzhiyun }
928*4882a593Smuzhiyun
929*4882a593Smuzhiyun ufs->pclk_avail_min = PCLK_AVAIL_MIN;
930*4882a593Smuzhiyun ufs->pclk_avail_max = PCLK_AVAIL_MAX;
931*4882a593Smuzhiyun
932*4882a593Smuzhiyun attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN;
933*4882a593Smuzhiyun attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL;
934*4882a593Smuzhiyun attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP;
935*4882a593Smuzhiyun attr->pa_granularity = PA_GRANULARITY_VAL;
936*4882a593Smuzhiyun attr->pa_tactivate = PA_TACTIVATE_VAL;
937*4882a593Smuzhiyun attr->pa_hibern8time = PA_HIBERN8TIME_VAL;
938*4882a593Smuzhiyun
939*4882a593Smuzhiyun out:
940*4882a593Smuzhiyun return ret;
941*4882a593Smuzhiyun }
942*4882a593Smuzhiyun
exynos_ufs_init(struct ufs_hba * hba)943*4882a593Smuzhiyun static int exynos_ufs_init(struct ufs_hba *hba)
944*4882a593Smuzhiyun {
945*4882a593Smuzhiyun struct device *dev = hba->dev;
946*4882a593Smuzhiyun struct platform_device *pdev = to_platform_device(dev);
947*4882a593Smuzhiyun struct exynos_ufs *ufs;
948*4882a593Smuzhiyun int ret;
949*4882a593Smuzhiyun
950*4882a593Smuzhiyun ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
951*4882a593Smuzhiyun if (!ufs)
952*4882a593Smuzhiyun return -ENOMEM;
953*4882a593Smuzhiyun
954*4882a593Smuzhiyun /* exynos-specific hci */
955*4882a593Smuzhiyun ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
956*4882a593Smuzhiyun if (IS_ERR(ufs->reg_hci)) {
957*4882a593Smuzhiyun dev_err(dev, "cannot ioremap for hci vendor register\n");
958*4882a593Smuzhiyun return PTR_ERR(ufs->reg_hci);
959*4882a593Smuzhiyun }
960*4882a593Smuzhiyun
961*4882a593Smuzhiyun /* unipro */
962*4882a593Smuzhiyun ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro");
963*4882a593Smuzhiyun if (IS_ERR(ufs->reg_unipro)) {
964*4882a593Smuzhiyun dev_err(dev, "cannot ioremap for unipro register\n");
965*4882a593Smuzhiyun return PTR_ERR(ufs->reg_unipro);
966*4882a593Smuzhiyun }
967*4882a593Smuzhiyun
968*4882a593Smuzhiyun /* ufs protector */
969*4882a593Smuzhiyun ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp");
970*4882a593Smuzhiyun if (IS_ERR(ufs->reg_ufsp)) {
971*4882a593Smuzhiyun dev_err(dev, "cannot ioremap for ufs protector register\n");
972*4882a593Smuzhiyun return PTR_ERR(ufs->reg_ufsp);
973*4882a593Smuzhiyun }
974*4882a593Smuzhiyun
975*4882a593Smuzhiyun ret = exynos_ufs_parse_dt(dev, ufs);
976*4882a593Smuzhiyun if (ret) {
977*4882a593Smuzhiyun dev_err(dev, "failed to get dt info.\n");
978*4882a593Smuzhiyun goto out;
979*4882a593Smuzhiyun }
980*4882a593Smuzhiyun
981*4882a593Smuzhiyun ufs->phy = devm_phy_get(dev, "ufs-phy");
982*4882a593Smuzhiyun if (IS_ERR(ufs->phy)) {
983*4882a593Smuzhiyun ret = PTR_ERR(ufs->phy);
984*4882a593Smuzhiyun dev_err(dev, "failed to get ufs-phy\n");
985*4882a593Smuzhiyun goto out;
986*4882a593Smuzhiyun }
987*4882a593Smuzhiyun
988*4882a593Smuzhiyun ret = phy_power_on(ufs->phy);
989*4882a593Smuzhiyun if (ret)
990*4882a593Smuzhiyun goto phy_off;
991*4882a593Smuzhiyun
992*4882a593Smuzhiyun ufs->hba = hba;
993*4882a593Smuzhiyun ufs->opts = ufs->drv_data->opts;
994*4882a593Smuzhiyun ufs->rx_sel_idx = PA_MAXDATALANES;
995*4882a593Smuzhiyun if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
996*4882a593Smuzhiyun ufs->rx_sel_idx = 0;
997*4882a593Smuzhiyun hba->priv = (void *)ufs;
998*4882a593Smuzhiyun hba->quirks = ufs->drv_data->quirks;
999*4882a593Smuzhiyun if (ufs->drv_data->drv_init) {
1000*4882a593Smuzhiyun ret = ufs->drv_data->drv_init(dev, ufs);
1001*4882a593Smuzhiyun if (ret) {
1002*4882a593Smuzhiyun dev_err(dev, "failed to init drv-data\n");
1003*4882a593Smuzhiyun goto out;
1004*4882a593Smuzhiyun }
1005*4882a593Smuzhiyun }
1006*4882a593Smuzhiyun
1007*4882a593Smuzhiyun ret = exynos_ufs_get_clk_info(ufs);
1008*4882a593Smuzhiyun if (ret)
1009*4882a593Smuzhiyun goto out;
1010*4882a593Smuzhiyun exynos_ufs_specify_phy_time_attr(ufs);
1011*4882a593Smuzhiyun exynos_ufs_config_smu(ufs);
1012*4882a593Smuzhiyun return 0;
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun phy_off:
1015*4882a593Smuzhiyun phy_power_off(ufs->phy);
1016*4882a593Smuzhiyun out:
1017*4882a593Smuzhiyun hba->priv = NULL;
1018*4882a593Smuzhiyun return ret;
1019*4882a593Smuzhiyun }
1020*4882a593Smuzhiyun
exynos_ufs_host_reset(struct ufs_hba * hba)1021*4882a593Smuzhiyun static int exynos_ufs_host_reset(struct ufs_hba *hba)
1022*4882a593Smuzhiyun {
1023*4882a593Smuzhiyun struct exynos_ufs *ufs = ufshcd_get_variant(hba);
1024*4882a593Smuzhiyun unsigned long timeout = jiffies + msecs_to_jiffies(1);
1025*4882a593Smuzhiyun u32 val;
1026*4882a593Smuzhiyun int ret = 0;
1027*4882a593Smuzhiyun
1028*4882a593Smuzhiyun exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);
1029*4882a593Smuzhiyun
1030*4882a593Smuzhiyun hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST);
1031*4882a593Smuzhiyun
1032*4882a593Smuzhiyun do {
1033*4882a593Smuzhiyun if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK))
1034*4882a593Smuzhiyun goto out;
1035*4882a593Smuzhiyun } while (time_before(jiffies, timeout));
1036*4882a593Smuzhiyun
1037*4882a593Smuzhiyun dev_err(hba->dev, "timeout host sw-reset\n");
1038*4882a593Smuzhiyun ret = -ETIMEDOUT;
1039*4882a593Smuzhiyun
1040*4882a593Smuzhiyun out:
1041*4882a593Smuzhiyun exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
1042*4882a593Smuzhiyun return ret;
1043*4882a593Smuzhiyun }
1044*4882a593Smuzhiyun
exynos_ufs_dev_hw_reset(struct ufs_hba * hba)1045*4882a593Smuzhiyun static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba)
1046*4882a593Smuzhiyun {
1047*4882a593Smuzhiyun struct exynos_ufs *ufs = ufshcd_get_variant(hba);
1048*4882a593Smuzhiyun
1049*4882a593Smuzhiyun hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
1050*4882a593Smuzhiyun udelay(5);
1051*4882a593Smuzhiyun hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
1052*4882a593Smuzhiyun }
1053*4882a593Smuzhiyun
/*
 * exynos_ufs_pre_hibern8 - preparation before a hibern8 state change.
 * @enter: non-zero when entering hibern8, zero when exiting.
 *
 * Only the exit path does work: it re-enables clocks (and disables
 * auto-HCC on affected SoCs), and when the software hibern8 timer option
 * is set, busy-waits (in <= 1ms sleep slices) until at least the
 * PA_HIBERN8TIME period has elapsed since hibern8 entry.
 */
static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	if (!enter) {
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_disable_auto_ctrl_hcc(ufs);
		exynos_ufs_ungate_clks(ufs);

		if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
			/*
			 * us-per-granularity-unit lookup; indexed by
			 * pa_granularity - 1. Relies on pa_granularity being
			 * clamped to 1..6 in exynos_ufs_post_link() —
			 * NOTE(review): confirm no path reaches here before
			 * that clamp runs.
			 */
			const unsigned int granularity_tbl[] = {
				1, 4, 8, 16, 32, 100
			};
			int h8_time = attr->pa_hibern8time *
				granularity_tbl[attr->pa_granularity - 1];
			unsigned long us;
			s64 delta;

			/* sleep until h8_time us have passed since entry */
			do {
				delta = h8_time - ktime_us_delta(ktime_get(),
							ufs->entry_hibern8_t);
				if (delta <= 0)
					break;

				us = min_t(s64, delta, USEC_PER_MSEC);
				if (us >= 10)
					usleep_range(us, us + 10);
			} while (1);
		}
	}
}
1086*4882a593Smuzhiyun
/*
 * exynos_ufs_post_hibern8 - follow-up after a hibern8 state change.
 * @enter: non-zero when hibern8 was entered, zero when it was exited.
 *
 * On entry: record the entry timestamp, gate clocks, and re-enable
 * auto-HCC on affected SoCs. On exit: verify PA_PWRMODE still matches the
 * requested mode (reconfiguring the power mode if not) and re-establish
 * the connection unless the SoC skips connection establishment.
 */
static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	u32 cur_mode = 0;
	u32 pwrmode;

	if (enter) {
		ufs->entry_hibern8_t = ktime_get();
		exynos_ufs_gate_clks(ufs);
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_enable_auto_ctrl_hcc(ufs);
		return;
	}

	pwrmode = ufshcd_is_hs_mode(&ufs->dev_req_params) ?
			FAST_MODE : SLOW_MODE;

	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode);
	if (cur_mode != (pwrmode << 4 | pwrmode)) {
		dev_warn(hba->dev, "%s: power mode change\n", __func__);
		hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf;
		hba->pwr_info.pwr_tx = cur_mode & 0xf;
		ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
	}

	if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB))
		exynos_ufs_establish_connt(ufs);
}
1117*4882a593Smuzhiyun
exynos_ufs_hce_enable_notify(struct ufs_hba * hba,enum ufs_notify_change_status status)1118*4882a593Smuzhiyun static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
1119*4882a593Smuzhiyun enum ufs_notify_change_status status)
1120*4882a593Smuzhiyun {
1121*4882a593Smuzhiyun struct exynos_ufs *ufs = ufshcd_get_variant(hba);
1122*4882a593Smuzhiyun int ret = 0;
1123*4882a593Smuzhiyun
1124*4882a593Smuzhiyun switch (status) {
1125*4882a593Smuzhiyun case PRE_CHANGE:
1126*4882a593Smuzhiyun ret = exynos_ufs_host_reset(hba);
1127*4882a593Smuzhiyun if (ret)
1128*4882a593Smuzhiyun return ret;
1129*4882a593Smuzhiyun exynos_ufs_dev_hw_reset(hba);
1130*4882a593Smuzhiyun break;
1131*4882a593Smuzhiyun case POST_CHANGE:
1132*4882a593Smuzhiyun exynos_ufs_calc_pwm_clk_div(ufs);
1133*4882a593Smuzhiyun if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL))
1134*4882a593Smuzhiyun exynos_ufs_enable_auto_ctrl_hcc(ufs);
1135*4882a593Smuzhiyun break;
1136*4882a593Smuzhiyun }
1137*4882a593Smuzhiyun
1138*4882a593Smuzhiyun return ret;
1139*4882a593Smuzhiyun }
1140*4882a593Smuzhiyun
exynos_ufs_link_startup_notify(struct ufs_hba * hba,enum ufs_notify_change_status status)1141*4882a593Smuzhiyun static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
1142*4882a593Smuzhiyun enum ufs_notify_change_status status)
1143*4882a593Smuzhiyun {
1144*4882a593Smuzhiyun int ret = 0;
1145*4882a593Smuzhiyun
1146*4882a593Smuzhiyun switch (status) {
1147*4882a593Smuzhiyun case PRE_CHANGE:
1148*4882a593Smuzhiyun ret = exynos_ufs_pre_link(hba);
1149*4882a593Smuzhiyun break;
1150*4882a593Smuzhiyun case POST_CHANGE:
1151*4882a593Smuzhiyun ret = exynos_ufs_post_link(hba);
1152*4882a593Smuzhiyun break;
1153*4882a593Smuzhiyun }
1154*4882a593Smuzhiyun
1155*4882a593Smuzhiyun return ret;
1156*4882a593Smuzhiyun }
1157*4882a593Smuzhiyun
exynos_ufs_pwr_change_notify(struct ufs_hba * hba,enum ufs_notify_change_status status,struct ufs_pa_layer_attr * dev_max_params,struct ufs_pa_layer_attr * dev_req_params)1158*4882a593Smuzhiyun static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
1159*4882a593Smuzhiyun enum ufs_notify_change_status status,
1160*4882a593Smuzhiyun struct ufs_pa_layer_attr *dev_max_params,
1161*4882a593Smuzhiyun struct ufs_pa_layer_attr *dev_req_params)
1162*4882a593Smuzhiyun {
1163*4882a593Smuzhiyun int ret = 0;
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun switch (status) {
1166*4882a593Smuzhiyun case PRE_CHANGE:
1167*4882a593Smuzhiyun ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params,
1168*4882a593Smuzhiyun dev_req_params);
1169*4882a593Smuzhiyun break;
1170*4882a593Smuzhiyun case POST_CHANGE:
1171*4882a593Smuzhiyun ret = exynos_ufs_post_pwr_mode(hba, NULL, dev_req_params);
1172*4882a593Smuzhiyun break;
1173*4882a593Smuzhiyun }
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun return ret;
1176*4882a593Smuzhiyun }
1177*4882a593Smuzhiyun
exynos_ufs_hibern8_notify(struct ufs_hba * hba,enum uic_cmd_dme enter,enum ufs_notify_change_status notify)1178*4882a593Smuzhiyun static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
1179*4882a593Smuzhiyun enum uic_cmd_dme enter,
1180*4882a593Smuzhiyun enum ufs_notify_change_status notify)
1181*4882a593Smuzhiyun {
1182*4882a593Smuzhiyun switch ((u8)notify) {
1183*4882a593Smuzhiyun case PRE_CHANGE:
1184*4882a593Smuzhiyun exynos_ufs_pre_hibern8(hba, enter);
1185*4882a593Smuzhiyun break;
1186*4882a593Smuzhiyun case POST_CHANGE:
1187*4882a593Smuzhiyun exynos_ufs_post_hibern8(hba, enter);
1188*4882a593Smuzhiyun break;
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun }
1191*4882a593Smuzhiyun
exynos_ufs_suspend(struct ufs_hba * hba,enum ufs_pm_op pm_op)1192*4882a593Smuzhiyun static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun struct exynos_ufs *ufs = ufshcd_get_variant(hba);
1195*4882a593Smuzhiyun
1196*4882a593Smuzhiyun if (!ufshcd_is_link_active(hba))
1197*4882a593Smuzhiyun phy_power_off(ufs->phy);
1198*4882a593Smuzhiyun
1199*4882a593Smuzhiyun return 0;
1200*4882a593Smuzhiyun }
1201*4882a593Smuzhiyun
exynos_ufs_resume(struct ufs_hba * hba,enum ufs_pm_op pm_op)1202*4882a593Smuzhiyun static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1203*4882a593Smuzhiyun {
1204*4882a593Smuzhiyun struct exynos_ufs *ufs = ufshcd_get_variant(hba);
1205*4882a593Smuzhiyun
1206*4882a593Smuzhiyun if (!ufshcd_is_link_active(hba))
1207*4882a593Smuzhiyun phy_power_on(ufs->phy);
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun exynos_ufs_config_smu(ufs);
1210*4882a593Smuzhiyun
1211*4882a593Smuzhiyun return 0;
1212*4882a593Smuzhiyun }
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
1215*4882a593Smuzhiyun .name = "exynos_ufs",
1216*4882a593Smuzhiyun .init = exynos_ufs_init,
1217*4882a593Smuzhiyun .hce_enable_notify = exynos_ufs_hce_enable_notify,
1218*4882a593Smuzhiyun .link_startup_notify = exynos_ufs_link_startup_notify,
1219*4882a593Smuzhiyun .pwr_change_notify = exynos_ufs_pwr_change_notify,
1220*4882a593Smuzhiyun .setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req,
1221*4882a593Smuzhiyun .setup_task_mgmt = exynos_ufs_specify_nexus_t_tm_req,
1222*4882a593Smuzhiyun .hibern8_notify = exynos_ufs_hibern8_notify,
1223*4882a593Smuzhiyun .suspend = exynos_ufs_suspend,
1224*4882a593Smuzhiyun .resume = exynos_ufs_resume,
1225*4882a593Smuzhiyun };
1226*4882a593Smuzhiyun
exynos_ufs_probe(struct platform_device * pdev)1227*4882a593Smuzhiyun static int exynos_ufs_probe(struct platform_device *pdev)
1228*4882a593Smuzhiyun {
1229*4882a593Smuzhiyun int err;
1230*4882a593Smuzhiyun struct device *dev = &pdev->dev;
1231*4882a593Smuzhiyun
1232*4882a593Smuzhiyun err = ufshcd_pltfrm_init(pdev, &ufs_hba_exynos_ops);
1233*4882a593Smuzhiyun if (err)
1234*4882a593Smuzhiyun dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
1235*4882a593Smuzhiyun
1236*4882a593Smuzhiyun return err;
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun
exynos_ufs_remove(struct platform_device * pdev)1239*4882a593Smuzhiyun static int exynos_ufs_remove(struct platform_device *pdev)
1240*4882a593Smuzhiyun {
1241*4882a593Smuzhiyun struct ufs_hba *hba = platform_get_drvdata(pdev);
1242*4882a593Smuzhiyun
1243*4882a593Smuzhiyun pm_runtime_get_sync(&(pdev)->dev);
1244*4882a593Smuzhiyun ufshcd_remove(hba);
1245*4882a593Smuzhiyun return 0;
1246*4882a593Smuzhiyun }
1247*4882a593Smuzhiyun
1248*4882a593Smuzhiyun struct exynos_ufs_drv_data exynos_ufs_drvs = {
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun .compatible = "samsung,exynos7-ufs",
1251*4882a593Smuzhiyun .uic_attr = &exynos7_uic_attr,
1252*4882a593Smuzhiyun .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
1253*4882a593Smuzhiyun UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
1254*4882a593Smuzhiyun UFSHCI_QUIRK_BROKEN_HCE |
1255*4882a593Smuzhiyun UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
1256*4882a593Smuzhiyun UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
1257*4882a593Smuzhiyun UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
1258*4882a593Smuzhiyun UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
1259*4882a593Smuzhiyun UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
1260*4882a593Smuzhiyun .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
1261*4882a593Smuzhiyun EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
1262*4882a593Smuzhiyun EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
1263*4882a593Smuzhiyun EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB |
1264*4882a593Smuzhiyun EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER |
1265*4882a593Smuzhiyun UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
1266*4882a593Smuzhiyun .drv_init = exynos7_ufs_drv_init,
1267*4882a593Smuzhiyun .pre_link = exynos7_ufs_pre_link,
1268*4882a593Smuzhiyun .post_link = exynos7_ufs_post_link,
1269*4882a593Smuzhiyun .pre_pwr_change = exynos7_ufs_pre_pwr_change,
1270*4882a593Smuzhiyun .post_pwr_change = exynos7_ufs_post_pwr_change,
1271*4882a593Smuzhiyun };
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun static const struct of_device_id exynos_ufs_of_match[] = {
1274*4882a593Smuzhiyun { .compatible = "samsung,exynos7-ufs",
1275*4882a593Smuzhiyun .data = &exynos_ufs_drvs },
1276*4882a593Smuzhiyun {},
1277*4882a593Smuzhiyun };
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun static const struct dev_pm_ops exynos_ufs_pm_ops = {
1280*4882a593Smuzhiyun .suspend = ufshcd_pltfrm_suspend,
1281*4882a593Smuzhiyun .resume = ufshcd_pltfrm_resume,
1282*4882a593Smuzhiyun .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
1283*4882a593Smuzhiyun .runtime_resume = ufshcd_pltfrm_runtime_resume,
1284*4882a593Smuzhiyun .runtime_idle = ufshcd_pltfrm_runtime_idle,
1285*4882a593Smuzhiyun };
1286*4882a593Smuzhiyun
1287*4882a593Smuzhiyun static struct platform_driver exynos_ufs_pltform = {
1288*4882a593Smuzhiyun .probe = exynos_ufs_probe,
1289*4882a593Smuzhiyun .remove = exynos_ufs_remove,
1290*4882a593Smuzhiyun .shutdown = ufshcd_pltfrm_shutdown,
1291*4882a593Smuzhiyun .driver = {
1292*4882a593Smuzhiyun .name = "exynos-ufshc",
1293*4882a593Smuzhiyun .pm = &exynos_ufs_pm_ops,
1294*4882a593Smuzhiyun .of_match_table = of_match_ptr(exynos_ufs_of_match),
1295*4882a593Smuzhiyun },
1296*4882a593Smuzhiyun };
1297*4882a593Smuzhiyun module_platform_driver(exynos_ufs_pltform);
1298*4882a593Smuzhiyun
1299*4882a593Smuzhiyun MODULE_AUTHOR("Alim Akhtar <alim.akhtar@samsung.com>");
1300*4882a593Smuzhiyun MODULE_AUTHOR("Seungwon Jeon <essuuj@gmail.com>");
1301*4882a593Smuzhiyun MODULE_DESCRIPTION("Exynos UFS HCI Driver");
1302*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1303