// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller PCI glue driver
 *
 * This code is based on drivers/scsi/ufs/ufshcd-pci.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include "ufshcd.h"
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>

struct intel_host {
	u32		active_ltr;
	u32		idle_ltr;
	struct dentry	*debugfs_root;
};

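/*
 * If the local TX line-control capability (LCC) attribute reads back as
 * enabled, disable it before link startup; some hosts and devices
 * reportedly misbehave with LCC enabled.
 */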
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
	u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
	u32 lcc_enable = 0;

	ufshcd_dme_get(hba, attr, &lcc_enable);
	if (lcc_enable)
		ufshcd_disable_host_tx_lcc(hba);

	return 0;
}

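/* On PRE_CHANGE, i.e. just before link startup, disable LCC */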
static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
					 enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_intel_disable_lcc(hba);
		break;
	case POST_CHANGE:
		break;
	default:
		break;
	}

	return err;
}

#define INTEL_ACTIVELTR		0x804
#define INTEL_IDLELTR		0x808

#define INTEL_LTR_REQ		BIT(15)
#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US	(2 << 10)
#define INTEL_LTR_SCALE_32US	(3 << 10)
#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)

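/*
 * Snapshot the LTR registers into struct intel_host so debugfs can
 * report them without another MMIO read.
 */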
static void intel_cache_ltr(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
	host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}

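/*
 * Program the latency tolerance registers from a PM QoS value given in
 * microseconds. The hardware encoding is a 10-bit value (bits 9:0) plus
 * a scale (bits 11:10): scale 2 counts in 1 us units, scale 3 in 32 us
 * units. For example, val = 5000 us does not fit in 10 bits, so it is
 * shifted right by 5 (divided by 32) to 156 and programmed with the
 * 32 us scale, i.e. ~4992 us.
 */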
static void intel_ltr_set(struct device *dev, s32 val)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct intel_host *host = ufshcd_get_variant(hba);
	u32 ltr;

	pm_runtime_get_sync(dev);

	/*
	 * Program the latency tolerance (LTR) according to what the PM QoS
	 * layer has requested, or disable it if we were passed a negative
	 * value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~INTEL_LTR_SCALE_MASK;
		ltr &= ~INTEL_LTR_VALUE_MASK;

		if (val > INTEL_LTR_VALUE_MASK) {
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	if (ltr == host->active_ltr)
		goto out;

	writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
	writel(ltr, hba->mmio_base + INTEL_IDLELTR);

	/* Cache the values into the intel_host structure */
	intel_cache_ltr(hba);
out:
	pm_runtime_put(dev);
}

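/*
 * Hook the device into the PM QoS latency tolerance framework:
 * intel_ltr_set() runs whenever the device's latency tolerance is
 * updated, and the tolerance becomes visible and settable via sysfs.
 */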
static void intel_ltr_expose(struct device *dev)
{
	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct device *dev)
{
	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}

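/* Expose the cached LTR values as read-only debugfs files */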
static void intel_add_debugfs(struct ufs_hba *hba)
{
	struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
	struct intel_host *host = ufshcd_get_variant(hba);

	intel_cache_ltr(hba);

	host->debugfs_root = dir;
	debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
	debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}

static void intel_remove_debugfs(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	debugfs_remove_recursive(host->debugfs_root);
}

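/*
 * Common Intel init: enable runtime autosuspend, allocate the variant
 * host data, and set up LTR and debugfs support.
 */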
static int ufs_intel_common_init(struct ufs_hba *hba)
{
	struct intel_host *host;

	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;
	ufshcd_set_variant(hba, host);
	intel_ltr_expose(hba->dev);
	intel_add_debugfs(hba);
	return 0;
}

static void ufs_intel_common_exit(struct ufs_hba *hba)
{
	intel_remove_debugfs(hba);
	intel_ltr_hide(hba->dev);
}

static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	/*
	 * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
	 * address registers must be restored because the restore kernel may
	 * have used different addresses.
	 */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_H);

	if (ufshcd_is_link_hibern8(hba)) {
		int ret = ufshcd_uic_hibern8_exit(hba);

		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
				__func__, ret);
			/*
			 * Force reset and restore. Any other actions can lead
			 * to an unrecoverable state.
			 */
			ufshcd_set_link_off(hba);
		}
	}

	return 0;
}

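/* Elkhart Lake additionally needs the broken auto-hibern8 quirk */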
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	return ufs_intel_common_init(hba);
}

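/*
 * Variant ops; referenced from driver_data in the PCI device table
 * below and attached to the hba in ufshcd_pci_probe().
 */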
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
	.name                   = "intel-pci",
	.init			= ufs_intel_common_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
	.name                   = "intel-pci",
	.init			= ufs_intel_ehl_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

#ifdef CONFIG_PM_SLEEP
/**
 * ufshcd_pci_suspend - suspend power management function
 * @dev: pointer to PCI device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */
static int ufshcd_pci_suspend(struct device *dev)
{
	return ufshcd_system_suspend(dev_get_drvdata(dev));
}

/**
 * ufshcd_pci_resume - resume power management function
 * @dev: pointer to PCI device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */
static int ufshcd_pci_resume(struct device *dev)
{
	return ufshcd_system_resume(dev_get_drvdata(dev));
}

/**
 * ufshcd_pci_poweroff - suspend-to-disk poweroff function
 * @dev: pointer to PCI device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */
static int ufshcd_pci_poweroff(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int spm_lvl = hba->spm_lvl;
	int ret;

	/*
	 * For poweroff we need to set the UFS device to PowerDown mode.
	 * Force spm_lvl to ensure that.
	 */
	hba->spm_lvl = 5;
	ret = ufshcd_system_suspend(hba);
	hba->spm_lvl = spm_lvl;
	return ret;
}

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int ufshcd_pci_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int ufshcd_pci_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static int ufshcd_pci_runtime_idle(struct device *dev)
{
	return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
#endif /* CONFIG_PM */

/**
 * ufshcd_pci_shutdown - main function to put the controller in reset state
 * @pdev: pointer to PCI device handle
 */
static void ufshcd_pci_shutdown(struct pci_dev *pdev)
{
	ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
}

/**
 * ufshcd_pci_remove - de-allocate the SCSI host and host memory space
 *		data structures
 * @pdev: pointer to PCI handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

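	/* Re-assert the runtime PM defaults that probe relaxed */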
	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	ufshcd_remove(hba);
	ufshcd_dealloc_host(hba);
}

/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Returns 0 on success, non-zero value on failure
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int err;

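	/* Managed (devres) enable and iomap: undone automatically on unbind */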
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pcim_enable_device failed\n");
		return err;
	}

	pci_set_master(pdev);

	err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
	if (err < 0) {
		dev_err(&pdev->dev, "request and iomap failed\n");
		return err;
	}

	mmio_base = pcim_iomap_table(pdev)[0];

	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err) {
		dev_err(&pdev->dev, "Allocation failed\n");
		return err;
	}

	hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

	err = ufshcd_init(hba, mmio_base, pdev->irq);
	if (err) {
		dev_err(&pdev->dev, "Initialization failed\n");
		ufshcd_dealloc_host(hba);
		return err;
	}

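	/*
	 * Drop the usage count taken by the PCI core during probe and
	 * allow runtime suspend (the core forbids it by default).
	 */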
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}

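/*
 * System sleep: freeze/thaw and restore reuse the regular suspend and
 * resume paths, while poweroff (the final hibernation state) uses
 * ufshcd_pci_poweroff() so the UFS device is put into PowerDown mode.
 */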
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend	= ufshcd_pci_suspend,
	.resume		= ufshcd_pci_resume,
	.freeze		= ufshcd_pci_suspend,
	.thaw		= ufshcd_pci_resume,
	.poweroff	= ufshcd_pci_poweroff,
	.restore	= ufshcd_pci_resume,
#endif
	SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
			   ufshcd_pci_runtime_resume,
			   ufshcd_pci_runtime_idle)
};

static const struct pci_device_id ufshcd_pci_tbl[] = {
	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);

static struct pci_driver ufshcd_pci_driver = {
	.name = UFSHCD,
	.id_table = ufshcd_pci_tbl,
	.probe = ufshcd_pci_probe,
	.remove = ufshcd_pci_remove,
	.shutdown = ufshcd_pci_shutdown,
	.driver = {
		.pm = &ufshcd_pci_pm_ops
	},
};

module_pci_driver(ufshcd_pci_driver);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);