1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Intel Sunrisepoint LPSS core support.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2015, Intel Corporation
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
8*4882a593Smuzhiyun * Mika Westerberg <mika.westerberg@linux.intel.com>
9*4882a593Smuzhiyun * Heikki Krogerus <heikki.krogerus@linux.intel.com>
10*4882a593Smuzhiyun * Jarkko Nikula <jarkko.nikula@linux.intel.com>
11*4882a593Smuzhiyun */
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <linux/clk.h>
14*4882a593Smuzhiyun #include <linux/clkdev.h>
15*4882a593Smuzhiyun #include <linux/clk-provider.h>
16*4882a593Smuzhiyun #include <linux/debugfs.h>
17*4882a593Smuzhiyun #include <linux/idr.h>
18*4882a593Smuzhiyun #include <linux/io.h>
19*4882a593Smuzhiyun #include <linux/ioport.h>
20*4882a593Smuzhiyun #include <linux/kernel.h>
21*4882a593Smuzhiyun #include <linux/module.h>
22*4882a593Smuzhiyun #include <linux/mfd/core.h>
23*4882a593Smuzhiyun #include <linux/pm_qos.h>
24*4882a593Smuzhiyun #include <linux/pm_runtime.h>
25*4882a593Smuzhiyun #include <linux/property.h>
26*4882a593Smuzhiyun #include <linux/seq_file.h>
27*4882a593Smuzhiyun #include <linux/io-64-nonatomic-lo-hi.h>
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #include <linux/dma/idma64.h>
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun #include "intel-lpss.h"
32*4882a593Smuzhiyun
/* Layout of a single LPSS device's MMIO window */
#define LPSS_DEV_OFFSET		0x000
#define LPSS_DEV_SIZE		0x200
#define LPSS_PRIV_OFFSET	0x200
#define LPSS_PRIV_SIZE		0x100
#define LPSS_PRIV_REG_COUNT	(LPSS_PRIV_SIZE / 4)
#define LPSS_IDMA64_OFFSET	0x800
#define LPSS_IDMA64_SIZE	0x800

/* Offsets from lpss->priv */
#define LPSS_PRIV_RESETS		0x04
#define LPSS_PRIV_RESETS_IDMA		BIT(2)
#define LPSS_PRIV_RESETS_FUNC		0x3

/* Active/idle latency tolerance (LTR) registers */
#define LPSS_PRIV_ACTIVELTR		0x10
#define LPSS_PRIV_IDLELTR		0x14

#define LPSS_PRIV_LTR_REQ		BIT(15)
#define LPSS_PRIV_LTR_SCALE_MASK	GENMASK(11, 10)
#define LPSS_PRIV_LTR_SCALE_1US		(2 << 10)
#define LPSS_PRIV_LTR_SCALE_32US	(3 << 10)
#define LPSS_PRIV_LTR_VALUE_MASK	GENMASK(9, 0)

/* SSP (SPI) private control register */
#define LPSS_PRIV_SSP_REG		0x20
#define LPSS_PRIV_SSP_REG_DIS_DMA_FIN	BIT(0)

/* 64-bit register holding the device's own MMIO base address */
#define LPSS_PRIV_REMAP_ADDR		0x40

/* Capabilities register: device type and iDMA availability */
#define LPSS_PRIV_CAPS			0xfc
#define LPSS_PRIV_CAPS_NO_IDMA		BIT(8)
#define LPSS_PRIV_CAPS_TYPE_MASK	GENMASK(7, 4)
#define LPSS_PRIV_CAPS_TYPE_SHIFT	4
64*4882a593Smuzhiyun
/* This matches the type field in CAPS register */
enum intel_lpss_dev_type {
	LPSS_DEV_I2C = 0,
	LPSS_DEV_UART,
	LPSS_DEV_SPI,
};

/* Per-instance driver state, stored as drvdata of the parent device */
struct intel_lpss {
	const struct intel_lpss_platform_info *info;	/* platform data from the PCI/ACPI glue */
	enum intel_lpss_dev_type type;			/* decoded from the CAPS register */
	struct clk *clk;				/* leaf of the registered clock chain */
	struct clk_lookup *clock;			/* clkdev lookup handed to the host controller */
	struct mfd_cell *cell;				/* per-instance copy of the matching MFD cell */
	struct device *dev;
	void __iomem *priv;				/* mapping of the private register space */
	u32 priv_ctx[LPSS_PRIV_REG_COUNT];		/* private register context saved over suspend */
	int devid;					/* instance id from intel_lpss_devid_ida */
	u32 caps;					/* cached LPSS_PRIV_CAPS value */
	u32 active_ltr;					/* cached ACTIVELTR register value */
	u32 idle_ltr;					/* cached IDLELTR register value */
	struct dentry *debugfs;				/* per-device debugfs directory */
};
87*4882a593Smuzhiyun
/* Resources of the host controller itself, relative to info->mem */
static const struct resource intel_lpss_dev_resources[] = {
	DEFINE_RES_MEM_NAMED(LPSS_DEV_OFFSET, LPSS_DEV_SIZE, "lpss_dev"),
	DEFINE_RES_MEM_NAMED(LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE, "lpss_priv"),
	DEFINE_RES_IRQ(0),
};

/* Resources of the integrated DMA engine, relative to info->mem */
static const struct resource intel_lpss_idma64_resources[] = {
	DEFINE_RES_MEM(LPSS_IDMA64_OFFSET, LPSS_IDMA64_SIZE),
	DEFINE_RES_IRQ(0),
};

/*
 * Cells needs to be ordered so that the iDMA is created first. This is
 * because we need to be sure the DMA is available when the host controller
 * driver is probed.
 */
static const struct mfd_cell intel_lpss_idma64_cell = {
	.name = LPSS_IDMA64_DRIVER_NAME,
	.num_resources = ARRAY_SIZE(intel_lpss_idma64_resources),
	.resources = intel_lpss_idma64_resources,
};

static const struct mfd_cell intel_lpss_i2c_cell = {
	.name = "i2c_designware",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static const struct mfd_cell intel_lpss_uart_cell = {
	.name = "dw-apb-uart",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static const struct mfd_cell intel_lpss_spi_cell = {
	.name = "pxa2xx-spi",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

/* Allocator for unique instance ids used in child device names */
static DEFINE_IDA(intel_lpss_devid_ida);
/* Root of the driver's debugfs hierarchy, created at module init */
static struct dentry *intel_lpss_debugfs;
130*4882a593Smuzhiyun
intel_lpss_cache_ltr(struct intel_lpss * lpss)131*4882a593Smuzhiyun static void intel_lpss_cache_ltr(struct intel_lpss *lpss)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
134*4882a593Smuzhiyun lpss->idle_ltr = readl(lpss->priv + LPSS_PRIV_IDLELTR);
135*4882a593Smuzhiyun }
136*4882a593Smuzhiyun
intel_lpss_debugfs_add(struct intel_lpss * lpss)137*4882a593Smuzhiyun static int intel_lpss_debugfs_add(struct intel_lpss *lpss)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun struct dentry *dir;
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun dir = debugfs_create_dir(dev_name(lpss->dev), intel_lpss_debugfs);
142*4882a593Smuzhiyun if (IS_ERR(dir))
143*4882a593Smuzhiyun return PTR_ERR(dir);
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun /* Cache the values into lpss structure */
146*4882a593Smuzhiyun intel_lpss_cache_ltr(lpss);
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun debugfs_create_x32("capabilities", S_IRUGO, dir, &lpss->caps);
149*4882a593Smuzhiyun debugfs_create_x32("active_ltr", S_IRUGO, dir, &lpss->active_ltr);
150*4882a593Smuzhiyun debugfs_create_x32("idle_ltr", S_IRUGO, dir, &lpss->idle_ltr);
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun lpss->debugfs = dir;
153*4882a593Smuzhiyun return 0;
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun
/* Tear down the per-device debugfs directory created by _debugfs_add() */
static void intel_lpss_debugfs_remove(struct intel_lpss *lpss)
{
	debugfs_remove_recursive(lpss->debugfs);
}
160*4882a593Smuzhiyun
/*
 * dev_pm_qos set_latency_tolerance() hook: translate the requested
 * latency tolerance @val (microseconds) into the private LTR registers.
 */
static void intel_lpss_ltr_set(struct device *dev, s32 val)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	u32 ltr;

	/*
	 * Program latency tolerance (LTR) accordingly what has been asked
	 * by the PM QoS layer or disable it in case we were passed
	 * negative value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		/* No constraint requested: drop the LTR request bit */
		ltr &= ~LPSS_PRIV_LTR_REQ;
	} else {
		ltr |= LPSS_PRIV_LTR_REQ;
		ltr &= ~LPSS_PRIV_LTR_SCALE_MASK;
		ltr &= ~LPSS_PRIV_LTR_VALUE_MASK;

		/* Values beyond the 10-bit field switch to the 32us scale */
		if (val > LPSS_PRIV_LTR_VALUE_MASK)
			ltr |= LPSS_PRIV_LTR_SCALE_32US | val >> 5;
		else
			ltr |= LPSS_PRIV_LTR_SCALE_1US | val;
	}

	/* Skip the register writes if nothing changed */
	if (ltr == lpss->active_ltr)
		return;

	writel(ltr, lpss->priv + LPSS_PRIV_ACTIVELTR);
	writel(ltr, lpss->priv + LPSS_PRIV_IDLELTR);

	/* Cache the values into lpss structure */
	intel_lpss_cache_ltr(lpss);
}
195*4882a593Smuzhiyun
/* Hook up the LTR callback and expose the latency tolerance in sysfs */
static void intel_lpss_ltr_expose(struct intel_lpss *lpss)
{
	lpss->dev->power.set_latency_tolerance = intel_lpss_ltr_set;
	dev_pm_qos_expose_latency_tolerance(lpss->dev);
}
201*4882a593Smuzhiyun
/* Undo intel_lpss_ltr_expose(): remove the sysfs attribute and callback */
static void intel_lpss_ltr_hide(struct intel_lpss *lpss)
{
	dev_pm_qos_hide_latency_tolerance(lpss->dev);
	lpss->dev->power.set_latency_tolerance = NULL;
}
207*4882a593Smuzhiyun
intel_lpss_assign_devs(struct intel_lpss * lpss)208*4882a593Smuzhiyun static int intel_lpss_assign_devs(struct intel_lpss *lpss)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun const struct mfd_cell *cell;
211*4882a593Smuzhiyun unsigned int type;
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun type = lpss->caps & LPSS_PRIV_CAPS_TYPE_MASK;
214*4882a593Smuzhiyun type >>= LPSS_PRIV_CAPS_TYPE_SHIFT;
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun switch (type) {
217*4882a593Smuzhiyun case LPSS_DEV_I2C:
218*4882a593Smuzhiyun cell = &intel_lpss_i2c_cell;
219*4882a593Smuzhiyun break;
220*4882a593Smuzhiyun case LPSS_DEV_UART:
221*4882a593Smuzhiyun cell = &intel_lpss_uart_cell;
222*4882a593Smuzhiyun break;
223*4882a593Smuzhiyun case LPSS_DEV_SPI:
224*4882a593Smuzhiyun cell = &intel_lpss_spi_cell;
225*4882a593Smuzhiyun break;
226*4882a593Smuzhiyun default:
227*4882a593Smuzhiyun return -ENODEV;
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun lpss->cell = devm_kmemdup(lpss->dev, cell, sizeof(*cell), GFP_KERNEL);
231*4882a593Smuzhiyun if (!lpss->cell)
232*4882a593Smuzhiyun return -ENOMEM;
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun lpss->type = type;
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun return 0;
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun
intel_lpss_has_idma(const struct intel_lpss * lpss)239*4882a593Smuzhiyun static bool intel_lpss_has_idma(const struct intel_lpss *lpss)
240*4882a593Smuzhiyun {
241*4882a593Smuzhiyun return (lpss->caps & LPSS_PRIV_CAPS_NO_IDMA) == 0;
242*4882a593Smuzhiyun }
243*4882a593Smuzhiyun
/*
 * Write the device's own MMIO base address into the REMAP register
 * (64-bit, written as two 32-bit halves, low half first).
 */
static void intel_lpss_set_remap_addr(const struct intel_lpss *lpss)
{
	resource_size_t addr = lpss->info->mem->start;

	lo_hi_writeq(addr, lpss->priv + LPSS_PRIV_REMAP_ADDR);
}
250*4882a593Smuzhiyun
/* Release both the function and iDMA reset lines */
static void intel_lpss_deassert_reset(const struct intel_lpss *lpss)
{
	u32 value = LPSS_PRIV_RESETS_FUNC | LPSS_PRIV_RESETS_IDMA;

	/* Bring out the device from reset */
	writel(value, lpss->priv + LPSS_PRIV_RESETS);
}
258*4882a593Smuzhiyun
/*
 * Put the device through a reset cycle and program the remap address.
 * For SPI devices with iDMA, also re-enable multiblock DMA transfers.
 */
static void intel_lpss_init_dev(const struct intel_lpss *lpss)
{
	u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;

	/* Set the device in reset state */
	writel(0, lpss->priv + LPSS_PRIV_RESETS);

	intel_lpss_deassert_reset(lpss);

	intel_lpss_set_remap_addr(lpss);

	/* Nothing further to do without an integrated DMA engine */
	if (!intel_lpss_has_idma(lpss))
		return;

	/* Make sure that SPI multiblock DMA transfers are re-enabled */
	if (lpss->type == LPSS_DEV_SPI)
		writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
}
277*4882a593Smuzhiyun
/* Unregister @clk and every ancestor by walking up the parent chain */
static void intel_lpss_unregister_clock_tree(struct clk *clk)
{
	struct clk *parent;

	for (; clk; clk = parent) {
		parent = clk_get_parent(clk);
		clk_unregister(clk);
	}
}
288*4882a593Smuzhiyun
intel_lpss_register_clock_divider(struct intel_lpss * lpss,const char * devname,struct clk ** clk)289*4882a593Smuzhiyun static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
290*4882a593Smuzhiyun const char *devname,
291*4882a593Smuzhiyun struct clk **clk)
292*4882a593Smuzhiyun {
293*4882a593Smuzhiyun char name[32];
294*4882a593Smuzhiyun struct clk *tmp = *clk;
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun snprintf(name, sizeof(name), "%s-enable", devname);
297*4882a593Smuzhiyun tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
298*4882a593Smuzhiyun lpss->priv, 0, 0, NULL);
299*4882a593Smuzhiyun if (IS_ERR(tmp))
300*4882a593Smuzhiyun return PTR_ERR(tmp);
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun snprintf(name, sizeof(name), "%s-div", devname);
303*4882a593Smuzhiyun tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
304*4882a593Smuzhiyun 0, lpss->priv, 1, 15, 16, 15, 0,
305*4882a593Smuzhiyun NULL);
306*4882a593Smuzhiyun if (IS_ERR(tmp))
307*4882a593Smuzhiyun return PTR_ERR(tmp);
308*4882a593Smuzhiyun *clk = tmp;
309*4882a593Smuzhiyun
310*4882a593Smuzhiyun snprintf(name, sizeof(name), "%s-update", devname);
311*4882a593Smuzhiyun tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
312*4882a593Smuzhiyun CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
313*4882a593Smuzhiyun if (IS_ERR(tmp))
314*4882a593Smuzhiyun return PTR_ERR(tmp);
315*4882a593Smuzhiyun *clk = tmp;
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun return 0;
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun
/*
 * Register the clock chain for the host controller: a fixed-rate root
 * clock plus, for non-I2C devices, a gated fractional divider. A clkdev
 * lookup is created so the child MFD device can clk_get() its clock.
 * Returns 0 on success (or when no clock rate was provided) and a
 * negative error code otherwise; on failure all clocks registered so
 * far are unregistered.
 */
static int intel_lpss_register_clock(struct intel_lpss *lpss)
{
	const struct mfd_cell *cell = lpss->cell;
	struct clk *clk;
	char devname[24];
	int ret;

	/* No root clock rate means clocks are handled elsewhere */
	if (!lpss->info->clk_rate)
		return 0;

	/* Root clock */
	clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL, 0,
				      lpss->info->clk_rate);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Matches the child platform device name, e.g. "pxa2xx-spi.0" */
	snprintf(devname, sizeof(devname), "%s.%d", cell->name, lpss->devid);

	/*
	 * Support for clock divider only if it has some preset value.
	 * Otherwise we assume that the divider is not used.
	 */
	if (lpss->type != LPSS_DEV_I2C) {
		ret = intel_lpss_register_clock_divider(lpss, devname, &clk);
		if (ret)
			goto err_clk_register;
	}

	ret = -ENOMEM;

	/* Clock for the host controller */
	lpss->clock = clkdev_create(clk, lpss->info->clk_con_id, "%s", devname);
	if (!lpss->clock)
		goto err_clk_register;

	lpss->clk = clk;

	return 0;

err_clk_register:
	intel_lpss_unregister_clock_tree(clk);

	return ret;
}
364*4882a593Smuzhiyun
intel_lpss_unregister_clock(struct intel_lpss * lpss)365*4882a593Smuzhiyun static void intel_lpss_unregister_clock(struct intel_lpss *lpss)
366*4882a593Smuzhiyun {
367*4882a593Smuzhiyun if (IS_ERR_OR_NULL(lpss->clk))
368*4882a593Smuzhiyun return;
369*4882a593Smuzhiyun
370*4882a593Smuzhiyun clkdev_drop(lpss->clock);
371*4882a593Smuzhiyun intel_lpss_unregister_clock_tree(lpss->clk);
372*4882a593Smuzhiyun }
373*4882a593Smuzhiyun
/*
 * Common probe path called by the PCI/ACPI glue drivers. Maps the
 * private register space, initializes the hardware, registers clocks,
 * exposes the LTR interface and adds the iDMA plus host controller
 * MFD child devices. Returns 0 on success or a negative error code.
 */
int intel_lpss_probe(struct device *dev,
		     const struct intel_lpss_platform_info *info)
{
	struct intel_lpss *lpss;
	int ret;

	/* The glue driver must supply a memory resource and a valid IRQ */
	if (!info || !info->mem || info->irq <= 0)
		return -EINVAL;

	lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL);
	if (!lpss)
		return -ENOMEM;

	/* Uncached mapping of the private register window only */
	lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET,
				     LPSS_PRIV_SIZE);
	if (!lpss->priv)
		return -ENOMEM;

	lpss->info = info;
	lpss->dev = dev;
	lpss->caps = readl(lpss->priv + LPSS_PRIV_CAPS);

	dev_set_drvdata(dev, lpss);

	ret = intel_lpss_assign_devs(lpss);
	if (ret)
		return ret;

	lpss->cell->properties = info->properties;

	intel_lpss_init_dev(lpss);

	lpss->devid = ida_simple_get(&intel_lpss_devid_ida, 0, 0, GFP_KERNEL);
	if (lpss->devid < 0)
		return lpss->devid;

	ret = intel_lpss_register_clock(lpss);
	if (ret)
		goto err_clk_register;

	intel_lpss_ltr_expose(lpss);

	/* debugfs failure is not fatal, only warn about it */
	ret = intel_lpss_debugfs_add(lpss);
	if (ret)
		dev_warn(dev, "Failed to create debugfs entries\n");

	/* Add the DMA engine first; on failure the child falls back to PIO */
	if (intel_lpss_has_idma(lpss)) {
		ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell,
				      1, info->mem, info->irq, NULL);
		if (ret)
			dev_warn(dev, "Failed to add %s, fallback to PIO\n",
				 LPSS_IDMA64_DRIVER_NAME);
	}

	ret = mfd_add_devices(dev, lpss->devid, lpss->cell,
			      1, info->mem, info->irq, NULL);
	if (ret)
		goto err_remove_ltr;

	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);

	return 0;

err_remove_ltr:
	intel_lpss_debugfs_remove(lpss);
	intel_lpss_ltr_hide(lpss);
	intel_lpss_unregister_clock(lpss);

err_clk_register:
	ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_lpss_probe);
448*4882a593Smuzhiyun
/*
 * Common remove path: tear down everything intel_lpss_probe() set up,
 * in reverse order.
 */
void intel_lpss_remove(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);

	mfd_remove_devices(dev);
	intel_lpss_debugfs_remove(lpss);
	intel_lpss_ltr_hide(lpss);
	intel_lpss_unregister_clock(lpss);
	ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
}
EXPORT_SYMBOL_GPL(intel_lpss_remove);
460*4882a593Smuzhiyun
resume_lpss_device(struct device * dev,void * data)461*4882a593Smuzhiyun static int resume_lpss_device(struct device *dev, void *data)
462*4882a593Smuzhiyun {
463*4882a593Smuzhiyun if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
464*4882a593Smuzhiyun pm_runtime_resume(dev);
465*4882a593Smuzhiyun
466*4882a593Smuzhiyun return 0;
467*4882a593Smuzhiyun }
468*4882a593Smuzhiyun
/* System sleep .prepare callback for the glue drivers */
int intel_lpss_prepare(struct device *dev)
{
	/*
	 * Resume both child devices before entering system sleep. This
	 * ensures that they are in proper state before they get suspended.
	 */
	device_for_each_child_reverse(dev, NULL, resume_lpss_device);
	return 0;
}
EXPORT_SYMBOL_GPL(intel_lpss_prepare);
479*4882a593Smuzhiyun
/*
 * Suspend callback: save the private register context and, except for
 * UART, put the controller into reset.
 */
int intel_lpss_suspend(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	unsigned int i;

	/* Save device context */
	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
		lpss->priv_ctx[i] = readl(lpss->priv + i * 4);

	/*
	 * If the device type is not UART, then put the controller into
	 * reset. UART cannot be put into reset since S3/S0ix fail when
	 * no_console_suspend flag is enabled.
	 */
	if (lpss->type != LPSS_DEV_UART)
		writel(0, lpss->priv + LPSS_PRIV_RESETS);

	return 0;
}
EXPORT_SYMBOL_GPL(intel_lpss_suspend);
500*4882a593Smuzhiyun
/*
 * Resume callback: bring the controller out of reset and restore the
 * private register context saved by intel_lpss_suspend().
 */
int intel_lpss_resume(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	unsigned int i;

	intel_lpss_deassert_reset(lpss);

	/* Restore device context */
	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
		writel(lpss->priv_ctx[i], lpss->priv + i * 4);

	return 0;
}
EXPORT_SYMBOL_GPL(intel_lpss_resume);
515*4882a593Smuzhiyun
/* Module init: create the debugfs root shared by all LPSS instances */
static int __init intel_lpss_init(void)
{
	intel_lpss_debugfs = debugfs_create_dir("intel_lpss", NULL);
	return 0;
}
module_init(intel_lpss_init);
522*4882a593Smuzhiyun
/* Module exit: release the devid allocator and the debugfs root */
static void __exit intel_lpss_exit(void)
{
	ida_destroy(&intel_lpss_devid_ida);
	debugfs_remove(intel_lpss_debugfs);
}
module_exit(intel_lpss_exit);
529*4882a593Smuzhiyun
530*4882a593Smuzhiyun MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
531*4882a593Smuzhiyun MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
532*4882a593Smuzhiyun MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
533*4882a593Smuzhiyun MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
534*4882a593Smuzhiyun MODULE_DESCRIPTION("Intel LPSS core driver");
535*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
536*4882a593Smuzhiyun /*
537*4882a593Smuzhiyun * Ensure the DMA driver is loaded before the host controller device appears,
538*4882a593Smuzhiyun * so that the host controller driver can request its DMA channels as early
539*4882a593Smuzhiyun * as possible.
540*4882a593Smuzhiyun *
541*4882a593Smuzhiyun * If the DMA module is not there that's OK as well.
542*4882a593Smuzhiyun */
543*4882a593Smuzhiyun MODULE_SOFTDEP("pre: platform:" LPSS_IDMA64_DRIVER_NAME);
544