// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/x86/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/suspend.h>
#include <linux/delay.h>

#include "internal.h"

#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>

#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE	0x04
#define LPSS_LTR_SIZE	0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK	(BIT(1) | BIT(16))
#define LPSS_RESETS			0x04
#define LPSS_RESETS_RESET_FUNC		BIT(0)
#define LPSS_RESETS_RESET_APB		BIT(1)
#define LPSS_GENERAL			0x08
#define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD	BIT(3)
#define LPSS_SW_LTR			0x10
#define LPSS_AUTO_LTR			0x14
#define LPSS_LTR_SNOOP_REQ		BIT(15)
#define LPSS_LTR_SNOOP_MASK		0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US		0x800
#define LPSS_LTR_SNOOP_LAT_32US		0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT	5
#define LPSS_LTR_SNOOP_LAT_CUTOFF	3000
#define LPSS_LTR_MAX_VAL		0x3FF
#define LPSS_TX_INT			0x20
#define LPSS_TX_INT_MASK		BIT(1)

#define LPSS_PRV_REG_COUNT		9

/* LPSS Flags */
#define LPSS_CLK			BIT(0)
#define LPSS_CLK_GATE			BIT(1)
#define LPSS_CLK_DIVIDER		BIT(2)
#define LPSS_LTR			BIT(3)
#define LPSS_SAVE_CTX			BIT(4)
/*
 * For some devices the DSDT AML code for another device turns off the device
 * before our suspend handler runs, causing us to read and save all-ones
 * (0xffffffff) as the ctx register values.
 * Luckily these devices always use the same ctx register values, so we can
 * work around this by saving the ctx registers once on activation.
 */
#define LPSS_SAVE_CTX_ONCE		BIT(5)
#define LPSS_NO_D3_DELAY		BIT(6)

struct lpss_private_data;

struct lpss_device_desc {
	unsigned int flags;
	const char *clk_con_id;
	unsigned int prv_offset;
	size_t prv_size_override;
	struct property_entry *properties;
	void (*setup)(struct lpss_private_data *pdata);
	bool resume_from_noirq;
};

static const struct lpss_device_desc lpss_dma_desc = {
	.flags = LPSS_CLK,
};

struct lpss_private_data {
	struct acpi_device *adev;
	void __iomem *mmio_base;
	resource_size_t mmio_size;
	unsigned int fixed_clk_rate;
	struct clk *clk;
	const struct lpss_device_desc *dev_desc;
	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};

/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
static u32 pmc_atom_d3_mask = 0xfe000ffe;

/* LPSS run time quirks */
static unsigned int lpss_quirks;

/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
 *
 * The LPSS DMA controller has neither _PS0 nor _PS3 method. Moreover
 * it can be powered off automatically whenever the last LPSS device goes down.
 * In case of no power any access to the DMA controller will hang the system.
 * The behaviour is reproduced on some HP laptops based on Intel BayTrail as
 * well as on the ASUS T100TA transformer.
 *
 * This quirk overrides the power state of the entire LPSS island to keep the
 * DMA controller powered on whenever we have at least one other device in use.
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)

/* UART Component Parameter Register */
#define LPSS_UART_CPR			0xF4
#define LPSS_UART_CPR_AFCE		BIT(4)

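/*
 * Mask the LPSS TX completion interrupt and, if the UART lacks hardware
 * auto flow control (CPR.AFCE not set), set the RTS override bit in the
 * private GENERAL register.
 */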
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
	val = readl(pdata->mmio_base + offset);
	writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

	val = readl(pdata->mmio_base + LPSS_UART_CPR);
	if (!(val & LPSS_UART_CPR_AFCE)) {
		offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
		val = readl(pdata->mmio_base + offset);
		val |= LPSS_GENERAL_UART_RTS_OVRD;
		writel(val, pdata->mmio_base + offset);
	}
}

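/* Bring the device's function and APB interfaces out of reset. */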
static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
	val = readl(pdata->mmio_base + offset);
	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
	writel(val, pdata->mmio_base + offset);
}

/*
 * BYT PWM used for backlight control by the i915 driver on systems without
 * the Crystal Cove PMIC.
 */
static struct pwm_lookup byt_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void byt_pwm_setup(struct lpss_private_data *pdata)
{
	struct acpi_device *adev = pdata->adev;

	/* Only call pwm_add_table for the first PWM controller */
	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
		return;

	pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}

#define LPSS_I2C_ENABLE			0x6c

static void byt_i2c_setup(struct lpss_private_data *pdata)
{
	const char *uid_str = acpi_device_uid(pdata->adev);
	acpi_handle handle = pdata->adev->handle;
	unsigned long long shared_host = 0;
	acpi_status status;
	long uid = 0;

	/* Expected to always be true, but better safe than sorry */
	if (uid_str)
		uid = simple_strtol(uid_str, NULL, 10);

	/* Detect I2C bus shared with PUNIT and ignore its d3 status */
	status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
	if (ACPI_SUCCESS(status) && shared_host && uid)
		pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));

	lpss_deassert_reset(pdata);

	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
		pdata->fixed_clk_rate = 133000000;

	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}

/* BSW PWM used for backlight control by the i915 driver */
static struct pwm_lookup bsw_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
	struct acpi_device *adev = pdata->adev;

	/* Only call pwm_add_table for the first PWM controller */
	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
		return;

	pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}

static const struct lpss_device_desc lpt_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
			| LPSS_SAVE_CTX,
	.prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
};

static struct property_entry uart_properties[] = {
	PROPERTY_ENTRY_U32("reg-io-width", 4),
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
	{ },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
			| LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
	.flags = LPSS_LTR,
	.prv_offset = 0x1000,
	.prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_pwm_setup,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = bsw_pwm_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc byt_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.prv_offset = 0x400,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
	.flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.prv_offset = 0x400,
	.setup = lpss_deassert_reset,
};

static const struct x86_cpu_id lpss_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
	{}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */

static const struct acpi_device_id acpi_lpss_device_ids[] = {
	/* Generic LPSS devices */
	{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },

	/* Lynxpoint LPSS devices */
	{ "INT33C0", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C1", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT33C7", },

	/* BayTrail LPSS devices */
	{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
	{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
	{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
	{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
	{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
	{ "INT33B2", },
	{ "INT33FC", },

	/* Braswell LPSS devices */
	{ "80862286", LPSS_ADDR(lpss_dma_desc) },
	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
	{ "808622C0", LPSS_ADDR(lpss_dma_desc) },
	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },

	/* Broadwell LPSS devices */
	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT3437", },

	/* Wildcat Point LPSS devices */
	{ "INT3438", LPSS_ADDR(lpt_dev_desc) },

	{ }
};

#ifdef CONFIG_X86_INTEL_LPSS

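/* Resource filter for acpi_dev_get_resources(): keep memory resources only. */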
static int is_memory(struct acpi_resource *res, void *not_used)
{
	struct resource r;
	return !acpi_dev_resource_memory(res, &r);
}

/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;

static inline void lpt_register_clock_device(void)
{
	lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}

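/*
 * Register the per-device clock, parented to the shared LPSS root clock
 * provided by the "clk-lpt" platform device: either a fixed-rate clock
 * (when the setup hook detected one), or a gate clock optionally followed
 * by a fractional divider and a divider-update gate.
 */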
static int register_device_clock(struct acpi_device *adev,
				 struct lpss_private_data *pdata)
{
	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
	const char *devname = dev_name(&adev->dev);
	struct clk *clk;
	struct lpss_clk_data *clk_data;
	const char *parent, *clk_name;
	void __iomem *prv_base;

	if (!lpss_clk_dev)
		lpt_register_clock_device();

	if (IS_ERR(lpss_clk_dev))
		return PTR_ERR(lpss_clk_dev);

	clk_data = platform_get_drvdata(lpss_clk_dev);
	if (!clk_data)
		return -ENODEV;
	clk = clk_data->clk;

	if (!pdata->mmio_base
	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
		return -ENODATA;

	parent = clk_data->name;
	prv_base = pdata->mmio_base + dev_desc->prv_offset;

	if (pdata->fixed_clk_rate) {
		clk = clk_register_fixed_rate(NULL, devname, parent, 0,
					      pdata->fixed_clk_rate);
		goto out;
	}

	if (dev_desc->flags & LPSS_CLK_GATE) {
		clk = clk_register_gate(NULL, devname, parent, 0,
					prv_base, 0, 0, NULL);
		parent = devname;
	}

	if (dev_desc->flags & LPSS_CLK_DIVIDER) {
		/* Prevent division by zero */
		if (!readl(prv_base))
			writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

		clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
		if (!clk_name)
			return -ENOMEM;
		clk = clk_register_fractional_divider(NULL, clk_name, parent,
						      0, prv_base,
						      1, 15, 16, 15, 0, NULL);
		parent = clk_name;

		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
		if (!clk_name) {
			kfree(parent);
			return -ENOMEM;
		}
		clk = clk_register_gate(NULL, clk_name, parent,
					CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
					prv_base, 31, 0, NULL);
		kfree(parent);
		kfree(clk_name);
	}
out:
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pdata->clk = clk;
	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
	return 0;
}

struct lpss_device_links {
	const char *supplier_hid;
	const char *supplier_uid;
	const char *consumer_hid;
	const char *consumer_uid;
	u32 flags;
	const struct dmi_system_id *dep_missing_ids;
};

/* Please keep this list sorted alphabetically by vendor and model */
static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
		},
	},
	{}
};

/*
 * The _DEP method is used to identify dependencies but instead of creating
 * device links for every handle in _DEP, only links in the following list
 * are created. That is necessary because, in the general case, _DEP can
 * refer to devices that might not have drivers, or that are on different
 * buses, or where the supplier is not enumerated until after the consumer
 * is probed.
 */
static const struct lpss_device_links lpss_device_links[] = {
	/* CHT External sdcard slot controller depends on PMIC I2C ctrl */
	{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
	/* CHT iGPU depends on PMIC I2C controller */
	{"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
	/* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */
	{"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME,
	 i2c1_dep_missing_dmi_ids},
	/* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */
	{"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
	/* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */
	{"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
};

static bool acpi_lpss_is_supplier(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
}

static bool acpi_lpss_is_consumer(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
}

struct hid_uid {
	const char *hid;
	const char *uid;
};

static int match_hid_uid(struct device *dev, const void *data)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const struct hid_uid *id = data;

	if (!adev)
		return 0;

	return acpi_dev_hid_uid_match(adev, id->hid, id->uid);
}

static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
{
	struct device *dev;

	struct hid_uid data = {
		.hid = hid,
		.uid = uid,
	};

	dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
	if (dev)
		return dev;

	return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
}

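/* Check whether @handle is listed in the _DEP dependency list of @adev. */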
static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
{
	struct acpi_handle_list dep_devices;
	acpi_status status;
	int i;

	if (!acpi_has_method(adev->handle, "_DEP"))
		return false;

	status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
					 &dep_devices);
	if (ACPI_FAILURE(status)) {
		dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
		return false;
	}

	for (i = 0; i < dep_devices.count; i++) {
		if (dep_devices.handles[i] == handle)
			return true;
	}

	return false;
}

static void acpi_lpss_link_consumer(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
	if (!dev2)
		return;

	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
	    || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
		device_link_add(dev2, dev1, link->flags);

	put_device(dev2);
}

static void acpi_lpss_link_supplier(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
	if (!dev2)
		return;

	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
	    || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
		device_link_add(dev1, dev2, link->flags);

	put_device(dev2);
}

static void acpi_lpss_create_device_links(struct acpi_device *adev,
					  struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
		const struct lpss_device_links *link = &lpss_device_links[i];

		if (acpi_lpss_is_supplier(adev, link))
			acpi_lpss_link_consumer(&pdev->dev, link);

		if (acpi_lpss_is_consumer(adev, link))
			acpi_lpss_link_supplier(&pdev->dev, link);
	}
}

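/*
 * Enumerate an LPSS ACPI device as a platform device: map its private MMIO
 * space, run the optional setup hook, register its clock if needed and
 * create the device links listed in lpss_device_links[].
 */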
static int acpi_lpss_create_device(struct acpi_device *adev,
				   const struct acpi_device_id *id)
{
	const struct lpss_device_desc *dev_desc;
	struct lpss_private_data *pdata;
	struct resource_entry *rentry;
	struct list_head resource_list;
	struct platform_device *pdev;
	int ret;

	dev_desc = (const struct lpss_device_desc *)id->driver_data;
	if (!dev_desc) {
		pdev = acpi_create_platform_device(adev, NULL);
		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
	}
	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
	if (ret < 0)
		goto err_out;

	list_for_each_entry(rentry, &resource_list, node)
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			if (dev_desc->prv_size_override)
				pdata->mmio_size = dev_desc->prv_size_override;
			else
				pdata->mmio_size = resource_size(rentry->res);
			pdata->mmio_base = ioremap(rentry->res->start,
						   pdata->mmio_size);
			break;
		}

	acpi_dev_free_resource_list(&resource_list);

	if (!pdata->mmio_base) {
		/* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
		adev->pnp.type.platform_id = 0;
		/* Skip the device, but continue the namespace scan. */
		ret = 0;
		goto err_out;
	}

	pdata->adev = adev;
	pdata->dev_desc = dev_desc;

	if (dev_desc->setup)
		dev_desc->setup(pdata);

	if (dev_desc->flags & LPSS_CLK) {
		ret = register_device_clock(adev, pdata);
		if (ret) {
			/* Skip the device, but continue the namespace scan. */
			ret = 0;
			goto err_out;
		}
	}

	/*
	 * This works around a known issue in ACPI tables where LPSS devices
	 * have _PS0 and _PS3 without _PSC (and no power resources), so
	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
	 */
	acpi_device_fix_up_power(adev);

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev, dev_desc->properties);
	if (!IS_ERR_OR_NULL(pdev)) {
		acpi_lpss_create_device_links(adev, pdev);
		return 1;
	}

	ret = PTR_ERR(pdev);
	adev->driver_data = NULL;

err_out:
	kfree(pdata);
	return ret;
}

static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
	return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
			     unsigned int reg)
{
	writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

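/*
 * Read an LPSS private register on behalf of the sysfs LTR attributes;
 * fails with -EAGAIN while the device is runtime suspended and its
 * register space cannot be accessed.
 */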
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
	struct acpi_device *adev;
	struct lpss_private_data *pdata;
	unsigned long flags;
	int ret;

	ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
	if (WARN_ON(ret))
		return ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (pm_runtime_suspended(dev)) {
		ret = -EAGAIN;
		goto out;
	}
	pdata = acpi_driver_data(adev);
	if (WARN_ON(!pdata || !pdata->mmio_base)) {
		ret = -ENODEV;
		goto out;
	}
	*val = __lpss_reg_read(pdata, reg);

out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return ret;
}

static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	u32 ltr_value = 0;
	unsigned int reg;
	int ret;

	reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
	ret = lpss_reg_read(dev, reg, &ltr_value);
	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
}

static ssize_t lpss_ltr_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u32 ltr_mode = 0;
	char *outstr;
	int ret;

	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
	if (ret)
		return ret;

	outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
	return sprintf(buf, "%s\n", outstr);
}

static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);

static struct attribute *lpss_attrs[] = {
	&dev_attr_auto_ltr.attr,
	&dev_attr_sw_ltr.attr,
	&dev_attr_ltr_mode.attr,
	NULL,
};

static const struct attribute_group lpss_attr_group = {
	.attrs = lpss_attrs,
	.name = "lpss_ltr",
};

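/*
 * Program the software LTR value: a negative @val switches the device back
 * to automatic LTR mode, otherwise the latency is encoded using the 1 us or
 * 32 us scale and software LTR mode is enabled.
 */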
static void acpi_lpss_set_ltr(struct device *dev, s32 val)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	u32 ltr_mode, ltr_val;

	ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
	if (val < 0) {
		if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
			ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
			__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
		}
		return;
	}
	ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
	if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
		val = LPSS_LTR_MAX_VAL;
	} else if (val > LPSS_LTR_MAX_VAL) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
		val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
	} else {
		ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
	}
	ltr_val |= val;
	__lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
	if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
		ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
		__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
	}
}

#ifdef CONFIG_PM
/**
 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * the prv_reg_ctx array.
 */
static void acpi_lpss_save_ctx(struct device *dev,
			       struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
		dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

/**
 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
 */
static void acpi_lpss_restore_ctx(struct device *dev,
				  struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
	/*
	 * The following delay is needed or the subsequent write operations may
	 * fail. The LPSS devices are actually PCI devices and the PCI spec
	 * expects a 10 ms delay before the device can be accessed after a D3
	 * to D0 transition. However, some platforms such as BSW do not need
	 * this delay.
	 */
	unsigned int delay = 10;	/* default 10 ms delay */

	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
		delay = 0;

	msleep(delay);
}

static int acpi_lpss_activate(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	/*
	 * This is called only at the ->probe() stage, where the device is
	 * either in a known state defined by the BIOS or, most likely,
	 * powered off. Because of that, deassert the reset line to make sure
	 * that ->probe() will recognize the device.
	 */
	if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
		lpss_deassert_reset(pdata);

#ifdef CONFIG_PM
	if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE)
		acpi_lpss_save_ctx(dev, pdata);
#endif

	return 0;
}

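/* Counterpart of acpi_lpss_activate(): put the device back into a low-power state. */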
static void acpi_lpss_dismiss(struct device *dev)
{
	acpi_dev_suspend(dev, false);
}

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP		0xA0
#define LPSS_IOSF_UNIT_LPIO1		0xAB
#define LPSS_IOSF_UNIT_LPIO2		0xAC

#define LPSS_IOSF_PMCSR			0x84
#define LPSS_PMCSR_D0			0
#define LPSS_PMCSR_D3hot		3
#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0		0x154
#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP		BIT(13)

static DEFINE_MUTEX(lpss_iosf_mutex);
static bool lpss_iosf_d3_entered = true;

static void lpss_iosf_enter_d3_state(void)
{
	u32 value1 = 0;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D3hot;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;
	/*
	 * The PMC provides information about the actual status of the LPSS
	 * devices. Here we read the values related to the LPSS power island,
	 * i.e. the LPSS devices, excluding both LPSS DMA controllers, along
	 * with the SCC domain.
	 */
	u32 func_dis, d3_sts_0, pmc_status;
	int ret;

	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
	if (ret)
		return;

	mutex_lock(&lpss_iosf_mutex);

	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
	if (ret)
		goto exit;

	/*
	 * Get the status of the entire LPSS power island on a per-device
	 * basis. Shut down both LPSS DMA controllers if and only if all
	 * other devices are already in D3hot.
	 */
	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
	if (pmc_status)
		goto exit;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	lpss_iosf_d3_entered = true;

exit:
	mutex_unlock(&lpss_iosf_mutex);
}

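/*
 * Counterpart of lpss_iosf_enter_d3_state(): put both LPSS DMA controllers
 * back into D0 and restore the DMA bits in GPIODEF0.
 */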
static void lpss_iosf_exit_d3_state(void)
{
	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
		     LPSS_GPIODEF0_DMA_LLP;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D0;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;

	mutex_lock(&lpss_iosf_mutex);

	if (!lpss_iosf_d3_entered)
		goto exit;

	lpss_iosf_d3_entered = false;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

exit:
	mutex_unlock(&lpss_iosf_mutex);
}

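/*
 * Common suspend path: save the private register context if required, put
 * the device into a low-power state and, when the LPSS_QUIRK_ALWAYS_POWER_ON
 * quirk is set and the target system state is S0 (runtime suspend), let the
 * whole LPSS island enter D3 once all other devices on it are off.
 */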
acpi_lpss_suspend(struct device * dev,bool wakeup)1008*4882a593Smuzhiyun static int acpi_lpss_suspend(struct device *dev, bool wakeup)
1009*4882a593Smuzhiyun {
1010*4882a593Smuzhiyun struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1011*4882a593Smuzhiyun int ret;
1012*4882a593Smuzhiyun
1013*4882a593Smuzhiyun if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
1014*4882a593Smuzhiyun acpi_lpss_save_ctx(dev, pdata);
1015*4882a593Smuzhiyun
1016*4882a593Smuzhiyun ret = acpi_dev_suspend(dev, wakeup);
1017*4882a593Smuzhiyun
1018*4882a593Smuzhiyun /*
1019*4882a593Smuzhiyun * This call must be last in the sequence, otherwise PMC will return
1020*4882a593Smuzhiyun * wrong status for devices being about to be powered off. See
1021*4882a593Smuzhiyun * lpss_iosf_enter_d3_state() for further information.
1022*4882a593Smuzhiyun */
1023*4882a593Smuzhiyun if (acpi_target_system_state() == ACPI_STATE_S0 &&
1024*4882a593Smuzhiyun lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
1025*4882a593Smuzhiyun lpss_iosf_enter_d3_state();
1026*4882a593Smuzhiyun
1027*4882a593Smuzhiyun return ret;
1028*4882a593Smuzhiyun }
1029*4882a593Smuzhiyun
acpi_lpss_resume(struct device * dev)1030*4882a593Smuzhiyun static int acpi_lpss_resume(struct device *dev)
1031*4882a593Smuzhiyun {
1032*4882a593Smuzhiyun struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1033*4882a593Smuzhiyun int ret;
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun /*
1036*4882a593Smuzhiyun * This call is kept first to be in symmetry with
1037*4882a593Smuzhiyun * acpi_lpss_runtime_suspend() one.
1038*4882a593Smuzhiyun */
1039*4882a593Smuzhiyun if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
1040*4882a593Smuzhiyun lpss_iosf_exit_d3_state();
1041*4882a593Smuzhiyun
1042*4882a593Smuzhiyun ret = acpi_dev_resume(dev);
1043*4882a593Smuzhiyun if (ret)
1044*4882a593Smuzhiyun return ret;
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun acpi_lpss_d3_to_d0_delay(pdata);
1047*4882a593Smuzhiyun
1048*4882a593Smuzhiyun if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
1049*4882a593Smuzhiyun acpi_lpss_restore_ctx(dev, pdata);
1050*4882a593Smuzhiyun
1051*4882a593Smuzhiyun return 0;
1052*4882a593Smuzhiyun }
1053*4882a593Smuzhiyun
1054*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
acpi_lpss_do_suspend_late(struct device * dev)1055*4882a593Smuzhiyun static int acpi_lpss_do_suspend_late(struct device *dev)
1056*4882a593Smuzhiyun {
1057*4882a593Smuzhiyun int ret;
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun if (dev_pm_skip_suspend(dev))
1060*4882a593Smuzhiyun return 0;
1061*4882a593Smuzhiyun
1062*4882a593Smuzhiyun ret = pm_generic_suspend_late(dev);
1063*4882a593Smuzhiyun return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
1064*4882a593Smuzhiyun }
1065*4882a593Smuzhiyun
acpi_lpss_suspend_late(struct device * dev)1066*4882a593Smuzhiyun static int acpi_lpss_suspend_late(struct device *dev)
1067*4882a593Smuzhiyun {
1068*4882a593Smuzhiyun struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun if (pdata->dev_desc->resume_from_noirq)
1071*4882a593Smuzhiyun return 0;
1072*4882a593Smuzhiyun
1073*4882a593Smuzhiyun return acpi_lpss_do_suspend_late(dev);
1074*4882a593Smuzhiyun }
1075*4882a593Smuzhiyun
acpi_lpss_suspend_noirq(struct device * dev)1076*4882a593Smuzhiyun static int acpi_lpss_suspend_noirq(struct device *dev)
1077*4882a593Smuzhiyun {
1078*4882a593Smuzhiyun struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1079*4882a593Smuzhiyun int ret;
1080*4882a593Smuzhiyun
1081*4882a593Smuzhiyun if (pdata->dev_desc->resume_from_noirq) {
1082*4882a593Smuzhiyun /*
1083*4882a593Smuzhiyun * The driver's ->suspend_late callback will be invoked by
1084*4882a593Smuzhiyun * acpi_lpss_do_suspend_late(), with the assumption that the
1085*4882a593Smuzhiyun * driver really wanted to run that code in ->suspend_noirq, but
1086*4882a593Smuzhiyun * it could not run after acpi_dev_suspend() and the driver
1087*4882a593Smuzhiyun * expected the latter to be called in the "late" phase.
1088*4882a593Smuzhiyun */
1089*4882a593Smuzhiyun ret = acpi_lpss_do_suspend_late(dev);
1090*4882a593Smuzhiyun if (ret)
1091*4882a593Smuzhiyun return ret;
1092*4882a593Smuzhiyun }
1093*4882a593Smuzhiyun
1094*4882a593Smuzhiyun return acpi_subsys_suspend_noirq(dev);
1095*4882a593Smuzhiyun }
1096*4882a593Smuzhiyun
acpi_lpss_do_resume_early(struct device * dev)1097*4882a593Smuzhiyun static int acpi_lpss_do_resume_early(struct device *dev)
1098*4882a593Smuzhiyun {
1099*4882a593Smuzhiyun int ret = acpi_lpss_resume(dev);
1100*4882a593Smuzhiyun
1101*4882a593Smuzhiyun return ret ? ret : pm_generic_resume_early(dev);
1102*4882a593Smuzhiyun }
1103*4882a593Smuzhiyun
acpi_lpss_resume_early(struct device * dev)1104*4882a593Smuzhiyun static int acpi_lpss_resume_early(struct device *dev)
1105*4882a593Smuzhiyun {
1106*4882a593Smuzhiyun struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1107*4882a593Smuzhiyun
1108*4882a593Smuzhiyun if (pdata->dev_desc->resume_from_noirq)
1109*4882a593Smuzhiyun return 0;
1110*4882a593Smuzhiyun
1111*4882a593Smuzhiyun if (dev_pm_skip_resume(dev))
1112*4882a593Smuzhiyun return 0;
1113*4882a593Smuzhiyun
1114*4882a593Smuzhiyun return acpi_lpss_do_resume_early(dev);
1115*4882a593Smuzhiyun }
1116*4882a593Smuzhiyun
acpi_lpss_resume_noirq(struct device * dev)1117*4882a593Smuzhiyun static int acpi_lpss_resume_noirq(struct device *dev)
1118*4882a593Smuzhiyun {
1119*4882a593Smuzhiyun struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1120*4882a593Smuzhiyun int ret;
1121*4882a593Smuzhiyun
1122*4882a593Smuzhiyun /* Follow acpi_subsys_resume_noirq(). */
1123*4882a593Smuzhiyun if (dev_pm_skip_resume(dev))
1124*4882a593Smuzhiyun return 0;
1125*4882a593Smuzhiyun
1126*4882a593Smuzhiyun ret = pm_generic_resume_noirq(dev);
1127*4882a593Smuzhiyun if (ret)
1128*4882a593Smuzhiyun return ret;
1129*4882a593Smuzhiyun
1130*4882a593Smuzhiyun if (!pdata->dev_desc->resume_from_noirq)
1131*4882a593Smuzhiyun return 0;
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun /*
1134*4882a593Smuzhiyun * The driver's ->resume_early callback will be invoked by
1135*4882a593Smuzhiyun * acpi_lpss_do_resume_early(), with the assumption that the driver
1136*4882a593Smuzhiyun * really wanted to run that code in ->resume_noirq, but it could not
1137*4882a593Smuzhiyun * run before acpi_dev_resume() and the driver expected the latter to be
1138*4882a593Smuzhiyun * called in the "early" phase.
1139*4882a593Smuzhiyun */
1140*4882a593Smuzhiyun return acpi_lpss_do_resume_early(dev);
1141*4882a593Smuzhiyun }
1142*4882a593Smuzhiyun
static int acpi_lpss_do_restore_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_restore_early(dev);
}

static int acpi_lpss_restore_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_restore_early(dev);
}

static int acpi_lpss_restore_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_restore_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/* This is analogous to what happens in acpi_lpss_resume_noirq(). */
	return acpi_lpss_do_restore_early(dev);
}

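/*
 * The ->poweroff callbacks handle the power-down step of hibernation; the
 * late/noirq split for ->resume_from_noirq devices mirrors the suspend
 * path above.
 */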
static int acpi_lpss_do_poweroff_late(struct device *dev)
{
	int ret = pm_generic_poweroff_late(dev);

	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_poweroff_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_poweroff_late(dev);
}

static int acpi_lpss_poweroff_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq) {
		/* This is analogous to the acpi_lpss_suspend_noirq() case. */
		int ret = acpi_lpss_do_poweroff_late(dev);

		if (ret)
			return ret;
	}

	return pm_generic_poweroff_noirq(dev);
}
#endif /* CONFIG_PM_SLEEP */

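/*
 * Runtime PM: wrap the driver's generic runtime callbacks with the LPSS
 * context save/restore and power handling done in acpi_lpss_suspend() and
 * acpi_lpss_resume().
 */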
static int acpi_lpss_runtime_suspend(struct device *dev)
{
	int ret = pm_generic_runtime_suspend(dev);

	return ret ? ret : acpi_lpss_suspend(dev, true);
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */

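/*
 * PM domain attached to LPSS platform devices by the bus notifier below.
 * It layers the LPSS-specific handling above on top of the generic ACPI
 * subsystem callbacks.
 */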
static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
	.activate = acpi_lpss_activate,
	.dismiss = acpi_lpss_dismiss,
#endif
	.ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
		.prepare = acpi_subsys_prepare,
		.complete = acpi_subsys_complete,
		.suspend = acpi_subsys_suspend,
		.suspend_late = acpi_lpss_suspend_late,
		.suspend_noirq = acpi_lpss_suspend_noirq,
		.resume_noirq = acpi_lpss_resume_noirq,
		.resume_early = acpi_lpss_resume_early,
		.freeze = acpi_subsys_freeze,
		.poweroff = acpi_subsys_poweroff,
		.poweroff_late = acpi_lpss_poweroff_late,
		.poweroff_noirq = acpi_lpss_poweroff_noirq,
		.restore_noirq = acpi_lpss_restore_noirq,
		.restore_early = acpi_lpss_restore_early,
#endif
		.runtime_suspend = acpi_lpss_runtime_suspend,
		.runtime_resume = acpi_lpss_runtime_resume,
#endif
	},
};

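/*
 * Bus notifier: install the LPSS PM domain on matching platform devices
 * when they are added or bound to a driver (and remove it again on unbind
 * and removal), and manage the LTR sysfs attributes for devices with
 * LPSS_LTR set.
 */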
static int acpi_lpss_platform_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct platform_device *pdev = to_platform_device(data);
	struct lpss_private_data *pdata;
	struct acpi_device *adev;
	const struct acpi_device_id *id;

	id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
	if (!id || !id->driver_data)
		return 0;

	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return 0;

	pdata = acpi_driver_data(adev);
	if (!pdata)
		return 0;

	if (pdata->mmio_base &&
	    pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
		dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
		return 0;
	}

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		if (pdata->dev_desc->flags & LPSS_LTR)
			return sysfs_create_group(&pdev->dev.kobj,
						  &lpss_attr_group);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (pdata->dev_desc->flags & LPSS_LTR)
			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	default:
		break;
	}

	return 0;
}

static struct notifier_block acpi_lpss_nb = {
	.notifier_call = acpi_lpss_platform_notify,
};

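/*
 * Hook up latency tolerance reporting (LTR) for devices that advertise
 * LPSS_LTR, provided the private MMIO window is large enough to reach the
 * LTR registers.
 */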
static void acpi_lpss_bind(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
		return;

	if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
		dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
	else
		dev_err(dev, "MMIO size insufficient to access LTR\n");
}

static void acpi_lpss_unbind(struct device *dev)
{
	dev->power.set_latency_tolerance = NULL;
}

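/*
 * Scan handler that enumerates the LPSS devices into platform devices and
 * wires the LTR hook up and down at bind/unbind time.
 */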
static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
	.attach = acpi_lpss_create_device,
	.bind = acpi_lpss_bind,
	.unbind = acpi_lpss_unbind,
};

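/*
 * Register the LPSS clocks via lpt_clk_init() (bailing out if that fails),
 * apply the always-power-on quirk on matching CPUs, and register the
 * platform bus notifier and the ACPI scan handler.
 */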
void __init acpi_lpss_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	ret = lpt_clk_init();
	if (ret)
		return;

	id = x86_match_cpu(lpss_cpu_ids);
	if (id)
		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;

	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
	acpi_scan_add_handler(&lpss_handler);
}

#else

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
};

void __init acpi_lpss_init(void)
{
	acpi_scan_add_handler(&lpss_handler);
}

#endif /* CONFIG_X86_INTEL_LPSS */