1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * ACPI support for Intel Lynxpoint LPSS. |
4 | * |
5 | * Copyright (C) 2013, Intel Corporation |
6 | * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> |
7 | * Rafael J. Wysocki <rafael.j.wysocki@intel.com> |
8 | */ |
9 | |
10 | #include <linux/acpi.h> |
11 | #include <linux/clkdev.h> |
12 | #include <linux/clk-provider.h> |
13 | #include <linux/dmi.h> |
14 | #include <linux/err.h> |
15 | #include <linux/io.h> |
16 | #include <linux/mutex.h> |
17 | #include <linux/pci.h> |
18 | #include <linux/platform_device.h> |
19 | #include <linux/platform_data/x86/clk-lpss.h> |
20 | #include <linux/platform_data/x86/pmc_atom.h> |
21 | #include <linux/pm_domain.h> |
22 | #include <linux/pm_runtime.h> |
23 | #include <linux/pwm.h> |
24 | #include <linux/pxa2xx_ssp.h> |
25 | #include <linux/suspend.h> |
26 | #include <linux/delay.h> |
27 | |
28 | #include "internal.h" |
29 | |
30 | #ifdef CONFIG_X86_INTEL_LPSS |
31 | |
32 | #include <asm/cpu_device_id.h> |
33 | #include <asm/intel-family.h> |
34 | #include <asm/iosf_mbi.h> |
35 | |
36 | #define LPSS_ADDR(desc) ((unsigned long)&desc) |
37 | |
38 | #define LPSS_CLK_SIZE 0x04 |
39 | #define LPSS_LTR_SIZE 0x18 |
40 | |
41 | /* Offsets relative to LPSS_PRIVATE_OFFSET */ |
42 | #define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16)) |
43 | #define LPSS_RESETS 0x04 |
44 | #define LPSS_RESETS_RESET_FUNC BIT(0) |
45 | #define LPSS_RESETS_RESET_APB BIT(1) |
46 | #define LPSS_GENERAL 0x08 |
47 | #define LPSS_GENERAL_LTR_MODE_SW BIT(2) |
48 | #define LPSS_GENERAL_UART_RTS_OVRD BIT(3) |
49 | #define LPSS_SW_LTR 0x10 |
50 | #define LPSS_AUTO_LTR 0x14 |
51 | #define LPSS_LTR_SNOOP_REQ BIT(15) |
52 | #define LPSS_LTR_SNOOP_MASK 0x0000FFFF |
53 | #define LPSS_LTR_SNOOP_LAT_1US 0x800 |
54 | #define LPSS_LTR_SNOOP_LAT_32US 0xC00 |
55 | #define LPSS_LTR_SNOOP_LAT_SHIFT 5 |
56 | #define LPSS_LTR_SNOOP_LAT_CUTOFF 3000 |
57 | #define LPSS_LTR_MAX_VAL 0x3FF |
58 | #define LPSS_TX_INT 0x20 |
59 | #define LPSS_TX_INT_MASK BIT(1) |
60 | |
61 | #define LPSS_PRV_REG_COUNT 9 |
62 | |
63 | /* LPSS Flags */ |
64 | #define LPSS_CLK BIT(0) |
65 | #define LPSS_CLK_GATE BIT(1) |
66 | #define LPSS_CLK_DIVIDER BIT(2) |
67 | #define LPSS_LTR BIT(3) |
68 | #define LPSS_SAVE_CTX BIT(4) |
69 | /* |
70 | * For some devices the DSDT AML code for another device turns off the device |
71 | * before our suspend handler runs, causing us to read/save all 1-s (0xffffffff) |
72 | * as ctx register values. |
73 | * Luckily these devices always use the same ctx register values, so we can |
74 | * work around this by saving the ctx registers once on activation. |
75 | */ |
76 | #define LPSS_SAVE_CTX_ONCE BIT(5) |
77 | #define LPSS_NO_D3_DELAY BIT(6) |
78 | |
79 | struct lpss_private_data; |
80 | |
/* Static, per-ACPI-ID description of how to handle an LPSS device. */
struct lpss_device_desc {
	unsigned int flags;		/* LPSS_* feature flags defined above */
	const char *clk_con_id;		/* clkdev connection id, e.g. "baudclk" */
	unsigned int prv_offset;	/* offset of private registers in MMIO */
	size_t prv_size_override;	/* if non-zero, overrides the MMIO size */
	const struct property_entry *properties; /* extra device properties */
	void (*setup)(struct lpss_private_data *pdata); /* per-device init hook */
	bool resume_from_noirq;		/* resume during the noirq phase */
};
90 | |
/* Generic LPSS DMA controller: only needs the LPSS clock. */
static const struct lpss_device_desc lpss_dma_desc = {
	.flags = LPSS_CLK,
};
94 | |
/* Run-time state attached to each enumerated LPSS ACPI device. */
struct lpss_private_data {
	struct acpi_device *adev;	/* the ACPI companion device */
	void __iomem *mmio_base;	/* mapped device MMIO region */
	resource_size_t mmio_size;	/* size of the mapped region */
	unsigned int fixed_clk_rate;	/* non-zero: register a fixed-rate clock */
	struct clk *clk;		/* the device's functional clock */
	const struct lpss_device_desc *dev_desc; /* matching static descriptor */
	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];	/* saved private register context */
};
104 | |
105 | /* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */ |
106 | static u32 pmc_atom_d3_mask = 0xfe000ffe; |
107 | |
108 | /* LPSS run time quirks */ |
109 | static unsigned int lpss_quirks; |
110 | |
111 | /* |
112 | * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device. |
113 | * |
114 | * The LPSS DMA controller has neither _PS0 nor _PS3 method. Moreover |
115 | * it can be powered off automatically whenever the last LPSS device goes down. |
116 | * In case of no power any access to the DMA controller will hang the system. |
117 | * The behaviour is reproduced on some HP laptops based on Intel BayTrail as |
118 | * well as on ASuS T100TA transformer. |
119 | * |
120 | * This quirk overrides power state of entire LPSS island to keep DMA powered |
121 | * on whenever we have at least one other device in use. |
122 | */ |
123 | #define LPSS_QUIRK_ALWAYS_POWER_ON BIT(0) |
124 | |
125 | /* UART Component Parameter Register */ |
126 | #define LPSS_UART_CPR 0xF4 |
127 | #define LPSS_UART_CPR_AFCE BIT(4) |
128 | |
129 | static void lpss_uart_setup(struct lpss_private_data *pdata) |
130 | { |
131 | unsigned int offset; |
132 | u32 val; |
133 | |
134 | offset = pdata->dev_desc->prv_offset + LPSS_TX_INT; |
135 | val = readl(addr: pdata->mmio_base + offset); |
136 | writel(val: val | LPSS_TX_INT_MASK, addr: pdata->mmio_base + offset); |
137 | |
138 | val = readl(addr: pdata->mmio_base + LPSS_UART_CPR); |
139 | if (!(val & LPSS_UART_CPR_AFCE)) { |
140 | offset = pdata->dev_desc->prv_offset + LPSS_GENERAL; |
141 | val = readl(addr: pdata->mmio_base + offset); |
142 | val |= LPSS_GENERAL_UART_RTS_OVRD; |
143 | writel(val, addr: pdata->mmio_base + offset); |
144 | } |
145 | } |
146 | |
147 | static void lpss_deassert_reset(struct lpss_private_data *pdata) |
148 | { |
149 | unsigned int offset; |
150 | u32 val; |
151 | |
152 | offset = pdata->dev_desc->prv_offset + LPSS_RESETS; |
153 | val = readl(addr: pdata->mmio_base + offset); |
154 | val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC; |
155 | writel(val, addr: pdata->mmio_base + offset); |
156 | } |
157 | |
158 | /* |
159 | * BYT PWM used for backlight control by the i915 driver on systems without |
160 | * the Crystal Cove PMIC. |
161 | */ |
162 | static struct pwm_lookup byt_pwm_lookup[] = { |
163 | PWM_LOOKUP_WITH_MODULE("80860F09:00" , 0, "0000:00:02.0" , |
164 | "pwm_soc_backlight" , 0, PWM_POLARITY_NORMAL, |
165 | "pwm-lpss-platform" ), |
166 | }; |
167 | |
168 | static void byt_pwm_setup(struct lpss_private_data *pdata) |
169 | { |
170 | /* Only call pwm_add_table for the first PWM controller */ |
171 | if (acpi_dev_uid_match(pdata->adev, 1)) |
172 | pwm_add_table(table: byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); |
173 | } |
174 | |
175 | #define LPSS_I2C_ENABLE 0x6c |
176 | |
177 | static void byt_i2c_setup(struct lpss_private_data *pdata) |
178 | { |
179 | acpi_handle handle = pdata->adev->handle; |
180 | unsigned long long shared_host = 0; |
181 | acpi_status status; |
182 | u64 uid; |
183 | |
184 | /* Expected to always be successfull, but better safe then sorry */ |
185 | if (!acpi_dev_uid_to_integer(adev: pdata->adev, integer: &uid) && uid) { |
186 | /* Detect I2C bus shared with PUNIT and ignore its d3 status */ |
187 | status = acpi_evaluate_integer(handle, pathname: "_SEM" , NULL, data: &shared_host); |
188 | if (ACPI_SUCCESS(status) && shared_host) |
189 | pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1)); |
190 | } |
191 | |
192 | lpss_deassert_reset(pdata); |
193 | |
194 | if (readl(addr: pdata->mmio_base + pdata->dev_desc->prv_offset)) |
195 | pdata->fixed_clk_rate = 133000000; |
196 | |
197 | writel(val: 0, addr: pdata->mmio_base + LPSS_I2C_ENABLE); |
198 | } |
199 | |
200 | /* |
201 | * BSW PWM1 is used for backlight control by the i915 driver |
202 | * BSW PWM2 is used for backlight control for fixed (etched into the glass) |
203 | * touch controls on some models. These touch-controls have specialized |
204 | * drivers which know they need the "pwm_soc_lpss_2" con-id. |
205 | */ |
206 | static struct pwm_lookup bsw_pwm_lookup[] = { |
207 | PWM_LOOKUP_WITH_MODULE("80862288:00" , 0, "0000:00:02.0" , |
208 | "pwm_soc_backlight" , 0, PWM_POLARITY_NORMAL, |
209 | "pwm-lpss-platform" ), |
210 | PWM_LOOKUP_WITH_MODULE("80862289:00" , 0, NULL, |
211 | "pwm_soc_lpss_2" , 0, PWM_POLARITY_NORMAL, |
212 | "pwm-lpss-platform" ), |
213 | }; |
214 | |
215 | static void bsw_pwm_setup(struct lpss_private_data *pdata) |
216 | { |
217 | /* Only call pwm_add_table for the first PWM controller */ |
218 | if (acpi_dev_uid_match(pdata->adev, 1)) |
219 | pwm_add_table(table: bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); |
220 | } |
221 | |
222 | static const struct property_entry lpt_spi_properties[] = { |
223 | PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type" , LPSS_LPT_SSP), |
224 | { } |
225 | }; |
226 | |
227 | static const struct lpss_device_desc lpt_spi_dev_desc = { |
228 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR |
229 | | LPSS_SAVE_CTX, |
230 | .prv_offset = 0x800, |
231 | .properties = lpt_spi_properties, |
232 | }; |
233 | |
234 | static const struct lpss_device_desc lpt_i2c_dev_desc = { |
235 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX, |
236 | .prv_offset = 0x800, |
237 | }; |
238 | |
239 | static struct property_entry uart_properties[] = { |
240 | PROPERTY_ENTRY_U32("reg-io-width" , 4), |
241 | PROPERTY_ENTRY_U32("reg-shift" , 2), |
242 | PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible" ), |
243 | { }, |
244 | }; |
245 | |
246 | static const struct lpss_device_desc lpt_uart_dev_desc = { |
247 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR |
248 | | LPSS_SAVE_CTX, |
249 | .clk_con_id = "baudclk" , |
250 | .prv_offset = 0x800, |
251 | .setup = lpss_uart_setup, |
252 | .properties = uart_properties, |
253 | }; |
254 | |
255 | static const struct lpss_device_desc lpt_sdio_dev_desc = { |
256 | .flags = LPSS_LTR, |
257 | .prv_offset = 0x1000, |
258 | .prv_size_override = 0x1018, |
259 | }; |
260 | |
261 | static const struct lpss_device_desc byt_pwm_dev_desc = { |
262 | .flags = LPSS_SAVE_CTX, |
263 | .prv_offset = 0x800, |
264 | .setup = byt_pwm_setup, |
265 | }; |
266 | |
267 | static const struct lpss_device_desc bsw_pwm_dev_desc = { |
268 | .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY, |
269 | .prv_offset = 0x800, |
270 | .setup = bsw_pwm_setup, |
271 | .resume_from_noirq = true, |
272 | }; |
273 | |
274 | static const struct lpss_device_desc bsw_pwm2_dev_desc = { |
275 | .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY, |
276 | .prv_offset = 0x800, |
277 | .resume_from_noirq = true, |
278 | }; |
279 | |
280 | static const struct lpss_device_desc byt_uart_dev_desc = { |
281 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, |
282 | .clk_con_id = "baudclk" , |
283 | .prv_offset = 0x800, |
284 | .setup = lpss_uart_setup, |
285 | .properties = uart_properties, |
286 | }; |
287 | |
288 | static const struct lpss_device_desc bsw_uart_dev_desc = { |
289 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
290 | | LPSS_NO_D3_DELAY, |
291 | .clk_con_id = "baudclk" , |
292 | .prv_offset = 0x800, |
293 | .setup = lpss_uart_setup, |
294 | .properties = uart_properties, |
295 | }; |
296 | |
297 | static const struct property_entry byt_spi_properties[] = { |
298 | PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type" , LPSS_BYT_SSP), |
299 | { } |
300 | }; |
301 | |
302 | static const struct lpss_device_desc byt_spi_dev_desc = { |
303 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, |
304 | .prv_offset = 0x400, |
305 | .properties = byt_spi_properties, |
306 | }; |
307 | |
308 | static const struct lpss_device_desc byt_sdio_dev_desc = { |
309 | .flags = LPSS_CLK, |
310 | }; |
311 | |
312 | static const struct lpss_device_desc byt_i2c_dev_desc = { |
313 | .flags = LPSS_CLK | LPSS_SAVE_CTX, |
314 | .prv_offset = 0x800, |
315 | .setup = byt_i2c_setup, |
316 | .resume_from_noirq = true, |
317 | }; |
318 | |
319 | static const struct lpss_device_desc bsw_i2c_dev_desc = { |
320 | .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, |
321 | .prv_offset = 0x800, |
322 | .setup = byt_i2c_setup, |
323 | .resume_from_noirq = true, |
324 | }; |
325 | |
326 | static const struct property_entry bsw_spi_properties[] = { |
327 | PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type" , LPSS_BSW_SSP), |
328 | { } |
329 | }; |
330 | |
331 | static const struct lpss_device_desc bsw_spi_dev_desc = { |
332 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
333 | | LPSS_NO_D3_DELAY, |
334 | .prv_offset = 0x400, |
335 | .setup = lpss_deassert_reset, |
336 | .properties = bsw_spi_properties, |
337 | }; |
338 | |
339 | static const struct x86_cpu_id lpss_cpu_ids[] = { |
340 | X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL), |
341 | X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL), |
342 | {} |
343 | }; |
344 | |
345 | #else |
346 | |
347 | #define LPSS_ADDR(desc) (0UL) |
348 | |
349 | #endif /* CONFIG_X86_INTEL_LPSS */ |
350 | |
351 | static const struct acpi_device_id acpi_lpss_device_ids[] = { |
352 | /* Generic LPSS devices */ |
353 | { "INTL9C60" , LPSS_ADDR(lpss_dma_desc) }, |
354 | |
355 | /* Lynxpoint LPSS devices */ |
356 | { "INT33C0" , LPSS_ADDR(lpt_spi_dev_desc) }, |
357 | { "INT33C1" , LPSS_ADDR(lpt_spi_dev_desc) }, |
358 | { "INT33C2" , LPSS_ADDR(lpt_i2c_dev_desc) }, |
359 | { "INT33C3" , LPSS_ADDR(lpt_i2c_dev_desc) }, |
360 | { "INT33C4" , LPSS_ADDR(lpt_uart_dev_desc) }, |
361 | { "INT33C5" , LPSS_ADDR(lpt_uart_dev_desc) }, |
362 | { "INT33C6" , LPSS_ADDR(lpt_sdio_dev_desc) }, |
363 | |
364 | /* BayTrail LPSS devices */ |
365 | { "80860F09" , LPSS_ADDR(byt_pwm_dev_desc) }, |
366 | { "80860F0A" , LPSS_ADDR(byt_uart_dev_desc) }, |
367 | { "80860F0E" , LPSS_ADDR(byt_spi_dev_desc) }, |
368 | { "80860F14" , LPSS_ADDR(byt_sdio_dev_desc) }, |
369 | { "80860F41" , LPSS_ADDR(byt_i2c_dev_desc) }, |
370 | |
371 | /* Braswell LPSS devices */ |
372 | { "80862286" , LPSS_ADDR(lpss_dma_desc) }, |
373 | { "80862288" , LPSS_ADDR(bsw_pwm_dev_desc) }, |
374 | { "80862289" , LPSS_ADDR(bsw_pwm2_dev_desc) }, |
375 | { "8086228A" , LPSS_ADDR(bsw_uart_dev_desc) }, |
376 | { "8086228E" , LPSS_ADDR(bsw_spi_dev_desc) }, |
377 | { "808622C0" , LPSS_ADDR(lpss_dma_desc) }, |
378 | { "808622C1" , LPSS_ADDR(bsw_i2c_dev_desc) }, |
379 | |
380 | /* Broadwell LPSS devices */ |
381 | { "INT3430" , LPSS_ADDR(lpt_spi_dev_desc) }, |
382 | { "INT3431" , LPSS_ADDR(lpt_spi_dev_desc) }, |
383 | { "INT3432" , LPSS_ADDR(lpt_i2c_dev_desc) }, |
384 | { "INT3433" , LPSS_ADDR(lpt_i2c_dev_desc) }, |
385 | { "INT3434" , LPSS_ADDR(lpt_uart_dev_desc) }, |
386 | { "INT3435" , LPSS_ADDR(lpt_uart_dev_desc) }, |
387 | { "INT3436" , LPSS_ADDR(lpt_sdio_dev_desc) }, |
388 | |
389 | /* Wildcat Point LPSS devices */ |
390 | { "INT3438" , LPSS_ADDR(lpt_spi_dev_desc) }, |
391 | |
392 | { } |
393 | }; |
394 | |
395 | #ifdef CONFIG_X86_INTEL_LPSS |
396 | |
397 | /* LPSS main clock device. */ |
398 | static struct platform_device *lpss_clk_dev; |
399 | |
400 | static inline void lpt_register_clock_device(void) |
401 | { |
402 | lpss_clk_dev = platform_device_register_simple(name: "clk-lpss-atom" , |
403 | PLATFORM_DEVID_NONE, |
404 | NULL, num: 0); |
405 | } |
406 | |
407 | static int register_device_clock(struct acpi_device *adev, |
408 | struct lpss_private_data *pdata) |
409 | { |
410 | const struct lpss_device_desc *dev_desc = pdata->dev_desc; |
411 | const char *devname = dev_name(dev: &adev->dev); |
412 | struct clk *clk; |
413 | struct lpss_clk_data *clk_data; |
414 | const char *parent, *clk_name; |
415 | void __iomem *prv_base; |
416 | |
417 | if (!lpss_clk_dev) |
418 | lpt_register_clock_device(); |
419 | |
420 | if (IS_ERR(ptr: lpss_clk_dev)) |
421 | return PTR_ERR(ptr: lpss_clk_dev); |
422 | |
423 | clk_data = platform_get_drvdata(pdev: lpss_clk_dev); |
424 | if (!clk_data) |
425 | return -ENODEV; |
426 | clk = clk_data->clk; |
427 | |
428 | if (!pdata->mmio_base |
429 | || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE) |
430 | return -ENODATA; |
431 | |
432 | parent = clk_data->name; |
433 | prv_base = pdata->mmio_base + dev_desc->prv_offset; |
434 | |
435 | if (pdata->fixed_clk_rate) { |
436 | clk = clk_register_fixed_rate(NULL, name: devname, parent_name: parent, flags: 0, |
437 | fixed_rate: pdata->fixed_clk_rate); |
438 | goto out; |
439 | } |
440 | |
441 | if (dev_desc->flags & LPSS_CLK_GATE) { |
442 | clk = clk_register_gate(NULL, name: devname, parent_name: parent, flags: 0, |
443 | reg: prv_base, bit_idx: 0, clk_gate_flags: 0, NULL); |
444 | parent = devname; |
445 | } |
446 | |
447 | if (dev_desc->flags & LPSS_CLK_DIVIDER) { |
448 | /* Prevent division by zero */ |
449 | if (!readl(addr: prv_base)) |
450 | writel(LPSS_CLK_DIVIDER_DEF_MASK, addr: prv_base); |
451 | |
452 | clk_name = kasprintf(GFP_KERNEL, fmt: "%s-div" , devname); |
453 | if (!clk_name) |
454 | return -ENOMEM; |
455 | clk = clk_register_fractional_divider(NULL, name: clk_name, parent_name: parent, |
456 | flags: 0, reg: prv_base, mshift: 1, mwidth: 15, nshift: 16, nwidth: 15, |
457 | CLK_FRAC_DIVIDER_POWER_OF_TWO_PS, |
458 | NULL); |
459 | parent = clk_name; |
460 | |
461 | clk_name = kasprintf(GFP_KERNEL, fmt: "%s-update" , devname); |
462 | if (!clk_name) { |
463 | kfree(objp: parent); |
464 | return -ENOMEM; |
465 | } |
466 | clk = clk_register_gate(NULL, name: clk_name, parent_name: parent, |
467 | CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, |
468 | reg: prv_base, bit_idx: 31, clk_gate_flags: 0, NULL); |
469 | kfree(objp: parent); |
470 | kfree(objp: clk_name); |
471 | } |
472 | out: |
473 | if (IS_ERR(ptr: clk)) |
474 | return PTR_ERR(ptr: clk); |
475 | |
476 | pdata->clk = clk; |
477 | clk_register_clkdev(clk, dev_desc->clk_con_id, devname); |
478 | return 0; |
479 | } |
480 | |
/*
 * One supplier/consumer pair (matched by ACPI _HID/_UID) for which a
 * device link should be created.
 */
struct lpss_device_links {
	const char *supplier_hid;
	const char *supplier_uid;
	const char *consumer_hid;
	const char *consumer_uid;
	u32 flags;		/* DL_FLAG_* device link flags */
	const struct dmi_system_id *dep_missing_ids; /* systems whose tables lack _DEP */
};
489 | |
490 | /* Please keep this list sorted alphabetically by vendor and model */ |
491 | static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = { |
492 | { |
493 | .matches = { |
494 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC." ), |
495 | DMI_MATCH(DMI_PRODUCT_NAME, "T200TA" ), |
496 | }, |
497 | }, |
498 | {} |
499 | }; |
500 | |
501 | /* |
502 | * The _DEP method is used to identify dependencies but instead of creating |
503 | * device links for every handle in _DEP, only links in the following list are |
504 | * created. That is necessary because, in the general case, _DEP can refer to |
505 | * devices that might not have drivers, or that are on different buses, or where |
506 | * the supplier is not enumerated until after the consumer is probed. |
507 | */ |
508 | static const struct lpss_device_links lpss_device_links[] = { |
509 | /* CHT External sdcard slot controller depends on PMIC I2C ctrl */ |
510 | {"808622C1" , "7" , "80860F14" , "3" , DL_FLAG_PM_RUNTIME}, |
511 | /* CHT iGPU depends on PMIC I2C controller */ |
512 | {"808622C1" , "7" , "LNXVIDEO" , NULL, DL_FLAG_PM_RUNTIME}, |
513 | /* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */ |
514 | {"80860F41" , "1" , "LNXVIDEO" , NULL, DL_FLAG_PM_RUNTIME, |
515 | i2c1_dep_missing_dmi_ids}, |
516 | /* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */ |
517 | {"80860F41" , "5" , "LNXVIDEO" , NULL, DL_FLAG_PM_RUNTIME}, |
518 | /* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */ |
519 | {"80860F41" , "7" , "LNXVIDEO" , NULL, DL_FLAG_PM_RUNTIME}, |
520 | }; |
521 | |
522 | static bool acpi_lpss_is_supplier(struct acpi_device *adev, |
523 | const struct lpss_device_links *link) |
524 | { |
525 | return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid); |
526 | } |
527 | |
528 | static bool acpi_lpss_is_consumer(struct acpi_device *adev, |
529 | const struct lpss_device_links *link) |
530 | { |
531 | return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid); |
532 | } |
533 | |
/* _HID/_UID pair used as match data when searching a bus for a device. */
struct hid_uid {
	const char *hid;
	const char *uid;
};
538 | |
539 | static int match_hid_uid(struct device *dev, const void *data) |
540 | { |
541 | struct acpi_device *adev = ACPI_COMPANION(dev); |
542 | const struct hid_uid *id = data; |
543 | |
544 | if (!adev) |
545 | return 0; |
546 | |
547 | return acpi_dev_hid_uid_match(adev, id->hid, id->uid); |
548 | } |
549 | |
550 | static struct device *acpi_lpss_find_device(const char *hid, const char *uid) |
551 | { |
552 | struct device *dev; |
553 | |
554 | struct hid_uid data = { |
555 | .hid = hid, |
556 | .uid = uid, |
557 | }; |
558 | |
559 | dev = bus_find_device(bus: &platform_bus_type, NULL, data: &data, match: match_hid_uid); |
560 | if (dev) |
561 | return dev; |
562 | |
563 | return bus_find_device(bus: &pci_bus_type, NULL, data: &data, match: match_hid_uid); |
564 | } |
565 | |
566 | static void acpi_lpss_link_consumer(struct device *dev1, |
567 | const struct lpss_device_links *link) |
568 | { |
569 | struct device *dev2; |
570 | |
571 | dev2 = acpi_lpss_find_device(hid: link->consumer_hid, uid: link->consumer_uid); |
572 | if (!dev2) |
573 | return; |
574 | |
575 | if ((link->dep_missing_ids && dmi_check_system(list: link->dep_missing_ids)) |
576 | || acpi_device_dep(ACPI_HANDLE(dev2), ACPI_HANDLE(dev1))) |
577 | device_link_add(consumer: dev2, supplier: dev1, flags: link->flags); |
578 | |
579 | put_device(dev: dev2); |
580 | } |
581 | |
582 | static void acpi_lpss_link_supplier(struct device *dev1, |
583 | const struct lpss_device_links *link) |
584 | { |
585 | struct device *dev2; |
586 | |
587 | dev2 = acpi_lpss_find_device(hid: link->supplier_hid, uid: link->supplier_uid); |
588 | if (!dev2) |
589 | return; |
590 | |
591 | if ((link->dep_missing_ids && dmi_check_system(list: link->dep_missing_ids)) |
592 | || acpi_device_dep(ACPI_HANDLE(dev1), ACPI_HANDLE(dev2))) |
593 | device_link_add(consumer: dev1, supplier: dev2, flags: link->flags); |
594 | |
595 | put_device(dev: dev2); |
596 | } |
597 | |
598 | static void acpi_lpss_create_device_links(struct acpi_device *adev, |
599 | struct platform_device *pdev) |
600 | { |
601 | int i; |
602 | |
603 | for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) { |
604 | const struct lpss_device_links *link = &lpss_device_links[i]; |
605 | |
606 | if (acpi_lpss_is_supplier(adev, link)) |
607 | acpi_lpss_link_consumer(dev1: &pdev->dev, link); |
608 | |
609 | if (acpi_lpss_is_consumer(adev, link)) |
610 | acpi_lpss_link_supplier(dev1: &pdev->dev, link); |
611 | } |
612 | } |
613 | |
614 | static int acpi_lpss_create_device(struct acpi_device *adev, |
615 | const struct acpi_device_id *id) |
616 | { |
617 | const struct lpss_device_desc *dev_desc; |
618 | struct lpss_private_data *pdata; |
619 | struct resource_entry *rentry; |
620 | struct list_head resource_list; |
621 | struct platform_device *pdev; |
622 | int ret; |
623 | |
624 | dev_desc = (const struct lpss_device_desc *)id->driver_data; |
625 | if (!dev_desc) |
626 | return -EINVAL; |
627 | |
628 | pdata = kzalloc(size: sizeof(*pdata), GFP_KERNEL); |
629 | if (!pdata) |
630 | return -ENOMEM; |
631 | |
632 | INIT_LIST_HEAD(list: &resource_list); |
633 | ret = acpi_dev_get_memory_resources(adev, list: &resource_list); |
634 | if (ret < 0) |
635 | goto err_out; |
636 | |
637 | rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node); |
638 | if (rentry) { |
639 | if (dev_desc->prv_size_override) |
640 | pdata->mmio_size = dev_desc->prv_size_override; |
641 | else |
642 | pdata->mmio_size = resource_size(res: rentry->res); |
643 | pdata->mmio_base = ioremap(offset: rentry->res->start, size: pdata->mmio_size); |
644 | } |
645 | |
646 | acpi_dev_free_resource_list(list: &resource_list); |
647 | |
648 | if (!pdata->mmio_base) { |
649 | /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */ |
650 | adev->pnp.type.platform_id = 0; |
651 | goto out_free; |
652 | } |
653 | |
654 | pdata->adev = adev; |
655 | pdata->dev_desc = dev_desc; |
656 | |
657 | if (dev_desc->setup) |
658 | dev_desc->setup(pdata); |
659 | |
660 | if (dev_desc->flags & LPSS_CLK) { |
661 | ret = register_device_clock(adev, pdata); |
662 | if (ret) |
663 | goto out_free; |
664 | } |
665 | |
666 | /* |
667 | * This works around a known issue in ACPI tables where LPSS devices |
668 | * have _PS0 and _PS3 without _PSC (and no power resources), so |
669 | * acpi_bus_init_power() will assume that the BIOS has put them into D0. |
670 | */ |
671 | acpi_device_fix_up_power(device: adev); |
672 | |
673 | adev->driver_data = pdata; |
674 | pdev = acpi_create_platform_device(adev, dev_desc->properties); |
675 | if (IS_ERR_OR_NULL(ptr: pdev)) { |
676 | adev->driver_data = NULL; |
677 | ret = PTR_ERR(ptr: pdev); |
678 | goto err_out; |
679 | } |
680 | |
681 | acpi_lpss_create_device_links(adev, pdev); |
682 | return 1; |
683 | |
684 | out_free: |
685 | /* Skip the device, but continue the namespace scan */ |
686 | ret = 0; |
687 | err_out: |
688 | kfree(objp: pdata); |
689 | return ret; |
690 | } |
691 | |
692 | static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg) |
693 | { |
694 | return readl(addr: pdata->mmio_base + pdata->dev_desc->prv_offset + reg); |
695 | } |
696 | |
697 | static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata, |
698 | unsigned int reg) |
699 | { |
700 | writel(val, addr: pdata->mmio_base + pdata->dev_desc->prv_offset + reg); |
701 | } |
702 | |
703 | static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val) |
704 | { |
705 | struct acpi_device *adev = ACPI_COMPANION(dev); |
706 | struct lpss_private_data *pdata; |
707 | unsigned long flags; |
708 | int ret; |
709 | |
710 | if (WARN_ON(!adev)) |
711 | return -ENODEV; |
712 | |
713 | spin_lock_irqsave(&dev->power.lock, flags); |
714 | if (pm_runtime_suspended(dev)) { |
715 | ret = -EAGAIN; |
716 | goto out; |
717 | } |
718 | pdata = acpi_driver_data(d: adev); |
719 | if (WARN_ON(!pdata || !pdata->mmio_base)) { |
720 | ret = -ENODEV; |
721 | goto out; |
722 | } |
723 | *val = __lpss_reg_read(pdata, reg); |
724 | ret = 0; |
725 | |
726 | out: |
727 | spin_unlock_irqrestore(lock: &dev->power.lock, flags); |
728 | return ret; |
729 | } |
730 | |
731 | static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr, |
732 | char *buf) |
733 | { |
734 | u32 ltr_value = 0; |
735 | unsigned int reg; |
736 | int ret; |
737 | |
738 | reg = strcmp(attr->attr.name, "auto_ltr" ) ? LPSS_SW_LTR : LPSS_AUTO_LTR; |
739 | ret = lpss_reg_read(dev, reg, val: <r_value); |
740 | if (ret) |
741 | return ret; |
742 | |
743 | return sysfs_emit(buf, fmt: "%08x\n" , ltr_value); |
744 | } |
745 | |
746 | static ssize_t lpss_ltr_mode_show(struct device *dev, |
747 | struct device_attribute *attr, char *buf) |
748 | { |
749 | u32 ltr_mode = 0; |
750 | char *outstr; |
751 | int ret; |
752 | |
753 | ret = lpss_reg_read(dev, LPSS_GENERAL, val: <r_mode); |
754 | if (ret) |
755 | return ret; |
756 | |
757 | outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto" ; |
758 | return sprintf(buf, fmt: "%s\n" , outstr); |
759 | } |
760 | |
/* Root-readable sysfs attributes exposing the LTR registers. */
static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);

static struct attribute *lpss_attrs[] = {
	&dev_attr_auto_ltr.attr,
	&dev_attr_sw_ltr.attr,
	&dev_attr_ltr_mode.attr,
	NULL,
};

/* Published under the "lpss_ltr" subdirectory of the device. */
static const struct attribute_group lpss_attr_group = {
	.attrs = lpss_attrs,
	.name = "lpss_ltr" ,
};
776 | |
777 | static void acpi_lpss_set_ltr(struct device *dev, s32 val) |
778 | { |
779 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); |
780 | u32 ltr_mode, ltr_val; |
781 | |
782 | ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL); |
783 | if (val < 0) { |
784 | if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) { |
785 | ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW; |
786 | __lpss_reg_write(val: ltr_mode, pdata, LPSS_GENERAL); |
787 | } |
788 | return; |
789 | } |
790 | ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK; |
791 | if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) { |
792 | ltr_val |= LPSS_LTR_SNOOP_LAT_32US; |
793 | val = LPSS_LTR_MAX_VAL; |
794 | } else if (val > LPSS_LTR_MAX_VAL) { |
795 | ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ; |
796 | val >>= LPSS_LTR_SNOOP_LAT_SHIFT; |
797 | } else { |
798 | ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ; |
799 | } |
800 | ltr_val |= val; |
801 | __lpss_reg_write(val: ltr_val, pdata, LPSS_SW_LTR); |
802 | if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) { |
803 | ltr_mode |= LPSS_GENERAL_LTR_MODE_SW; |
804 | __lpss_reg_write(val: ltr_mode, pdata, LPSS_GENERAL); |
805 | } |
806 | } |
807 | |
808 | #ifdef CONFIG_PM |
809 | /** |
810 | * acpi_lpss_save_ctx() - Save the private registers of LPSS device |
811 | * @dev: LPSS device |
812 | * @pdata: pointer to the private data of the LPSS device |
813 | * |
814 | * Most LPSS devices have private registers which may loose their context when |
815 | * the device is powered down. acpi_lpss_save_ctx() saves those registers into |
816 | * prv_reg_ctx array. |
817 | */ |
818 | static void acpi_lpss_save_ctx(struct device *dev, |
819 | struct lpss_private_data *pdata) |
820 | { |
821 | unsigned int i; |
822 | |
823 | for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { |
824 | unsigned long offset = i * sizeof(u32); |
825 | |
826 | pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, reg: offset); |
827 | dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n" , |
828 | pdata->prv_reg_ctx[i], offset); |
829 | } |
830 | } |
831 | |
832 | /** |
833 | * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device |
834 | * @dev: LPSS device |
835 | * @pdata: pointer to the private data of the LPSS device |
836 | * |
837 | * Restores the registers that were previously stored with acpi_lpss_save_ctx(). |
838 | */ |
839 | static void acpi_lpss_restore_ctx(struct device *dev, |
840 | struct lpss_private_data *pdata) |
841 | { |
842 | unsigned int i; |
843 | |
844 | for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { |
845 | unsigned long offset = i * sizeof(u32); |
846 | |
847 | __lpss_reg_write(val: pdata->prv_reg_ctx[i], pdata, reg: offset); |
848 | dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n" , |
849 | pdata->prv_reg_ctx[i], offset); |
850 | } |
851 | } |
852 | |
853 | static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata) |
854 | { |
855 | /* |
856 | * The following delay is needed or the subsequent write operations may |
857 | * fail. The LPSS devices are actually PCI devices and the PCI spec |
858 | * expects 10ms delay before the device can be accessed after D3 to D0 |
859 | * transition. However some platforms like BSW does not need this delay. |
860 | */ |
861 | unsigned int delay = 10; /* default 10ms delay */ |
862 | |
863 | if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY) |
864 | delay = 0; |
865 | |
866 | msleep(msecs: delay); |
867 | } |
868 | |
869 | static int acpi_lpss_activate(struct device *dev) |
870 | { |
871 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); |
872 | int ret; |
873 | |
874 | ret = acpi_dev_resume(dev); |
875 | if (ret) |
876 | return ret; |
877 | |
878 | acpi_lpss_d3_to_d0_delay(pdata); |
879 | |
880 | /* |
881 | * This is called only on ->probe() stage where a device is either in |
882 | * known state defined by BIOS or most likely powered off. Due to this |
883 | * we have to deassert reset line to be sure that ->probe() will |
884 | * recognize the device. |
885 | */ |
886 | if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE)) |
887 | lpss_deassert_reset(pdata); |
888 | |
889 | #ifdef CONFIG_PM |
890 | if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE) |
891 | acpi_lpss_save_ctx(dev, pdata); |
892 | #endif |
893 | |
894 | return 0; |
895 | } |
896 | |
897 | static void acpi_lpss_dismiss(struct device *dev) |
898 | { |
899 | acpi_dev_suspend(dev, wakeup: false); |
900 | } |
901 | |
/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP		0xA0	/* LPSS endpoint unit */
#define LPSS_IOSF_UNIT_LPIO1		0xAB	/* first LPSS DMA controller */
#define LPSS_IOSF_UNIT_LPIO2		0xAC	/* second LPSS DMA controller */

#define LPSS_IOSF_PMCSR			0x84	/* power management ctrl/status */
#define LPSS_PMCSR_D0			0
#define LPSS_PMCSR_D3hot		3
#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0		0x154
#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP		BIT(13)

/* Serializes D3 entry/exit for the shared LPSS DMA controllers. */
static DEFINE_MUTEX(lpss_iosf_mutex);
/* Tracks whether the DMA controllers were put into D3 by us. */
static bool lpss_iosf_d3_entered = true;
920 | |
921 | static void lpss_iosf_enter_d3_state(void) |
922 | { |
923 | u32 value1 = 0; |
924 | u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP; |
925 | u32 value2 = LPSS_PMCSR_D3hot; |
926 | u32 mask2 = LPSS_PMCSR_Dx_MASK; |
927 | /* |
928 | * PMC provides an information about actual status of the LPSS devices. |
929 | * Here we read the values related to LPSS power island, i.e. LPSS |
930 | * devices, excluding both LPSS DMA controllers, along with SCC domain. |
931 | */ |
932 | u32 func_dis, d3_sts_0, pmc_status; |
933 | int ret; |
934 | |
935 | ret = pmc_atom_read(PMC_FUNC_DIS, value: &func_dis); |
936 | if (ret) |
937 | return; |
938 | |
939 | mutex_lock(&lpss_iosf_mutex); |
940 | |
941 | ret = pmc_atom_read(PMC_D3_STS_0, value: &d3_sts_0); |
942 | if (ret) |
943 | goto exit; |
944 | |
945 | /* |
946 | * Get the status of entire LPSS power island per device basis. |
947 | * Shutdown both LPSS DMA controllers if and only if all other devices |
948 | * are already in D3hot. |
949 | */ |
950 | pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask; |
951 | if (pmc_status) |
952 | goto exit; |
953 | |
954 | iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, |
955 | LPSS_IOSF_PMCSR, mdr: value2, mask: mask2); |
956 | |
957 | iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE, |
958 | LPSS_IOSF_PMCSR, mdr: value2, mask: mask2); |
959 | |
960 | iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, |
961 | LPSS_IOSF_GPIODEF0, mdr: value1, mask: mask1); |
962 | |
963 | lpss_iosf_d3_entered = true; |
964 | |
965 | exit: |
966 | mutex_unlock(lock: &lpss_iosf_mutex); |
967 | } |
968 | |
969 | static void lpss_iosf_exit_d3_state(void) |
970 | { |
971 | u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 | |
972 | LPSS_GPIODEF0_DMA_LLP; |
973 | u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP; |
974 | u32 value2 = LPSS_PMCSR_D0; |
975 | u32 mask2 = LPSS_PMCSR_Dx_MASK; |
976 | |
977 | mutex_lock(&lpss_iosf_mutex); |
978 | |
979 | if (!lpss_iosf_d3_entered) |
980 | goto exit; |
981 | |
982 | lpss_iosf_d3_entered = false; |
983 | |
984 | iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, |
985 | LPSS_IOSF_GPIODEF0, mdr: value1, mask: mask1); |
986 | |
987 | iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE, |
988 | LPSS_IOSF_PMCSR, mdr: value2, mask: mask2); |
989 | |
990 | iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, |
991 | LPSS_IOSF_PMCSR, mdr: value2, mask: mask2); |
992 | |
993 | exit: |
994 | mutex_unlock(lock: &lpss_iosf_mutex); |
995 | } |
996 | |
/*
 * Common suspend path: save register context if the device needs it on every
 * suspend, put the device into a low-power state via ACPI, and then (on
 * runtime suspend only, i.e. S0 target state) try to power down the LPSS DMA
 * island. Returns the acpi_dev_suspend() result.
 */
static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	ret = acpi_dev_suspend(dev, wakeup);

	/*
	 * This call must be last in the sequence, otherwise PMC will return
	 * wrong status for devices being about to be powered off. See
	 * lpss_iosf_enter_d3_state() for further information.
	 */
	if (acpi_target_system_state() == ACPI_STATE_S0 &&
	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_enter_d3_state();

	return ret;
}
1018 | |
/*
 * Common resume path: power the LPSS DMA island back up first, resume the
 * device via ACPI, apply the mandatory D3->D0 access delay, and restore the
 * saved register context for devices that need it. Returns 0 on success or
 * the acpi_dev_resume() error.
 */
static int acpi_lpss_resume(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/*
	 * This call is kept first to be in symmetry with
	 * acpi_lpss_runtime_suspend() one.
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_exit_d3_state();

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
		acpi_lpss_restore_ctx(dev, pdata);

	return 0;
}
1042 | |
1043 | #ifdef CONFIG_PM_SLEEP |
/*
 * Run the driver's generic late-suspend callback and then the LPSS-specific
 * suspend, honoring the device's wakeup capability. Skipped entirely when
 * the PM core says suspend can be skipped.
 */
static int acpi_lpss_do_suspend_late(struct device *dev)
{
	int ret;

	if (dev_pm_skip_suspend(dev))
		return 0;

	ret = pm_generic_suspend_late(dev);
	/* C has no named arguments; drop the stray "wakeup:" inlay label. */
	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}
1054 | |
1055 | static int acpi_lpss_suspend_late(struct device *dev) |
1056 | { |
1057 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); |
1058 | |
1059 | if (pdata->dev_desc->resume_from_noirq) |
1060 | return 0; |
1061 | |
1062 | return acpi_lpss_do_suspend_late(dev); |
1063 | } |
1064 | |
/*
 * ->suspend_noirq callback: for resume_from_noirq devices, run the deferred
 * late-suspend work first, then hand off to the generic ACPI noirq suspend.
 */
static int acpi_lpss_suspend_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->resume_from_noirq) {
		/*
		 * The driver's ->suspend_late callback will be invoked by
		 * acpi_lpss_do_suspend_late(), with the assumption that the
		 * driver really wanted to run that code in ->suspend_noirq, but
		 * it could not run after acpi_dev_suspend() and the driver
		 * expected the latter to be called in the "late" phase.
		 */
		ret = acpi_lpss_do_suspend_late(dev);
		if (ret)
			return ret;
	}

	return acpi_subsys_suspend_noirq(dev);
}
1085 | |
/* Resume the device via ACPI, then run the generic early-resume callback. */
static int acpi_lpss_do_resume_early(struct device *dev)
{
	int ret;

	ret = acpi_lpss_resume(dev);
	if (ret)
		return ret;

	return pm_generic_resume_early(dev);
}
1092 | |
1093 | static int acpi_lpss_resume_early(struct device *dev) |
1094 | { |
1095 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); |
1096 | |
1097 | if (pdata->dev_desc->resume_from_noirq) |
1098 | return 0; |
1099 | |
1100 | if (dev_pm_skip_resume(dev)) |
1101 | return 0; |
1102 | |
1103 | return acpi_lpss_do_resume_early(dev); |
1104 | } |
1105 | |
/*
 * ->resume_noirq callback: mirrors acpi_subsys_resume_noirq() and, for
 * resume_from_noirq devices, also runs the early-resume work here.
 */
static int acpi_lpss_resume_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/* Follow acpi_subsys_resume_noirq(). */
	if (dev_pm_skip_resume(dev))
		return 0;

	ret = pm_generic_resume_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/*
	 * The driver's ->resume_early callback will be invoked by
	 * acpi_lpss_do_resume_early(), with the assumption that the driver
	 * really wanted to run that code in ->resume_noirq, but it could not
	 * run before acpi_dev_resume() and the driver expected the latter to be
	 * called in the "early" phase.
	 */
	return acpi_lpss_do_resume_early(dev);
}
1131 | |
/* Resume the device via ACPI, then run the generic early-restore callback. */
static int acpi_lpss_do_restore_early(struct device *dev)
{
	int ret;

	ret = acpi_lpss_resume(dev);
	if (ret)
		return ret;

	return pm_generic_restore_early(dev);
}
1138 | |
1139 | static int acpi_lpss_restore_early(struct device *dev) |
1140 | { |
1141 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); |
1142 | |
1143 | if (pdata->dev_desc->resume_from_noirq) |
1144 | return 0; |
1145 | |
1146 | return acpi_lpss_do_restore_early(dev); |
1147 | } |
1148 | |
/*
 * ->restore_noirq callback: generic noirq restore plus, for
 * resume_from_noirq devices, the deferred early-restore work.
 */
static int acpi_lpss_restore_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_restore_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/* This is analogous to what happens in acpi_lpss_resume_noirq(). */
	return acpi_lpss_do_restore_early(dev);
}
1164 | |
/*
 * Run the driver's generic late-poweroff callback, then the LPSS-specific
 * suspend, honoring the device's wakeup capability.
 */
static int acpi_lpss_do_poweroff_late(struct device *dev)
{
	int ret = pm_generic_poweroff_late(dev);

	/* C has no named arguments; drop the stray "wakeup:" inlay label. */
	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}
1171 | |
1172 | static int acpi_lpss_poweroff_late(struct device *dev) |
1173 | { |
1174 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); |
1175 | |
1176 | if (dev_pm_skip_suspend(dev)) |
1177 | return 0; |
1178 | |
1179 | if (pdata->dev_desc->resume_from_noirq) |
1180 | return 0; |
1181 | |
1182 | return acpi_lpss_do_poweroff_late(dev); |
1183 | } |
1184 | |
/*
 * ->poweroff_noirq callback: for resume_from_noirq devices, run the deferred
 * late-poweroff work first, then the generic noirq poweroff.
 */
static int acpi_lpss_poweroff_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq) {
		/* This is analogous to the acpi_lpss_suspend_noirq() case. */
		int ret = acpi_lpss_do_poweroff_late(dev);

		if (ret)
			return ret;
	}

	return pm_generic_poweroff_noirq(dev);
}
1202 | #endif /* CONFIG_PM_SLEEP */ |
1203 | |
1204 | static int acpi_lpss_runtime_suspend(struct device *dev) |
1205 | { |
1206 | int ret = pm_generic_runtime_suspend(dev); |
1207 | |
1208 | return ret ? ret : acpi_lpss_suspend(dev, wakeup: true); |
1209 | } |
1210 | |
/* ->runtime_resume callback: LPSS resume, then the generic runtime resume. */
static int acpi_lpss_runtime_resume(struct device *dev)
{
	int ret;

	ret = acpi_lpss_resume(dev);
	if (ret)
		return ret;

	return pm_generic_runtime_resume(dev);
}
1217 | #endif /* CONFIG_PM */ |
1218 | |
/*
 * PM domain attached to LPSS platform devices. It wraps the generic ACPI
 * subsystem PM callbacks with the LPSS-specific context save/restore, D3
 * delay and DMA-island power handling above.
 */
static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
	.activate = acpi_lpss_activate,
	.dismiss = acpi_lpss_dismiss,
#endif
	.ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
		.prepare = acpi_subsys_prepare,
		.complete = acpi_subsys_complete,
		.suspend = acpi_subsys_suspend,
		.suspend_late = acpi_lpss_suspend_late,
		.suspend_noirq = acpi_lpss_suspend_noirq,
		.resume_noirq = acpi_lpss_resume_noirq,
		.resume_early = acpi_lpss_resume_early,
		.freeze = acpi_subsys_freeze,
		.poweroff = acpi_subsys_poweroff,
		.poweroff_late = acpi_lpss_poweroff_late,
		.poweroff_noirq = acpi_lpss_poweroff_noirq,
		.restore_noirq = acpi_lpss_restore_noirq,
		.restore_early = acpi_lpss_restore_early,
#endif
		.runtime_suspend = acpi_lpss_runtime_suspend,
		.runtime_resume = acpi_lpss_runtime_resume,
#endif
	},
};
1246 | |
1247 | static int acpi_lpss_platform_notify(struct notifier_block *nb, |
1248 | unsigned long action, void *data) |
1249 | { |
1250 | struct platform_device *pdev = to_platform_device(data); |
1251 | struct lpss_private_data *pdata; |
1252 | struct acpi_device *adev; |
1253 | const struct acpi_device_id *id; |
1254 | |
1255 | id = acpi_match_device(ids: acpi_lpss_device_ids, dev: &pdev->dev); |
1256 | if (!id || !id->driver_data) |
1257 | return 0; |
1258 | |
1259 | adev = ACPI_COMPANION(&pdev->dev); |
1260 | if (!adev) |
1261 | return 0; |
1262 | |
1263 | pdata = acpi_driver_data(d: adev); |
1264 | if (!pdata) |
1265 | return 0; |
1266 | |
1267 | if (pdata->mmio_base && |
1268 | pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) { |
1269 | dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n" ); |
1270 | return 0; |
1271 | } |
1272 | |
1273 | switch (action) { |
1274 | case BUS_NOTIFY_BIND_DRIVER: |
1275 | dev_pm_domain_set(dev: &pdev->dev, pd: &acpi_lpss_pm_domain); |
1276 | break; |
1277 | case BUS_NOTIFY_DRIVER_NOT_BOUND: |
1278 | case BUS_NOTIFY_UNBOUND_DRIVER: |
1279 | dev_pm_domain_set(dev: &pdev->dev, NULL); |
1280 | break; |
1281 | case BUS_NOTIFY_ADD_DEVICE: |
1282 | dev_pm_domain_set(dev: &pdev->dev, pd: &acpi_lpss_pm_domain); |
1283 | if (pdata->dev_desc->flags & LPSS_LTR) |
1284 | return sysfs_create_group(kobj: &pdev->dev.kobj, |
1285 | grp: &lpss_attr_group); |
1286 | break; |
1287 | case BUS_NOTIFY_DEL_DEVICE: |
1288 | if (pdata->dev_desc->flags & LPSS_LTR) |
1289 | sysfs_remove_group(kobj: &pdev->dev.kobj, grp: &lpss_attr_group); |
1290 | dev_pm_domain_set(dev: &pdev->dev, NULL); |
1291 | break; |
1292 | default: |
1293 | break; |
1294 | } |
1295 | |
1296 | return 0; |
1297 | } |
1298 | |
/* Notifier block registered on the platform bus in acpi_lpss_init(). */
static struct notifier_block acpi_lpss_nb = {
	.notifier_call = acpi_lpss_platform_notify,
};
1302 | |
1303 | static void acpi_lpss_bind(struct device *dev) |
1304 | { |
1305 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); |
1306 | |
1307 | if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR)) |
1308 | return; |
1309 | |
1310 | if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) |
1311 | dev->power.set_latency_tolerance = acpi_lpss_set_ltr; |
1312 | else |
1313 | dev_err(dev, "MMIO size insufficient to access LTR\n" ); |
1314 | } |
1315 | |
/* Scan handler ->unbind() callback: remove the LTR hook set by ->bind(). */
static void acpi_lpss_unbind(struct device *dev)
{
	dev->power.set_latency_tolerance = NULL;
}
1320 | |
/* ACPI scan handler creating platform devices for matched LPSS IDs. */
static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
	.attach = acpi_lpss_create_device,
	.bind = acpi_lpss_bind,
	.unbind = acpi_lpss_unbind,
};
1327 | |
1328 | void __init acpi_lpss_init(void) |
1329 | { |
1330 | const struct x86_cpu_id *id; |
1331 | int ret; |
1332 | |
1333 | ret = lpss_atom_clk_init(); |
1334 | if (ret) |
1335 | return; |
1336 | |
1337 | id = x86_match_cpu(match: lpss_cpu_ids); |
1338 | if (id) |
1339 | lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON; |
1340 | |
1341 | bus_register_notifier(bus: &platform_bus_type, nb: &acpi_lpss_nb); |
1342 | acpi_scan_add_handler(handler: &lpss_handler); |
1343 | } |
1344 | |
1345 | #else |
1346 | |
/* Without CONFIG_X86_INTEL_LPSS: match the IDs but attach no devices. */
static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
};
1350 | |
/* Stub init for !CONFIG_X86_INTEL_LPSS: only register the scan handler. */
void __init acpi_lpss_init(void)
{
	acpi_scan_add_handler(&lpss_handler);
}
1355 | |
1356 | #endif /* CONFIG_X86_INTEL_LPSS */ |
1357 | |