// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ec.c - ACPI Embedded Controller Driver (v3)
 *
 * Copyright (C) 2001-2015 Intel Corporation
 * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
 *         2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 *         2006 Denis Sadykov <denis.m.sadykov@intel.com>
 *         2004 Luming Yu <luming.yu@intel.com>
 *         2001, 2002 Andy Grover <andrew.grover@intel.com>
 *         2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: EC: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>

#include "internal.h"

#define ACPI_EC_CLASS			"embedded_controller"
#define ACPI_EC_DEVICE_NAME		"Embedded Controller"

/* EC status register */
#define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
#define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
#define ACPI_EC_FLAG_CMD	0x08	/* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */

/*
 * The SCI_EVT clearing timing is not defined by the ACPI specification.
 * This leads to lots of practical timing issues for the host EC driver.
 * The following variations are defined (from the target EC firmware's
 * perspective):
 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
 *         target can clear SCI_EVT at any time so long as the host can see
 *         the indication by reading the status register (EC_SC). So the
 *         host should re-check SCI_EVT after the first time the SCI_EVT
 *         indication is seen, which is the same time the query request
 *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
 *         at any later time could indicate another event. Normally such
 *         EC firmware implements an event queue and will return 0x00 to
 *         indicate "no outstanding event".
 * QUERY: After seeing the query request (QR_EC) written to the command
 *        register (EC_CMD) by the host and having prepared the responding
 *        event value in the data register (EC_DATA), the target can safely
 *        clear SCI_EVT because the target can confirm that the current
 *        event is being handled by the host. The host then should check
 *        SCI_EVT right after reading the event response from the data
 *        register (EC_DATA).
 * EVENT: After seeing the event response read from the data register
 *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
 *        target requires time to notice the change in the data register
 *        (EC_DATA), the host may be required to wait additional guarding
 *        time before checking the SCI_EVT again. Such guarding may not be
 *        necessary if the host is notified via another IRQ.
 */
#define ACPI_EC_EVT_TIMING_STATUS	0x00
#define ACPI_EC_EVT_TIMING_QUERY	0x01
#define ACPI_EC_EVT_TIMING_EVENT	0x02
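
/*
 * Example (illustrative, condensing the comment above): with the default
 * QUERY timing, one event is handled as:
 *
 *	EC:   sets SCI_EVT and raises an EC IRQ
 *	host: writes QR_EC (0x84) to EC_CMD
 *	EC:   places the event value in EC_DATA and clears SCI_EVT
 *	host: reads EC_DATA, then re-checks SCI_EVT for further events
 */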

/* EC commands */
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,
	ACPI_EC_COMMAND_WRITE = 0x81,
	ACPI_EC_BURST_ENABLE = 0x82,
	ACPI_EC_BURST_DISABLE = 0x83,
	ACPI_EC_COMMAND_QUERY = 0x84,
};
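
/*
 * Example (illustrative): a register read is a three-byte exchange over
 * the command/data port pair:
 *
 *	host -> EC_CMD  : 0x80 (ACPI_EC_COMMAND_READ)
 *	host -> EC_DATA : <offset in the EC address space>
 *	host <- EC_DATA : <register value>
 *
 * acpi_ec_read() below encodes this as a transaction with wlen = 1 and
 * rlen = 1.
 */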

#define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL	550	/* Wait 550us between EC transaction polls */
#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
					 * when trying to clear the EC */
#define ACPI_EC_MAX_QUERIES	16	/* Maximum number of parallel queries */

enum {
	EC_FLAGS_QUERY_ENABLED,			/* Query is enabled */
	EC_FLAGS_EVENT_HANDLER_INSTALLED,	/* Event handler installed */
	EC_FLAGS_EC_HANDLER_INSTALLED,		/* OpReg handler installed */
	EC_FLAGS_EC_REG_CALLED,			/* OpReg ACPI _REG method called */
	EC_FLAGS_QUERY_METHODS_INSTALLED,	/* _Qxx handlers installed */
	EC_FLAGS_STARTED,			/* Driver is started */
	EC_FLAGS_STOPPED,			/* Driver is stopped */
	EC_FLAGS_EVENTS_MASKED,			/* Events masked */
};

#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */

/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
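
/*
 * Example (illustrative): since this file is compiled into the "acpi"
 * module namespace, the parameters here can be set on the kernel command
 * line, e.g. acpi.ec_delay=1000, and the 0644 permissions also make them
 * writable at runtime via /sys/module/acpi/parameters/ec_delay.
 */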

static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
module_param(ec_max_queries, uint, 0644);
MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");

static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");

static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
module_param(ec_polling_guard, uint, 0644);
MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");

static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;

/*
 * If the number of false interrupts per one transaction exceeds
 * this threshold, a GPE storm is assumed to be happening and the GPE
 * will be disabled for the rest of the transaction.
 */
static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered a GPE storm");

static bool ec_freeze_events __read_mostly;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disable event handling during suspend/resume");

static bool ec_no_wakeup __read_mostly;
module_param(ec_no_wakeup, bool, 0644);
MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");

struct acpi_ec_query_handler {
	struct list_head node;
	acpi_ec_query_func func;
	acpi_handle handle;
	void *data;
	u8 query_bit;
	struct kref kref;
};

struct transaction {
	const u8 *wdata;
	u8 *rdata;
	unsigned short irq_count;
	u8 command;
	u8 wi;			/* write index into wdata */
	u8 ri;			/* read index into rdata */
	u8 wlen;
	u8 rlen;
	u8 flags;
};

struct acpi_ec_query {
	struct transaction transaction;
	struct work_struct work;
	struct acpi_ec_query_handler *handler;
	struct acpi_ec *ec;
};

static int acpi_ec_submit_query(struct acpi_ec *ec);
static void advance_transaction(struct acpi_ec *ec, bool interrupt);
static void acpi_ec_event_handler(struct work_struct *work);

struct acpi_ec *first_ec;
EXPORT_SYMBOL(first_ec);

static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt;
static struct workqueue_struct *ec_wq;
static struct workqueue_struct *ec_query_wq;

static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */

/* --------------------------------------------------------------------------
 *                           Logging/Debugging
 * -------------------------------------------------------------------------- */

/*
 * Splitters used by the developers to track the boundary of the EC
 * handling processes.
 */
#ifdef DEBUG
#define EC_DBG_SEP	" "
#define EC_DBG_DRV	"+++++"
#define EC_DBG_STM	"====="
#define EC_DBG_REQ	"*****"
#define EC_DBG_EVT	"#####"
#else
#define EC_DBG_SEP	""
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#endif

#define ec_log_raw(fmt, ...) \
	pr_info(fmt "\n", ##__VA_ARGS__)
#define ec_dbg_raw(fmt, ...) \
	pr_debug(fmt "\n", ##__VA_ARGS__)
#define ec_log(filter, fmt, ...) \
	ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_dbg(filter, fmt, ...) \
	ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)

#define ec_log_drv(fmt, ...) \
	ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_drv(fmt, ...) \
	ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_stm(fmt, ...) \
	ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
#define ec_dbg_req(fmt, ...) \
	ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
#define ec_dbg_evt(fmt, ...) \
	ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
#define ec_dbg_ref(ec, fmt, ...) \
	ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)

/* --------------------------------------------------------------------------
 *                           Device Flags
 * -------------------------------------------------------------------------- */

234 | |
235 | static bool acpi_ec_started(struct acpi_ec *ec) |
236 | { |
237 | return test_bit(EC_FLAGS_STARTED, &ec->flags) && |
238 | !test_bit(EC_FLAGS_STOPPED, &ec->flags); |
239 | } |
240 | |
241 | static bool acpi_ec_event_enabled(struct acpi_ec *ec) |
242 | { |
	/*
	 * OSPM has an early-stage logic: during the early stages
	 * (boot/resume), OSPM shouldn't enable event handling; only EC
	 * transactions are allowed to be performed.
	 */
	if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		return false;
	/*
	 * However, disabling event handling during the late stage
	 * (suspend) is experimental and is controlled by the
	 * "ec_freeze_events" boot parameter:
	 * 1. true:  The EC event handling is disabled before entering
	 *           the noirq stage.
	 * 2. false: The EC event handling is automatically disabled as
	 *           soon as the EC driver is stopped.
	 */
	if (ec_freeze_events)
		return acpi_ec_started(ec);
	else
		return test_bit(EC_FLAGS_STARTED, &ec->flags);
}

static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}

/* --------------------------------------------------------------------------
 *                           EC Registers
 * -------------------------------------------------------------------------- */

static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
	u8 x = inb(ec->command_addr);

	ec_dbg_raw("EC_SC(R) = 0x%2.2x "
		   "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
		   x,
		   !!(x & ACPI_EC_FLAG_SCI),
		   !!(x & ACPI_EC_FLAG_BURST),
		   !!(x & ACPI_EC_FLAG_CMD),
		   !!(x & ACPI_EC_FLAG_IBF),
		   !!(x & ACPI_EC_FLAG_OBF));
	return x;
}

static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
	u8 x = inb(ec->data_addr);

	ec->timestamp = jiffies;
	ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
	return x;
}

static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
	outb(command, ec->command_addr);
	ec->timestamp = jiffies;
}

static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
	outb(data, ec->data_addr);
	ec->timestamp = jiffies;
}

#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
static const char *acpi_ec_cmd_string(u8 cmd)
{
	switch (cmd) {
	case 0x80:
		return "RD_EC";
	case 0x81:
		return "WR_EC";
	case 0x82:
		return "BE_EC";
	case 0x83:
		return "BD_EC";
	case 0x84:
		return "QR_EC";
	}
	return "UNKNOWN";
}
#else
#define acpi_ec_cmd_string(cmd)		"UNDEF"
#endif

/* --------------------------------------------------------------------------
 *                           GPE Registers
 * -------------------------------------------------------------------------- */

static inline bool acpi_ec_gpe_status_set(struct acpi_ec *ec)
{
	acpi_event_status gpe_status = 0;

	(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
	return !!(gpe_status & ACPI_EVENT_FLAG_STATUS_SET);
}

static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_gpe_status_set(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger the GPE, so
		 * software needs to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
359 | ec_dbg_raw("Polling quirk"); |
360 | advance_transaction(ec, interrupt: false); |
361 | } |
362 | } |
363 | |
364 | static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close) |
365 | { |
366 | if (close) |
367 | acpi_disable_gpe(NULL, gpe_number: ec->gpe); |
368 | else { |
369 | BUG_ON(ec->reference_count < 1); |
370 | acpi_set_gpe(NULL, gpe_number: ec->gpe, ACPI_GPE_DISABLE); |
371 | } |
372 | } |
373 | |
374 | /* -------------------------------------------------------------------------- |
375 | * Transaction Management |
376 | * -------------------------------------------------------------------------- */ |
377 | |
378 | static void acpi_ec_submit_request(struct acpi_ec *ec) |
379 | { |
380 | ec->reference_count++; |
381 | if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) && |
382 | ec->gpe >= 0 && ec->reference_count == 1) |
383 | acpi_ec_enable_gpe(ec, open: true); |
384 | } |
385 | |
386 | static void acpi_ec_complete_request(struct acpi_ec *ec) |
387 | { |
388 | bool flushed = false; |
389 | |
390 | ec->reference_count--; |
391 | if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) && |
392 | ec->gpe >= 0 && ec->reference_count == 0) |
393 | acpi_ec_disable_gpe(ec, close: true); |
394 | flushed = acpi_ec_flushed(ec); |
395 | if (flushed) |
396 | wake_up(&ec->wait); |
397 | } |
398 | |
399 | static void acpi_ec_mask_events(struct acpi_ec *ec) |
400 | { |
401 | if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) { |
402 | if (ec->gpe >= 0) |
403 | acpi_ec_disable_gpe(ec, close: false); |
404 | else |
405 | disable_irq_nosync(irq: ec->irq); |
406 | |
407 | ec_dbg_drv("Polling enabled"); |
408 | set_bit(nr: EC_FLAGS_EVENTS_MASKED, addr: &ec->flags); |
409 | } |
410 | } |
411 | |
412 | static void acpi_ec_unmask_events(struct acpi_ec *ec) |
413 | { |
414 | if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) { |
415 | clear_bit(nr: EC_FLAGS_EVENTS_MASKED, addr: &ec->flags); |
416 | if (ec->gpe >= 0) |
417 | acpi_ec_enable_gpe(ec, open: false); |
418 | else |
419 | enable_irq(irq: ec->irq); |
420 | |
421 | ec_dbg_drv("Polling disabled"); |
422 | } |
423 | } |
424 | |
/*
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      a flush operation is in progress
 * @ec: the EC device
 *
 * This function must be used before taking a new action that should hold
 * the reference count. If this function returns false, then the action
 * must be discarded or it will prevent the flush operation from being
 * completed.
 */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
	if (!acpi_ec_started(ec))
		return false;
	acpi_ec_submit_request(ec);
	return true;
}

static void acpi_ec_submit_event(struct acpi_ec *ec)
{
	/*
	 * It is safe to mask the events here, because acpi_ec_close_event()
	 * will run at least once after this.
	 */
	acpi_ec_mask_events(ec);
	if (!acpi_ec_event_enabled(ec))
		return;

	if (ec->event_state != EC_EVENT_READY)
		return;

	ec_dbg_evt("Command(%s) submitted/blocked",
		   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));

	ec->event_state = EC_EVENT_IN_PROGRESS;
	/*
	 * If events_to_process is greater than 0 at this point, the while ()
	 * loop in acpi_ec_event_handler() is still running and incrementing
	 * events_to_process will cause it to invoke acpi_ec_submit_query() once
	 * more, so it is not necessary to queue up the event work to start the
	 * same loop again.
	 */
	if (ec->events_to_process++ > 0)
		return;

	ec->events_in_progress++;
	queue_work(ec_wq, &ec->work);
}

static void acpi_ec_complete_event(struct acpi_ec *ec)
{
	if (ec->event_state == EC_EVENT_IN_PROGRESS)
		ec->event_state = EC_EVENT_COMPLETE;
}

static void acpi_ec_close_event(struct acpi_ec *ec)
{
	if (ec->event_state != EC_EVENT_READY)
		ec_dbg_evt("Command(%s) unblocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));

	ec->event_state = EC_EVENT_READY;
	acpi_ec_unmask_events(ec);
}

static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event unblocked");
	/*
	 * Unconditionally invoke this once after enabling the event
	 * handling mechanism to detect the pending events.
	 */
	advance_transaction(ec, false);
}

static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
{
	if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event blocked");
}

/*
 * Process _Q events that might have accumulated in the EC.
 * Run with locked ec mutex.
 */
static void acpi_ec_clear(struct acpi_ec *ec)
{
	int i;

	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
		if (acpi_ec_submit_query(ec))
			break;
	}
	if (unlikely(i == ACPI_EC_CLEAR_MAX))
		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
	else
		pr_info("%d stale EC events cleared\n", i);
}

static void acpi_ec_enable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec))
		__acpi_ec_enable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	/* Drain additional events if hardware requires that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
}

#ifdef CONFIG_PM_SLEEP
static void __acpi_ec_flush_work(void)
{
	flush_workqueue(ec_wq); /* flush ec->work */
	flush_workqueue(ec_query_wq); /* flush queries */
}

static void acpi_ec_disable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	__acpi_ec_disable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	/*
	 * When ec_freeze_events is true, the event work must be flushed
	 * here, before entering the noirq stage.
	 */
	__acpi_ec_flush_work();
}

void acpi_ec_flush_work(void)
{
	/* Without ec_wq there is nothing to flush. */
	if (!ec_wq)
		return;

	__acpi_ec_flush_work();
}
#endif /* CONFIG_PM_SLEEP */

static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
	unsigned long flags;
	bool guarded;

	spin_lock_irqsave(&ec->lock, flags);
	/*
	 * If firmware SCI_EVT clearing timing is "event", we actually
	 * don't know when the SCI_EVT will be cleared by firmware after
	 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
	 * acceptable period.
	 *
	 * The guarding period is applicable if the event state is not
	 * EC_EVENT_READY, but otherwise if the current transaction is of the
	 * ACPI_EC_COMMAND_QUERY type, the guarding should have elapsed already
	 * and it should not be applied to let the transaction transition into
	 * the ACPI_EC_COMMAND_POLL state immediately.
	 */
	guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		ec->event_state != EC_EVENT_READY &&
		(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
	spin_unlock_irqrestore(&ec->lock, flags);
	return guarded;
}

static int ec_transaction_polled(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static int ec_transaction_completed(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;

	if (ec->curr->command != ACPI_EC_COMMAND_QUERY)
		return;

	switch (ec_event_clearing) {
	case ACPI_EC_EVT_TIMING_STATUS:
		if (flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_close_event(ec);

		return;

	case ACPI_EC_EVT_TIMING_QUERY:
		if (flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_close_event(ec);

		return;

	case ACPI_EC_EVT_TIMING_EVENT:
		if (flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_event(ec);
	}
}

static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t)
{
	if (t->irq_count < ec_storm_threshold)
		++t->irq_count;

	/* Trigger if the threshold is 0 too. */
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_mask_events(ec);
}

static void advance_transaction(struct acpi_ec *ec, bool interrupt)
{
	struct transaction *t = ec->curr;
	bool wakeup = false;
	u8 status;

	ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());

	status = acpi_ec_read_status(ec);

	/*
	 * Once another IRQ or a guarded polling mode advancement is
	 * detected, the next QR_EC submission is allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    ec->event_state == EC_EVENT_COMPLETE)
			acpi_ec_close_event(ec);

		if (!t)
			goto out;
	}

	if (t->flags & ACPI_EC_COMMAND_POLL) {
		if (t->wlen > t->wi) {
			if (!(status & ACPI_EC_FLAG_IBF))
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else if (interrupt && !(status & ACPI_EC_FLAG_SCI))
				acpi_ec_spurious_interrupt(ec, t);
		} else if (t->rlen > t->ri) {
			if (status & ACPI_EC_FLAG_OBF) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					wakeup = true;
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
				}
			} else if (interrupt && !(status & ACPI_EC_FLAG_SCI)) {
				acpi_ec_spurious_interrupt(ec, t);
			}
		} else if (t->wlen == t->wi && !(status & ACPI_EC_FLAG_IBF)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
	} else if (!(status & ACPI_EC_FLAG_IBF)) {
		acpi_ec_write_cmd(ec, t->command);
		ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
	}

out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_event(ec);

	if (wakeup && interrupt)
		wake_up(&ec->wait);
}

static void start_transaction(struct acpi_ec *ec)
{
	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
	ec->curr->flags = 0;
}

static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec->polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	/* Ensure guarding period before polling EC status */
	do {
		if (ec->busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling:
			 * 1. Wait for the transaction to be completed by
			 *    the GPE handler after the transaction enters
			 *    ACPI_EC_COMMAND_POLL state.
			 * 2. A special guarding logic is also required
			 *    for event clearing mode "event" before the
			 *    transaction enters ACPI_EC_COMMAND_POLL
			 *    state.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
	} while (time_before(jiffies, timeout));
	return -ETIME;
}

static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec, false);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}

static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	if (t->rdata)
		memset(t->rdata, 0, t->rlen);

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_unmask_events(ec);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}

static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;

	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}

	status = acpi_ec_transaction_unlocked(ec, t);

	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}

static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
	u8 d;
	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
				.wdata = NULL, .rdata = &d,
				.wlen = 0, .rlen = 1};

	return acpi_ec_transaction_unlocked(ec, &t);
}

static int acpi_ec_burst_disable(struct acpi_ec *ec)
{
	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
				.wdata = NULL, .rdata = NULL,
				.wlen = 0, .rlen = 0};

	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
				acpi_ec_transaction_unlocked(ec, &t) : 0;
}

static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
				.wdata = &address, .rdata = &d,
				.wlen = 1, .rlen = 1};

	result = acpi_ec_transaction(ec, &t);
	*data = d;
	return result;
}

static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
				.wdata = &address, .rdata = &d,
				.wlen = 1, .rlen = 1};

	result = acpi_ec_transaction_unlocked(ec, &t);
	*data = d;
	return result;
}

static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
				.wdata = wdata, .rdata = NULL,
				.wlen = 2, .rlen = 0};

	return acpi_ec_transaction(ec, &t);
}

static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
				.wdata = wdata, .rdata = NULL,
				.wlen = 2, .rlen = 0};

	return acpi_ec_transaction_unlocked(ec, &t);
}

int ec_read(u8 addr, u8 *val)
{
	int err;
	u8 temp_data;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_read(first_ec, addr, &temp_data);

	if (!err) {
		*val = temp_data;
		return 0;
	}
	return err;
}
EXPORT_SYMBOL(ec_read);
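
/*
 * Example (illustrative): a platform driver reading a byte from the EC
 * address space through the exported helpers; the 0x2F offset is
 * hypothetical, as real offsets are defined by the platform firmware:
 *
 *	u8 val;
 *
 *	if (!ec_read(0x2F, &val))
 *		pr_debug("EC offset 0x2F = 0x%02x\n", val);
 */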

int ec_write(u8 addr, u8 val)
{
	if (!first_ec)
		return -ENODEV;

	return acpi_ec_write(first_ec, addr, val);
}
EXPORT_SYMBOL(ec_write);

int ec_transaction(u8 command,
		   const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len)
{
	struct transaction t = {.command = command,
				.wdata = wdata, .rdata = rdata,
				.wlen = wdata_len, .rlen = rdata_len};

	if (!first_ec)
		return -ENODEV;

	return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);
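
/*
 * Example (illustrative): issuing a raw burst-enable command (0x82, per
 * enum ec_command above) through the exported transaction API; as in
 * acpi_ec_burst_enable(), the EC is expected to answer with a one-byte
 * acknowledgment:
 *
 *	u8 ack;
 *	int err = ec_transaction(ACPI_EC_BURST_ENABLE, NULL, 0, &ack, 1);
 */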

/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{
	if (!first_ec)
		return NULL;
	return first_ec->handle;
}
EXPORT_SYMBOL(ec_get_handle);

static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static bool acpi_ec_stopped(struct acpi_ec *ec)
{
	unsigned long flags;
	bool flushed;

	spin_lock_irqsave(&ec->lock, flags);
	flushed = acpi_ec_flushed(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return flushed;
}

static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		} else if (!ec_freeze_events)
			__acpi_ec_disable_event(ec);
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	ec->busy_polling = true;
	ec->polling_guard = 0;
	ec_log_drv("interrupt blocked");
	spin_unlock_irqrestore(&ec->lock, flags);
}

static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	ec->busy_polling = ec_busy_polling;
	ec->polling_guard = ec_polling_guard;
	ec_log_drv("interrupt unblocked");
	spin_unlock_irqrestore(&ec->lock, flags);
}

void acpi_ec_block_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	mutex_lock(&ec->mutex);
	/* Prevent transactions from being carried out */
	acpi_ec_stop(ec, true);
	mutex_unlock(&ec->mutex);
}

void acpi_ec_unblock_transactions(void)
{
	/*
	 * Allow transactions to happen again (this function is called from
	 * atomic context during wakeup, so we don't need to acquire the mutex).
	 */
	if (first_ec)
		acpi_ec_start(first_ec, true);
}

/* --------------------------------------------------------------------------
 *                           Event Management
 * -------------------------------------------------------------------------- */
static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
	struct acpi_ec_query_handler *handler;

	mutex_lock(&ec->mutex);
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			kref_get(&handler->kref);
			mutex_unlock(&ec->mutex);
			return handler;
		}
	}
	mutex_unlock(&ec->mutex);
	return NULL;
}

static void acpi_ec_query_handler_release(struct kref *kref)
{
	struct acpi_ec_query_handler *handler =
		container_of(kref, struct acpi_ec_query_handler, kref);

	kfree(handler);
}

static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
	kref_put(&handler->kref, acpi_ec_query_handler_release);
}

int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{
	struct acpi_ec_query_handler *handler;

	if (!handle && !func)
		return -EINVAL;

	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler)
		return -ENOMEM;

	handler->query_bit = query_bit;
	handler->handle = handle;
	handler->func = func;
	handler->data = data;
	mutex_lock(&ec->mutex);
	kref_init(&handler->kref);
	list_add(&handler->node, &ec->list);
	mutex_unlock(&ec->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
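
/*
 * Example (illustrative): a consumer registering a callback for a
 * hypothetical query value 0x42 on the first EC; the callback runs from
 * the ec_query_wq workqueue, so it may sleep:
 *
 *	static int my_q42_handler(void *data)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	acpi_ec_add_query_handler(first_ec, 0x42, NULL, my_q42_handler, NULL);
 */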

static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
					  bool remove_all, u8 query_bit)
{
	struct acpi_ec_query_handler *handler, *tmp;
	LIST_HEAD(free_list);

	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		/*
		 * When remove_all is false, only remove custom query handlers
		 * which have handler->func set. This is done to preserve query
		 * handlers discovered through ACPI, as they should continue
		 * handling EC queries.
		 */
		if (remove_all || (handler->func && handler->query_bit == query_bit)) {
			list_del_init(&handler->node);
			list_add(&handler->node, &free_list);
		}
	}
	mutex_unlock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &free_list, node)
		acpi_ec_put_query_handler(handler);
}

void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	acpi_ec_remove_query_handlers(ec, false, query_bit);
	flush_workqueue(ec_query_wq);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);

static void acpi_ec_event_processor(struct work_struct *work)
{
	struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
	struct acpi_ec_query_handler *handler = q->handler;
	struct acpi_ec *ec = q->ec;

	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);

	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);

	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);

	spin_lock_irq(&ec->lock);
	ec->queries_in_progress--;
	spin_unlock_irq(&ec->lock);

	acpi_ec_put_query_handler(handler);
	kfree(q);
}

static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval)
{
	struct acpi_ec_query *q;
	struct transaction *t;

	q = kzalloc(sizeof(struct acpi_ec_query), GFP_KERNEL);
	if (!q)
		return NULL;

	INIT_WORK(&q->work, acpi_ec_event_processor);
	t = &q->transaction;
	t->command = ACPI_EC_COMMAND_QUERY;
	t->rdata = pval;
	t->rlen = 1;
	q->ec = ec;
	return q;
}

static int acpi_ec_submit_query(struct acpi_ec *ec)
{
	struct acpi_ec_query *q;
	u8 value = 0;
	int result;

	q = acpi_ec_create_query(ec, &value);
	if (!q)
		return -ENOMEM;

	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	result = acpi_ec_transaction(ec, &q->transaction);
	if (result)
		goto err_exit;

	if (!value) {
		result = -ENODATA;
		goto err_exit;
	}

	q->handler = acpi_ec_get_query_handler_by_value(ec, value);
	if (!q->handler) {
		result = -ENODATA;
		goto err_exit;
	}

	/*
	 * It is reported that _Qxx are evaluated in a parallel way on Windows:
	 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
	 *
	 * Put this log entry before queue_work() to make it appear in the log
	 * before any other messages emitted during workqueue handling.
	 */
	ec_dbg_evt("Query(0x%02x) scheduled", value);

	spin_lock_irq(&ec->lock);

	ec->queries_in_progress++;
	queue_work(ec_query_wq, &q->work);

	spin_unlock_irq(&ec->lock);

	return 0;

err_exit:
	kfree(q);

	return result;
}

static void acpi_ec_event_handler(struct work_struct *work)
{
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irq(&ec->lock);

	while (ec->events_to_process) {
		spin_unlock_irq(&ec->lock);

		acpi_ec_submit_query(ec);

		spin_lock_irq(&ec->lock);

		ec->events_to_process--;
	}

	/*
	 * Before exiting, make sure that it will be possible to queue up the
	 * event handling work again regardless of whether or not the query
	 * queued up above is processed successfully.
	 */
	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
		bool guard_timeout;

		acpi_ec_complete_event(ec);

		ec_dbg_evt("Event stopped");

		spin_unlock_irq(&ec->lock);

		guard_timeout = !!ec_guard(ec);

		spin_lock_irq(&ec->lock);

		/* Take care of SCI_EVT unless someone else is doing that. */
		if (guard_timeout && !ec->curr)
			advance_transaction(ec, false);
	} else {
		acpi_ec_close_event(ec);

		ec_dbg_evt("Event stopped");
	}

	ec->events_in_progress--;

	spin_unlock_irq(&ec->lock);
}

static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
{
	/*
	 * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1
	 * changes to always trigger a GPE interrupt.
	 *
	 * GPE STS is a W1C register, which means:
	 *
	 * 1. Software can clear it without worrying about clearing the other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 *
	 * 2. As long as software can ensure only clearing it when it is set,
	 *    hardware won't set it in parallel.
	 */
	if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
		acpi_clear_gpe(NULL, ec->gpe);

	advance_transaction(ec, true);
}

static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);

	clear_gpe_and_advance_transaction(ec, true);

	spin_unlock_irqrestore(&ec->lock, flags);
}

static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	acpi_ec_handle_interrupt(data);
	return ACPI_INTERRUPT_HANDLED;
}

static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
{
	acpi_ec_handle_interrupt(data);
	return IRQ_HANDLED;
}

/* --------------------------------------------------------------------------
 *                           Address Space Management
 * -------------------------------------------------------------------------- */

static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{
	struct acpi_ec *ec = handler_context;
	int result = 0, i, bytes = bits / 8;
	u8 *value = (u8 *)value64;
	u32 glk;

	if ((address > 0xFF) || !value || !handler_context)
		return AE_BAD_PARAMETER;

	if (function != ACPI_READ && function != ACPI_WRITE)
		return AE_BAD_PARAMETER;

	mutex_lock(&ec->mutex);

	if (ec->global_lock) {
		acpi_status status;

		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			result = -ENODEV;
			goto unlock;
		}
	}

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_enable(ec);

	for (i = 0; i < bytes; ++i, ++address, ++value) {
		result = (function == ACPI_READ) ?
			acpi_ec_read_unlocked(ec, address, value) :
			acpi_ec_write_unlocked(ec, address, *value);
		if (result < 0)
			break;
	}

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_disable(ec);

	if (ec->global_lock)
		acpi_release_global_lock(glk);

unlock:
	mutex_unlock(&ec->mutex);

	switch (result) {
	case -EINVAL:
		return AE_BAD_PARAMETER;
	case -ENODEV:
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
	case 0:
		return AE_OK;
	default:
		return AE_ERROR;
	}
}
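
/*
 * Example (illustrative): the AML side typically declares the region this
 * handler serves as something like:
 *
 *	OperationRegion (ERAM, EmbeddedControl, 0x00, 0xFF)
 *	Field (ERAM, ByteAcc, Lock, Preserve)
 *	{
 *		BTST, 8,	// hypothetical battery status byte
 *	}
 *
 * Each AML field access then arrives here as an ACPI_READ or ACPI_WRITE
 * of one or more bytes.
 */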

/* --------------------------------------------------------------------------
 *                           Driver Interface
 * -------------------------------------------------------------------------- */

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);

static void acpi_ec_free(struct acpi_ec *ec)
{
	if (first_ec == ec)
		first_ec = NULL;
	if (boot_ec == ec)
		boot_ec = NULL;
	kfree(ec);
}

static struct acpi_ec *acpi_ec_alloc(void)
{
	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);

	if (!ec)
		return NULL;
	mutex_init(&ec->mutex);
	init_waitqueue_head(&ec->wait);
	INIT_LIST_HEAD(&ec->list);
	spin_lock_init(&ec->lock);
	INIT_WORK(&ec->work, acpi_ec_event_handler);
	ec->timestamp = jiffies;
	ec->busy_polling = true;
	ec->polling_guard = 0;
	ec->gpe = -1;
	ec->irq = -1;
	return ec;
}

static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{
	char node_name[5];
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	struct acpi_ec *ec = context;
	int value = 0;
	acpi_status status;

	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);

	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
	return AE_OK;
}
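
/*
 * Example (illustrative): the firmware-side counterpart discovered by the
 * namespace walk above is an AML method named after the query value, e.g.:
 *
 *	Method (_Q42, 0, NotSerialized)		// handles query value 0x42
 *	{
 *		Notify (\_SB.BAT0, 0x80)	// hypothetical notification
 *	}
 */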

static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;
	struct acpi_ec *ec = context;

	/* clear addr values, ec_parse_io_ports depends on them */
	ec->command_addr = ec->data_addr = 0;

	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;
	if (ec->data_addr == 0 || ec->command_addr == 0)
		return AE_OK;

	/* Get GPE bit assignment (EC events). */
	/* TODO: Add support for _GPE returning a package */
	status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
	if (ACPI_SUCCESS(status))
		ec->gpe = tmp;
	/*
	 * Errors are non-fatal, allowing for ACPI Reduced Hardware
	 * platforms which use GpioInt instead of GPE.
	 */

	/* Use the global lock for all EC transactions? */
	tmp = 0;
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}

static bool install_gpe_event_handler(struct acpi_ec *ec)
{
	acpi_status status;

	status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
					      ACPI_GPE_EDGE_TRIGGERED,
					      &acpi_ec_gpe_handler, ec);
	if (ACPI_FAILURE(status))
		return false;

	if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1)
		acpi_ec_enable_gpe(ec, true);

	return true;
}

static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
{
	return request_threaded_irq(ec->irq, NULL, acpi_ec_irq_handler,
				    IRQF_SHARED | IRQF_ONESHOT, "ACPI EC", ec) >= 0;
}

/**
 * ec_install_handlers - Install service callbacks and register query methods.
 * @ec: Target EC.
 * @device: ACPI device object corresponding to @ec.
 * @call_reg: If _REG should be called to notify OpRegion availability
 *
 * Install a handler for the EC address space type unless it has been installed
 * already. If @device is not NULL, also look for EC query methods in the
 * namespace and register them, and install an event (either GPE or GPIO IRQ)
 * handler for the EC, if possible.
 *
 * Return:
 * -ENODEV if the address space handler cannot be installed, which means
 *  "unable to handle transactions",
 * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred,
 * or 0 (success) otherwise.
 */
static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
			       bool call_reg)
{
	acpi_status status;

	acpi_ec_start(ec, false);

	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;

		acpi_ec_enter_noirq(ec);
		status = acpi_install_address_space_handler_no_reg(scope_handle,
								   ACPI_ADR_SPACE_EC,
								   &acpi_ec_space_handler,
								   NULL, ec);
		if (ACPI_FAILURE(status)) {
			acpi_ec_stop(ec, false);
			return -ENODEV;
		}
		set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
		acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC);
		set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
	}

	if (!device)
		return 0;

	if (ec->gpe < 0) {
		/* ACPI reduced hardware platforms use a GpioInt from _CRS. */
		int irq = acpi_dev_gpio_irq_get(device, 0);
		/*
		 * Bail out right away for deferred probing or complete the
		 * initialization regardless of any other errors.
		 */
		if (irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		else if (irq >= 0)
			ec->irq = irq;
	}

	if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
		/* Find and register all query methods */
		acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
				    acpi_ec_register_query_methods,
				    NULL, ec, NULL);
		set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
	}
	if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
		bool ready = false;

		if (ec->gpe >= 0)
			ready = install_gpe_event_handler(ec);
		else if (ec->irq >= 0)
			ready = install_gpio_irq_event_handler(ec);

		if (ready) {
			set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
			acpi_ec_leave_noirq(ec);
		}
		/*
		 * Failures to install an event handler are not fatal, because
		 * the EC can be polled for events.
		 */
	}
	/* EC is fully operational, allow queries */
	acpi_ec_enable_event(ec);

	return 0;
}
1602 | |
1603 | static void ec_remove_handlers(struct acpi_ec *ec) |
1604 | { |
1605 | acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle; |
1606 | |
1607 | if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { |
1608 | if (ACPI_FAILURE(acpi_remove_address_space_handler( |
1609 | scope_handle, |
1610 | ACPI_ADR_SPACE_EC, |
1611 | &acpi_ec_space_handler))) |
1612 | pr_err("failed to remove space handler\n"); |
1613 | clear_bit(nr: EC_FLAGS_EC_HANDLER_INSTALLED, addr: &ec->flags); |
1614 | } |
1615 | |
1616 | /* |
1617 | * Stops handling the EC transactions after removing the operation |
1618 | * region handler. This is required because _REG(DISCONNECT) |
1619 | * invoked during the removal can result in new EC transactions. |
1620 | * |
1621 | * Flushes the EC requests and thus disables the GPE before |
1622 | * removing the GPE handler. This is required by the current ACPICA |
1623 | * GPE core. ACPICA GPE core will automatically disable a GPE when |
1624 | * it is indicated but there is no way to handle it. So the drivers |
1625 | * must disable the GPEs prior to removing the GPE handlers. |
1626 | */ |
1627 | acpi_ec_stop(ec, suspending: false); |
1628 | |
1629 | if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { |
1630 | if (ec->gpe >= 0 && |
1631 | ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, |
1632 | &acpi_ec_gpe_handler))) |
1633 | pr_err("failed to remove gpe handler\n"); |
1634 | |
1635 | if (ec->irq >= 0) |
1636 | free_irq(ec->irq, ec); |
1637 | |
1638 | clear_bit(nr: EC_FLAGS_EVENT_HANDLER_INSTALLED, addr: &ec->flags); |
1639 | } |
1640 | if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) { |
1641 | acpi_ec_remove_query_handlers(ec, true, 0); |
1642 | clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags); |
1643 | } |
1644 | } |
1645 | |
1646 | static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool call_reg) |
1647 | { |
1648 | int ret; |
1649 | |
1650 | /* First EC capable of handling transactions */ |
1651 | if (!first_ec) |
1652 | first_ec = ec; |
1653 | |
1654 | ret = ec_install_handlers(ec, device, call_reg); |
1655 | if (ret) { |
1656 | if (ec == first_ec) |
1657 | first_ec = NULL; |
1658 | |
1659 | return ret; |
1660 | } |
1661 | |
1662 | pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr, |
1663 | ec->data_addr); |
1664 | |
1665 | if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { |
1666 | if (ec->gpe >= 0) |
1667 | pr_info("GPE=0x%x\n", ec->gpe); |
1668 | else |
1669 | pr_info("IRQ=%d\n", ec->irq); |
1670 | } |
1671 | |
1672 | return ret; |
1673 | } |
1674 | |
1675 | static int acpi_ec_add(struct acpi_device *device) |
1676 | { |
1677 | struct acpi_ec *ec; |
1678 | int ret; |
1679 | |
1680 | strscpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); |
1681 | strscpy(acpi_device_class(device), ACPI_EC_CLASS); |
1682 | |
1683 | if (boot_ec && (boot_ec->handle == device->handle || |
1684 | !strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) { |
1685 | /* Fast path: this device corresponds to the boot EC. */ |
1686 | ec = boot_ec; |
1687 | } else { |
1688 | acpi_status status; |
1689 | |
1690 | ec = acpi_ec_alloc(); |
1691 | if (!ec) |
1692 | return -ENOMEM; |
1693 | |
1694 | status = ec_parse_device(device->handle, 0, ec, NULL); |
1695 | if (status != AE_CTRL_TERMINATE) { |
1696 | ret = -EINVAL; |
1697 | goto err; |
1698 | } |
1699 | |
1700 | if (boot_ec && ec->command_addr == boot_ec->command_addr && |
1701 | ec->data_addr == boot_ec->data_addr) { |
1702 | /* |
1703 | * Trust PNP0C09 namespace location rather than ECDT ID. |
1704 | * But trust ECDT GPE rather than _GPE because of ASUS |
1705 | * quirks. So do not change boot_ec->gpe to ec->gpe, |
1706 | * except when the TRUST_DSDT_GPE quirk is set. |
1707 | */ |
1708 | boot_ec->handle = ec->handle; |
1709 | |
1710 | if (EC_FLAGS_TRUST_DSDT_GPE) |
1711 | boot_ec->gpe = ec->gpe; |
1712 | |
1713 | acpi_handle_debug(ec->handle, "duplicated.\n"); |
1714 | acpi_ec_free(ec); |
1715 | ec = boot_ec; |
1716 | } |
1717 | } |
1718 | |
1719 | ret = acpi_ec_setup(ec, device, true); |
1720 | if (ret) |
1721 | goto err; |
1722 | |
1723 | if (ec == boot_ec) |
1724 | acpi_handle_info(boot_ec->handle, |
1725 | "Boot %s EC initialization complete\n", |
1726 | boot_ec_is_ecdt ? "ECDT": "DSDT"); |
1727 | |
1728 | acpi_handle_info(ec->handle, |
1729 | "EC: Used to handle transactions and events\n"); |
1730 | |
1731 | device->driver_data = ec; |
1732 | |
1733 | ret = !!request_region(ec->data_addr, 1, "EC data"); |
1734 | WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr); |
1735 | ret = !!request_region(ec->command_addr, 1, "EC cmd"); |
1736 | WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); |
1737 | |
1738 | /* Reprobe devices depending on the EC */ |
1739 | acpi_dev_clear_dependencies(device); |
1740 | |
1741 | acpi_handle_debug(ec->handle, "enumerated.\n"); |
1742 | return 0; |
1743 | |
1744 | err: |
1745 | if (ec != boot_ec) |
1746 | acpi_ec_free(ec); |
1747 | |
1748 | return ret; |
1749 | } |
1750 | |
1751 | static void acpi_ec_remove(struct acpi_device *device) |
1752 | { |
1753 | struct acpi_ec *ec; |
1754 | |
1755 | if (!device) |
1756 | return; |
1757 | |
1758 | ec = acpi_driver_data(device); |
1759 | release_region(ec->data_addr, 1); |
1760 | release_region(ec->command_addr, 1); |
1761 | device->driver_data = NULL; |
1762 | if (ec != boot_ec) { |
1763 | ec_remove_handlers(ec); |
1764 | acpi_ec_free(ec); |
1765 | } |
1766 | } |
1767 | |
1768 | void acpi_ec_register_opregions(struct acpi_device *adev) |
1769 | { |
1770 | if (first_ec && first_ec->handle != adev->handle) |
1771 | acpi_execute_reg_methods(adev->handle, 1, ACPI_ADR_SPACE_EC); |
1772 | } |
1773 | |
1774 | static acpi_status |
1775 | ec_parse_io_ports(struct acpi_resource *resource, void *context) |
1776 | { |
1777 | struct acpi_ec *ec = context; |
1778 | |
1779 | if (resource->type != ACPI_RESOURCE_TYPE_IO) |
1780 | return AE_OK; |
1781 | |
1782 | /* |
1783 | * The first address region returned is the data port, and |
1784 | * the second address region returned is the status/command |
1785 | * port. |
1786 | */ |
1787 | if (ec->data_addr == 0) |
1788 | ec->data_addr = resource->data.io.minimum; |
1789 | else if (ec->command_addr == 0) |
1790 | ec->command_addr = resource->data.io.minimum; |
1791 | else |
1792 | return AE_CTRL_TERMINATE; |
1793 | |
1794 | return AE_OK; |
1795 | } |
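
/*
 * Usage sketch (illustrative, not verbatim from this excerpt):
 * ec_parse_io_ports() is an acpi_walk_resources() callback; the walk over
 * the EC's _CRS is driven elsewhere in this file (in ec_parse_device()),
 * roughly along these lines:
 *
 *	acpi_status status;
 *
 *	status = acpi_walk_resources(ec->handle, METHOD_NAME__CRS,
 *				     ec_parse_io_ports, ec);
 *	if (ACPI_FAILURE(status))
 *		return status;
 *
 * AE_CTRL_TERMINATE from the callback only ends the walk early once both
 * ports are known; ACPICA converts it back to AE_OK for the caller.
 */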
1796 | |
1797 | static const struct acpi_device_id ec_device_ids[] = { |
1798 | {"PNP0C09", 0}, |
1799 | {ACPI_ECDT_HID, 0}, |
1800 | {"", 0}, |
1801 | }; |
1802 | |
1803 | /* |
1804 | * This function is not Windows-compatible as Windows never enumerates the |
1805 | * namespace EC before the main ACPI device enumeration process. It is |
1806 | * retained for historical reasons and will be deprecated in the future. |
1807 | */ |
1808 | void __init acpi_ec_dsdt_probe(void) |
1809 | { |
1810 | struct acpi_ec *ec; |
1811 | acpi_status status; |
1812 | int ret; |
1813 | |
1814 | /* |
1815 | * If a platform has an ECDT, there is no need to proceed, as the |
1816 | * following probe is not a part of the ACPI device enumeration; |
1817 | * executing _STA is not safe, and thus this probe risks picking |
1818 | * up an invalid EC device. |
1819 | */ |
1820 | if (boot_ec) |
1821 | return; |
1822 | |
1823 | ec = acpi_ec_alloc(); |
1824 | if (!ec) |
1825 | return; |
1826 | |
1827 | /* |
1828 | * At this point, the namespace is initialized, so start to find |
1829 | * the namespace objects. |
1830 | */ |
1831 | status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL); |
1832 | if (ACPI_FAILURE(status) || !ec->handle) { |
1833 | acpi_ec_free(ec); |
1834 | return; |
1835 | } |
1836 | |
1837 | /* |
1838 | * When the DSDT EC is available, always re-configure boot EC to |
1839 | * have _REG evaluated. _REG can only be evaluated after the |
1840 | * namespace initialization. |
1841 | * At this point, the GPE is not fully initialized, so do not |
1842 | * handle the events yet. |
1843 | */ |
1844 | ret = acpi_ec_setup(ec, NULL, true); |
1845 | if (ret) { |
1846 | acpi_ec_free(ec); |
1847 | return; |
1848 | } |
1849 | |
1850 | boot_ec = ec; |
1851 | |
1852 | acpi_handle_info(ec->handle, |
1853 | "Boot DSDT EC used to handle transactions\n"); |
1854 | } |
1855 | |
1856 | /* |
1857 | * acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization. |
1858 | * |
1859 | * First, look for an ACPI handle for the boot ECDT EC if acpi_ec_add() has not |
1860 | * found a matching object in the namespace. |
1861 | * |
1862 | * Next, in case the DSDT EC is not functioning, it is still necessary to |
1863 | * provide a functional ECDT EC to handle events, so add an extra device object |
1864 | * to represent it (see https://bugzilla.kernel.org/show_bug.cgi?id=115021). |
1865 | * |
1866 | * This is useful on platforms with valid ECDT and invalid DSDT EC settings, |
1867 | * like ASUS X550ZE (see https://bugzilla.kernel.org/show_bug.cgi?id=196847). |
1868 | */ |
1869 | static void __init acpi_ec_ecdt_start(void) |
1870 | { |
1871 | struct acpi_table_ecdt *ecdt_ptr; |
1872 | acpi_handle handle; |
1873 | acpi_status status; |
1874 | |
1875 | /* Bail out if a matching EC has been found in the namespace. */ |
1876 | if (!boot_ec || boot_ec->handle != ACPI_ROOT_OBJECT) |
1877 | return; |
1878 | |
1879 | /* Look up the object pointed to from the ECDT in the namespace. */ |
1880 | status = acpi_get_table(ACPI_SIG_ECDT, 1, |
1881 | (struct acpi_table_header **)&ecdt_ptr); |
1882 | if (ACPI_FAILURE(status)) |
1883 | return; |
1884 | |
1885 | status = acpi_get_handle(NULL, ecdt_ptr->id, &handle); |
1886 | if (ACPI_SUCCESS(status)) { |
1887 | boot_ec->handle = handle; |
1888 | |
1889 | /* Add a special ACPI device object to represent the boot EC. */ |
1890 | acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC); |
1891 | } |
1892 | |
1893 | acpi_put_table((struct acpi_table_header *)ecdt_ptr); |
1894 | } |
1895 | |
1896 | /* |
1897 | * On some hardware it is necessary to clear events accumulated by the EC during |
1898 | * sleep. If too many events accumulate, these ECs stop reporting GPEs |
1899 | * until they are manually polled (e.g. Samsung Series 5/9 notebooks). |
1900 | * |
1901 | * https://bugzilla.kernel.org/show_bug.cgi?id=44161 |
1902 | * |
1903 | * Ideally, the EC should also be instructed NOT to accumulate events during |
1904 | * sleep (which Windows seems to do somehow), but the interface to control this |
1905 | * behaviour is not known at this time. |
1906 | * |
1907 | * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx; |
1908 | * however, it is very likely that other Samsung models are affected as well. |
1909 | * |
1910 | * On systems which don't accumulate _Q events during sleep, this extra check |
1911 | * should be harmless. |
1912 | */ |
1913 | static int ec_clear_on_resume(const struct dmi_system_id *id) |
1914 | { |
1915 | pr_debug("Detected system needing EC poll on resume.\n"); |
1916 | EC_FLAGS_CLEAR_ON_RESUME = 1; |
1917 | ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS; |
1918 | return 0; |
1919 | } |
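
/*
 * Note (hedged): the actual draining on resume is performed by
 * acpi_ec_clear(), defined earlier in this file, which keeps issuing
 * query transactions until the EC reports no pending event (or a sanity
 * limit is reached). Conceptually, the tail of acpi_ec_enable_event()
 * does:
 *
 *	if (EC_FLAGS_CLEAR_ON_RESUME)
 *		acpi_ec_clear(ec);
 */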
1920 | |
1921 | /* |
1922 | * Some ECDTs contain wrong register addresses. |
1923 | * MSI MS-171F |
1924 | * https://bugzilla.kernel.org/show_bug.cgi?id=12461 |
1925 | */ |
1926 | static int ec_correct_ecdt(const struct dmi_system_id *id) |
1927 | { |
1928 | pr_debug("Detected system needing ECDT address correction.\n"); |
1929 | EC_FLAGS_CORRECT_ECDT = 1; |
1930 | return 0; |
1931 | } |
1932 | |
1933 | /* |
1934 | * Some ECDTs contain a wrong GPE setting, but share their port addresses |
1935 | * with the DSDT EC; don't duplicate the DSDT EC with an ECDT EC in this case. |
1936 | * https://bugzilla.kernel.org/show_bug.cgi?id=209989 |
1937 | */ |
1938 | static int ec_honor_dsdt_gpe(const struct dmi_system_id *id) |
1939 | { |
1940 | pr_debug("Detected system needing DSDT GPE setting.\n"); |
1941 | EC_FLAGS_TRUST_DSDT_GPE = 1; |
1942 | return 0; |
1943 | } |
1944 | |
1945 | static const struct dmi_system_id ec_dmi_table[] __initconst = { |
1946 | { |
1947 | /* |
1948 | * MSI MS-171F |
1949 | * https://bugzilla.kernel.org/show_bug.cgi?id=12461 |
1950 | */ |
1951 | .callback = ec_correct_ecdt, |
1952 | .matches = { |
1953 | DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), |
1954 | DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"), |
1955 | }, |
1956 | }, |
1957 | { |
1958 | /* |
1959 | * HP Pavilion Gaming Laptop 15-cx0xxx |
1960 | * https://bugzilla.kernel.org/show_bug.cgi?id=209989 |
1961 | */ |
1962 | .callback = ec_honor_dsdt_gpe, |
1963 | .matches = { |
1964 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), |
1965 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"), |
1966 | }, |
1967 | }, |
1968 | { |
1969 | /* |
1970 | * HP Pavilion Gaming Laptop 15-cx0041ur |
1971 | */ |
1972 | .callback = ec_honor_dsdt_gpe, |
1973 | .matches = { |
1974 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), |
1975 | DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"), |
1976 | }, |
1977 | }, |
1978 | { |
1979 | /* |
1980 | * HP Pavilion Gaming Laptop 15-dk1xxx |
1981 | * https://github.com/systemd/systemd/issues/28942 |
1982 | */ |
1983 | .callback = ec_honor_dsdt_gpe, |
1984 | .matches = { |
1985 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), |
1986 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"), |
1987 | }, |
1988 | }, |
1989 | { |
1990 | /* |
1991 | * HP 250 G7 Notebook PC |
1992 | */ |
1993 | .callback = ec_honor_dsdt_gpe, |
1994 | .matches = { |
1995 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), |
1996 | DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"), |
1997 | }, |
1998 | }, |
1999 | { |
2000 | /* |
2001 | * Samsung hardware |
2002 | * https://bugzilla.kernel.org/show_bug.cgi?id=44161 |
2003 | */ |
2004 | .callback = ec_clear_on_resume, |
2005 | .matches = { |
2006 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), |
2007 | }, |
2008 | }, |
2009 | {} |
2010 | }; |
2011 | |
2012 | void __init acpi_ec_ecdt_probe(void) |
2013 | { |
2014 | struct acpi_table_ecdt *ecdt_ptr; |
2015 | struct acpi_ec *ec; |
2016 | acpi_status status; |
2017 | int ret; |
2018 | |
2019 | /* Generate a boot ec context. */ |
2020 | dmi_check_system(ec_dmi_table); |
2021 | status = acpi_get_table(ACPI_SIG_ECDT, 1, |
2022 | (struct acpi_table_header **)&ecdt_ptr); |
2023 | if (ACPI_FAILURE(status)) |
2024 | return; |
2025 | |
2026 | if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) { |
2027 | /* |
2028 | * Asus X50GL: |
2029 | * https://bugzilla.kernel.org/show_bug.cgi?id=11880 |
2030 | */ |
2031 | goto out; |
2032 | } |
2033 | |
2034 | ec = acpi_ec_alloc(); |
2035 | if (!ec) |
2036 | goto out; |
2037 | |
2038 | if (EC_FLAGS_CORRECT_ECDT) { |
2039 | ec->command_addr = ecdt_ptr->data.address; |
2040 | ec->data_addr = ecdt_ptr->control.address; |
2041 | } else { |
2042 | ec->command_addr = ecdt_ptr->control.address; |
2043 | ec->data_addr = ecdt_ptr->data.address; |
2044 | } |
2045 | |
2046 | /* |
2047 | * Ignore the GPE value on Reduced Hardware platforms. |
2048 | * Some products have this set to an erroneous value. |
2049 | */ |
2050 | if (!acpi_gbl_reduced_hardware) |
2051 | ec->gpe = ecdt_ptr->gpe; |
2052 | |
2053 | ec->handle = ACPI_ROOT_OBJECT; |
2054 | |
2055 | /* |
2056 | * At this point, the namespace is not initialized, so do not look |
2057 | * up the namespace objects or handle the events yet. |
2058 | */ |
2059 | ret = acpi_ec_setup(ec, NULL, false); |
2060 | if (ret) { |
2061 | acpi_ec_free(ec); |
2062 | goto out; |
2063 | } |
2064 | |
2065 | boot_ec = ec; |
2066 | boot_ec_is_ecdt = true; |
2067 | |
2068 | pr_info("Boot ECDT EC used to handle transactions\n"); |
2069 | |
2070 | out: |
2071 | acpi_put_table((struct acpi_table_header *)ecdt_ptr); |
2072 | } |
2073 | |
2074 | #ifdef CONFIG_PM_SLEEP |
2075 | static int acpi_ec_suspend(struct device *dev) |
2076 | { |
2077 | struct acpi_ec *ec = |
2078 | acpi_driver_data(to_acpi_device(dev)); |
2079 | |
2080 | if (!pm_suspend_no_platform() && ec_freeze_events) |
2081 | acpi_ec_disable_event(ec); |
2082 | return 0; |
2083 | } |
2084 | |
2085 | static int acpi_ec_suspend_noirq(struct device *dev) |
2086 | { |
2087 | struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); |
2088 | |
2089 | /* |
2090 | * The SCI handler doesn't run at this point, so the GPE can be |
2091 | * masked at the low level without side effects. |
2092 | */ |
2093 | if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && |
2094 | ec->gpe >= 0 && ec->reference_count >= 1) |
2095 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); |
2096 | |
2097 | acpi_ec_enter_noirq(ec); |
2098 | |
2099 | return 0; |
2100 | } |
2101 | |
2102 | static int acpi_ec_resume_noirq(struct device *dev) |
2103 | { |
2104 | struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); |
2105 | |
2106 | acpi_ec_leave_noirq(ec); |
2107 | |
2108 | if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && |
2109 | ec->gpe >= 0 && ec->reference_count >= 1) |
2110 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); |
2111 | |
2112 | return 0; |
2113 | } |
2114 | |
2115 | static int acpi_ec_resume(struct device *dev) |
2116 | { |
2117 | struct acpi_ec *ec = |
2118 | acpi_driver_data(to_acpi_device(dev)); |
2119 | |
2120 | acpi_ec_enable_event(ec); |
2121 | return 0; |
2122 | } |
2123 | |
2124 | void acpi_ec_mark_gpe_for_wake(void) |
2125 | { |
2126 | if (first_ec && !ec_no_wakeup) |
2127 | acpi_mark_gpe_for_wake(NULL, first_ec->gpe); |
2128 | } |
2129 | EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake); |
2130 | |
2131 | void acpi_ec_set_gpe_wake_mask(u8 action) |
2132 | { |
2133 | if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup) |
2134 | acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); |
2135 | } |
2136 | |
2137 | static bool acpi_ec_work_in_progress(struct acpi_ec *ec) |
2138 | { |
2139 | return ec->events_in_progress + ec->queries_in_progress > 0; |
2140 | } |
2141 | |
2142 | bool acpi_ec_dispatch_gpe(void) |
2143 | { |
2144 | bool work_in_progress = false; |
2145 | |
2146 | if (!first_ec) |
2147 | return acpi_any_gpe_status_set(U32_MAX); |
2148 | |
2149 | /* |
2150 | * Report wakeup if the status bit is set for any enabled GPE other |
2151 | * than the EC one. |
2152 | */ |
2153 | if (acpi_any_gpe_status_set(first_ec->gpe)) |
2154 | return true; |
2155 | |
2156 | /* |
2157 | * Cancel the SCI wakeup and process all pending events in case there |
2158 | * are any wakeup ones in there. |
2159 | * |
2160 | * Note that if any non-EC GPEs are active at this point, the SCI will |
2161 | * retrigger after the rearming in acpi_s2idle_wake(), so no events |
2162 | * should be missed by canceling the wakeup here. |
2163 | */ |
2164 | pm_system_cancel_wakeup(); |
2165 | |
2166 | /* |
2167 | * Dispatch the EC GPE in-band, but do not report wakeup in any case |
2168 | * to allow the caller to process events properly after that. |
2169 | */ |
2170 | spin_lock_irq(&first_ec->lock); |
2171 | |
2172 | if (acpi_ec_gpe_status_set(first_ec)) { |
2173 | pm_pr_dbg("ACPI EC GPE status set\n"); |
2174 | |
2175 | clear_gpe_and_advance_transaction(first_ec, false); |
2176 | work_in_progress = acpi_ec_work_in_progress(first_ec); |
2177 | } |
2178 | |
2179 | spin_unlock_irq(&first_ec->lock); |
2180 | |
2181 | if (!work_in_progress) |
2182 | return false; |
2183 | |
2184 | pm_pr_dbg("ACPI EC GPE dispatched\n"); |
2185 | |
2186 | /* Drain EC work. */ |
2187 | do { |
2188 | acpi_ec_flush_work(); |
2189 | |
2190 | pm_pr_dbg("ACPI EC work flushed\n"); |
2191 | |
2192 | spin_lock_irq(&first_ec->lock); |
2193 | |
2194 | work_in_progress = acpi_ec_work_in_progress(first_ec); |
2195 | |
2196 | spin_unlock_irq(&first_ec->lock); |
2197 | } while (work_in_progress && !pm_wakeup_pending()); |
2198 | |
2199 | return false; |
2200 | } |
2201 | #endif /* CONFIG_PM_SLEEP */ |
2202 | |
2203 | static const struct dev_pm_ops acpi_ec_pm = { |
2204 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq) |
2205 | SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume) |
2206 | }; |
2207 | |
2208 | static int param_set_event_clearing(const char *val, |
2209 | const struct kernel_param *kp) |
2210 | { |
2211 | int result = 0; |
2212 | |
2213 | if (!strncmp(val, "status", sizeof( "status") - 1)) { |
2214 | ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS; |
2215 | pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n"); |
2216 | } else if (!strncmp(val, "query", sizeof( "query") - 1)) { |
2217 | ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY; |
2218 | pr_info("Assuming SCI_EVT clearing on QR_EC writes\n"); |
2219 | } else if (!strncmp(val, "event", sizeof( "event") - 1)) { |
2220 | ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT; |
2221 | pr_info("Assuming SCI_EVT clearing on event reads\n"); |
2222 | } else |
2223 | result = -EINVAL; |
2224 | return result; |
2225 | } |
2226 | |
2227 | static int param_get_event_clearing(char *buffer, |
2228 | const struct kernel_param *kp) |
2229 | { |
2230 | switch (ec_event_clearing) { |
2231 | case ACPI_EC_EVT_TIMING_STATUS: |
2232 | return sprintf(buffer, "status\n"); |
2233 | case ACPI_EC_EVT_TIMING_QUERY: |
2234 | return sprintf(buffer, "query\n"); |
2235 | case ACPI_EC_EVT_TIMING_EVENT: |
2236 | return sprintf(buffer, "event\n"); |
2237 | default: |
2238 | return sprintf(buffer, "invalid\n"); |
2239 | } |
2240 | return 0; |
2241 | } |
2242 | |
2243 | module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing, |
2244 | NULL, 0644); |
2245 | MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing"); |
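
/*
 * Usage sketch (hedged; assumes ec.c is built into the "acpi" module, the
 * usual configuration, so the parameter appears under /sys/module/acpi/):
 *
 *	acpi.ec_event_clearing=query	(kernel command line)
 *
 *	# echo event > /sys/module/acpi/parameters/ec_event_clearing
 *	# cat /sys/module/acpi/parameters/ec_event_clearing
 *	event
 */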
2246 | |
2247 | static struct acpi_driver acpi_ec_driver = { |
2248 | .name = "ec", |
2249 | .class = ACPI_EC_CLASS, |
2250 | .ids = ec_device_ids, |
2251 | .ops = { |
2252 | .add = acpi_ec_add, |
2253 | .remove = acpi_ec_remove, |
2254 | }, |
2255 | .drv.pm = &acpi_ec_pm, |
2256 | }; |
2257 | |
2258 | static void acpi_ec_destroy_workqueues(void) |
2259 | { |
2260 | if (ec_wq) { |
2261 | destroy_workqueue(ec_wq); |
2262 | ec_wq = NULL; |
2263 | } |
2264 | if (ec_query_wq) { |
2265 | destroy_workqueue(ec_query_wq); |
2266 | ec_query_wq = NULL; |
2267 | } |
2268 | } |
2269 | |
2270 | static int acpi_ec_init_workqueues(void) |
2271 | { |
2272 | if (!ec_wq) |
2273 | ec_wq = alloc_ordered_workqueue("kec", 0); |
2274 | |
2275 | if (!ec_query_wq) |
2276 | ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries); |
2277 | |
2278 | if (!ec_wq || !ec_query_wq) { |
2279 | acpi_ec_destroy_workqueues(); |
2280 | return -ENODEV; |
2281 | } |
2282 | return 0; |
2283 | } |
2284 | |
2285 | static const struct dmi_system_id acpi_ec_no_wakeup[] = { |
2286 | { |
2287 | .matches = { |
2288 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
2289 | DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"), |
2290 | }, |
2291 | }, |
2292 | { |
2293 | .matches = { |
2294 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
2295 | DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"), |
2296 | }, |
2297 | }, |
2298 | { |
2299 | .matches = { |
2300 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), |
2301 | DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"), |
2302 | }, |
2303 | }, |
2304 | /* |
2305 | * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC |
2306 | * https://gitlab.freedesktop.org/drm/amd/-/issues/3929 |
2307 | */ |
2308 | { |
2309 | .matches = { |
2310 | DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), |
2311 | DMI_MATCH(DMI_PRODUCT_NAME, "83L3"), |
2312 | } |
2313 | }, |
2314 | { |
2315 | .matches = { |
2316 | DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), |
2317 | DMI_MATCH(DMI_PRODUCT_NAME, "83N6"), |
2318 | } |
2319 | }, |
2320 | { |
2321 | .matches = { |
2322 | DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), |
2323 | DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"), |
2324 | } |
2325 | }, |
2326 | { |
2327 | .matches = { |
2328 | DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), |
2329 | DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"), |
2330 | } |
2331 | }, |
2332 | { |
2333 | // TUXEDO InfinityBook Pro AMD Gen9 |
2334 | .matches = { |
2335 | DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"), |
2336 | }, |
2337 | }, |
2338 | { }, |
2339 | }; |
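
/*
 * Note (hedged): for machines not matched by the table above, the same
 * behaviour can typically be requested manually through the ec_no_wakeup
 * module parameter defined earlier in this file (not shown in this
 * excerpt), e.g. by booting with "acpi.ec_no_wakeup=1".
 */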
2340 | |
2341 | void __init acpi_ec_init(void) |
2342 | { |
2343 | int result; |
2344 | |
2345 | result = acpi_ec_init_workqueues(); |
2346 | if (result) |
2347 | return; |
2348 | |
2349 | /* |
2350 | * Disable EC wakeup on the following systems to prevent periodic |
2351 | * wakeups from the EC GPE. |
2352 | */ |
2353 | if (dmi_check_system(acpi_ec_no_wakeup)) { |
2354 | ec_no_wakeup = true; |
2355 | pr_debug("Disabling EC wakeup on suspend-to-idle\n"); |
2356 | } |
2357 | |
2358 | /* Driver must be registered after acpi_ec_init_workqueues(). */ |
2359 | acpi_bus_register_driver(&acpi_ec_driver); |
2360 | |
2361 | acpi_ec_ecdt_start(); |
2362 | } |
2363 | |
2364 | /* EC driver currently not unloadable */ |
2365 | #if 0 |
2366 | static void __exit acpi_ec_exit(void) |
2367 | { |
2368 | |
2369 | acpi_bus_unregister_driver(&acpi_ec_driver); |
2370 | acpi_ec_destroy_workqueues(); |
2371 | } |
2372 | #endif /* 0 */ |
2373 |