// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI AML interfacing support
 *
 * Copyright (C) 2015, Intel Corporation
 * Authors: Lv Zheng <lv.zheng@intel.com>
 */

/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: AML: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/acpi.h>
#include "internal.h"

#define ACPI_AML_BUF_ALIGN	(sizeof (acpi_size))
#define ACPI_AML_BUF_SIZE	PAGE_SIZE

#define circ_count(circ) \
	(CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_count_to_end(circ) \
	(CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space(circ) \
	(CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space_to_end(circ) \
	(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))

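/*
 * The two FIFOs below use the <linux/circ_buf.h> helpers: in_buf carries
 * command lines from user space to the ACPICA debugger, out_buf carries
 * debugger log output back to user space.  The CIRC_* macros assume a
 * power-of-two buffer size (ACPI_AML_BUF_SIZE is PAGE_SIZE), which is why
 * head/tail are advanced with "& (ACPI_AML_BUF_SIZE - 1)" further down.
 */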
#define ACPI_AML_OPENED		0x0001
#define ACPI_AML_CLOSED		0x0002
#define ACPI_AML_IN_USER	0x0004 /* user space is writing cmd */
#define ACPI_AML_IN_KERN	0x0008 /* kernel space is reading cmd */
#define ACPI_AML_OUT_USER	0x0010 /* user space is reading log */
#define ACPI_AML_OUT_KERN	0x0020 /* kernel space is writing log */
#define ACPI_AML_USER		(ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
#define ACPI_AML_KERN		(ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
#define ACPI_AML_BUSY		(ACPI_AML_USER | ACPI_AML_KERN)
#define ACPI_AML_OPEN		(ACPI_AML_OPENED | ACPI_AML_CLOSED)
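/*
 * The ACPI_AML_IN_* and ACPI_AML_OUT_* bits double as per-FIFO, per-side
 * busy flags: a flag is set in acpi_aml_lock_read()/acpi_aml_lock_write()
 * while data is being copied, so at most one accessor per direction is
 * active at a time.  ACPI_AML_BUSY is the union of those bits and is used
 * on close to wait until every outstanding reader/writer has drained.
 */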

struct acpi_aml_io {
	wait_queue_head_t wait;
	unsigned long flags;
	unsigned long users;
	struct mutex lock;
	struct task_struct *thread;
	char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf out_crc;
	char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf in_crc;
	acpi_osd_exec_callback function;
	void *context;
	unsigned long usages;
};
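/*
 * A single, statically allocated instance backs the debugfs file: 'users'
 * counts open file descriptors, while 'usages' counts references taken by
 * the debugger thread itself, so the two can be torn down independently
 * on release.
 */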

static struct acpi_aml_io acpi_aml_io;
static bool acpi_aml_initialized;
static struct file *acpi_aml_active_reader;
static struct dentry *acpi_aml_dentry;

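/*
 * The __acpi_aml_*() helpers below do not take acpi_aml_io.lock themselves;
 * they are meant to be called with that mutex already held (i.e. from the
 * locked sections of their non-underscored wrappers).
 */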
static inline bool __acpi_aml_running(void)
{
	return acpi_aml_io.thread ? true : false;
}

static inline bool __acpi_aml_access_ok(unsigned long flag)
{
	/*
	 * When the debugger interface is in the opened state (OPENED &&
	 * !CLOSED), the debugger buffers may be accessed from either user
	 * space or kernel space.
	 * In addition, on the kernel side only the debugger thread itself
	 * (matching thread ID) is allowed access.
	 */
	if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
	    (acpi_aml_io.flags & ACPI_AML_CLOSED) ||
	    !__acpi_aml_running())
		return false;
	if ((flag & ACPI_AML_KERN) &&
	    current != acpi_aml_io.thread)
		return false;
	return true;
}

static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
{
	/*
	 * Another read is not in progress and there is data in buffer
	 * available for read.
	 */
	if (!(acpi_aml_io.flags & flag) && circ_count(circ))
		return true;
	return false;
}

static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
{
	/*
	 * Another write is not in progress and there is buffer space
	 * available for write.
	 */
	if (!(acpi_aml_io.flags & flag) && circ_space(circ))
		return true;
	return false;
}

static inline bool __acpi_aml_busy(void)
{
	if (acpi_aml_io.flags & ACPI_AML_BUSY)
		return true;
	return false;
}

static inline bool __acpi_aml_used(void)
{
	return acpi_aml_io.usages ? true : false;
}

static inline bool acpi_aml_running(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_running();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_busy(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_busy();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_used(void)
{
	bool ret;

	/*
	 * The usage count is used to avoid races between the start and
	 * the stop of the debugger thread.
	 */
	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_used();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_kern_readable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
	      __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_kern_writable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
	      __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_user_readable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
	      __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_user_writable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
	      __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
{
	int ret = 0;

	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
		ret = -EFAULT;
		goto out;
	}
	if (!__acpi_aml_writable(circ, flag)) {
		ret = -EAGAIN;
		goto out;
	}
	acpi_aml_io.flags |= flag;
out:
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
{
	int ret = 0;

	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
		ret = -EFAULT;
		goto out;
	}
	if (!__acpi_aml_readable(circ, flag)) {
		ret = -EAGAIN;
		goto out;
	}
	acpi_aml_io.flags |= flag;
out:
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.flags &= ~flag;
	if (wakeup)
		wake_up_interruptible(&acpi_aml_io.wait);
	mutex_unlock(&acpi_aml_io.lock);
}

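/*
 * FIFO access pattern used by the transfer helpers below: claim the
 * per-direction flag under acpi_aml_io.lock (acpi_aml_lock_read/write()),
 * copy data in or out of the circ_buf without holding the mutex but with
 * the appropriate smp_*mb() barriers, then clear the flag and wake up any
 * waiters (acpi_aml_unlock_fifo()).
 */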
static int acpi_aml_write_kern(const char *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
	if (ret < 0)
		return ret;
	/* sync tail before inserting logs */
	smp_mb();
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	memcpy(p, buf, n);
	/* sync head after inserting logs */
	smp_wmb();
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
	return n;
}

static int acpi_aml_readb_kern(void)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.in_crc;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
	if (ret < 0)
		return ret;
	/* sync head before removing cmds */
	smp_rmb();
	p = &crc->buf[crc->tail];
	ret = (int)*p;
	/* sync tail after removing cmds */
	smp_mb();
	crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
	return ret;
}

/*
 * acpi_aml_write_log() - Capture debugger output
 * @msg: the debugger output
 *
 * This function should be used to implement acpi_os_printf() to filter out
 * the debugger output and store the output into the debugger interface
 * buffer. Return the size of stored logs or errno.
 */
static ssize_t acpi_aml_write_log(const char *msg)
{
	int ret = 0;
	int count = 0, size = 0;

	if (!acpi_aml_initialized)
		return -ENODEV;
	if (msg)
		count = strlen(msg);
	while (count > 0) {
again:
		ret = acpi_aml_write_kern(msg + size, count);
		if (ret == -EAGAIN) {
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_writable());
			/*
			 * We need to retry when the condition
			 * becomes true.
			 */
			if (ret == 0)
				goto again;
			break;
		}
		if (ret < 0)
			break;
		size += ret;
		count -= ret;
	}
	return size > 0 ? size : ret;
}

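/*
 * Note that acpi_aml_write_log() blocks (interruptibly) while the out_buf
 * FIFO is full: kernel-side debugger output only makes progress while a
 * user space reader is draining the interface (typically exposed as
 * /sys/kernel/debug/acpi/acpidbg), which is consistent with acpi_aml_open()
 * letting only a reader bring up the debugger.
 */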
/*
 * acpi_aml_read_cmd() - Capture debugger input
 * @msg: the debugger input
 * @count: the size of the debugger input buffer
 *
 * This function should be used to implement acpi_os_get_line() to capture
 * the debugger input commands and store the input commands into the
 * debugger interface buffer. Return the size of stored commands or errno.
 */
static ssize_t acpi_aml_read_cmd(char *msg, size_t count)
{
	int ret = 0;
	int size = 0;

	/*
	 * This is guaranteed by the fact that the debugger thread is
	 * running, unless a bug is introduced.
	 */
	BUG_ON(!acpi_aml_initialized);
	while (count > 0) {
again:
		/*
		 * Check each input byte to find the end of the command.
		 */
		ret = acpi_aml_readb_kern();
		if (ret == -EAGAIN) {
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_readable());
			/*
			 * We need to retry when the condition becomes
			 * true.
			 */
			if (ret == 0)
				goto again;
		}
		if (ret < 0)
			break;
		*(msg + size) = (char)ret;
		size++;
		count--;
		if (ret == '\n') {
			/*
			 * acpi_os_get_line() requires a zero terminated command
			 * string.
			 */
			*(msg + size - 1) = '\0';
			break;
		}
	}
	return size > 0 ? size : ret;
}

static int acpi_aml_thread(void *unused)
{
	acpi_osd_exec_callback function = NULL;
	void *context;

	mutex_lock(&acpi_aml_io.lock);
	if (acpi_aml_io.function) {
		acpi_aml_io.usages++;
		function = acpi_aml_io.function;
		context = acpi_aml_io.context;
	}
	mutex_unlock(&acpi_aml_io.lock);

	if (function)
		function(context);

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.usages--;
	if (!__acpi_aml_used()) {
		acpi_aml_io.thread = NULL;
		wake_up(&acpi_aml_io.wait);
	}
	mutex_unlock(&acpi_aml_io.lock);

	return 0;
}

/*
 * acpi_aml_create_thread() - Create AML debugger thread
 * @function: the debugger thread callback
 * @context: the context to be passed to the debugger thread
 *
 * This function should be used to implement acpi_os_execute() which is
 * used by the ACPICA debugger to create the debugger thread.
 */
static int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
{
	struct task_struct *t;

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.function = function;
	acpi_aml_io.context = context;
	mutex_unlock(&acpi_aml_io.lock);

	t = kthread_create(acpi_aml_thread, NULL, "aml");
	if (IS_ERR(t)) {
		pr_err("Failed to create AML debugger thread.\n");
		return PTR_ERR(t);
	}

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.thread = t;
	acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
	wake_up_process(t);
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}

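/*
 * The task_struct pointer doubles as the ACPICA thread ID here (via
 * acpi_set_debugger_thread_id()), and the same pointer is what
 * __acpi_aml_access_ok() compares against 'current' to restrict
 * kernel-side FIFO access to the debugger thread.
 */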
static int acpi_aml_wait_command_ready(bool single_step,
				       char *buffer, size_t length)
{
	acpi_status status;

	if (single_step)
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
	else
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);

	status = acpi_os_get_line(buffer, length, NULL);
	if (ACPI_FAILURE(status))
		return -EINVAL;
	return 0;
}

static int acpi_aml_notify_command_complete(void)
{
	return 0;
}

static int acpi_aml_open(struct inode *inode, struct file *file)
{
	int ret = 0;
	acpi_status status;

	mutex_lock(&acpi_aml_io.lock);
	/*
	 * The debugger interface is being closed; no new users are
	 * allowed during this period.
	 */
	if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
		ret = -EBUSY;
		goto err_lock;
	}
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * Only one reader is allowed to initiate the debugger
		 * thread.
		 */
		if (acpi_aml_active_reader) {
			ret = -EBUSY;
			goto err_lock;
		} else {
			pr_debug("Opening debugger reader.\n");
			acpi_aml_active_reader = file;
		}
	} else {
		/*
		 * No writer is allowed unless the debugger thread is
		 * ready.
		 */
		if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
			ret = -ENODEV;
			goto err_lock;
		}
	}
	if (acpi_aml_active_reader == file) {
		pr_debug("Opening debugger interface.\n");
		mutex_unlock(&acpi_aml_io.lock);

		pr_debug("Initializing debugger thread.\n");
		status = acpi_initialize_debugger();
		if (ACPI_FAILURE(status)) {
			pr_err("Failed to initialize debugger.\n");
			ret = -EINVAL;
			goto err_exit;
		}
		pr_debug("Debugger thread initialized.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags |= ACPI_AML_OPENED;
		acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
		acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
		pr_debug("Debugger interface opened.\n");
	}
	acpi_aml_io.users++;
err_lock:
	if (ret < 0) {
		if (acpi_aml_active_reader == file)
			acpi_aml_active_reader = NULL;
	}
	mutex_unlock(&acpi_aml_io.lock);
err_exit:
	return ret;
}

static int acpi_aml_release(struct inode *inode, struct file *file)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.users--;
	if (file == acpi_aml_active_reader) {
		pr_debug("Closing debugger reader.\n");
		acpi_aml_active_reader = NULL;

		pr_debug("Closing debugger interface.\n");
		acpi_aml_io.flags |= ACPI_AML_CLOSED;

		/*
		 * Wake up all user space/kernel space blocked
		 * readers/writers.
		 */
		wake_up_interruptible(&acpi_aml_io.wait);
		mutex_unlock(&acpi_aml_io.lock);
		/*
		 * Wait for all user space/kernel space readers/writers to
		 * stop, so that the ACPICA command loop of the debugger
		 * thread fails all of its command line reads after this
		 * point.
		 */
		wait_event(acpi_aml_io.wait, !acpi_aml_busy());

		/*
		 * Then try to terminate the debugger thread if it has not
		 * terminated already.
		 */
556 | pr_debug("Terminating debugger thread.\n"); |
557 | acpi_terminate_debugger(); |
558 | wait_event(acpi_aml_io.wait, !acpi_aml_used()); |
559 | pr_debug("Debugger thread terminated.\n"); |
560 | |
561 | mutex_lock(&acpi_aml_io.lock); |
562 | acpi_aml_io.flags &= ~ACPI_AML_OPENED; |
563 | } |
564 | if (acpi_aml_io.users == 0) { |
565 | pr_debug("Debugger interface closed.\n"); |
566 | acpi_aml_io.flags &= ~ACPI_AML_CLOSED; |
567 | } |
568 | mutex_unlock(lock: &acpi_aml_io.lock); |
569 | return 0; |
570 | } |
571 | |
572 | static int acpi_aml_read_user(char __user *buf, int len) |
573 | { |
574 | int ret; |
575 | struct circ_buf *crc = &acpi_aml_io.out_crc; |
576 | int n; |
577 | char *p; |
578 | |
579 | ret = acpi_aml_lock_read(circ: crc, ACPI_AML_OUT_USER); |
580 | if (ret < 0) |
581 | return ret; |
582 | /* sync head before removing logs */ |
583 | smp_rmb(); |
584 | p = &crc->buf[crc->tail]; |
585 | n = min(len, circ_count_to_end(crc)); |
586 | if (copy_to_user(to: buf, from: p, n)) { |
587 | ret = -EFAULT; |
588 | goto out; |
589 | } |
590 | /* sync tail after removing logs */ |
591 | smp_mb(); |
592 | crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1); |
593 | ret = n; |
594 | out: |
595 | acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, wakeup: ret >= 0); |
596 | return ret; |
597 | } |
598 | |
599 | static ssize_t acpi_aml_read(struct file *file, char __user *buf, |
600 | size_t count, loff_t *ppos) |
601 | { |
602 | int ret = 0; |
603 | int size = 0; |
604 | |
605 | if (!count) |
606 | return 0; |
607 | if (!access_ok(buf, count)) |
608 | return -EFAULT; |
609 | |
610 | while (count > 0) { |
611 | again: |
612 | ret = acpi_aml_read_user(buf: buf + size, len: count); |
613 | if (ret == -EAGAIN) { |
614 | if (file->f_flags & O_NONBLOCK) |
615 | break; |
616 | else { |
617 | ret = wait_event_interruptible(acpi_aml_io.wait, |
618 | acpi_aml_user_readable()); |
619 | /* |
620 | * We need to retry when the condition |
621 | * becomes true. |
622 | */ |
623 | if (ret == 0) |
624 | goto again; |
625 | } |
626 | } |
627 | if (ret < 0) { |
628 | if (!acpi_aml_running()) |
629 | ret = 0; |
630 | break; |
631 | } |
632 | if (ret) { |
633 | size += ret; |
634 | count -= ret; |
635 | *ppos += ret; |
636 | break; |
637 | } |
638 | } |
639 | return size > 0 ? size : ret; |
640 | } |
641 | |
642 | static int acpi_aml_write_user(const char __user *buf, int len) |
643 | { |
644 | int ret; |
645 | struct circ_buf *crc = &acpi_aml_io.in_crc; |
646 | int n; |
647 | char *p; |
648 | |
649 | ret = acpi_aml_lock_write(circ: crc, ACPI_AML_IN_USER); |
650 | if (ret < 0) |
651 | return ret; |
652 | /* sync tail before inserting cmds */ |
653 | smp_mb(); |
654 | p = &crc->buf[crc->head]; |
655 | n = min(len, circ_space_to_end(crc)); |
656 | if (copy_from_user(to: p, from: buf, n)) { |
657 | ret = -EFAULT; |
658 | goto out; |
659 | } |
660 | /* sync head after inserting cmds */ |
661 | smp_wmb(); |
662 | crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1); |
663 | ret = n; |
664 | out: |
665 | acpi_aml_unlock_fifo(ACPI_AML_IN_USER, wakeup: ret >= 0); |
	return ret;
}

static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int ret = 0;
	int size = 0;

	if (!count)
		return 0;
	if (!access_ok(buf, count))
		return -EFAULT;

	while (count > 0) {
again:
		ret = acpi_aml_write_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				break;
			else {
				ret = wait_event_interruptible(acpi_aml_io.wait,
					acpi_aml_user_writable());
				/*
				 * We need to retry when the condition
				 * becomes true.
				 */
				if (ret == 0)
					goto again;
			}
		}
		if (ret < 0) {
			if (!acpi_aml_running())
				ret = 0;
			break;
		}
		if (ret) {
			size += ret;
			count -= ret;
			*ppos += ret;
		}
	}
	return size > 0 ? size : ret;
}

static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
{
	__poll_t masks = 0;

	poll_wait(file, &acpi_aml_io.wait, wait);
	if (acpi_aml_user_readable())
		masks |= EPOLLIN | EPOLLRDNORM;
	if (acpi_aml_user_writable())
		masks |= EPOLLOUT | EPOLLWRNORM;

	return masks;
}

static const struct file_operations acpi_aml_operations = {
	.read = acpi_aml_read,
	.write = acpi_aml_write,
	.poll = acpi_aml_poll,
	.open = acpi_aml_open,
	.release = acpi_aml_release,
	.llseek = generic_file_llseek,
};

static const struct acpi_debugger_ops acpi_aml_debugger = {
	.create_thread = acpi_aml_create_thread,
	.read_cmd = acpi_aml_read_cmd,
	.write_log = acpi_aml_write_log,
	.wait_command_ready = acpi_aml_wait_command_ready,
	.notify_command_complete = acpi_aml_notify_command_complete,
};

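/*
 * Once registered via acpi_register_debugger(), these callbacks are invoked
 * from the ACPI OS services layer: acpi_os_printf() funnels debugger output
 * into .write_log and acpi_os_get_line() pulls commands through .read_cmd
 * (see the comments on those handlers above).
 */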
static int __init acpi_aml_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* Initialize AML IO interface */
	mutex_init(&acpi_aml_io.lock);
	init_waitqueue_head(&acpi_aml_io.wait);
	acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
	acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;

	acpi_aml_dentry = debugfs_create_file("acpidbg",
					      S_IFREG | S_IRUGO | S_IWUSR,
					      acpi_debugfs_dir, NULL,
					      &acpi_aml_operations);

	ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
	if (ret) {
		debugfs_remove(acpi_aml_dentry);
		acpi_aml_dentry = NULL;
		return ret;
	}

	acpi_aml_initialized = true;
	return 0;
}

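/*
 * Usage sketch (not part of the driver): when this interface is built in,
 * it appears as <debugfs>/acpi/acpidbg, typically
 * /sys/kernel/debug/acpi/acpidbg.  A single reader-writer, such as the
 * userspace acpidbg tool shipped with the kernel sources, opens the file,
 * which starts the ACPICA debugger; commands written to the file are fed
 * to the debugger and its output is read back from the same file.
 */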
static void __exit acpi_aml_exit(void)
{
	if (acpi_aml_initialized) {
		acpi_unregister_debugger(&acpi_aml_debugger);
		debugfs_remove(acpi_aml_dentry);
		acpi_aml_dentry = NULL;
		acpi_aml_initialized = false;
	}
}

module_init(acpi_aml_init);
module_exit(acpi_aml_exit);

MODULE_AUTHOR("Lv Zheng");
MODULE_DESCRIPTION("ACPI debugger userspace IO driver");
MODULE_LICENSE("GPL");