1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * acpi_osl.c - OS-dependent functions ($Revision: 83 $) |
4 | * |
5 | * Copyright (C) 2000 Andrew Henroid |
6 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> |
7 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
8 | * Copyright (c) 2008 Intel Corporation |
9 | * Author: Matthew Wilcox <willy@linux.intel.com> |
10 | */ |
11 | |
12 | #define pr_fmt(fmt) "ACPI: OSL: " fmt |
13 | |
14 | #include <linux/module.h> |
15 | #include <linux/kernel.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/mm.h> |
18 | #include <linux/highmem.h> |
19 | #include <linux/lockdep.h> |
20 | #include <linux/pci.h> |
21 | #include <linux/interrupt.h> |
22 | #include <linux/kmod.h> |
23 | #include <linux/delay.h> |
24 | #include <linux/workqueue.h> |
25 | #include <linux/nmi.h> |
26 | #include <linux/acpi.h> |
27 | #include <linux/efi.h> |
28 | #include <linux/ioport.h> |
29 | #include <linux/list.h> |
30 | #include <linux/jiffies.h> |
31 | #include <linux/semaphore.h> |
32 | #include <linux/security.h> |
33 | |
34 | #include <asm/io.h> |
35 | #include <linux/uaccess.h> |
36 | #include <linux/io-64-nonatomic-lo-hi.h> |
37 | |
38 | #include "acpica/accommon.h" |
39 | #include "internal.h" |
40 | |
41 | /* Definitions for ACPI_DEBUG_PRINT() */ |
42 | #define _COMPONENT ACPI_OS_SERVICES |
ACPI_MODULE_NAME("osl");
44 | |
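/*
 * Deferred procedure call descriptor used by acpi_os_execute(): the ACPICA
 * callback and its argument are packaged together with a work_struct so the
 * callback can be run later from a workqueue.
 */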
45 | struct acpi_os_dpc { |
46 | acpi_osd_exec_callback function; |
47 | void *context; |
48 | struct work_struct work; |
49 | }; |
50 | |
51 | #ifdef ENABLE_DEBUGGER |
52 | #include <linux/kdb.h> |
53 | |
54 | /* stuff for debugger support */ |
55 | int acpi_in_debugger; |
56 | EXPORT_SYMBOL(acpi_in_debugger); |
57 | #endif /*ENABLE_DEBUGGER */ |
58 | |
59 | static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl, |
60 | u32 pm1b_ctrl); |
61 | static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a, |
62 | u32 val_b); |
63 | |
64 | static acpi_osd_handler acpi_irq_handler; |
65 | static void *acpi_irq_context; |
66 | static struct workqueue_struct *kacpid_wq; |
67 | static struct workqueue_struct *kacpi_notify_wq; |
68 | static struct workqueue_struct *kacpi_hotplug_wq; |
69 | static bool acpi_os_initialized; |
70 | unsigned int acpi_sci_irq = INVALID_ACPI_IRQ; |
71 | bool acpi_permanent_mmap = false; |
72 | |
73 | /* |
74 | * This list of permanent mappings is for memory that may be accessed from |
75 | * interrupt context, where we can't do the ioremap(). |
76 | */ |
77 | struct acpi_ioremap { |
78 | struct list_head list; |
79 | void __iomem *virt; |
80 | acpi_physical_address phys; |
81 | acpi_size size; |
82 | union { |
83 | unsigned long refcount; |
84 | struct rcu_work rwork; |
85 | } track; |
86 | }; |
87 | |
88 | static LIST_HEAD(acpi_ioremaps); |
89 | static DEFINE_MUTEX(acpi_ioremap_lock); |
90 | #define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map) |
91 | |
92 | static void __init acpi_request_region (struct acpi_generic_address *gas, |
93 | unsigned int length, char *desc) |
94 | { |
95 | u64 addr; |
96 | |
97 | /* Handle possible alignment issues */ |
98 | memcpy(&addr, &gas->address, sizeof(addr)); |
99 | if (!addr || !length) |
100 | return; |
101 | |
102 | /* Resources are never freed */ |
103 | if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) |
104 | request_region(addr, length, desc); |
105 | else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) |
106 | request_mem_region(addr, length, desc); |
107 | } |
108 | |
109 | static int __init acpi_reserve_resources(void) |
110 | { |
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
138 | |
139 | return 0; |
140 | } |
141 | fs_initcall_sync(acpi_reserve_resources); |
142 | |
143 | void acpi_os_printf(const char *fmt, ...) |
144 | { |
145 | va_list args; |
146 | va_start(args, fmt); |
	acpi_os_vprintf(fmt, args);
148 | va_end(args); |
149 | } |
150 | EXPORT_SYMBOL(acpi_os_printf); |
151 | |
152 | void __printf(1, 0) acpi_os_vprintf(const char *fmt, va_list args) |
153 | { |
154 | static char buffer[512]; |
155 | |
	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0) {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
174 | #endif |
175 | } |
176 | |
177 | #ifdef CONFIG_KEXEC |
178 | static unsigned long acpi_rsdp; |
179 | static int __init setup_acpi_rsdp(char *arg) |
180 | { |
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
184 | #endif |
185 | |
186 | acpi_physical_address __init acpi_os_get_root_pointer(void) |
187 | { |
188 | acpi_physical_address pa; |
189 | |
190 | #ifdef CONFIG_KEXEC |
191 | /* |
192 | * We may have been provided with an RSDP on the command line, |
193 | * but if a malicious user has done so they may be pointing us |
194 | * at modified ACPI tables that could alter kernel behaviour - |
195 | * so, we check the lockdown status before making use of |
196 | * it. If we trust it then also stash it in an architecture |
197 | * specific location (if appropriate) so it can be carried |
198 | * over further kexec()s. |
199 | */ |
	if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
		acpi_arch_set_root_pointer(acpi_rsdp);
202 | return acpi_rsdp; |
203 | } |
204 | #endif |
205 | pa = acpi_arch_get_root_pointer(); |
206 | if (pa) |
207 | return pa; |
208 | |
209 | if (efi_enabled(EFI_CONFIG_TABLES)) { |
210 | if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) |
211 | return efi.acpi20; |
212 | if (efi.acpi != EFI_INVALID_TABLE_ADDR) |
213 | return efi.acpi; |
214 | pr_err("System description tables not found\n" ); |
215 | } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) { |
216 | acpi_find_root_pointer(rsdp_address: &pa); |
217 | } |
218 | |
219 | return pa; |
220 | } |
221 | |
222 | /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ |
223 | static struct acpi_ioremap * |
224 | acpi_map_lookup(acpi_physical_address phys, acpi_size size) |
225 | { |
226 | struct acpi_ioremap *map; |
227 | |
228 | list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held()) |
229 | if (map->phys <= phys && |
230 | phys + size <= map->phys + map->size) |
231 | return map; |
232 | |
233 | return NULL; |
234 | } |
235 | |
236 | /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ |
237 | static void __iomem * |
238 | acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size) |
239 | { |
240 | struct acpi_ioremap *map; |
241 | |
242 | map = acpi_map_lookup(phys, size); |
243 | if (map) |
244 | return map->virt + (phys - map->phys); |
245 | |
246 | return NULL; |
247 | } |
248 | |
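/*
 * Look up an existing permanent mapping that covers [phys, phys + size),
 * take an additional reference on it and return the corresponding virtual
 * address, or NULL if no such mapping exists.
 */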
249 | void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size) |
250 | { |
251 | struct acpi_ioremap *map; |
252 | void __iomem *virt = NULL; |
253 | |
254 | mutex_lock(&acpi_ioremap_lock); |
255 | map = acpi_map_lookup(phys, size); |
256 | if (map) { |
257 | virt = map->virt + (phys - map->phys); |
258 | map->track.refcount++; |
259 | } |
	mutex_unlock(&acpi_ioremap_lock);
261 | return virt; |
262 | } |
263 | EXPORT_SYMBOL_GPL(acpi_os_get_iomem); |
264 | |
265 | /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ |
266 | static struct acpi_ioremap * |
267 | acpi_map_lookup_virt(void __iomem *virt, acpi_size size) |
268 | { |
269 | struct acpi_ioremap *map; |
270 | |
271 | list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held()) |
272 | if (map->virt <= virt && |
273 | virt + size <= map->virt + map->size) |
274 | return map; |
275 | |
276 | return NULL; |
277 | } |
278 | |
279 | #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) |
280 | /* ioremap will take care of cache attributes */ |
281 | #define should_use_kmap(pfn) 0 |
282 | #else |
283 | #define should_use_kmap(pfn) page_is_ram(pfn) |
284 | #endif |
285 | |
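/*
 * Map a single physical range, using kmap() where should_use_kmap() reports
 * a RAM page (so ioremap() attributes would be inappropriate) and
 * acpi_os_ioremap() otherwise. The kmap() path can only cover one page.
 */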
286 | static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) |
287 | { |
288 | unsigned long pfn; |
289 | |
290 | pfn = pg_off >> PAGE_SHIFT; |
291 | if (should_use_kmap(pfn)) { |
292 | if (pg_sz > PAGE_SIZE) |
293 | return NULL; |
294 | return (void __iomem __force *)kmap(pfn_to_page(pfn)); |
295 | } else |
		return acpi_os_ioremap(pg_off, pg_sz);
297 | } |
298 | |
299 | static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) |
300 | { |
301 | unsigned long pfn; |
302 | |
303 | pfn = pg_off >> PAGE_SHIFT; |
304 | if (should_use_kmap(pfn)) |
305 | kunmap(pfn_to_page(pfn)); |
306 | else |
		iounmap(vaddr);
308 | } |
309 | |
310 | /** |
311 | * acpi_os_map_iomem - Get a virtual address for a given physical address range. |
312 | * @phys: Start of the physical address range to map. |
313 | * @size: Size of the physical address range to map. |
314 | * |
315 | * Look up the given physical address range in the list of existing ACPI memory |
316 | * mappings. If found, get a reference to it and return a pointer to it (its |
317 | * virtual address). If not found, map it, add it to that list and return a |
318 | * pointer to it. |
319 | * |
320 | * During early init (when acpi_permanent_mmap has not been set yet) this |
321 | * routine simply calls __acpi_map_table() to get the job done. |
322 | */ |
323 | void __iomem __ref |
324 | *acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) |
325 | { |
326 | struct acpi_ioremap *map; |
327 | void __iomem *virt; |
328 | acpi_physical_address pg_off; |
329 | acpi_size pg_sz; |
330 | |
331 | if (phys > ULONG_MAX) { |
332 | pr_err("Cannot map memory that high: 0x%llx\n" , phys); |
333 | return NULL; |
334 | } |
335 | |
336 | if (!acpi_permanent_mmap) |
337 | return __acpi_map_table(phys: (unsigned long)phys, size); |
338 | |
339 | mutex_lock(&acpi_ioremap_lock); |
340 | /* Check if there's a suitable mapping already. */ |
341 | map = acpi_map_lookup(phys, size); |
342 | if (map) { |
343 | map->track.refcount++; |
344 | goto out; |
345 | } |
346 | |
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(phys, size);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
	map->phys = pg_off;
	map->size = pg_sz;
	map->track.refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
373 | } |
374 | EXPORT_SYMBOL_GPL(acpi_os_map_iomem); |
375 | |
376 | void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size) |
377 | { |
378 | return (void *)acpi_os_map_iomem(phys, size); |
379 | } |
380 | EXPORT_SYMBOL_GPL(acpi_os_map_memory); |
381 | |
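/* RCU-deferred teardown of a mapping whose last reference has been dropped. */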
382 | static void acpi_os_map_remove(struct work_struct *work) |
383 | { |
384 | struct acpi_ioremap *map = container_of(to_rcu_work(work), |
385 | struct acpi_ioremap, |
386 | track.rwork); |
387 | |
	acpi_unmap(map->phys, map->virt);
	kfree(map);
390 | } |
391 | |
392 | /* Must be called with mutex_lock(&acpi_ioremap_lock) */ |
393 | static void acpi_os_drop_map_ref(struct acpi_ioremap *map) |
394 | { |
395 | if (--map->track.refcount) |
396 | return; |
397 | |
	list_del_rcu(&map->list);

	INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
	queue_rcu_work(system_wq, &map->track.rwork);
402 | } |
403 | |
404 | /** |
405 | * acpi_os_unmap_iomem - Drop a memory mapping reference. |
406 | * @virt: Start of the address range to drop a reference to. |
407 | * @size: Size of the address range to drop a reference to. |
408 | * |
409 | * Look up the given virtual address range in the list of existing ACPI memory |
410 | * mappings, drop a reference to it and if there are no more active references |
411 | * to it, queue it up for later removal. |
412 | * |
413 | * During early init (when acpi_permanent_mmap has not been set yet) this |
414 | * routine simply calls __acpi_unmap_table() to get the job done. Since |
415 | * __acpi_unmap_table() is an __init function, the __ref annotation is needed |
416 | * here. |
417 | */ |
418 | void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) |
419 | { |
420 | struct acpi_ioremap *map; |
421 | |
422 | if (!acpi_permanent_mmap) { |
		__acpi_unmap_table(virt, size);
424 | return; |
425 | } |
426 | |
427 | mutex_lock(&acpi_ioremap_lock); |
428 | |
429 | map = acpi_map_lookup_virt(virt, size); |
430 | if (!map) { |
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, "ACPI: %s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
438 | } |
439 | EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem); |
440 | |
441 | /** |
442 | * acpi_os_unmap_memory - Drop a memory mapping reference. |
443 | * @virt: Start of the address range to drop a reference to. |
444 | * @size: Size of the address range to drop a reference to. |
445 | */ |
446 | void __ref acpi_os_unmap_memory(void *virt, acpi_size size) |
447 | { |
448 | acpi_os_unmap_iomem((void __iomem *)virt, size); |
449 | } |
450 | EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); |
451 | |
452 | void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas) |
453 | { |
454 | u64 addr; |
455 | |
456 | if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) |
457 | return NULL; |
458 | |
459 | /* Handle possible alignment issues */ |
460 | memcpy(&addr, &gas->address, sizeof(addr)); |
461 | if (!addr || !gas->bit_width) |
462 | return NULL; |
463 | |
464 | return acpi_os_map_iomem(addr, gas->bit_width / 8); |
465 | } |
466 | EXPORT_SYMBOL(acpi_os_map_generic_address); |
467 | |
468 | void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) |
469 | { |
470 | u64 addr; |
471 | struct acpi_ioremap *map; |
472 | |
473 | if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) |
474 | return; |
475 | |
476 | /* Handle possible alignment issues */ |
477 | memcpy(&addr, &gas->address, sizeof(addr)); |
478 | if (!addr || !gas->bit_width) |
479 | return; |
480 | |
481 | mutex_lock(&acpi_ioremap_lock); |
482 | |
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
491 | } |
492 | EXPORT_SYMBOL(acpi_os_unmap_generic_address); |
493 | |
494 | #ifdef ACPI_FUTURE_USAGE |
495 | acpi_status |
496 | acpi_os_get_physical_address(void *virt, acpi_physical_address *phys) |
497 | { |
498 | if (!phys || !virt) |
499 | return AE_BAD_PARAMETER; |
500 | |
501 | *phys = virt_to_phys(virt); |
502 | |
503 | return AE_OK; |
504 | } |
505 | #endif |
506 | |
507 | #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE |
508 | static bool acpi_rev_override; |
509 | |
510 | int __init acpi_rev_override_setup(char *str) |
511 | { |
512 | acpi_rev_override = true; |
513 | return 1; |
514 | } |
515 | __setup("acpi_rev_override" , acpi_rev_override_setup); |
516 | #else |
517 | #define acpi_rev_override false |
518 | #endif |
519 | |
520 | #define ACPI_MAX_OVERRIDE_LEN 100 |
521 | |
522 | static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN]; |
523 | |
524 | acpi_status |
525 | acpi_os_predefined_override(const struct acpi_predefined_names *init_val, |
526 | acpi_string *new_val) |
527 | { |
528 | if (!init_val || !new_val) |
529 | return AE_BAD_PARAMETER; |
530 | |
531 | *new_val = NULL; |
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		pr_info("Overriding _OS definition to '%s'\n", acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		pr_info("Overriding _REV return value to 5\n");
		*new_val = (char *)5;
540 | } |
541 | |
542 | return AE_OK; |
543 | } |
544 | |
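/* SCI handler: forward the interrupt to the handler installed by ACPICA. */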
545 | static irqreturn_t acpi_irq(int irq, void *dev_id) |
546 | { |
547 | if ((*acpi_irq_handler)(acpi_irq_context)) { |
548 | acpi_irq_handled++; |
549 | return IRQ_HANDLED; |
550 | } else { |
551 | acpi_irq_not_handled++; |
552 | return IRQ_NONE; |
553 | } |
554 | } |
555 | |
556 | acpi_status |
557 | acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, |
558 | void *context) |
559 | { |
560 | unsigned int irq; |
561 | |
562 | acpi_irq_stats_init(); |
563 | |
564 | /* |
565 | * ACPI interrupts different from the SCI in our copy of the FADT are |
566 | * not supported. |
567 | */ |
568 | if (gsi != acpi_gbl_FADT.sci_interrupt) |
569 | return AE_BAD_PARAMETER; |
570 | |
571 | if (acpi_irq_handler) |
572 | return AE_ALREADY_ACQUIRED; |
573 | |
	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		pr_err("SCI (ACPI GSI %d) not registered\n", gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED | IRQF_ONESHOT,
				 "acpi", acpi_irq)) {
		pr_err("SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
585 | return AE_NOT_ACQUIRED; |
586 | } |
587 | acpi_sci_irq = irq; |
588 | |
589 | return AE_OK; |
590 | } |
591 | |
592 | acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler) |
593 | { |
594 | if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid()) |
595 | return AE_BAD_PARAMETER; |
596 | |
597 | free_irq(acpi_sci_irq, acpi_irq); |
598 | acpi_irq_handler = NULL; |
599 | acpi_sci_irq = INVALID_ACPI_IRQ; |
600 | |
601 | return AE_OK; |
602 | } |
603 | |
604 | /* |
605 | * Running in interpreter thread context, safe to sleep |
606 | */ |
607 | |
608 | void acpi_os_sleep(u64 ms) |
609 | { |
	msleep(ms);
611 | } |
612 | |
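/*
 * Busy-wait for the requested number of microseconds in chunks of at most
 * 1 ms, touching the NMI watchdog between chunks so that long stalls do not
 * trigger it.
 */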
613 | void acpi_os_stall(u32 us) |
614 | { |
615 | while (us) { |
616 | u32 delay = 1000; |
617 | |
618 | if (delay > us) |
619 | delay = us; |
620 | udelay(delay); |
621 | touch_nmi_watchdog(); |
622 | us -= delay; |
623 | } |
624 | } |
625 | |
626 | /* |
627 | * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running, |
628 | * monotonically increasing timer with 100ns granularity. Do not use |
629 | * ktime_get() to implement this function because this function may get |
630 | * called after timekeeping has been suspended. Note: calling this function |
631 | * after timekeeping has been suspended may lead to unexpected results |
632 | * because when timekeeping is suspended the jiffies counter is not |
633 | * incremented. See also timekeeping_suspend(). |
634 | */ |
635 | u64 acpi_os_get_timer(void) |
636 | { |
637 | return (get_jiffies_64() - INITIAL_JIFFIES) * |
638 | (ACPI_100NSEC_PER_SEC / HZ); |
639 | } |
640 | |
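/*
 * Port I/O accessors used by ACPICA for SystemIO accesses. Widths above
 * 32 bits are rejected with AE_BAD_PARAMETER.
 */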
641 | acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width) |
642 | { |
643 | u32 dummy; |
644 | |
645 | if (value) |
646 | *value = 0; |
647 | else |
648 | value = &dummy; |
649 | |
650 | if (width <= 8) { |
651 | *value = inb(port); |
652 | } else if (width <= 16) { |
653 | *value = inw(port); |
654 | } else if (width <= 32) { |
655 | *value = inl(port); |
656 | } else { |
657 | pr_debug("%s: Access width %d not supported\n" , __func__, width); |
658 | return AE_BAD_PARAMETER; |
659 | } |
660 | |
661 | return AE_OK; |
662 | } |
663 | |
664 | EXPORT_SYMBOL(acpi_os_read_port); |
665 | |
666 | acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) |
667 | { |
668 | if (width <= 8) { |
669 | outb(value, port); |
670 | } else if (width <= 16) { |
671 | outw(value, port); |
672 | } else if (width <= 32) { |
673 | outl(value, port); |
674 | } else { |
675 | pr_debug("%s: Access width %d not supported\n" , __func__, width); |
676 | return AE_BAD_PARAMETER; |
677 | } |
678 | |
679 | return AE_OK; |
680 | } |
681 | |
682 | EXPORT_SYMBOL(acpi_os_write_port); |
683 | |
684 | int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width) |
685 | { |
686 | |
687 | switch (width) { |
688 | case 8: |
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
699 | break; |
700 | default: |
701 | return -EINVAL; |
702 | } |
703 | |
704 | return 0; |
705 | } |
706 | |
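/*
 * Memory-mapped I/O accessors used by ACPICA. An existing permanent mapping
 * covering the range is reused under RCU protection; otherwise the range is
 * mapped just for this access and unmapped again afterwards.
 */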
707 | acpi_status |
708 | acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width) |
709 | { |
710 | void __iomem *virt_addr; |
711 | unsigned int size = width / 8; |
712 | bool unmap = false; |
713 | u64 dummy; |
714 | int error; |
715 | |
716 | rcu_read_lock(); |
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
721 | if (!virt_addr) |
722 | return AE_BAD_ADDRESS; |
723 | unmap = true; |
724 | } |
725 | |
726 | if (!value) |
727 | value = &dummy; |
728 | |
729 | error = acpi_os_read_iomem(virt_addr, value, width); |
730 | BUG_ON(error); |
731 | |
732 | if (unmap) |
		iounmap(virt_addr);
734 | else |
735 | rcu_read_unlock(); |
736 | |
737 | return AE_OK; |
738 | } |
739 | |
740 | acpi_status |
741 | acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width) |
742 | { |
743 | void __iomem *virt_addr; |
744 | unsigned int size = width / 8; |
745 | bool unmap = false; |
746 | |
747 | rcu_read_lock(); |
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
752 | if (!virt_addr) |
753 | return AE_BAD_ADDRESS; |
754 | unmap = true; |
755 | } |
756 | |
757 | switch (width) { |
758 | case 8: |
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
769 | break; |
770 | default: |
771 | BUG(); |
772 | } |
773 | |
774 | if (unmap) |
		iounmap(virt_addr);
776 | else |
777 | rcu_read_unlock(); |
778 | |
779 | return AE_OK; |
780 | } |
781 | |
782 | #ifdef CONFIG_PCI |
783 | acpi_status |
784 | acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg, |
785 | u64 *value, u32 width) |
786 | { |
787 | int result, size; |
788 | u32 value32; |
789 | |
790 | if (!value) |
791 | return AE_BAD_PARAMETER; |
792 | |
793 | switch (width) { |
794 | case 8: |
795 | size = 1; |
796 | break; |
797 | case 16: |
798 | size = 2; |
799 | break; |
800 | case 32: |
801 | size = 4; |
802 | break; |
803 | default: |
804 | return AE_ERROR; |
805 | } |
806 | |
	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
810 | *value = value32; |
811 | |
812 | return (result ? AE_ERROR : AE_OK); |
813 | } |
814 | |
815 | acpi_status |
816 | acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg, |
817 | u64 value, u32 width) |
818 | { |
819 | int result, size; |
820 | |
821 | switch (width) { |
822 | case 8: |
823 | size = 1; |
824 | break; |
825 | case 16: |
826 | size = 2; |
827 | break; |
828 | case 32: |
829 | size = 4; |
830 | break; |
831 | default: |
832 | return AE_ERROR; |
833 | } |
834 | |
	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);
838 | |
839 | return (result ? AE_ERROR : AE_OK); |
840 | } |
841 | #endif |
842 | |
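/* Workqueue callback: run the deferred ACPICA callback and free its DPC. */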
843 | static void acpi_os_execute_deferred(struct work_struct *work) |
844 | { |
845 | struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); |
846 | |
847 | dpc->function(dpc->context); |
	kfree(dpc);
849 | } |
850 | |
851 | #ifdef CONFIG_ACPI_DEBUGGER |
852 | static struct acpi_debugger acpi_debugger; |
853 | static bool acpi_debugger_initialized; |
854 | |
855 | int acpi_register_debugger(struct module *owner, |
856 | const struct acpi_debugger_ops *ops) |
857 | { |
858 | int ret = 0; |
859 | |
860 | mutex_lock(&acpi_debugger.lock); |
861 | if (acpi_debugger.ops) { |
862 | ret = -EBUSY; |
863 | goto err_lock; |
864 | } |
865 | |
866 | acpi_debugger.owner = owner; |
867 | acpi_debugger.ops = ops; |
868 | |
869 | err_lock: |
	mutex_unlock(&acpi_debugger.lock);
871 | return ret; |
872 | } |
873 | EXPORT_SYMBOL(acpi_register_debugger); |
874 | |
875 | void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) |
876 | { |
877 | mutex_lock(&acpi_debugger.lock); |
878 | if (ops == acpi_debugger.ops) { |
879 | acpi_debugger.ops = NULL; |
880 | acpi_debugger.owner = NULL; |
881 | } |
	mutex_unlock(&acpi_debugger.lock);
883 | } |
884 | EXPORT_SYMBOL(acpi_unregister_debugger); |
885 | |
886 | int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context) |
887 | { |
888 | int ret; |
889 | int (*func)(acpi_osd_exec_callback, void *); |
890 | struct module *owner; |
891 | |
892 | if (!acpi_debugger_initialized) |
893 | return -ENODEV; |
894 | mutex_lock(&acpi_debugger.lock); |
895 | if (!acpi_debugger.ops) { |
896 | ret = -ENODEV; |
897 | goto err_lock; |
898 | } |
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
913 | return ret; |
914 | } |
915 | |
916 | ssize_t acpi_debugger_write_log(const char *msg) |
917 | { |
918 | ssize_t ret; |
919 | ssize_t (*func)(const char *); |
920 | struct module *owner; |
921 | |
922 | if (!acpi_debugger_initialized) |
923 | return -ENODEV; |
924 | mutex_lock(&acpi_debugger.lock); |
925 | if (!acpi_debugger.ops) { |
926 | ret = -ENODEV; |
927 | goto err_lock; |
928 | } |
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
943 | return ret; |
944 | } |
945 | |
946 | ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length) |
947 | { |
948 | ssize_t ret; |
949 | ssize_t (*func)(char *, size_t); |
950 | struct module *owner; |
951 | |
952 | if (!acpi_debugger_initialized) |
953 | return -ENODEV; |
954 | mutex_lock(&acpi_debugger.lock); |
955 | if (!acpi_debugger.ops) { |
956 | ret = -ENODEV; |
957 | goto err_lock; |
958 | } |
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
973 | return ret; |
974 | } |
975 | |
976 | int acpi_debugger_wait_command_ready(void) |
977 | { |
978 | int ret; |
979 | int (*func)(bool, char *, size_t); |
980 | struct module *owner; |
981 | |
982 | if (!acpi_debugger_initialized) |
983 | return -ENODEV; |
984 | mutex_lock(&acpi_debugger.lock); |
985 | if (!acpi_debugger.ops) { |
986 | ret = -ENODEV; |
987 | goto err_lock; |
988 | } |
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
1004 | return ret; |
1005 | } |
1006 | |
1007 | int acpi_debugger_notify_command_complete(void) |
1008 | { |
1009 | int ret; |
1010 | int (*func)(void); |
1011 | struct module *owner; |
1012 | |
1013 | if (!acpi_debugger_initialized) |
1014 | return -ENODEV; |
1015 | mutex_lock(&acpi_debugger.lock); |
1016 | if (!acpi_debugger.ops) { |
1017 | ret = -ENODEV; |
1018 | goto err_lock; |
1019 | } |
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
1034 | return ret; |
1035 | } |
1036 | |
1037 | int __init acpi_debugger_init(void) |
1038 | { |
1039 | mutex_init(&acpi_debugger.lock); |
1040 | acpi_debugger_initialized = true; |
1041 | return 0; |
1042 | } |
1043 | #endif |
1044 | |
1045 | /******************************************************************************* |
1046 | * |
1047 | * FUNCTION: acpi_os_execute |
1048 | * |
1049 | * PARAMETERS: Type - Type of the callback |
1050 | * Function - Function to be executed |
1051 | * Context - Function parameters |
1052 | * |
1053 | * RETURN: Status |
1054 | * |
1055 | * DESCRIPTION: Depending on type, either queues function for deferred execution or |
1056 | * immediately executes function on a separate thread. |
1057 | * |
1058 | ******************************************************************************/ |
1059 | |
1060 | acpi_status acpi_os_execute(acpi_execute_type type, |
1061 | acpi_osd_exec_callback function, void *context) |
1062 | { |
1063 | struct acpi_os_dpc *dpc; |
1064 | int ret; |
1065 | |
1066 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, |
1067 | "Scheduling function [%p(%p)] for deferred execution.\n" , |
1068 | function, context)); |
1069 | |
1070 | if (type == OSL_DEBUGGER_MAIN_THREAD) { |
1071 | ret = acpi_debugger_create_thread(function, context); |
1072 | if (ret) { |
1073 | pr_err("Kernel thread creation failed\n" ); |
1074 | return AE_ERROR; |
1075 | } |
1076 | return AE_OK; |
1077 | } |
1078 | |
1079 | /* |
1080 | * Allocate/initialize DPC structure. Note that this memory will be |
1081 | * freed by the callee. The kernel handles the work_struct list in a |
1082 | * way that allows us to also free its memory inside the callee. |
1083 | * Because we may want to schedule several tasks with different |
1084 | * parameters we can't use the approach some kernel code uses of |
1085 | * having a static work_struct. |
1086 | */ |
1087 | |
	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
1089 | if (!dpc) |
1090 | return AE_NO_MEMORY; |
1091 | |
1092 | dpc->function = function; |
1093 | dpc->context = context; |
1094 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); |
1095 | |
1096 | /* |
1097 | * To prevent lockdep from complaining unnecessarily, make sure that |
1098 | * there is a different static lockdep key for each workqueue by using |
1099 | * INIT_WORK() for each of them separately. |
1100 | */ |
1101 | switch (type) { |
1102 | case OSL_NOTIFY_HANDLER: |
		ret = queue_work(kacpi_notify_wq, &dpc->work);
1104 | break; |
1105 | case OSL_GPE_HANDLER: |
1106 | /* |
1107 | * On some machines, a software-initiated SMI causes corruption |
1108 | * unless the SMI runs on CPU 0. An SMI can be initiated by |
1109 | * any AML, but typically it's done in GPE-related methods that |
1110 | * are run via workqueues, so we can avoid the known corruption |
1111 | * cases by always queueing on CPU 0. |
1112 | */ |
		ret = queue_work_on(0, kacpid_wq, &dpc->work);
		break;
	default:
		pr_err("Unsupported os_execute type %d.\n", type);
		goto err;
	}
	if (!ret) {
		pr_err("Unable to queue work\n");
		goto err;
	}

	return AE_OK;

err:
	kfree(dpc);
1128 | return AE_ERROR; |
1129 | } |
1130 | EXPORT_SYMBOL(acpi_os_execute); |
1131 | |
1132 | void acpi_os_wait_events_complete(void) |
1133 | { |
1134 | /* |
1135 | * Make sure the GPE handler or the fixed event handler is not used |
1136 | * on another CPU after removal. |
1137 | */ |
1138 | if (acpi_sci_irq_valid()) |
		synchronize_hardirq(acpi_sci_irq);
1140 | flush_workqueue(kacpid_wq); |
1141 | flush_workqueue(kacpi_notify_wq); |
1142 | } |
1143 | EXPORT_SYMBOL(acpi_os_wait_events_complete); |
1144 | |
1145 | struct acpi_hp_work { |
1146 | struct work_struct work; |
1147 | struct acpi_device *adev; |
1148 | u32 src; |
1149 | }; |
1150 | |
1151 | static void acpi_hotplug_work_fn(struct work_struct *work) |
1152 | { |
1153 | struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work); |
1154 | |
1155 | acpi_os_wait_events_complete(); |
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
1158 | } |
1159 | |
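/*
 * Queue an ACPI hotplug event for deferred handling on the dedicated ordered
 * hotplug workqueue (see the comment below for why kacpid_wq cannot be used).
 */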
1160 | acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src) |
1161 | { |
1162 | struct acpi_hp_work *hpw; |
1163 | |
1164 | acpi_handle_debug(adev->handle, |
1165 | "Scheduling hotplug event %u for deferred handling\n" , |
1166 | src); |
1167 | |
	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
1169 | if (!hpw) |
1170 | return AE_NO_MEMORY; |
1171 | |
1172 | INIT_WORK(&hpw->work, acpi_hotplug_work_fn); |
1173 | hpw->adev = adev; |
1174 | hpw->src = src; |
1175 | /* |
1176 | * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because |
1177 | * the hotplug code may call driver .remove() functions, which may |
1178 | * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush |
1179 | * these workqueues. |
1180 | */ |
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
1183 | return AE_ERROR; |
1184 | } |
1185 | return AE_OK; |
1186 | } |
1187 | |
1188 | bool acpi_queue_hotplug_work(struct work_struct *work) |
1189 | { |
	return queue_work(kacpi_hotplug_wq, work);
1191 | } |
1192 | |
1193 | acpi_status |
1194 | acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle) |
1195 | { |
1196 | struct semaphore *sem = NULL; |
1197 | |
	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
1199 | if (!sem) |
1200 | return AE_NO_MEMORY; |
1201 | |
	sema_init(sem, initial_units);
1203 | |
1204 | *handle = (acpi_handle *) sem; |
1205 | |
1206 | ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n" , |
1207 | *handle, initial_units)); |
1208 | |
1209 | return AE_OK; |
1210 | } |
1211 | |
1212 | /* |
1213 | * TODO: A better way to delete semaphores? Linux doesn't have a |
1214 | * 'delete_semaphore()' function -- may result in an invalid |
1215 | * pointer dereference for non-synchronized consumers. Should |
1216 | * we at least check for blocked threads and signal/cancel them? |
1217 | */ |
1218 | |
1219 | acpi_status acpi_os_delete_semaphore(acpi_handle handle) |
1220 | { |
1221 | struct semaphore *sem = (struct semaphore *)handle; |
1222 | |
1223 | if (!sem) |
1224 | return AE_BAD_PARAMETER; |
1225 | |
1226 | ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n" , handle)); |
1227 | |
1228 | BUG_ON(!list_empty(&sem->wait_list)); |
1229 | kfree(objp: sem); |
1230 | sem = NULL; |
1231 | |
1232 | return AE_OK; |
1233 | } |
1234 | |
1235 | /* |
1236 | * TODO: Support for units > 1? |
1237 | */ |
1238 | acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) |
1239 | { |
1240 | acpi_status status = AE_OK; |
1241 | struct semaphore *sem = (struct semaphore *)handle; |
1242 | long jiffies; |
1243 | int ret = 0; |
1244 | |
1245 | if (!acpi_os_initialized) |
1246 | return AE_OK; |
1247 | |
1248 | if (!sem || (units < 1)) |
1249 | return AE_BAD_PARAMETER; |
1250 | |
1251 | if (units > 1) |
1252 | return AE_SUPPORT; |
1253 | |
1254 | ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n" , |
1255 | handle, units, timeout)); |
1256 | |
1257 | if (timeout == ACPI_WAIT_FOREVER) |
1258 | jiffies = MAX_SCHEDULE_TIMEOUT; |
1259 | else |
		jiffies = msecs_to_jiffies(timeout);
1261 | |
1262 | ret = down_timeout(sem, jiffies); |
1263 | if (ret) |
1264 | status = AE_TIME; |
1265 | |
1266 | if (ACPI_FAILURE(status)) { |
1267 | ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, |
1268 | "Failed to acquire semaphore[%p|%d|%d], %s" , |
1269 | handle, units, timeout, |
1270 | acpi_format_exception(status))); |
1271 | } else { |
1272 | ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, |
1273 | "Acquired semaphore[%p|%d|%d]" , handle, |
1274 | units, timeout)); |
1275 | } |
1276 | |
1277 | return status; |
1278 | } |
1279 | |
1280 | /* |
1281 | * TODO: Support for units > 1? |
1282 | */ |
1283 | acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) |
1284 | { |
1285 | struct semaphore *sem = (struct semaphore *)handle; |
1286 | |
1287 | if (!acpi_os_initialized) |
1288 | return AE_OK; |
1289 | |
1290 | if (!sem || (units < 1)) |
1291 | return AE_BAD_PARAMETER; |
1292 | |
1293 | if (units > 1) |
1294 | return AE_SUPPORT; |
1295 | |
1296 | ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n" , handle, |
1297 | units)); |
1298 | |
1299 | up(sem); |
1300 | |
1301 | return AE_OK; |
1302 | } |
1303 | |
1304 | acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read) |
1305 | { |
1306 | #ifdef ENABLE_DEBUGGER |
1307 | if (acpi_in_debugger) { |
1308 | u32 chars; |
1309 | |
1310 | kdb_read(buffer, buffer_length); |
1311 | |
1312 | /* remove the CR kdb includes */ |
1313 | chars = strlen(buffer) - 1; |
1314 | buffer[chars] = '\0'; |
1315 | } |
1316 | #else |
1317 | int ret; |
1318 | |
1319 | ret = acpi_debugger_read_cmd(buffer, buffer_length); |
1320 | if (ret < 0) |
1321 | return AE_ERROR; |
1322 | if (bytes_read) |
1323 | *bytes_read = ret; |
1324 | #endif |
1325 | |
1326 | return AE_OK; |
1327 | } |
1328 | EXPORT_SYMBOL(acpi_os_get_line); |
1329 | |
1330 | acpi_status acpi_os_wait_command_ready(void) |
1331 | { |
1332 | int ret; |
1333 | |
1334 | ret = acpi_debugger_wait_command_ready(); |
1335 | if (ret < 0) |
1336 | return AE_ERROR; |
1337 | return AE_OK; |
1338 | } |
1339 | |
1340 | acpi_status acpi_os_notify_command_complete(void) |
1341 | { |
1342 | int ret; |
1343 | |
1344 | ret = acpi_debugger_notify_command_complete(); |
1345 | if (ret < 0) |
1346 | return AE_ERROR; |
1347 | return AE_OK; |
1348 | } |
1349 | |
1350 | acpi_status acpi_os_signal(u32 function, void *info) |
1351 | { |
1352 | switch (function) { |
1353 | case ACPI_SIGNAL_FATAL: |
1354 | pr_err("Fatal opcode executed\n" ); |
1355 | break; |
1356 | case ACPI_SIGNAL_BREAKPOINT: |
1357 | /* |
1358 | * AML Breakpoint |
1359 | * ACPI spec. says to treat it as a NOP unless |
1360 | * you are debugging. So if/when we integrate |
1361 | * AML debugger into the kernel debugger its |
1362 | * hook will go here. But until then it is |
1363 | * not useful to print anything on breakpoints. |
1364 | */ |
1365 | break; |
1366 | default: |
1367 | break; |
1368 | } |
1369 | |
1370 | return AE_OK; |
1371 | } |
1372 | |
1373 | static int __init acpi_os_name_setup(char *str) |
1374 | { |
1375 | char *p = acpi_os_name; |
1376 | int count = ACPI_MAX_OVERRIDE_LEN - 1; |
1377 | |
1378 | if (!str || !*str) |
1379 | return 0; |
1380 | |
1381 | for (; count-- && *str; str++) { |
1382 | if (isalnum(*str) || *str == ' ' || *str == ':') |
1383 | *p++ = *str; |
1384 | else if (*str == '\'' || *str == '"') |
1385 | continue; |
1386 | else |
1387 | break; |
1388 | } |
1389 | *p = 0; |
1390 | |
1391 | return 1; |
1392 | |
1393 | } |
1394 | |
1395 | __setup("acpi_os_name=" , acpi_os_name_setup); |
1396 | |
1397 | /* |
1398 | * Disable the auto-serialization of named objects creation methods. |
1399 | * |
1400 | * This feature is enabled by default. It marks the AML control methods |
1401 | * that contain the opcodes to create named objects as "Serialized". |
1402 | */ |
1403 | static int __init acpi_no_auto_serialize_setup(char *str) |
1404 | { |
1405 | acpi_gbl_auto_serialize_methods = FALSE; |
1406 | pr_info("Auto-serialization disabled\n" ); |
1407 | |
1408 | return 1; |
1409 | } |
1410 | |
1411 | __setup("acpi_no_auto_serialize" , acpi_no_auto_serialize_setup); |
1412 | |
/* Check for resource interference between native drivers and ACPI
1414 | * OperationRegions (SystemIO and System Memory only). |
1415 | * IO ports and memory declared in ACPI might be used by the ACPI subsystem |
1416 | * in arbitrary AML code and can interfere with legacy drivers. |
1417 | * acpi_enforce_resources= can be set to: |
1418 | * |
1419 | * - strict (default) (2) |
1420 | * -> further driver trying to access the resources will not load |
1421 | * - lax (1) |
1422 | * -> further driver trying to access the resources will load, but you |
1423 | * get a system message that something might go wrong... |
1424 | * |
1425 | * - no (0) |
1426 | * -> ACPI Operation Region resources will not be registered |
1427 | * |
1428 | */ |
1429 | #define ENFORCE_RESOURCES_STRICT 2 |
1430 | #define ENFORCE_RESOURCES_LAX 1 |
1431 | #define ENFORCE_RESOURCES_NO 0 |
1432 | |
1433 | static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; |
1434 | |
1435 | static int __init acpi_enforce_resources_setup(char *str) |
1436 | { |
1437 | if (str == NULL || *str == '\0') |
1438 | return 0; |
1439 | |
1440 | if (!strcmp("strict" , str)) |
1441 | acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; |
1442 | else if (!strcmp("lax" , str)) |
1443 | acpi_enforce_resources = ENFORCE_RESOURCES_LAX; |
1444 | else if (!strcmp("no" , str)) |
1445 | acpi_enforce_resources = ENFORCE_RESOURCES_NO; |
1446 | |
1447 | return 1; |
1448 | } |
1449 | |
1450 | __setup("acpi_enforce_resources=" , acpi_enforce_resources_setup); |
1451 | |
1452 | /* Check for resource conflicts between ACPI OperationRegions and native |
1453 | * drivers */ |
1454 | int acpi_check_resource_conflict(const struct resource *res) |
1455 | { |
1456 | acpi_adr_space_type space_id; |
1457 | |
1458 | if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) |
1459 | return 0; |
1460 | |
1461 | if (res->flags & IORESOURCE_IO) |
1462 | space_id = ACPI_ADR_SPACE_SYSTEM_IO; |
1463 | else if (res->flags & IORESOURCE_MEM) |
1464 | space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY; |
1465 | else |
1466 | return 0; |
1467 | |
	if (!acpi_check_address_range(space_id, res->start, resource_size(res), 1))
		return 0;

	pr_info("Resource conflict; ACPI support missing from driver?\n");

	if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
		return -EBUSY;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
		pr_notice("Resource conflict: System may be unstable or behave erratically\n");
1478 | |
1479 | return 0; |
1480 | } |
1481 | EXPORT_SYMBOL(acpi_check_resource_conflict); |
1482 | |
1483 | int acpi_check_region(resource_size_t start, resource_size_t n, |
1484 | const char *name) |
1485 | { |
1486 | struct resource res = DEFINE_RES_IO_NAMED(start, n, name); |
1487 | |
1488 | return acpi_check_resource_conflict(&res); |
1489 | } |
1490 | EXPORT_SYMBOL(acpi_check_region); |
1491 | |
1492 | /* |
1493 | * Let drivers know whether the resource checks are effective |
1494 | */ |
1495 | int acpi_resources_are_enforced(void) |
1496 | { |
1497 | return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT; |
1498 | } |
1499 | EXPORT_SYMBOL(acpi_resources_are_enforced); |
1500 | |
1501 | /* |
1502 | * Deallocate the memory for a spinlock. |
1503 | */ |
1504 | void acpi_os_delete_lock(acpi_spinlock handle) |
1505 | { |
1506 | ACPI_FREE(handle); |
1507 | } |
1508 | |
1509 | /* |
1510 | * Acquire a spinlock. |
1511 | * |
1512 | * handle is a pointer to the spinlock_t. |
1513 | */ |
1514 | |
1515 | acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) |
1516 | __acquires(lockp) |
1517 | { |
	spin_lock(lockp);
1519 | return 0; |
1520 | } |
1521 | |
1522 | /* |
1523 | * Release a spinlock. See above. |
1524 | */ |
1525 | |
1526 | void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags not_used) |
1527 | __releases(lockp) |
1528 | { |
	spin_unlock(lockp);
1530 | } |
1531 | |
1532 | #ifndef ACPI_USE_LOCAL_CACHE |
1533 | |
1534 | /******************************************************************************* |
1535 | * |
1536 | * FUNCTION: acpi_os_create_cache |
1537 | * |
1538 | * PARAMETERS: name - Ascii name for the cache |
1539 | * size - Size of each cached object |
1540 | * depth - Maximum depth of the cache (in objects) <ignored> |
1541 | * cache - Where the new cache object is returned |
1542 | * |
1543 | * RETURN: status |
1544 | * |
1545 | * DESCRIPTION: Create a cache object |
1546 | * |
1547 | ******************************************************************************/ |
1548 | |
1549 | acpi_status |
1550 | acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t **cache) |
1551 | { |
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
1553 | if (*cache == NULL) |
1554 | return AE_ERROR; |
1555 | else |
1556 | return AE_OK; |
1557 | } |
1558 | |
1559 | /******************************************************************************* |
1560 | * |
1561 | * FUNCTION: acpi_os_purge_cache |
1562 | * |
1563 | * PARAMETERS: Cache - Handle to cache object |
1564 | * |
1565 | * RETURN: Status |
1566 | * |
1567 | * DESCRIPTION: Free all objects within the requested cache. |
1568 | * |
1569 | ******************************************************************************/ |
1570 | |
1571 | acpi_status acpi_os_purge_cache(acpi_cache_t *cache) |
1572 | { |
	kmem_cache_shrink(cache);
1574 | return AE_OK; |
1575 | } |
1576 | |
1577 | /******************************************************************************* |
1578 | * |
1579 | * FUNCTION: acpi_os_delete_cache |
1580 | * |
1581 | * PARAMETERS: Cache - Handle to cache object |
1582 | * |
1583 | * RETURN: Status |
1584 | * |
1585 | * DESCRIPTION: Free all objects within the requested cache and delete the |
1586 | * cache object. |
1587 | * |
1588 | ******************************************************************************/ |
1589 | |
1590 | acpi_status acpi_os_delete_cache(acpi_cache_t *cache) |
1591 | { |
	kmem_cache_destroy(cache);
1593 | return AE_OK; |
1594 | } |
1595 | |
1596 | /******************************************************************************* |
1597 | * |
1598 | * FUNCTION: acpi_os_release_object |
1599 | * |
1600 | * PARAMETERS: Cache - Handle to cache object |
1601 | * Object - The object to be released |
1602 | * |
1603 | * RETURN: None |
1604 | * |
1605 | * DESCRIPTION: Release an object to the specified cache. If cache is full, |
1606 | * the object is deleted. |
1607 | * |
1608 | ******************************************************************************/ |
1609 | |
1610 | acpi_status acpi_os_release_object(acpi_cache_t *cache, void *object) |
1611 | { |
	kmem_cache_free(cache, object);
1613 | return AE_OK; |
1614 | } |
1615 | #endif |
1616 | |
1617 | static int __init acpi_no_static_ssdt_setup(char *s) |
1618 | { |
1619 | acpi_gbl_disable_ssdt_table_install = TRUE; |
1620 | pr_info("Static SSDT installation disabled\n" ); |
1621 | |
1622 | return 0; |
1623 | } |
1624 | |
1625 | early_param("acpi_no_static_ssdt" , acpi_no_static_ssdt_setup); |
1626 | |
1627 | static int __init acpi_disable_return_repair(char *s) |
1628 | { |
1629 | pr_notice("Predefined validation mechanism disabled\n" ); |
1630 | acpi_gbl_disable_auto_repair = TRUE; |
1631 | |
1632 | return 1; |
1633 | } |
1634 | |
1635 | __setup("acpica_no_return_repair" , acpi_disable_return_repair); |
1636 | |
1637 | acpi_status __init acpi_os_initialize(void) |
1638 | { |
1639 | acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block); |
1640 | acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block); |
1641 | |
1642 | acpi_gbl_xgpe0_block_logical_address = |
1643 | (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block); |
1644 | acpi_gbl_xgpe1_block_logical_address = |
1645 | (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block); |
1646 | |
1647 | if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) { |
1648 | /* |
1649 | * Use acpi_os_map_generic_address to pre-map the reset |
1650 | * register if it's in system memory. |
1651 | */ |
1652 | void *rv; |
1653 | |
1654 | rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register); |
1655 | pr_debug("%s: Reset register mapping %s\n" , __func__, |
1656 | rv ? "successful" : "failed" ); |
1657 | } |
1658 | acpi_os_initialized = true; |
1659 | |
1660 | return AE_OK; |
1661 | } |
1662 | |
1663 | acpi_status __init acpi_os_initialize1(void) |
1664 | { |
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 0);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
1668 | BUG_ON(!kacpid_wq); |
1669 | BUG_ON(!kacpi_notify_wq); |
1670 | BUG_ON(!kacpi_hotplug_wq); |
1671 | acpi_osi_init(); |
1672 | return AE_OK; |
1673 | } |
1674 | |
1675 | acpi_status acpi_os_terminate(void) |
1676 | { |
1677 | if (acpi_irq_handler) { |
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
1680 | } |
1681 | |
1682 | acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block); |
1683 | acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block); |
1684 | acpi_gbl_xgpe0_block_logical_address = 0UL; |
1685 | acpi_gbl_xgpe1_block_logical_address = 0UL; |
1686 | |
1687 | acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block); |
1688 | acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block); |
1689 | |
1690 | if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) |
1691 | acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register); |
1692 | |
	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);
1696 | |
1697 | return AE_OK; |
1698 | } |
1699 | |
1700 | acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, |
1701 | u32 pm1b_control) |
1702 | { |
1703 | int rc = 0; |
1704 | |
1705 | if (__acpi_os_prepare_sleep) |
1706 | rc = __acpi_os_prepare_sleep(sleep_state, |
1707 | pm1a_control, pm1b_control); |
1708 | if (rc < 0) |
1709 | return AE_ERROR; |
1710 | else if (rc > 0) |
1711 | return AE_CTRL_TERMINATE; |
1712 | |
1713 | return AE_OK; |
1714 | } |
1715 | |
1716 | void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, |
1717 | u32 pm1a_ctrl, u32 pm1b_ctrl)) |
1718 | { |
1719 | __acpi_os_prepare_sleep = func; |
1720 | } |
1721 | |
1722 | #if (ACPI_REDUCED_HARDWARE) |
1723 | acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, |
1724 | u32 val_b) |
1725 | { |
1726 | int rc = 0; |
1727 | |
1728 | if (__acpi_os_prepare_extended_sleep) |
1729 | rc = __acpi_os_prepare_extended_sleep(sleep_state, |
1730 | val_a, val_b); |
1731 | if (rc < 0) |
1732 | return AE_ERROR; |
1733 | else if (rc > 0) |
1734 | return AE_CTRL_TERMINATE; |
1735 | |
1736 | return AE_OK; |
1737 | } |
1738 | #else |
1739 | acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, |
1740 | u32 val_b) |
1741 | { |
1742 | return AE_OK; |
1743 | } |
1744 | #endif |
1745 | |
1746 | void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, |
1747 | u32 val_a, u32 val_b)) |
1748 | { |
1749 | __acpi_os_prepare_extended_sleep = func; |
1750 | } |
1751 | |
1752 | acpi_status acpi_os_enter_sleep(u8 sleep_state, |
1753 | u32 reg_a_value, u32 reg_b_value) |
1754 | { |
1755 | acpi_status status; |
1756 | |
1757 | if (acpi_gbl_reduced_hardware) |
		status = acpi_os_prepare_extended_sleep(sleep_state,
							reg_a_value,
							reg_b_value);
	else
		status = acpi_os_prepare_sleep(sleep_state,
					       reg_a_value, reg_b_value);
1764 | return status; |
1765 | } |
1766 | |