// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/rwsem.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification available
 * here to PCI-SIG members:
 * https://members.pcisig.com/wg/PCI-SIG/document/15350
 */
const guid_t pci_acpi_dsm_guid =
	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

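/*
 * A sketch of how this GUID is consumed (see the real call sites in
 * acpi_pci_add_bus() and pci_acpi_optimize_delay() below): a PCI
 * Firmware Spec _DSM function is evaluated against a device handle,
 * e.g.
 *
 *	obj = acpi_evaluate_dsm_typed(handle, &pci_acpi_dsm_guid, 3,
 *				      DSM_PCI_POWER_ON_RESET_DELAY, NULL,
 *				      ACPI_TYPE_INTEGER);
 */
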
#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
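/*
 * Retrieve the Root Complex register block for @adev: evaluate _CRS,
 * keep only the IORESOURCE_MEM entries, and return the first one found.
 */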
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
	struct device *dev = &adev->dev;
	struct resource_entry *entry;
	struct list_head list;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&list);
	flags = IORESOURCE_MEM;
	ret = acpi_dev_get_resources(adev, &list,
				     acpi_dev_filter_resource_type_cb,
				     (void *) flags);
	if (ret < 0) {
		dev_err(dev, "failed to parse _CRS method, error code %d\n",
			ret);
		return ret;
	}

	if (ret == 0) {
		dev_err(dev, "no IO and memory resources present in _CRS\n");
		return -EINVAL;
	}

	entry = list_first_entry(&list, struct resource_entry, node);
	*res = *entry->res;
	acpi_dev_free_resource_list(&list);
	return 0;
}

static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
				 void **retval)
{
	u16 *segment = context;
	unsigned long long uid;
	acpi_status status;

	status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid);
	if (ACPI_FAILURE(status) || uid != *segment)
		return AE_CTRL_DEPTH;

	*(acpi_handle *)retval = handle;
	return AE_CTRL_TERMINATE;
}

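/**
 * acpi_get_rc_resources - look up a Root Complex device and its MMIO range
 * @dev: consumer device, used for diagnostics only
 * @hid: _HID of the ACPI device representing the Root Complex
 * @segment: PCI segment (domain) number, matched against the device's _UID
 * @res: filled in with the first memory resource from the device's _CRS
 *
 * Walk the ACPI namespace for devices with @hid whose _UID matches
 * @segment, then extract the register block from that device's _CRS.
 */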
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
			  struct resource *res)
{
	struct acpi_device *adev;
	acpi_status status;
	acpi_handle handle;
	int ret;

	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "can't find _HID %s device to locate resources\n",
			hid);
		return -ENODEV;
	}

	adev = acpi_fetch_acpi_dev(handle);
	if (!adev)
		return -ENODEV;

	ret = acpi_get_rc_addr(adev, res);
	if (ret) {
		dev_err(dev, "can't get resource from %s\n",
			dev_name(&adev->dev));
		return ret;
	}

	return 0;
}
#endif

phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
	acpi_status status = AE_NOT_EXIST;
	unsigned long long mcfg_addr;

	if (handle)
		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
					       NULL, &mcfg_addr);
	if (ACPI_FAILURE(status))
		return 0;

	return (phys_addr_t)mcfg_addr;
}

/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
	u32 revision;		/* Not present in _HPP */
	u8  cache_line_size;	/* Not applicable to PCIe */
	u8  latency_timer;	/* Not applicable to PCIe */
	u8  enable_serr;
	u8  enable_perr;
};

static struct hpx_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

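/*
 * A revision-1 Type 0 record is a package of six integers: { type (0),
 * revision (1), cache line size, latency timer, enable SERR, enable
 * PERR }; see decode_type0_hpx_record() below.  The defaults above
 * would correspond to the (illustrative) package { 0, 1, 8, 0x40, 0, 0 }.
 */
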
static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
{
	u16 pci_cmd, pci_bctl;

	if (!hpx)
		hpx = &pci_default_type0;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
			 hpx->revision);
		hpx = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpx->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpx->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpx->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpx->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

static acpi_status decode_type0_hpx_record(union acpi_object *record,
					   struct hpx_type0 *hpx0)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 6)
			return AE_ERROR;
		for (i = 2; i < 6; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx0->revision = revision;
		hpx0->cache_line_size = fields[2].integer.value;
		hpx0->latency_timer = fields[3].integer.value;
		hpx0->enable_serr = fields[4].integer.value;
		hpx0->enable_perr = fields[5].integer.value;
		break;
	default:
		pr_warn("%s: Type 0 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

/* _HPX PCI-X Setting Record (Type 1) */
struct hpx_type1 {
	u32 revision;
	u8  max_mem_read;
	u8  avg_max_split;
	u16 tot_max_split;
};

static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
{
	int pos;

	if (!hpx)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return;

	pci_warn(dev, "PCI-X settings not supported\n");
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
					   struct hpx_type1 *hpx1)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 5)
			return AE_ERROR;
		for (i = 2; i < 5; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx1->revision = revision;
		hpx1->max_mem_read = fields[2].integer.value;
		hpx1->avg_max_split = fields[3].integer.value;
		hpx1->tot_max_split = fields[4].integer.value;
		break;
	default:
		pr_warn("%s: Type 1 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

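/*
 * Return true if the Root Port upstream of @dev advertises a Read
 * Completion Boundary of 128 bytes, i.e. PCI_EXP_LNKCTL_RCB is set in
 * its Link Control register.
 */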
static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}

/* _HPX PCI Express Setting Record (Type 2) */
struct hpx_type2 {
	u32 revision;
	u32 unc_err_mask_and;
	u32 unc_err_mask_or;
	u32 unc_err_sever_and;
	u32 unc_err_sever_or;
	u32 cor_err_mask_and;
	u32 cor_err_mask_or;
	u32 adv_err_cap_and;
	u32 adv_err_cap_or;
	u16 pci_exp_devctl_and;
	u16 pci_exp_devctl_or;
	u16 pci_exp_lnkctl_and;
	u16 pci_exp_lnkctl_or;
	u32 sec_unc_err_sever_and;
	u32 sec_unc_err_sever_or;
	u32 sec_unc_err_mask_and;
	u32 sec_unc_err_mask_or;
};

static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
	int pos;
	u32 reg32;

	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCIe settings rev %d not supported\n",
			 hpx->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				   PCI_EXP_DEVCTL_READRQ;
	hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);
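
	/*
	 * Every register update below follows the same AND/OR
	 * discipline: new = (old & mask_and) | mask_or.  Forcing a
	 * field to stay untouched therefore means setting its bits in
	 * the AND mask and clearing them in the OR mask, which is
	 * exactly what the two statements above do for MPS and MRRS.
	 */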

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
					   struct hpx_type2 *hpx2)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 18)
			return AE_ERROR;
		for (i = 2; i < 18; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx2->revision = revision;
		hpx2->unc_err_mask_and = fields[2].integer.value;
		hpx2->unc_err_mask_or = fields[3].integer.value;
		hpx2->unc_err_sever_and = fields[4].integer.value;
		hpx2->unc_err_sever_or = fields[5].integer.value;
		hpx2->cor_err_mask_and = fields[6].integer.value;
		hpx2->cor_err_mask_or = fields[7].integer.value;
		hpx2->adv_err_cap_and = fields[8].integer.value;
		hpx2->adv_err_cap_or = fields[9].integer.value;
		hpx2->pci_exp_devctl_and = fields[10].integer.value;
		hpx2->pci_exp_devctl_or = fields[11].integer.value;
		hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
		hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
		hpx2->sec_unc_err_sever_and = fields[14].integer.value;
		hpx2->sec_unc_err_sever_or = fields[15].integer.value;
		hpx2->sec_unc_err_mask_and = fields[16].integer.value;
		hpx2->sec_unc_err_mask_or = fields[17].integer.value;
		break;
	default:
		pr_warn("%s: Type 2 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

/* _HPX PCI Express Setting Record (Type 3) */
struct hpx_type3 {
	u16 device_type;
	u16 function_type;
	u16 config_space_location;
	u16 pci_exp_cap_id;
	u16 pci_exp_cap_ver;
	u16 pci_exp_vendor_id;
	u16 dvsec_id;
	u16 dvsec_rev;
	u16 match_offset;
	u32 match_mask_and;
	u32 match_value;
	u16 reg_offset;
	u32 reg_mask_and;
	u32 reg_mask_or;
};

enum hpx_type3_dev_type {
	HPX_TYPE_ENDPOINT	= BIT(0),
	HPX_TYPE_LEG_END	= BIT(1),
	HPX_TYPE_RC_END		= BIT(2),
	HPX_TYPE_RC_EC		= BIT(3),
	HPX_TYPE_ROOT_PORT	= BIT(4),
	HPX_TYPE_UPSTREAM	= BIT(5),
	HPX_TYPE_DOWNSTREAM	= BIT(6),
	HPX_TYPE_PCI_BRIDGE	= BIT(7),
	HPX_TYPE_PCIE_BRIDGE	= BIT(8),
};

static u16 hpx3_device_type(struct pci_dev *dev)
{
	u16 pcie_type = pci_pcie_type(dev);
	static const int pcie_to_hpx3_type[] = {
		[PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
		[PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
		[PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
		[PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
		[PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
		[PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
		[PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
		[PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
		[PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
	};

	if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
		return 0;

	return pcie_to_hpx3_type[pcie_type];
}

enum hpx_type3_fn_type {
	HPX_FN_NORMAL		= BIT(0),
	HPX_FN_SRIOV_PHYS	= BIT(1),
	HPX_FN_SRIOV_VIRT	= BIT(2),
};

static u8 hpx3_function_type(struct pci_dev *dev)
{
	if (dev->is_virtfn)
		return HPX_FN_SRIOV_VIRT;
	else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
		return HPX_FN_SRIOV_PHYS;
	else
		return HPX_FN_NORMAL;
}

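/*
 * Bit 4 of the record-supplied capability version is treated as "this
 * version or greater" by the check below.  For example (illustrative
 * values), hpx3_cap_id == 0x12 matches any capability version >= 2,
 * while hpx3_cap_id == 0x2 matches version 2 only.
 */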
static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
{
	u8 cap_ver = hpx3_cap_id & 0xf;

	if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
		return true;
	else if (cap_ver == pcie_cap_id)
		return true;

	return false;
}

enum hpx_type3_cfg_loc {
	HPX_CFG_PCICFG		= 0,
	HPX_CFG_PCIE_CAP	= 1,
	HPX_CFG_PCIE_CAP_EXT	= 2,
	HPX_CFG_VEND_CAP	= 3,
	HPX_CFG_DVSEC		= 4,
	HPX_CFG_MAX,
};

static void program_hpx_type3_register(struct pci_dev *dev,
				       const struct hpx_type3 *reg)
{
	u32 match_reg, write_reg, header, orig_value;
	u16 pos;

	if (!(hpx3_device_type(dev) & reg->device_type))
		return;

	if (!(hpx3_function_type(dev) & reg->function_type))
		return;

	switch (reg->config_space_location) {
	case HPX_CFG_PCICFG:
		pos = 0;
		break;
	case HPX_CFG_PCIE_CAP:
		pos = pci_find_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		break;
	case HPX_CFG_PCIE_CAP_EXT:
		pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		pci_read_config_dword(dev, pos, &header);
		if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
					  reg->pci_exp_cap_ver))
			return;

		break;
	case HPX_CFG_VEND_CAP:
	case HPX_CFG_DVSEC:
	default:
		pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
		return;
	}

	pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);

	if ((match_reg & reg->match_mask_and) != reg->match_value)
		return;

	pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
	orig_value = write_reg;
	write_reg &= reg->reg_mask_and;
	write_reg |= reg->reg_mask_or;

	if (orig_value == write_reg)
		return;

	pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);

	pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x",
		pos, orig_value, write_reg);
}

static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
{
	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	program_hpx_type3_register(dev, hpx);
}

static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
				union acpi_object *reg_fields)
{
	hpx3_reg->device_type = reg_fields[0].integer.value;
	hpx3_reg->function_type = reg_fields[1].integer.value;
	hpx3_reg->config_space_location = reg_fields[2].integer.value;
	hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
	hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
	hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
	hpx3_reg->dvsec_id = reg_fields[6].integer.value;
	hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
	hpx3_reg->match_offset = reg_fields[8].integer.value;
	hpx3_reg->match_mask_and = reg_fields[9].integer.value;
	hpx3_reg->match_value = reg_fields[10].integer.value;
	hpx3_reg->reg_offset = reg_fields[11].integer.value;
	hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
	hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
}

static acpi_status program_type3_hpx_record(struct pci_dev *dev,
					    union acpi_object *record)
{
	union acpi_object *fields = record->package.elements;
	u32 desc_count, expected_length, revision;
	union acpi_object *reg_fields;
	struct hpx_type3 hpx3;
	int i;

	revision = fields[1].integer.value;
	switch (revision) {
	case 1:
		desc_count = fields[2].integer.value;
		expected_length = 3 + desc_count * 14;

		if (record->package.count != expected_length)
			return AE_ERROR;

		for (i = 2; i < expected_length; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;

		for (i = 0; i < desc_count; i++) {
			reg_fields = fields + 3 + i * 14;
			parse_hpx3_register(&hpx3, reg_fields);
			program_hpx_type3(dev, &hpx3);
		}

		break;
	default:
		pr_warn("%s: Type 3 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package, *record, *fields;
	struct hpx_type0 hpx0;
	struct hpx_type1 hpx1;
	struct hpx_type2 hpx2;
	u32 type;
	int i;

	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *)buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE) {
		status = AE_ERROR;
		goto exit;
	}

	for (i = 0; i < package->package.count; i++) {
		record = &package->package.elements[i];
		if (record->type != ACPI_TYPE_PACKAGE) {
			status = AE_ERROR;
			goto exit;
		}

		fields = record->package.elements;
		if (fields[0].type != ACPI_TYPE_INTEGER ||
		    fields[1].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}

		type = fields[0].integer.value;
		switch (type) {
		case 0:
			memset(&hpx0, 0, sizeof(hpx0));
			status = decode_type0_hpx_record(record, &hpx0);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type0(dev, &hpx0);
			break;
		case 1:
			memset(&hpx1, 0, sizeof(hpx1));
			status = decode_type1_hpx_record(record, &hpx1);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type1(dev, &hpx1);
			break;
		case 2:
			memset(&hpx2, 0, sizeof(hpx2));
			status = decode_type2_hpx_record(record, &hpx2);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type2(dev, &hpx2);
			break;
		case 3:
			status = program_type3_hpx_record(dev, record);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		default:
			pr_err("%s: Type %d record not supported\n",
			       __func__, type);
			status = AE_ERROR;
			goto exit;
		}
	}
exit:
	kfree(buffer.pointer);
	return status;
}

static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package, *fields;
	struct hpx_type0 hpx0;
	int i;

	memset(&hpx0, 0, sizeof(hpx0));

	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *) buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE ||
	    package->package.count != 4) {
		status = AE_ERROR;
		goto exit;
	}

	fields = package->package.elements;
	for (i = 0; i < 4; i++) {
		if (fields[i].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}
	}

	hpx0.revision = 1;
	hpx0.cache_line_size = fields[0].integer.value;
	hpx0.latency_timer = fields[1].integer.value;
	hpx0.enable_serr = fields[2].integer.value;
	hpx0.enable_perr = fields[3].integer.value;

	program_hpx_type0(dev, &hpx0);

exit:
	kfree(buffer.pointer);
	return status;
}

/**
 * pci_acpi_program_hp_params - program hotplug parameters from ACPI
 * @dev: the pci_dev for which we want parameters
 */
int pci_acpi_program_hp_params(struct pci_dev *dev)
{
	acpi_status status;
	acpi_handle handle, phandle;
	struct pci_bus *pbus;

	if (acpi_pci_disabled)
		return -ENODEV;

	handle = NULL;
	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
		handle = acpi_pci_get_bridge_handle(pbus);
		if (handle)
			break;
	}

	/*
	 * _HPP settings apply to all child buses, until another _HPP is
	 * encountered.  If we don't find an _HPP for the input pci dev,
	 * look for it in the parent device scope since that would apply to
	 * this pci dev.
	 */
	while (handle) {
		status = acpi_run_hpx(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		status = acpi_run_hpp(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		if (acpi_is_root_bridge(handle))
			break;
		status = acpi_get_parent(handle, &phandle);
		if (ACPI_FAILURE(status))
			break;
		handle = phandle;
	}
	return -ENODEV;
}

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
	const struct pci_host_bridge *host;
	u32 slot_cap;

	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
	if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
		return false;

	if (pcie_ports_native)
		return true;

	host = pci_find_host_bridge(bridge->bus);
	return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
	return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
	struct acpi_device *adev;
	struct acpi_pci_root *root;

	adev = container_of(context, struct acpi_device, wakeup.context);
	root = acpi_driver_data(adev);
	pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(context->dev);

	if (pci_dev->pme_poll)
		pci_dev->pme_poll = false;

	if (pci_dev->current_state == PCI_D3cold) {
		pci_wakeup_event(pci_dev);
		pm_request_resume(&pci_dev->dev);
		return;
	}

	/* Clear PME Status if set. */
	if (pci_dev->pme_support)
		pci_check_pme_status(pci_dev);

	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);

	pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
				     struct pci_dev *pci_dev)
{
	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x",
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * I.e., depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *	choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *	choose highest power _SxD or any lower power
 */

pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
	int acpi_state, d_max;

	if (pdev->no_d3cold || !pdev->d3cold_allowed)
		d_max = ACPI_STATE_D3_HOT;
	else
		d_max = ACPI_STATE_D3_COLD;
	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
	if (acpi_state < 0)
		return PCI_POWER_ERROR;

	switch (acpi_state) {
	case ACPI_STATE_D0:
		return PCI_D0;
	case ACPI_STATE_D1:
		return PCI_D1;
	case ACPI_STATE_D2:
		return PCI_D2;
	case ACPI_STATE_D3_HOT:
		return PCI_D3hot;
	case ACPI_STATE_D3_COLD:
		return PCI_D3cold;
	}
	return PCI_POWER_ERROR;
}

static struct acpi_device *acpi_pci_find_companion(struct device *dev);

void pci_set_acpi_fwnode(struct pci_dev *dev)
{
	if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
		ACPI_COMPANION_SET(&dev->dev,
				   acpi_pci_find_companion(&dev->dev));
}

/**
 * pci_dev_acpi_reset - do a function level reset using _RST method
 * @dev: device to reset
 * @probe: if true, return 0 if device supports _RST
 */
int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
{
	acpi_handle handle = ACPI_HANDLE(&dev->dev);

	if (!handle || !acpi_has_method(handle, "_RST"))
		return -ENOTTY;

	if (probe)
		return 0;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
		pci_warn(dev, "ACPI _RST failed\n");
		return -ENOTTY;
	}

	return 0;
}

bool acpi_pci_power_manageable(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	return adev && acpi_device_power_manageable(adev);
}

bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
	struct pci_dev *rpdev;
	struct acpi_device *adev, *rpadev;
	const union acpi_object *obj;

	if (acpi_pci_disabled || !dev->is_hotplug_bridge)
		return false;

	adev = ACPI_COMPANION(&dev->dev);
	if (adev) {
		/*
		 * If the bridge has _S0W, whether or not it can go into D3
		 * depends on what is returned by that object.  In particular,
		 * if the power state returned by _S0W is D2 or shallower,
		 * entering D3 should not be allowed.
		 */
		if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2)
			return false;

		/*
		 * Otherwise, assume that the bridge can enter D3 so long as it
		 * is power-manageable via ACPI.
		 */
		if (acpi_device_power_manageable(adev))
			return true;
	}

	rpdev = pcie_find_root_port(dev);
	if (!rpdev)
		return false;

	if (rpdev == dev)
		rpadev = adev;
	else
		rpadev = ACPI_COMPANION(&rpdev->dev);

	if (!rpadev)
		return false;

	/*
	 * If the Root Port cannot signal wakeup signals at all, i.e., it
	 * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
	 * events from low-power states including D3hot and D3cold.
	 */
	if (!rpadev->wakeup.flags.valid)
		return false;

	/*
	 * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root
	 * Port to verify whether or not it can signal wakeup from D3.
	 */
	if (rpadev != adev &&
	    acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2)
		return false;

	/*
	 * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
	 * the Port can signal hotplug events while in D3.  We assume any
	 * bridges *below* that Root Port can also signal hotplug events
	 * while in D3.
	 */
	if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3",
				   ACPI_TYPE_INTEGER, &obj) &&
	    obj->integer.value == 1)
		return true;

	return false;
}

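/*
 * Evaluate _REG for the PCI config operation region to let AML know
 * whether config space accesses to this device are currently possible.
 * This is a courtesy to the firmware only; a failure is logged at debug
 * level and otherwise ignored.
 */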
static void acpi_pci_config_space_access(struct pci_dev *dev, bool enable)
{
	int val = enable ? ACPI_REG_CONNECT : ACPI_REG_DISCONNECT;
	int ret = acpi_evaluate_reg(ACPI_HANDLE(&dev->dev),
				    ACPI_ADR_SPACE_PCI_CONFIG, val);
	if (ret)
		pci_dbg(dev, "ACPI _REG %s evaluation failed (%d)\n",
			enable ? "connect" : "disconnect", ret);
}

int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const u8 state_conv[] = {
		[PCI_D0] = ACPI_STATE_D0,
		[PCI_D1] = ACPI_STATE_D1,
		[PCI_D2] = ACPI_STATE_D2,
		[PCI_D3hot] = ACPI_STATE_D3_HOT,
		[PCI_D3cold] = ACPI_STATE_D3_COLD,
	};
	int error;

	/* If the ACPI device has _EJ0, ignore the device */
	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
		return -ENODEV;

	switch (state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
	case PCI_D3cold:
		break;
	default:
		return -EINVAL;
	}

	if (state == PCI_D3cold) {
		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
		    PM_QOS_FLAGS_ALL)
			return -EBUSY;

		/* Notify AML lack of PCI config space availability */
		acpi_pci_config_space_access(dev, false);
	}

	error = acpi_device_set_power(adev, state_conv[state]);
	if (error)
		return error;

	pci_dbg(dev, "power state changed by ACPI to %s\n",
		acpi_power_state_string(adev->power.state));

	/*
	 * Notify AML of PCI config space availability.  Config space is
	 * accessible in all states except D3cold; the only transitions
	 * that change availability are transitions to D3cold and from
	 * D3cold to D0.
	 */
	if (state == PCI_D0)
		acpi_pci_config_space_access(dev, true);

	return 0;
}

pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const pci_power_t state_conv[] = {
		[ACPI_STATE_D0] = PCI_D0,
		[ACPI_STATE_D1] = PCI_D1,
		[ACPI_STATE_D2] = PCI_D2,
		[ACPI_STATE_D3_HOT] = PCI_D3hot,
		[ACPI_STATE_D3_COLD] = PCI_D3cold,
	};
	int state;

	if (!adev || !acpi_device_power_manageable(adev))
		return PCI_UNKNOWN;

	state = adev->power.state;
	if (state == ACPI_STATE_UNKNOWN)
		return PCI_UNKNOWN;

	return state_conv[state];
}

void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	if (adev && acpi_device_power_manageable(adev))
		acpi_device_update_power(adev, NULL);
}

static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
	while (bus->parent) {
		if (acpi_pm_device_can_wakeup(&bus->self->dev))
			return acpi_pm_set_device_wakeup(&bus->self->dev, enable);

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge) {
		if (acpi_pm_device_can_wakeup(bus->bridge))
			return acpi_pm_set_device_wakeup(bus->bridge, enable);
	}
	return 0;
}

int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
	if (acpi_pci_disabled)
		return 0;

	if (acpi_pm_device_can_wakeup(&dev->dev))
		return acpi_pm_set_device_wakeup(&dev->dev, enable);

	return acpi_pci_propagate_wakeup(dev->bus, enable);
}

bool acpi_pci_need_resume(struct pci_dev *dev)
{
	struct acpi_device *adev;

	if (acpi_pci_disabled)
		return false;

	/*
	 * In some cases (e.g., Samsung 305V4A) leaving a bridge in suspend
	 * over system-wide suspend/resume confuses the platform firmware,
	 * so avoid doing that.  According to Section 16.1.6 of ACPI 6.2,
	 * endpoint devices are expected to be in D3 before invoking the S3
	 * entry path from the firmware, so they should not be affected by
	 * this issue.
	 */
	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
		return true;

	adev = ACPI_COMPANION(&dev->dev);
	if (!adev || !acpi_device_power_manageable(adev))
		return false;

	if (adev->wakeup.flags.valid &&
	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
		return true;

	if (acpi_target_system_state() == ACPI_STATE_S0)
		return false;

	return !!adev->power.flags.dsw_present;
}

void acpi_pci_add_bus(struct pci_bus *bus)
{
	union acpi_object *obj;
	struct pci_host_bridge *bridge;

	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
		return;

	acpi_pci_slot_enumerate(bus);
	acpiphp_enumerate_slots(bus);

	/*
	 * For a host bridge, check its _DSM for function 8 and if
	 * that is available, mark it in pci_host_bridge.
	 */
	if (!pci_is_root_bus(bus))
		return;

	obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(bus->bridge),
				      &pci_acpi_dsm_guid, 3,
				      DSM_PCI_POWER_ON_RESET_DELAY, NULL,
				      ACPI_TYPE_INTEGER);
	if (!obj)
		return;

	if (obj->integer.value == 1) {
		bridge = pci_find_host_bridge(bus);
		bridge->ignore_reset_delay = 1;
	}
	ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
	if (acpi_pci_disabled || !bus->bridge)
		return;

	acpiphp_remove_slots(bus);
	acpi_pci_slot_remove(bus);
}

/* ACPI bus type */

static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);
static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);

/**
 * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
 * @func: ACPI companion lookup callback pointer or NULL.
 *
 * Set a special ACPI companion lookup callback for PCI devices whose companion
 * objects in the ACPI namespace have _ADR with non-standard bus-device-function
 * encodings.
 *
 * Return 0 on success or a negative error code on failure (in which case no
 * changes are made).
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback installed by it.
 */
int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
{
	int ret;

	if (!func)
		return -EINVAL;

	down_write(&pci_acpi_companion_lookup_sem);

	if (pci_acpi_find_companion_hook) {
		ret = -EBUSY;
	} else {
		pci_acpi_find_companion_hook = func;
		ret = 0;
	}

	up_write(&pci_acpi_companion_lookup_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);

/**
 * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
 *
 * Clear the special ACPI companion lookup callback previously set by
 * pci_acpi_set_companion_lookup_hook().  Block until the last running instance
 * of the callback returns before clearing it.
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback cleared by it.
 */
void pci_acpi_clear_companion_lookup_hook(void)
{
	down_write(&pci_acpi_companion_lookup_sem);

	pci_acpi_find_companion_hook = NULL;

	up_write(&pci_acpi_companion_lookup_sem);
}
EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);

static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct acpi_device *adev;
	bool check_children;
	u64 addr;

	if (!dev->parent)
		return NULL;

	down_read(&pci_acpi_companion_lookup_sem);

	adev = pci_acpi_find_companion_hook ?
		pci_acpi_find_companion_hook(pci_dev) : NULL;

	up_read(&pci_acpi_companion_lookup_sem);

	if (adev)
		return adev;

	check_children = pci_is_bridge(pci_dev);
	/* Please refer to the ACPI spec for the syntax of _ADR. */
	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
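	/*
	 * E.g. (illustrative values) a devfn for slot 0x1c, function 3
	 * yields addr == 0x001c0003, which would match an ASL Device()
	 * with Name(_ADR, 0x001C0003) under the parent bridge's scope.
	 */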
	adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
				      check_children);

	/*
	 * There may be ACPI device objects in the ACPI namespace that are
	 * children of the device object representing the host bridge, but don't
	 * represent PCI devices.  Both _HID and _ADR may be present for them,
	 * even though that is against the specification (for example, see
	 * Section 6.1 of ACPI 6.3), but in many cases the _ADR returns 0 which
	 * appears to indicate that they should not be taken into consideration
	 * as potential companions of PCI devices on the root bus.
	 *
	 * To catch this special case, disregard the returned device object if
	 * it has a valid _HID, addr is 0 and the PCI device at hand is on the
	 * root bus.
	 */
	if (adev && adev->pnp.type.platform_id && !addr &&
	    pci_is_root_bus(pci_dev->bus))
		return NULL;

	return adev;
}

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
				    acpi_handle handle)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
	int value;
	union acpi_object *obj, *elements;

	if (bridge->ignore_reset_delay)
		pdev->d3cold_delay = 0;

	obj = acpi_evaluate_dsm_typed(handle, &pci_acpi_dsm_guid, 3,
				      DSM_PCI_DEVICE_READINESS_DURATIONS, NULL,
				      ACPI_TYPE_PACKAGE);
	if (!obj)
		return;

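	/*
	 * Function 9 returns a package of five integer durations in
	 * microseconds.  Only two are consumed here: element 0 caps
	 * pdev->d3cold_delay and element 3 caps pdev->d3hot_delay, each
	 * applied only when shorter than the spec-mandated wait.
	 */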
	if (obj->package.count == 5) {
		elements = obj->package.elements;
		if (elements[0].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[0].integer.value / 1000;
			if (value < PCI_PM_D3COLD_WAIT)
				pdev->d3cold_delay = value;
		}
		if (elements[3].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[3].integer.value / 1000;
			if (value < PCI_PM_D3HOT_WAIT)
				pdev->d3hot_delay = value;
		}
	}
	ACPI_FREE(obj);
}

static void pci_acpi_set_external_facing(struct pci_dev *dev)
{
	u8 val;

	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		return;
	if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
		return;

	/*
	 * These root ports expose PCIe (including DMA) outside of the
	 * system.  Everything downstream from them is external.
	 */
	if (val)
		dev->external_facing = 1;
}

void pci_acpi_setup(struct device *dev, struct acpi_device *adev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_acpi_optimize_delay(pci_dev, adev->handle);
	pci_acpi_set_external_facing(pci_dev);
	pci_acpi_add_edr_notifier(pci_dev);

	pci_acpi_add_pm_notifier(adev, pci_dev);
	if (!adev->wakeup.flags.valid)
		return;

	device_set_wakeup_capable(dev, true);
	/*
	 * For bridges that can do D3 we enable wake automatically (as
	 * we do for the power management itself in that case).  The
	 * reason is that the bridge may have additional methods such as
	 * _DSW that need to be called.
	 */
	if (pci_dev->bridge_d3)
		device_wakeup_enable(dev);

	acpi_pci_wakeup(pci_dev, false);
	acpi_device_power_add_dependent(adev, dev);

	if (pci_is_bridge(pci_dev))
		acpi_dev_power_up_children_with_adr(adev);
}

void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_acpi_remove_edr_notifier(pci_dev);
	pci_acpi_remove_pm_notifier(adev);
	if (adev->wakeup.flags.valid) {
		acpi_device_power_remove_dependent(adev, dev);
		if (pci_dev->bridge_d3)
			device_wakeup_disable(dev);

		device_set_wakeup_capable(dev, false);
	}
}

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn: Callback matching a device to a fwnode that identifies a PCI
 *      MSI domain.
 *
 * This should be called by the irqchip driver, which is the parent of
 * the MSI domain, to provide a callback interface for querying the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
	pci_msi_get_fwnode_cb = fn;
}

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus: The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
	struct fwnode_handle *fwnode;

	if (!pci_msi_get_fwnode_cb)
		return NULL;

	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
	if (!fwnode)
		return NULL;

	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
		pci_no_msi();
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
		pcie_no_aspm();
	}

	if (acpi_pci_disabled)
		return 0;

	acpi_pci_slot_init();
	acpiphp_init();

	return 0;
}
arch_initcall(acpi_pci_init);