// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/usb/xhci-sideband.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (!td || !td->start_seg)
		return false;

	xhci_for_each_ring_seg(ring->first_seg, seg) {
		if (seg == td->start_seg)
			return true;
	}

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "timeout_us" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, timeout_us);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}

/*
 * xhci_handshake_check_state - same as xhci_handshake but takes an additional
 * exit_state parameter, and bails out with an error immediately when xhc_state
 * has the exit_state flag set.
 */
int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
			       u32 mask, u32 done, int usec, unsigned int exit_state)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX ||
					xhci->xhc_state & exit_state,
					1, usec);

	if (result == U32_MAX || xhci->xhc_state & exit_state)
		return -ENODEV;

	return ret;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

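	/*
	 * Always mask off the interrupt-enable bits; if the controller is not
	 * already halted, also clear the run/stop bit so it starts halting.
	 */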
	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}

	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
		       temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret) {
		/* clear state flags. Including dying, halted or removing */
		xhci->xhc_state = 0;
		xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
	}

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access. Without this delay, the subsequent HC
	 * register access may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
					 CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct iommu_domain *domain;
	int err, i;
	u64 val;
	u32 intrs;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non-visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
		      ARRAY_SIZE(xhci->run_regs->ir_set));

	for (i = 0; i < intrs; i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			     STS_FATAL, STS_FATAL,
			     XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->iman);
	iman |= IMAN_IE;
	writel(iman, &ir->ir_set->iman);

	/* Read operation to guarantee the write has been flushed from posted buffers */
	readl(&ir->ir_set->iman);
	return 0;
}

int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->iman);
	iman &= ~IMAN_IE;
	writel(iman, &ir->ir_set->iman);

	iman = readl(&ir->ir_set->iman);
	if (iman & IMAN_IP)
		xhci_dbg(xhci, "%s: Interrupt pending\n", __func__);

	return 0;
}

/* interrupt moderation interval imod_interval in nanoseconds */
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
				    u32 imod_interval)
{
	u32 imod;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	/* IMODI value in IMOD register is in 250ns increments */
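	/* e.g. a requested interval of 10000 ns programs 10000 / 250 = 40 into IMODI */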
	imod_interval = umin(imod_interval / 250, IMODI_MASK);

	imod = readl(&ir->ir_set->imod);
	imod &= ~IMODI_MASK;
	imod |= imod_interval;
	writel(imod, &ir->ir_set->imod);

	return 0;
}

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = timer_container_of(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;
	hcd = rhub->hcd;

	if (!hcd)
		return;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance mode detected->port %d",
				       i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Attempting compliance mode recovery");

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0 re-driver
 * that sometimes causes ports behind that hardware to enter compliance mode.
 * The quirk creates a timer that polls the link state of each host controller
 * port every 2 seconds and recovers the port by issuing a Warm reset if
 * Compliance mode is detected; otherwise the port becomes "dead" (no device
 * connections or disconnections will be detected anymore). Because no status
 * event is generated when entering compliance mode (per xhci spec), this quirk
 * is needed on systems that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		       "Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
	    strstr(dmi_product_name, "Z620") ||
	    strstr(dmi_product_name, "Z820") ||
	    strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}

static void xhci_hcd_page_size(struct xhci_hcd *xhci)
{
	u32 page_size;

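	/*
	 * PAGESIZE is a bit field: bit n set means the controller supports a
	 * page size of 2^(n + 12) bytes, so bit 0 is the common 4K size used
	 * as the fallback below.
	 */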
	page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
	if (!is_power_of_2(page_size)) {
		xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
		/* Fallback to 4K page size, since that's common */
		page_size = 1;
	}

	xhci->page_size = page_size << 12;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
		       xhci->page_size >> 10);
}

static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci)
{
	u32 config_reg;
	u32 max_slots;

	max_slots = HCS_MAX_SLOTS(xhci->hcs_params1);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots",
		       max_slots);

	config_reg = readl(&xhci->op_regs->config_reg);
	config_reg &= ~HCS_SLOTS_MASK;
	config_reg |= max_slots;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x",
		       config_reg);
	writel(config_reg, &xhci->op_regs->config_reg);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	dma_addr_t deq_dma;
	u64 crcr;

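	/*
	 * The Command Ring Control register keeps the 64-byte aligned dequeue
	 * pointer in its upper bits and control flags (ring cycle state, stop,
	 * abort, running) in the low bits; preserve the flags and update only
	 * the pointer and the cycle bit.
	 */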
	deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue);
	deq_dma &= CMD_RING_PTR_MASK;

	crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	crcr &= ~CMD_RING_PTR_MASK;
	crcr |= deq_dma;

	crcr &= ~CMD_RING_CYCLE;
	crcr |= xhci->cmd_ring->cycle_state;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting command ring address to 0x%llx", crcr);
	xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring);
}

static void xhci_set_doorbell_ptr(struct xhci_hcd *xhci)
{
	u32 offset;

	offset = readl(&xhci->cap_regs->db_off) & DBOFF_MASK;
	xhci->dba = (void __iomem *)xhci->cap_regs + offset;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Doorbell array is located at offset 0x%x from cap regs base addr", offset);
}

/*
 * Enable USB 3.0 device notifications for function remote wake, which is necessary
 * for allowing USB 3.0 devices to do remote wakeup from U3 (device suspend).
 */
static void xhci_set_dev_notifications(struct xhci_hcd *xhci)
{
	u32 dev_notf;

	dev_notf = readl(&xhci->op_regs->dev_notification);
	dev_notf &= ~DEV_NOTE_MASK;
	dev_notf |= DEV_NOTE_FWAKE;
	writel(dev_notf, &xhci->op_regs->dev_notification);
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Starting %s", __func__);
	spin_lock_init(&xhci->lock);

	INIT_LIST_HEAD(&xhci->cmd_list);
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);
	xhci_hcd_page_size(xhci);
	memset(xhci->devs, 0, MAX_HC_SLOTS * sizeof(*xhci->devs));

	retval = xhci_mem_init(xhci, GFP_KERNEL);
	if (retval)
		return retval;

	/* Set the Number of Device Slots Enabled to the maximum supported value */
	xhci_enable_max_dev_slots(xhci);

	/* Set the address in the Command Ring Control register */
	xhci_set_cmd_ring_deq(xhci);

	/* Set Device Context Base Address Array pointer */
	xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);

	/* Set Doorbell array pointer */
	xhci_set_doorbell_ptr(xhci);

	/* Set USB 3.0 device notifications for function remote wake */
	xhci_set_dev_notifications(xhci);

	/* Initialize the Primary interrupter */
	xhci_add_interrupter(xhci, 0);
	xhci->interrupters[0]->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished %s", __func__);
	return 0;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupters[0];
	unsigned long flags;
	u32 temp;

	/*
	 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
	 * Protect the short window before the host is running with a lock.
	 */
	spin_lock_irqsave(&xhci->lock, flags);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	writel(temp, &xhci->op_regs->command);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
	xhci_enable_interrupter(ir);

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENODEV;
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];

	/*
	 * Start the xHCI host controller running only after the USB 2.0
	 * roothub is set up.
	 */
	hcd->uses_new_polling = 1;
	if (hcd->msi_enabled)
		ir->ip_autoclear = true;

	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	temp_64 &= ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_set_interrupter_moderation(ir, xhci->imod_interval);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
						TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Finished %s for main hcd", __func__);

	xhci_create_dbc_dev(xhci);

	xhci_debugfs_init(xhci);

	if (xhci_has_one_roothub(xhci))
		return xhci_run_finished(xhci);

	set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_remove_dbc_dev(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	spin_unlock_irq(&xhci->lock);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		timer_delete_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	xhci_disable_interrupter(xhci, ir);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_stop completed - status = %x",
		       readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
EXPORT_SYMBOL_GPL(xhci_stop);

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	/* Don't poll the roothubs after shutdown. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	timer_delete_sync(&hcd->rh_timer);

	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		timer_delete_sync(&xhci->shared_hcd->rh_timer);
	}

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/*
	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
	 */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
	    xhci->quirks & XHCI_RESET_TO_DEFAULT)
		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);

	spin_unlock_irq(&xhci->lock);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_shutdown completed - status = %x",
		       readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);

	/* save both primary and all secondary interrupters */
	/* FIXME: should we lock to prevent a race with removal of a secondary interrupter? */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		ir->s3_erst_size = readl(&ir->ir_set->erst_size);
		ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
		ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
		ir->s3_iman = readl(&ir->ir_set->iman);
		ir->s3_imod = readl(&ir->ir_set->imod);
	}
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);

	/* FIXME: should we lock to protect against freeing of interrupters? */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		writel(ir->s3_erst_size, &ir->ir_set->erst_size);
		xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
		xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
		writel(ir->s3_iman, &ir->ir_set->iman);
		writel(ir->s3_imod, &ir->ir_set->imod);
	}
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	xhci_for_each_ring_seg(ring->first_seg, seg) {
		/* erase all TRBs before the link */
		memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		/* clear link cycle bit */
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
	}

	xhci_initialize_ring_info(ring);
	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to 0U).
 * Internal wake causes immediate xHCI wake after suspend. The PORT_CSC write
 * done at enumeration clears this wake, so force one here as well for
 * unconnected ports.
 */
static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
				       struct xhci_hub *rhub,
				       bool do_wakeup)
{
	unsigned long flags;
	u32 t1, t2, portsc;
	int i;

	spin_lock_irqsave(&xhci->lock, flags);

	for (i = 0; i < rhub->num_ports; i++) {
		portsc = readl(rhub->ports[i]->addr);
		t1 = xhci_port_state_to_neutral(portsc);
		t2 = t1;

		/* clear wake bits if do_wake is not set */
		if (!do_wakeup)
			t2 &= ~PORT_WAKE_BITS;

		/* Don't touch csc bit if connected or connect change is set */
		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
			t2 |= PORT_CSC;

		if (t1 != t2) {
			writel(t2, rhub->ports[i]->addr);
			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
				 rhub->hcd->self.busnum, i + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC * 2;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	u32 res;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
	    (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

	if (!HCD_HW_ACCESSIBLE(hcd))
		return 0;

	xhci_dbc_suspend(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	timer_delete_sync(&hcd->rh_timer);
	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		timer_delete_sync(&xhci->shared_hcd->rh_timer);
	}

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			   STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	xhci->broken_suspend = 0;
	if (xhci_handshake(&xhci->op_regs->status,
			   STS_SAVE, 0, 20 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the
		 * SSS bit of USBSTS, so when the driver polls for the
		 * xHC to clear BIT(8) it never happens; the driver then
		 * assumes the controller is not responding and times out.
		 * To work around this, check that the SRE and HCE bits are
		 * not set (as per xhci Section 5.4.2) and bypass the timeout.
		 */
		res = readl(&xhci->op_regs->status);
		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
		    (((res & STS_SRE) == 0) &&
		     ((res & STS_HCE) == 0))) {
			xhci->broken_suspend = 1;
		} else {
			xhci_warn(xhci, "WARN: xHC save state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		timer_delete_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	int retval = 0;
	bool comp_timer_running = false;
	bool pending_portevent = false;
	bool suspended_usb3_devs = false;

	if (!hcd->state)
		return 0;

	/*
	 * Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);

	if (xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
		power_lost = true;

	if (!power_lost) {
		/*
		 * Some controllers might lose power during suspend, so wait
		 * for controller not ready bit to clear, just as in xHC init.
		 */
		retval = xhci_handshake(&xhci->op_regs->status,
					STS_CNR, 0, 10 * 1000 * 1000);
		if (retval) {
			xhci_warn(xhci, "Controller not ready at resume %d\n",
				  retval);
			spin_unlock_irq(&xhci->lock);
			return retval;
		}
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore saved state by setting the CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the controller
		 * restore, so set the timeout to 100 ms. The xHCI specification
		 * doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
				   STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}

	temp = readl(&xhci->op_regs->status);

	/* re-initialize the HC on Restore Error, or Host Controller Error */
	if ((temp & (STS_SRE | STS_HCE)) &&
	    !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
		if (!power_lost)
			xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
		power_lost = true;
	}

	if (power_lost) {
		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
		    !(xhci_all_ports_seen_u0(xhci))) {
			timer_delete_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		if (xhci->shared_hcd)
			usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		xhci_disable_interrupter(xhci, xhci->interrupters[0]);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			 readl(&xhci->op_regs->status));

		/*
		 * The USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd);
		if (!retval && xhci->shared_hcd) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(xhci->shared_hcd);
		}
		if (retval)
			return retval;

		/*
		 * Resume roothubs unconditionally as PORTSC change bits are not
		 * immediately visible after xHC reset
		 */
		hcd->state = HC_STATE_SUSPENDED;

		if (xhci->shared_hcd) {
			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
			usb_hcd_resume_root_hub(xhci->shared_hcd);
		}
		usb_hcd_resume_root_hub(hcd);

		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		       0, 250 * 1000);

	/*
	 * step 5: walk topology and initialize portsc, portpmsc and portli;
	 * this is done in bus_resume
	 */

	/*
	 * step 6: restart each of the previously Running endpoints by ringing
	 * their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

	if (retval == 0) {
		/*
		 * Resume roothubs only if there are pending events.
		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
		 * the first wake signalling failed, give it that chance if
		 * there are suspended USB 3 devices.
		 */
		if (xhci->usb3_rhub.bus_state.suspended_ports ||
		    xhci->usb3_rhub.bus_state.bus_suspended)
			suspended_usb3_devs = true;

		pending_portevent = xhci_pending_portevent(xhci);

		if (suspended_usb3_devs && !pending_portevent && is_auto_resume) {
			msleep(120);
			pending_portevent = xhci_pending_portevent(xhci);
		}

		if (pending_portevent) {
			if (xhci->shared_hcd)
				usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}
done:
	/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * needs to be reinitialized after every system resume, since ports may
	 * suffer the Compliance Mode issue again regardless of whether they
	 * entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	if (xhci->shared_hcd) {
		set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		usb_hcd_poll_rh_status(xhci->shared_hcd);
	}
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
	void *temp;
	int ret = 0;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf_len = urb->transfer_buffer_length;

	temp = kzalloc_node(buf_len, GFP_ATOMIC,
			    dev_to_node(hcd->self.sysdev));
	if (!temp)
		return -ENOMEM;

	if (usb_urb_dir_out(urb))
		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
				   temp, buf_len, 0);

	urb->transfer_buffer = temp;
	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   dir);

	if (dma_mapping_error(hcd->self.sysdev,
			      urb->transfer_dma)) {
		ret = -EAGAIN;
		kfree(temp);
	} else {
		urb->transfer_flags |= URB_DMA_MAP_SINGLE;
	}

	return ret;
}

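/*
 * Hosts with XHCI_SG_TRB_CACHE_SIZE_QUIRK can only cache a limited number of
 * TRBs per transfer. As a rough reading of the quirk: if the scatterlist
 * entries beyond that cache window might not carry at least one full max
 * packet, fall back to a single bounce buffer (xhci_map_temp_buffer()) instead
 * of mapping the scatterlist directly.
 */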
static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
					  struct urb *urb)
{
	bool ret = false;
	unsigned int i;
	unsigned int len = 0;
	unsigned int trb_size;
	unsigned int max_pkt;
	struct scatterlist *sg;
	struct scatterlist *tail_sg;

	tail_sg = urb->sg;
	max_pkt = usb_endpoint_maxp(&urb->ep->desc);

	if (!urb->num_sgs)
		return ret;

	if (urb->dev->speed >= USB_SPEED_SUPER)
		trb_size = TRB_CACHE_SIZE_SS;
	else
		trb_size = TRB_CACHE_SIZE_HS;

	if (urb->transfer_buffer_length != 0 &&
	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			len = len + sg->length;
			if (i > trb_size - 2) {
				len = len - tail_sg->length;
				if (len < max_pkt) {
					ret = true;
					break;
				}

				tail_sg = sg_next(tail_sg);
			}
		}
	}
	return ret;
}

static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
	unsigned int len;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_len = urb->transfer_buffer_length;

	if (IS_ENABLED(CONFIG_HAS_DMA) &&
	    (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		dma_unmap_single(hcd->self.sysdev,
				 urb->transfer_dma,
				 urb->transfer_buffer_length,
				 dir);

	if (usb_urb_dir_in(urb)) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
					   urb->transfer_buffer,
					   buf_len,
					   0);
		if (len != buf_len) {
			xhci_dbg(hcd_to_xhci(hcd),
				 "Copy from tmp buf to urb sg list failed\n");
			urb->actual_length = len;
		}
	}
	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;
}

/*
 * Bypass the DMA mapping if the URB is suitable for Immediate Transfer (IDT):
 * we'll copy the actual data into the TRB address register. This is limited to
 * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
 * >= 8 bytes. If suitable for IDT, only one Transfer TRB per TD is allowed.
 */
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);

	if (xhci_urb_suitable_for_idt(urb))
		return 0;

	if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
		if (xhci_urb_temp_buffer_required(hcd, urb))
			return xhci_map_temp_buffer(hcd, urb);
	}
	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct xhci_hcd *xhci;
	bool unmap_temp_buf = false;

	xhci = hcd_to_xhci(hcd);

	if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		unmap_temp_buf = true;

	if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
		xhci_unmap_temp_buf(hcd, urb);
	else
		usb_hcd_unmap_urb_for_dma(hcd, urb);
}

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
 * @desc: USB endpoint descriptor to determine index for
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc) * 2);
	else
		index = (unsigned int) (usb_endpoint_num(desc) * 2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);

/*
 * The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the xHCI endpoint index.
 */
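/*
 * For example, ep_index 1 maps back to endpoint 1 OUT (0x01) and ep_index 2 to
 * endpoint 1 IN (0x81), mirroring xhci_get_endpoint_index() above.
 */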
static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}

/*
 * Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
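/* e.g. endpoint index 2 (ep 1 IN) maps to bit 3 of the add/drop context flags */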
1493 | static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) |
1494 | { |
1495 | return 1 << (xhci_get_endpoint_index(desc) + 1); |
1496 | } |
1497 | |
1498 | /* Compute the last valid endpoint context index. Basically, this is the |
1499 | * endpoint index plus one. For slot contexts with more than valid endpoint, |
1500 | * we find the most significant bit set in the added contexts flags. |
1501 | * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000 |
1502 | * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one. |
1503 | */ |
1504 | unsigned int xhci_last_valid_endpoint(u32 added_ctxs) |
1505 | { |
1506 | return fls(x: added_ctxs) - 1; |
1507 | } |
1508 | |
1509 | /* Returns 1 if the arguments are OK; |
1510 | * returns 0 this is a root hub; returns -EINVAL for NULL pointers. |
1511 | */ |
1512 | static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, |
1513 | struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, |
1514 | const char *func) { |
1515 | struct xhci_hcd *xhci; |
1516 | struct xhci_virt_device *virt_dev; |
1517 | |
1518 | if (!hcd || (check_ep && !ep) || !udev) { |
1519 | pr_debug("xHCI %s called with invalid args\n", func); |
1520 | return -EINVAL; |
1521 | } |
1522 | if (!udev->parent) { |
1523 | pr_debug("xHCI %s called for root hub\n", func); |
1524 | return 0; |
1525 | } |
1526 | |
1527 | xhci = hcd_to_xhci(hcd); |
1528 | if (check_virt_dev) { |
1529 | if (!udev->slot_id || !xhci->devs[udev->slot_id]) { |
1530 | xhci_dbg(xhci, "xHCI %s called with unaddressed device\n", |
1531 | func); |
1532 | return -EINVAL; |
1533 | } |
1534 | |
1535 | virt_dev = xhci->devs[udev->slot_id]; |
1536 | if (virt_dev->udev != udev) { |
1537 | xhci_dbg(xhci, "xHCI %s called with udev and " |
1538 | "virt_dev does not match\n", func); |
1539 | return -EINVAL; |
1540 | } |
1541 | } |
1542 | |
1543 | if (xhci->xhc_state & XHCI_STATE_HALTED) |
1544 | return -ENODEV; |
1545 | |
1546 | return 1; |
1547 | } |
1548 | |
1549 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, |
1550 | struct usb_device *udev, struct xhci_command *command, |
1551 | bool ctx_change, bool must_succeed); |
1552 | |
1553 | /* |
1554 | * Full speed devices may have a max packet size greater than 8 bytes, but the |
1555 | * USB core doesn't know that until it reads the first 8 bytes of the |
1556 | * descriptor. If the usb_device's max packet size changes after that point, |
1557 | * we need to issue an evaluate context command and wait on it. |
1558 | */ |
1559 | static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev) |
1560 | { |
1561 | struct xhci_input_control_ctx *ctrl_ctx; |
1562 | struct xhci_ep_ctx *ep_ctx; |
1563 | struct xhci_command *command; |
1564 | int max_packet_size; |
1565 | int hw_max_packet_size; |
1566 | int ret = 0; |
1567 | |
1568 | ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0); |
1569 | hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); |
1570 | max_packet_size = usb_endpoint_maxp(&vdev->udev->ep0.desc); |
1571 | |
1572 | if (hw_max_packet_size == max_packet_size) |
1573 | return 0; |
1574 | |
1575 | switch (max_packet_size) { |
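| /* |
|  * 8, 16, 32 and 64 bytes cover the legal ep0 sizes for LS/FS/HS devices; |
|  * 9 is most likely here for the SuperSpeed exponent encoding, where a |
|  * bMaxPacketSize0 of 9 means 2^9 = 512 bytes. |
|  */ |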
1576 | case 8: case 16: case 32: case 64: case 9: |
1577 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
1578 | "Max Packet Size for ep 0 changed."); |
1579 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
1580 | "Max packet size in usb_device = %d", |
1581 | max_packet_size); |
1582 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
1583 | "Max packet size in xHCI HW = %d", |
1584 | hw_max_packet_size); |
1585 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
1586 | "Issuing evaluate context command."); |
1587 | |
1588 | command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
1589 | if (!command) |
1590 | return -ENOMEM; |
1591 | |
1592 | command->in_ctx = vdev->in_ctx; |
1593 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
1594 | if (!ctrl_ctx) { |
1595 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
1596 | __func__); |
1597 | ret = -ENOMEM; |
1598 | break; |
1599 | } |
1600 | /* Set up the modified control endpoint 0 */ |
1601 | xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0); |
1602 | |
1603 | ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0); |
1604 | ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ |
1605 | ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); |
1606 | ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); |
1607 | |
1608 | ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG); |
1609 | ctrl_ctx->drop_flags = 0; |
1610 | |
1611 | ret = xhci_configure_endpoint(xhci, vdev->udev, command, |
1612 | true, false); |
1613 | /* Clean up the input context for later use by bandwidth functions */ |
1614 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG); |
1615 | break; |
1616 | default: |
1617 | dev_dbg(&vdev->udev->dev, "incorrect max packet size %d for ep0\n", |
1618 | max_packet_size); |
1619 | return -EINVAL; |
1620 | } |
1621 | |
1622 | kfree(command->completion); |
1623 | kfree(command); |
1624 | |
1625 | return ret; |
1626 | } |
1627 | |
1628 | /* |
1629 | * non-error returns are a promise to giveback() the urb later |
1630 | * we drop ownership so next owner (or urb unlink) can get it |
1631 | */ |
1632 | static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) |
1633 | { |
1634 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
1635 | unsigned long flags; |
1636 | int ret = 0; |
1637 | unsigned int slot_id, ep_index; |
1638 | unsigned int *ep_state; |
1639 | struct urb_priv *urb_priv; |
1640 | int num_tds; |
1641 | |
1642 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
1643 | |
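| /* |
|  * Bulk OUT URBs with URB_ZERO_PACKET set and a length that is an exact |
|  * multiple of the endpoint's max packet size need one extra TD for the |
|  * trailing zero-length packet. |
|  */ |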
1644 | if (usb_endpoint_xfer_isoc(&urb->ep->desc)) |
1645 | num_tds = urb->number_of_packets; |
1646 | else if (usb_endpoint_is_bulk_out(&urb->ep->desc) && |
1647 | urb->transfer_buffer_length > 0 && |
1648 | urb->transfer_flags & URB_ZERO_PACKET && |
1649 | !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc))) |
1650 | num_tds = 2; |
1651 | else |
1652 | num_tds = 1; |
1653 | |
1654 | urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags); |
1655 | if (!urb_priv) |
1656 | return -ENOMEM; |
1657 | |
1658 | urb_priv->num_tds = num_tds; |
1659 | urb_priv->num_tds_done = 0; |
1660 | urb->hcpriv = urb_priv; |
1661 | |
1662 | trace_xhci_urb_enqueue(urb); |
1663 | |
1664 | spin_lock_irqsave(&xhci->lock, flags); |
1665 | |
1666 | ret = xhci_check_args(hcd, urb->dev, urb->ep, |
1667 | true, true, __func__); |
1668 | if (ret <= 0) { |
1669 | ret = ret ? ret : -EINVAL; |
1670 | goto free_priv; |
1671 | } |
1672 | |
1673 | slot_id = urb->dev->slot_id; |
1674 | |
1675 | if (!HCD_HW_ACCESSIBLE(hcd)) { |
1676 | ret = -ESHUTDOWN; |
1677 | goto free_priv; |
1678 | } |
1679 | |
1680 | if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { |
1681 | xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n"); |
1682 | ret = -ENODEV; |
1683 | goto free_priv; |
1684 | } |
1685 | |
1686 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
1687 | xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n", |
1688 | urb->ep->desc.bEndpointAddress, urb); |
1689 | ret = -ESHUTDOWN; |
1690 | goto free_priv; |
1691 | } |
1692 | |
1693 | ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; |
1694 | |
1695 | if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) { |
1696 | xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n", |
1697 | *ep_state); |
1698 | ret = -EINVAL; |
1699 | goto free_priv; |
1700 | } |
1701 | if (*ep_state & EP_SOFT_CLEAR_TOGGLE) { |
1702 | xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n"); |
1703 | ret = -EINVAL; |
1704 | goto free_priv; |
1705 | } |
1706 | |
1707 | switch (usb_endpoint_type(&urb->ep->desc)) { |
1708 | |
1709 | case USB_ENDPOINT_XFER_CONTROL: |
1710 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, |
1711 | slot_id, ep_index); |
1712 | break; |
1713 | case USB_ENDPOINT_XFER_BULK: |
1714 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, |
1715 | slot_id, ep_index); |
1716 | break; |
1717 | case USB_ENDPOINT_XFER_INT: |
1718 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, |
1719 | slot_id, ep_index); |
1720 | break; |
1721 | case USB_ENDPOINT_XFER_ISOC: |
1722 | ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, |
1723 | slot_id, ep_index); |
1724 | } |
1725 | |
1726 | if (ret) { |
1727 | free_priv: |
1728 | xhci_urb_free_priv(urb_priv); |
1729 | urb->hcpriv = NULL; |
1730 | } |
1731 | spin_unlock_irqrestore(&xhci->lock, flags); |
1732 | return ret; |
1733 | } |
1734 | |
1735 | /* |
1736 | * Remove the URB's TD from the endpoint ring. This may cause the HC to stop |
1737 | * USB transfers, potentially stopping in the middle of a TRB buffer. The HC |
1738 | * should pick up where it left off in the TD, unless a Set Transfer Ring |
1739 | * Dequeue Pointer is issued. |
1740 | * |
1741 | * The TRBs that make up the buffers for the canceled URB will be "removed" from |
1742 | * the ring. Since the ring is a contiguous structure, they can't be physically |
1743 | * removed. Instead, there are two options: |
1744 | * |
1745 | * 1) If the HC is in the middle of processing the URB to be canceled, we |
1746 | * simply move the ring's dequeue pointer past those TRBs using the Set |
1747 | * Transfer Ring Dequeue Pointer command. This will be the common case, |
1748 | * when drivers timeout on the last submitted URB and attempt to cancel. |
1749 | * |
1750 | * 2) If the HC is in the middle of a different TD, we turn the TRBs into a |
1751 | * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The |
1752 | * HC will need to invalidate any TRBs it has cached after the stop |
1753 | * endpoint command, as noted in the xHCI 0.95 errata. |
1754 | * |
1755 | * 3) The TD may have completed by the time the Stop Endpoint Command |
1756 | * completes, so software needs to handle that case too. |
1757 | * |
1758 | * This function should protect against the TD enqueueing code ringing the |
1759 | * doorbell while this code is waiting for a Stop Endpoint command to complete. |
1760 | * It also needs to account for multiple cancellations happening at the same |
1761 | * time for the same endpoint. |
1762 | * |
1763 | * Note that this function can be called in any context, or so says |
1764 | * usb_hcd_unlink_urb() |
1765 | */ |
1766 | static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) |
1767 | { |
1768 | unsigned long flags; |
1769 | int ret, i; |
1770 | u32 temp; |
1771 | struct xhci_hcd *xhci; |
1772 | struct urb_priv *urb_priv; |
1773 | struct xhci_td *td; |
1774 | unsigned int ep_index; |
1775 | struct xhci_ring *ep_ring; |
1776 | struct xhci_virt_ep *ep; |
1777 | struct xhci_command *command; |
1778 | struct xhci_virt_device *vdev; |
1779 | |
1780 | xhci = hcd_to_xhci(hcd); |
1781 | spin_lock_irqsave(&xhci->lock, flags); |
1782 | |
1783 | trace_xhci_urb_dequeue(urb); |
1784 | |
1785 | /* Make sure the URB hasn't completed or been unlinked already */ |
1786 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); |
1787 | if (ret) |
1788 | goto done; |
1789 | |
1790 | /* give back URB now if we can't queue it for cancel */ |
1791 | vdev = xhci->devs[urb->dev->slot_id]; |
1792 | urb_priv = urb->hcpriv; |
1793 | if (!vdev || !urb_priv) |
1794 | goto err_giveback; |
1795 | |
1796 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
1797 | ep = &vdev->eps[ep_index]; |
1798 | ep_ring = xhci_urb_to_transfer_ring(xhci, urb); |
1799 | if (!ep || !ep_ring) |
1800 | goto err_giveback; |
1801 | |
1802 | /* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */ |
1803 | temp = readl(&xhci->op_regs->status); |
1804 | if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) { |
1805 | xhci_hc_died(xhci); |
1806 | goto done; |
1807 | } |
1808 | |
1809 | /* |
1810 | * check ring is not re-allocated since URB was enqueued. If it is, then |
1811 | * make sure none of the ring related pointers in this URB private data |
1812 | * are touched, such as td_list, otherwise we overwrite freed data |
1813 | */ |
1814 | if (!td_on_ring(&urb_priv->td[0], ep_ring)) { |
1815 | xhci_err(xhci, "Canceled URB td not found on endpoint ring"); |
1816 | for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) { |
1817 | td = &urb_priv->td[i]; |
1818 | if (!list_empty(&td->cancelled_td_list)) |
1819 | list_del_init(&td->cancelled_td_list); |
1820 | } |
1821 | goto err_giveback; |
1822 | } |
1823 | |
1824 | if (xhci->xhc_state & XHCI_STATE_HALTED) { |
1825 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
1826 | "HC halted, freeing TD manually."); |
1827 | for (i = urb_priv->num_tds_done; |
1828 | i < urb_priv->num_tds; |
1829 | i++) { |
1830 | td = &urb_priv->td[i]; |
1831 | if (!list_empty(&td->td_list)) |
1832 | list_del_init(&td->td_list); |
1833 | if (!list_empty(&td->cancelled_td_list)) |
1834 | list_del_init(&td->cancelled_td_list); |
1835 | } |
1836 | goto err_giveback; |
1837 | } |
1838 | |
1839 | i = urb_priv->num_tds_done; |
1840 | if (i < urb_priv->num_tds) |
1841 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
1842 | "Cancel URB %p, dev %s, ep 0x%x, " |
1843 | "starting at offset 0x%llx", |
1844 | urb, urb->dev->devpath, |
1845 | urb->ep->desc.bEndpointAddress, |
1846 | (unsigned long long) xhci_trb_virt_to_dma( |
1847 | urb_priv->td[i].start_seg, |
1848 | urb_priv->td[i].start_trb)); |
1849 | |
1850 | for (; i < urb_priv->num_tds; i++) { |
1851 | td = &urb_priv->td[i]; |
1852 | /* TD can already be on cancelled list if ep halted on it */ |
1853 | if (list_empty(&td->cancelled_td_list)) { |
1854 | td->cancel_status = TD_DIRTY; |
1855 | list_add_tail(&td->cancelled_td_list, |
1856 | &ep->cancelled_td_list); |
1857 | } |
1858 | } |
1859 | |
1860 | /* These completion handlers will sort out cancelled TDs for us */ |
1861 | if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) { |
1862 | xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n", |
1863 | urb->dev->slot_id, ep_index, ep->ep_state); |
1864 | goto done; |
1865 | } |
1866 | |
1867 | /* In this case no commands are pending but the endpoint is stopped */ |
1868 | if (ep->ep_state & EP_CLEARING_TT) { |
1869 | /* and cancelled TDs can be given back right away */ |
1870 | xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n", |
1871 | urb->dev->slot_id, ep_index, ep->ep_state); |
1872 | xhci_process_cancelled_tds(ep); |
1873 | } else { |
1874 | /* Otherwise, queue a new Stop Endpoint command */ |
1875 | command = xhci_alloc_command(xhci, false, GFP_ATOMIC); |
1876 | if (!command) { |
1877 | ret = -ENOMEM; |
1878 | goto done; |
1879 | } |
1880 | ep->stop_time = jiffies; |
1881 | ep->ep_state |= EP_STOP_CMD_PENDING; |
1882 | xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, |
1883 | ep_index, 0); |
1884 | xhci_ring_cmd_db(xhci); |
1885 | } |
1886 | done: |
1887 | spin_unlock_irqrestore(&xhci->lock, flags); |
1888 | return ret; |
1889 | |
1890 | err_giveback: |
1891 | if (urb_priv) |
1892 | xhci_urb_free_priv(urb_priv); |
1893 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
1894 | spin_unlock_irqrestore(&xhci->lock, flags); |
1895 | usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); |
1896 | return ret; |
1897 | } |
1898 | |
1899 | /* Drop an endpoint from a new bandwidth configuration for this device. |
1900 | * Only one call to this function is allowed per endpoint before |
1901 | * check_bandwidth() or reset_bandwidth() must be called. |
1902 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
1903 | * add the endpoint to the schedule with possibly new parameters denoted by a |
1904 | * different endpoint descriptor in usb_host_endpoint. |
1905 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
1906 | * not allowed. |
1907 | * |
1908 | * The USB core will not allow URBs to be queued to an endpoint that is being |
1909 | * disabled, so there's no need for mutual exclusion to protect |
1910 | * the xhci->devs[slot_id] structure. |
1911 | */ |
1912 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
1913 | struct usb_host_endpoint *ep) |
1914 | { |
1915 | struct xhci_hcd *xhci; |
1916 | struct xhci_container_ctx *in_ctx, *out_ctx; |
1917 | struct xhci_input_control_ctx *ctrl_ctx; |
1918 | unsigned int ep_index; |
1919 | struct xhci_ep_ctx *ep_ctx; |
1920 | u32 drop_flag; |
1921 | u32 new_add_flags, new_drop_flags; |
1922 | int ret; |
1923 | |
1924 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
1925 | if (ret <= 0) |
1926 | return ret; |
1927 | xhci = hcd_to_xhci(hcd); |
1928 | if (xhci->xhc_state & XHCI_STATE_DYING) |
1929 | return -ENODEV; |
1930 | |
1931 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
1932 | drop_flag = xhci_get_endpoint_flag(&ep->desc); |
1933 | if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { |
1934 | xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", |
1935 | __func__, drop_flag); |
1936 | return 0; |
1937 | } |
1938 | |
1939 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
1940 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; |
1941 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); |
1942 | if (!ctrl_ctx) { |
1943 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
1944 | __func__); |
1945 | return 0; |
1946 | } |
1947 | |
1948 | ep_index = xhci_get_endpoint_index(&ep->desc); |
1949 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
1950 | /* If the HC already knows the endpoint is disabled, |
1951 | * or the HCD has noted it is disabled, ignore this request |
1952 | */ |
1953 | if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) || |
1954 | le32_to_cpu(ctrl_ctx->drop_flags) & |
1955 | xhci_get_endpoint_flag(&ep->desc)) { |
1956 | /* Do not warn when called after a usb_device_reset */ |
1957 | if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) |
1958 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", |
1959 | __func__, ep); |
1960 | return 0; |
1961 | } |
1962 | |
1963 | ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); |
1964 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
1965 | |
1966 | ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); |
1967 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
1968 | |
1969 | xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index); |
1970 | |
1971 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); |
1972 | |
1973 | xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", |
1974 | (unsigned int) ep->desc.bEndpointAddress, |
1975 | udev->slot_id, |
1976 | (unsigned int) new_drop_flags, |
1977 | (unsigned int) new_add_flags); |
1978 | return 0; |
1979 | } |
1980 | EXPORT_SYMBOL_GPL(xhci_drop_endpoint); |
1981 | |
1982 | /* Add an endpoint to a new possible bandwidth configuration for this device. |
1983 | * Only one call to this function is allowed per endpoint before |
1984 | * check_bandwidth() or reset_bandwidth() must be called. |
1985 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
1986 | * add the endpoint to the schedule with possibly new parameters denoted by a |
1987 | * different endpoint descriptor in usb_host_endpoint. |
1988 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
1989 | * not allowed. |
1990 | * |
1991 | * The USB core will not allow URBs to be queued to an endpoint until the |
1992 | * configuration or alt setting is installed in the device, so there's no need |
1993 | * for mutual exclusion to protect the xhci->devs[slot_id] structure. |
1994 | */ |
1995 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
1996 | struct usb_host_endpoint *ep) |
1997 | { |
1998 | struct xhci_hcd *xhci; |
1999 | struct xhci_container_ctx *in_ctx; |
2000 | unsigned int ep_index; |
2001 | struct xhci_input_control_ctx *ctrl_ctx; |
2002 | struct xhci_ep_ctx *ep_ctx; |
2003 | u32 added_ctxs; |
2004 | u32 new_add_flags, new_drop_flags; |
2005 | struct xhci_virt_device *virt_dev; |
2006 | int ret = 0; |
2007 | |
2008 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
2009 | if (ret <= 0) { |
2010 | /* So we won't queue a reset ep command for a root hub */ |
2011 | ep->hcpriv = NULL; |
2012 | return ret; |
2013 | } |
2014 | xhci = hcd_to_xhci(hcd); |
2015 | if (xhci->xhc_state & XHCI_STATE_DYING) |
2016 | return -ENODEV; |
2017 | |
2018 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); |
2019 | if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { |
2020 | /* FIXME when we have to issue an evaluate endpoint command to |
2021 | * deal with ep0 max packet size changing once we get the |
2022 | * descriptors |
2023 | */ |
2024 | xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", |
2025 | __func__, added_ctxs); |
2026 | return 0; |
2027 | } |
2028 | |
2029 | virt_dev = xhci->devs[udev->slot_id]; |
2030 | in_ctx = virt_dev->in_ctx; |
2031 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); |
2032 | if (!ctrl_ctx) { |
2033 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
2034 | __func__); |
2035 | return 0; |
2036 | } |
2037 | |
2038 | ep_index = xhci_get_endpoint_index(&ep->desc); |
2039 | /* If this endpoint is already in use, and the upper layers are trying |
2040 | * to add it again without dropping it, reject the addition. |
2041 | */ |
2042 | if (virt_dev->eps[ep_index].ring && |
2043 | !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { |
2044 | xhci_warn(xhci, "Trying to add endpoint 0x%x " |
2045 | "without dropping it.\n", |
2046 | (unsigned int) ep->desc.bEndpointAddress); |
2047 | return -EINVAL; |
2048 | } |
2049 | |
2050 | /* If the HCD has already noted the endpoint is enabled, |
2051 | * ignore this request. |
2052 | */ |
2053 | if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) { |
2054 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", |
2055 | __func__, ep); |
2056 | return 0; |
2057 | } |
2058 | |
2059 | /* |
2060 | * Configuration and alternate setting changes must be done in |
2061 | * process context, not interrupt context (or so documentation |
2062 | * for usb_set_interface() and usb_set_configuration() claims). |
2063 | */ |
2064 | if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { |
2065 | dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", |
2066 | __func__, ep->desc.bEndpointAddress); |
2067 | return -ENOMEM; |
2068 | } |
2069 | |
2070 | ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); |
2071 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
2072 | |
2073 | /* If xhci_endpoint_disable() was called for this endpoint, but the |
2074 | * xHC hasn't been notified yet through the check_bandwidth() call, |
2075 | * this re-adds a new state for the endpoint from the new endpoint |
2076 | * descriptors. We must drop and re-add this endpoint, so we leave the |
2077 | * drop flags alone. |
2078 | */ |
2079 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
2080 | |
2081 | /* Store the usb_device pointer for later use */ |
2082 | ep->hcpriv = udev; |
2083 | |
2084 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
2085 | trace_xhci_add_endpoint(ep_ctx); |
2086 | |
2087 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", |
2088 | (unsigned int) ep->desc.bEndpointAddress, |
2089 | udev->slot_id, |
2090 | (unsigned int) new_drop_flags, |
2091 | (unsigned int) new_add_flags); |
2092 | return 0; |
2093 | } |
2094 | EXPORT_SYMBOL_GPL(xhci_add_endpoint); |
2095 | |
2096 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
2097 | { |
2098 | struct xhci_input_control_ctx *ctrl_ctx; |
2099 | struct xhci_ep_ctx *ep_ctx; |
2100 | struct xhci_slot_ctx *slot_ctx; |
2101 | int i; |
2102 | |
2103 | ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); |
2104 | if (!ctrl_ctx) { |
2105 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
2106 | __func__); |
2107 | return; |
2108 | } |
2109 | |
2110 | /* When a device's add flag and drop flag are zero, any subsequent |
2111 | * configure endpoint command will leave that endpoint's state |
2112 | * untouched. Make sure we don't leave any old state in the input |
2113 | * endpoint contexts. |
2114 | */ |
2115 | ctrl_ctx->drop_flags = 0; |
2116 | ctrl_ctx->add_flags = 0; |
2117 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
2118 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
2119 | /* Endpoint 0 is always valid */ |
2120 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); |
2121 | for (i = 1; i < 31; i++) { |
2122 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); |
2123 | ep_ctx->ep_info = 0; |
2124 | ep_ctx->ep_info2 = 0; |
2125 | ep_ctx->deq = 0; |
2126 | ep_ctx->tx_info = 0; |
2127 | } |
2128 | } |
2129 | |
2130 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, |
2131 | struct usb_device *udev, u32 *cmd_status) |
2132 | { |
2133 | int ret; |
2134 | |
2135 | switch (*cmd_status) { |
2136 | case COMP_COMMAND_ABORTED: |
2137 | case COMP_COMMAND_RING_STOPPED: |
2138 | xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); |
2139 | ret = -ETIME; |
2140 | break; |
2141 | case COMP_RESOURCE_ERROR: |
2142 | dev_warn(&udev->dev, |
2143 | "Not enough host controller resources for new device state.\n"); |
2144 | ret = -ENOMEM; |
2145 | /* FIXME: can we allocate more resources for the HC? */ |
2146 | break; |
2147 | case COMP_BANDWIDTH_ERROR: |
2148 | case COMP_SECONDARY_BANDWIDTH_ERROR: |
2149 | dev_warn(&udev->dev, |
2150 | "Not enough bandwidth for new device state.\n"); |
2151 | ret = -ENOSPC; |
2152 | /* FIXME: can we go back to the old state? */ |
2153 | break; |
2154 | case COMP_TRB_ERROR: |
2155 | /* the HCD set up something wrong */ |
2156 | dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " |
2157 | "add flag = 1, " |
2158 | "and endpoint is not disabled.\n"); |
2159 | ret = -EINVAL; |
2160 | break; |
2161 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
2162 | dev_warn(&udev->dev, |
2163 | "ERROR: Incompatible device for endpoint configure command.\n"); |
2164 | ret = -ENODEV; |
2165 | break; |
2166 | case COMP_SUCCESS: |
2167 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
2168 | "Successful Endpoint Configure command"); |
2169 | ret = 0; |
2170 | break; |
2171 | default: |
2172 | xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", |
2173 | *cmd_status); |
2174 | ret = -EINVAL; |
2175 | break; |
2176 | } |
2177 | return ret; |
2178 | } |
2179 | |
2180 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, |
2181 | struct usb_device *udev, u32 *cmd_status) |
2182 | { |
2183 | int ret; |
2184 | |
2185 | switch (*cmd_status) { |
2186 | case COMP_COMMAND_ABORTED: |
2187 | case COMP_COMMAND_RING_STOPPED: |
2188 | xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); |
2189 | ret = -ETIME; |
2190 | break; |
2191 | case COMP_PARAMETER_ERROR: |
2192 | dev_warn(&udev->dev, |
2193 | "WARN: xHCI driver setup invalid evaluate context command.\n"); |
2194 | ret = -EINVAL; |
2195 | break; |
2196 | case COMP_SLOT_NOT_ENABLED_ERROR: |
2197 | dev_warn(&udev->dev, |
2198 | "WARN: slot not enabled for evaluate context command.\n"); |
2199 | ret = -EINVAL; |
2200 | break; |
2201 | case COMP_CONTEXT_STATE_ERROR: |
2202 | dev_warn(&udev->dev, |
2203 | "WARN: invalid context state for evaluate context command.\n"); |
2204 | ret = -EINVAL; |
2205 | break; |
2206 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
2207 | dev_warn(&udev->dev, |
2208 | "ERROR: Incompatible device for evaluate context command.\n"); |
2209 | ret = -ENODEV; |
2210 | break; |
2211 | case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: |
2212 | /* Max Exit Latency too large error */ |
2213 | dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); |
2214 | ret = -EINVAL; |
2215 | break; |
2216 | case COMP_SUCCESS: |
2217 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
2218 | "Successful evaluate context command"); |
2219 | ret = 0; |
2220 | break; |
2221 | default: |
2222 | xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", |
2223 | *cmd_status); |
2224 | ret = -EINVAL; |
2225 | break; |
2226 | } |
2227 | return ret; |
2228 | } |
2229 | |
2230 | static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, |
2231 | struct xhci_input_control_ctx *ctrl_ctx) |
2232 | { |
2233 | u32 valid_add_flags; |
2234 | u32 valid_drop_flags; |
2235 | |
2236 | /* Ignore the slot flag (bit 0), and the default control endpoint flag |
2237 | * (bit 1). The default control endpoint is added during the Address |
2238 | * Device command and is never removed until the slot is disabled. |
2239 | */ |
2240 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
2241 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
2242 | |
2243 | /* Use hweight32 to count the number of ones in the add flags, or |
2244 | * number of endpoints added. Don't count endpoints that are changed |
2245 | * (both added and dropped). |
2246 | */ |
2247 | return hweight32(valid_add_flags) - |
2248 | hweight32(valid_add_flags & valid_drop_flags); |
2249 | } |
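| /* |
|  * Example: adding ep 1 OUT and ep 1 IN (flag bits 2 and 3) while also |
|  * dropping ep 1 OUT (a changed endpoint) gives, after the shift, |
|  * add = 0b11 and drop = 0b01, so 2 - 1 = 1 genuinely new endpoint. |
|  */ |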
2250 | |
2251 | static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, |
2252 | struct xhci_input_control_ctx *ctrl_ctx) |
2253 | { |
2254 | u32 valid_add_flags; |
2255 | u32 valid_drop_flags; |
2256 | |
2257 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
2258 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
2259 | |
2260 | return hweight32(valid_drop_flags) - |
2261 | hweight32(valid_add_flags & valid_drop_flags); |
2262 | } |
2263 | |
2264 | /* |
2265 | * We need to reserve the new number of endpoints before the configure endpoint |
2266 | * command completes. We can't subtract the dropped endpoints from the number |
2267 | * of active endpoints until the command completes because we can oversubscribe |
2268 | * the host in this case: |
2269 | * |
2270 | * - the first configure endpoint command drops more endpoints than it adds |
2271 | * - a second configure endpoint command that adds more endpoints is queued |
2272 | * - the first configure endpoint command fails, so the config is unchanged |
2273 | * - the second command may succeed, even though there isn't enough resources |
2274 | * |
2275 | * Must be called with xhci->lock held. |
2276 | */ |
2277 | static int xhci_reserve_host_resources(struct xhci_hcd *xhci, |
2278 | struct xhci_input_control_ctx *ctrl_ctx) |
2279 | { |
2280 | u32 added_eps; |
2281 | |
2282 | added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); |
2283 | if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { |
2284 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2285 | "Not enough ep ctxs: " |
2286 | "%u active, need to add %u, limit is %u.", |
2287 | xhci->num_active_eps, added_eps, |
2288 | xhci->limit_active_eps); |
2289 | return -ENOMEM; |
2290 | } |
2291 | xhci->num_active_eps += added_eps; |
2292 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2293 | "Adding %u ep ctxs, %u now active.", added_eps, |
2294 | xhci->num_active_eps); |
2295 | return 0; |
2296 | } |
2297 | |
2298 | /* |
2299 | * The configure endpoint was failed by the xHC for some other reason, so we |
2300 | * need to revert the resources that failed configuration would have used. |
2301 | * |
2302 | * Must be called with xhci->lock held. |
2303 | */ |
2304 | static void xhci_free_host_resources(struct xhci_hcd *xhci, |
2305 | struct xhci_input_control_ctx *ctrl_ctx) |
2306 | { |
2307 | u32 num_failed_eps; |
2308 | |
2309 | num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); |
2310 | xhci->num_active_eps -= num_failed_eps; |
2311 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2312 | "Removing %u failed ep ctxs, %u now active.", |
2313 | num_failed_eps, |
2314 | xhci->num_active_eps); |
2315 | } |
2316 | |
2317 | /* |
2318 | * Now that the command has completed, clean up the active endpoint count by |
2319 | * subtracting out the endpoints that were dropped (but not changed). |
2320 | * |
2321 | * Must be called with xhci->lock held. |
2322 | */ |
2323 | static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, |
2324 | struct xhci_input_control_ctx *ctrl_ctx) |
2325 | { |
2326 | u32 num_dropped_eps; |
2327 | |
2328 | num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); |
2329 | xhci->num_active_eps -= num_dropped_eps; |
2330 | if (num_dropped_eps) |
2331 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2332 | "Removing %u dropped ep ctxs, %u now active.", |
2333 | num_dropped_eps, |
2334 | xhci->num_active_eps); |
2335 | } |
2336 | |
2337 | static unsigned int xhci_get_block_size(struct usb_device *udev) |
2338 | { |
2339 | switch (udev->speed) { |
2340 | case USB_SPEED_LOW: |
2341 | case USB_SPEED_FULL: |
2342 | return FS_BLOCK; |
2343 | case USB_SPEED_HIGH: |
2344 | return HS_BLOCK; |
2345 | case USB_SPEED_SUPER: |
2346 | case USB_SPEED_SUPER_PLUS: |
2347 | return SS_BLOCK; |
2348 | case USB_SPEED_UNKNOWN: |
2349 | default: |
2350 | /* Should never happen */ |
2351 | return 1; |
2352 | } |
2353 | } |
2354 | |
2355 | static unsigned int |
2356 | xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) |
2357 | { |
2358 | if (interval_bw->overhead[LS_OVERHEAD_TYPE]) |
2359 | return LS_OVERHEAD; |
2360 | if (interval_bw->overhead[FS_OVERHEAD_TYPE]) |
2361 | return FS_OVERHEAD; |
2362 | return HS_OVERHEAD; |
2363 | } |
2364 | |
2365 | /* If we are changing a LS/FS device under a HS hub, |
2366 | * make sure (if we are activating a new TT) that the HS bus has enough |
2367 | * bandwidth for this new TT. |
2368 | */ |
2369 | static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, |
2370 | struct xhci_virt_device *virt_dev, |
2371 | int old_active_eps) |
2372 | { |
2373 | struct xhci_interval_bw_table *bw_table; |
2374 | struct xhci_tt_bw_info *tt_info; |
2375 | |
2376 | /* Find the bandwidth table for the root port this TT is attached to. */ |
2377 | bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table; |
2378 | tt_info = virt_dev->tt_info; |
2379 | /* If this TT already had active endpoints, the bandwidth for this TT |
2380 | * has already been added. Removing all periodic endpoints (and thus |
2381 | * making the TT inactive) will only decrease the bandwidth used. |
2382 | */ |
2383 | if (old_active_eps) |
2384 | return 0; |
2385 | if (old_active_eps == 0 && tt_info->active_eps != 0) { |
2386 | if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) |
2387 | return -ENOMEM; |
2388 | return 0; |
2389 | } |
2390 | /* Not sure why we would have no new active endpoints... |
2391 | * |
2392 | * Maybe because of an Evaluate Context change for a hub update or a |
2393 | * control endpoint 0 max packet size change? |
2394 | * FIXME: skip the bandwidth calculation in that case. |
2395 | */ |
2396 | return 0; |
2397 | } |
2398 | |
2399 | static int xhci_check_ss_bw(struct xhci_hcd *xhci, |
2400 | struct xhci_virt_device *virt_dev) |
2401 | { |
2402 | unsigned int bw_reserved; |
2403 | |
2404 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); |
2405 | if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) |
2406 | return -ENOMEM; |
2407 | |
2408 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); |
2409 | if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) |
2410 | return -ENOMEM; |
2411 | |
2412 | return 0; |
2413 | } |
2414 | |
2415 | /* |
2416 | * This algorithm is a very conservative estimate of the worst-case scheduling |
2417 | * scenario for any one interval. The hardware dynamically schedules the |
2418 | * packets, so we can't tell which microframe could be the limiting factor in |
2419 | * the bandwidth scheduling. This only takes into account periodic endpoints. |
2420 | * |
2421 | * Obviously, we can't solve an NP complete problem to find the minimum worst |
2422 | * case scenario. Instead, we come up with an estimate that is no less than |
2423 | * the worst case bandwidth used for any one microframe, but may be an |
2424 | * over-estimate. |
2425 | * |
2426 | * We walk the requirements for each endpoint by interval, starting with the |
2427 | * smallest interval, and place packets in the schedule where there is only one |
2428 | * possible way to schedule packets for that interval. In order to simplify |
2429 | * this algorithm, we record the largest max packet size for each interval, and |
2430 | * assume all packets will be that size. |
2431 | * |
2432 | * For interval 0, we obviously must schedule all packets for each interval. |
2433 | * The bandwidth for interval 0 is just the amount of data to be transmitted |
2434 | * (the sum of all max ESIT payload sizes, plus any overhead per packet times |
2435 | * the number of packets). |
2436 | * |
2437 | * For interval 1, we have two possible microframes to schedule those packets |
2438 | * in. For this algorithm, if we can schedule the same number of packets for |
2439 | * each possible scheduling opportunity (each microframe), we will do so. The |
2440 | * remaining number of packets will be saved to be transmitted in the gaps in |
2441 | * the next interval's scheduling sequence. |
2442 | * |
2443 | * As we move those remaining packets to be scheduled with interval 2 packets, |
2444 | * we have to double the number of remaining packets to transmit. This is |
2445 | * because the intervals are actually powers of 2, and we would be transmitting |
2446 | * the previous interval's packets twice in this interval. We also have to be |
2447 | * sure that when we look at the largest max packet size for this interval, we |
2448 | * also look at the largest max packet size for the remaining packets and take |
2449 | * the greater of the two. |
2450 | * |
2451 | * The algorithm continues to evenly distribute packets in each scheduling |
2452 | * opportunity, and push the remaining packets out, until we get to the last |
2453 | * interval. Then those packets and their associated overhead are just added |
2454 | * to the bandwidth used. |
2455 | */ |
2456 | static int xhci_check_bw_table(struct xhci_hcd *xhci, |
2457 | struct xhci_virt_device *virt_dev, |
2458 | int old_active_eps) |
2459 | { |
2460 | unsigned int bw_reserved; |
2461 | unsigned int max_bandwidth; |
2462 | unsigned int bw_used; |
2463 | unsigned int block_size; |
2464 | struct xhci_interval_bw_table *bw_table; |
2465 | unsigned int packet_size = 0; |
2466 | unsigned int overhead = 0; |
2467 | unsigned int packets_transmitted = 0; |
2468 | unsigned int packets_remaining = 0; |
2469 | unsigned int i; |
2470 | |
2471 | if (virt_dev->udev->speed >= USB_SPEED_SUPER) |
2472 | return xhci_check_ss_bw(xhci, virt_dev); |
2473 | |
2474 | if (virt_dev->udev->speed == USB_SPEED_HIGH) { |
2475 | max_bandwidth = HS_BW_LIMIT; |
2476 | /* Convert percent of bus BW reserved to blocks reserved */ |
2477 | bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); |
2478 | } else { |
2479 | max_bandwidth = FS_BW_LIMIT; |
2480 | bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); |
2481 | } |
2482 | |
2483 | bw_table = virt_dev->bw_table; |
2484 | /* We need to translate the max packet size and max ESIT payloads into |
2485 | * the units the hardware uses. |
2486 | */ |
2487 | block_size = xhci_get_block_size(virt_dev->udev); |
2488 | |
2489 | /* If we are manipulating a LS/FS device under a HS hub, double check |
2490 | * that the HS bus has enough bandwidth if we are activating a new TT. |
2491 | */ |
2492 | if (virt_dev->tt_info) { |
2493 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2494 | "Recalculating BW for rootport %u", |
2495 | virt_dev->rhub_port->hw_portnum + 1); |
2496 | if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { |
2497 | xhci_warn(xhci, "Not enough bandwidth on HS bus for " |
2498 | "newly activated TT.\n"); |
2499 | return -ENOMEM; |
2500 | } |
2501 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2502 | "Recalculating BW for TT slot %u port %u", |
2503 | virt_dev->tt_info->slot_id, |
2504 | virt_dev->tt_info->ttport); |
2505 | } else { |
2506 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2507 | "Recalculating BW for rootport %u", |
2508 | virt_dev->rhub_port->hw_portnum + 1); |
2509 | } |
2510 | |
2511 | /* Add in how much bandwidth will be used for interval zero, or the |
2512 | * rounded max ESIT payload + number of packets * largest overhead. |
2513 | */ |
2514 | bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + |
2515 | bw_table->interval_bw[0].num_packets * |
2516 | xhci_get_largest_overhead(&bw_table->interval_bw[0]); |
2517 | |
2518 | for (i = 1; i < XHCI_MAX_INTERVAL; i++) { |
2519 | unsigned int bw_added; |
2520 | unsigned int largest_mps; |
2521 | unsigned int interval_overhead; |
2522 | |
2523 | /* |
2524 | * How many packets could we transmit in this interval? |
2525 | * If packets didn't fit in the previous interval, we will need |
2526 | * to transmit that many packets twice within this interval. |
2527 | */ |
2528 | packets_remaining = 2 * packets_remaining + |
2529 | bw_table->interval_bw[i].num_packets; |
2530 | |
2531 | /* Find the largest max packet size of this or the previous |
2532 | * interval. |
2533 | */ |
2534 | if (list_empty(&bw_table->interval_bw[i].endpoints)) |
2535 | largest_mps = 0; |
2536 | else { |
2537 | struct xhci_virt_ep *virt_ep; |
2538 | struct list_head *ep_entry; |
2539 | |
2540 | ep_entry = bw_table->interval_bw[i].endpoints.next; |
2541 | virt_ep = list_entry(ep_entry, |
2542 | struct xhci_virt_ep, bw_endpoint_list); |
2543 | /* Convert to blocks, rounding up */ |
2544 | largest_mps = DIV_ROUND_UP( |
2545 | virt_ep->bw_info.max_packet_size, |
2546 | block_size); |
2547 | } |
2548 | if (largest_mps > packet_size) |
2549 | packet_size = largest_mps; |
2550 | |
2551 | /* Use the larger overhead of this or the previous interval. */ |
2552 | interval_overhead = xhci_get_largest_overhead( |
2553 | &bw_table->interval_bw[i]); |
2554 | if (interval_overhead > overhead) |
2555 | overhead = interval_overhead; |
2556 | |
2557 | /* How many packets can we evenly distribute across |
2558 | * (1 << (i + 1)) possible scheduling opportunities? |
2559 | */ |
2560 | packets_transmitted = packets_remaining >> (i + 1); |
2561 | |
2562 | /* Add in the bandwidth used for those scheduled packets */ |
2563 | bw_added = packets_transmitted * (overhead + packet_size); |
2564 | |
2565 | /* How many packets do we have remaining to transmit? */ |
2566 | packets_remaining = packets_remaining % (1 << (i + 1)); |
2567 | |
2568 | /* What largest max packet size should those packets have? */ |
2569 | /* If we've transmitted all packets, don't carry over the |
2570 | * largest packet size. |
2571 | */ |
2572 | if (packets_remaining == 0) { |
2573 | packet_size = 0; |
2574 | overhead = 0; |
2575 | } else if (packets_transmitted > 0) { |
2576 | /* Otherwise if we do have remaining packets, and we've |
2577 | * scheduled some packets in this interval, take the |
2578 | * largest max packet size from endpoints with this |
2579 | * interval. |
2580 | */ |
2581 | packet_size = largest_mps; |
2582 | overhead = interval_overhead; |
2583 | } |
2584 | /* Otherwise carry over packet_size and overhead from the last |
2585 | * time we had a remainder. |
2586 | */ |
2587 | bw_used += bw_added; |
2588 | if (bw_used > max_bandwidth) { |
2589 | xhci_warn(xhci, "Not enough bandwidth. " |
2590 | "Proposed: %u, Max: %u\n", |
2591 | bw_used, max_bandwidth); |
2592 | return -ENOMEM; |
2593 | } |
2594 | } |
2595 | /* |
2596 | * Ok, we know we have some packets left over after even-handedly |
2597 | * scheduling interval 15. We don't know which microframes they will |
2598 | * fit into, so we over-schedule and say they will be scheduled every |
2599 | * microframe. |
2600 | */ |
2601 | if (packets_remaining > 0) |
2602 | bw_used += overhead + packet_size; |
2603 | |
2604 | if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { |
2605 | /* OK, we're manipulating a HS device attached to a |
2606 | * root port bandwidth domain. Include the number of active TTs |
2607 | * in the bandwidth used. |
2608 | */ |
2609 | bw_used += TT_HS_OVERHEAD * |
2610 | xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts; |
2611 | } |
2612 | |
2613 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2614 | "Final bandwidth: %u, Limit: %u, Reserved: %u, " |
2615 | "Available: %u " "percent", |
2616 | bw_used, max_bandwidth, bw_reserved, |
2617 | (max_bandwidth - bw_used - bw_reserved) * 100 / |
2618 | max_bandwidth); |
2619 | |
2620 | bw_used += bw_reserved; |
2621 | if (bw_used > max_bandwidth) { |
2622 | xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", |
2623 | bw_used, max_bandwidth); |
2624 | return -ENOMEM; |
2625 | } |
2626 | |
2627 | bw_table->bw_used = bw_used; |
2628 | return 0; |
2629 | } |
2630 | |
2631 | static bool xhci_is_async_ep(unsigned int ep_type) |
2632 | { |
2633 | return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && |
2634 | ep_type != ISOC_IN_EP && |
2635 | ep_type != INT_IN_EP); |
2636 | } |
2637 | |
2638 | static bool xhci_is_sync_in_ep(unsigned int ep_type) |
2639 | { |
2640 | return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); |
2641 | } |
2642 | |
2643 | static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) |
2644 | { |
2645 | unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); |
2646 | |
2647 | if (ep_bw->ep_interval == 0) |
2648 | return SS_OVERHEAD_BURST + |
2649 | (ep_bw->mult * ep_bw->num_packets * |
2650 | (SS_OVERHEAD + mps)); |
2651 | return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * |
2652 | (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), |
2653 | 1 << ep_bw->ep_interval); |
2654 | |
2655 | } |
2656 | |
2657 | static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, |
2658 | struct xhci_bw_info *ep_bw, |
2659 | struct xhci_interval_bw_table *bw_table, |
2660 | struct usb_device *udev, |
2661 | struct xhci_virt_ep *virt_ep, |
2662 | struct xhci_tt_bw_info *tt_info) |
2663 | { |
2664 | struct xhci_interval_bw *interval_bw; |
2665 | int normalized_interval; |
2666 | |
2667 | if (xhci_is_async_ep(ep_bw->type)) |
2668 | return; |
2669 | |
2670 | if (udev->speed >= USB_SPEED_SUPER) { |
2671 | if (xhci_is_sync_in_ep(ep_bw->type)) |
2672 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= |
2673 | xhci_get_ss_bw_consumed(ep_bw); |
2674 | else |
2675 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= |
2676 | xhci_get_ss_bw_consumed(ep_bw); |
2677 | return; |
2678 | } |
2679 | |
2680 | /* SuperSpeed endpoints never get added to intervals in the table, so |
2681 | * this check is only valid for HS/FS/LS devices. |
2682 | */ |
2683 | if (list_empty(&virt_ep->bw_endpoint_list)) |
2684 | return; |
2685 | /* For LS/FS devices, we need to translate the interval expressed in |
2686 | * microframes to frames. |
2687 | */ |
2688 | if (udev->speed == USB_SPEED_HIGH) |
2689 | normalized_interval = ep_bw->ep_interval; |
2690 | else |
2691 | normalized_interval = ep_bw->ep_interval - 3; |
2692 | |
2693 | if (normalized_interval == 0) |
2694 | bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; |
2695 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
2696 | interval_bw->num_packets -= ep_bw->num_packets; |
2697 | switch (udev->speed) { |
2698 | case USB_SPEED_LOW: |
2699 | interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; |
2700 | break; |
2701 | case USB_SPEED_FULL: |
2702 | interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; |
2703 | break; |
2704 | case USB_SPEED_HIGH: |
2705 | interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; |
2706 | break; |
2707 | default: |
2708 | /* Should never happen because only LS/FS/HS endpoints will get |
2709 | * added to the endpoint list. |
2710 | */ |
2711 | return; |
2712 | } |
2713 | if (tt_info) |
2714 | tt_info->active_eps -= 1; |
2715 | list_del_init(&virt_ep->bw_endpoint_list); |
2716 | } |
2717 | |
2718 | static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, |
2719 | struct xhci_bw_info *ep_bw, |
2720 | struct xhci_interval_bw_table *bw_table, |
2721 | struct usb_device *udev, |
2722 | struct xhci_virt_ep *virt_ep, |
2723 | struct xhci_tt_bw_info *tt_info) |
2724 | { |
2725 | struct xhci_interval_bw *interval_bw; |
2726 | struct xhci_virt_ep *smaller_ep; |
2727 | int normalized_interval; |
2728 | |
2729 | if (xhci_is_async_ep(ep_bw->type)) |
2730 | return; |
2731 | |
2732 | if (udev->speed == USB_SPEED_SUPER) { |
2733 | if (xhci_is_sync_in_ep(ep_bw->type)) |
2734 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in += |
2735 | xhci_get_ss_bw_consumed(ep_bw); |
2736 | else |
2737 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out += |
2738 | xhci_get_ss_bw_consumed(ep_bw); |
2739 | return; |
2740 | } |
2741 | |
2742 | /* For LS/FS devices, we need to translate the interval expressed in |
2743 | * microframes to frames. |
2744 | */ |
2745 | if (udev->speed == USB_SPEED_HIGH) |
2746 | normalized_interval = ep_bw->ep_interval; |
2747 | else |
2748 | normalized_interval = ep_bw->ep_interval - 3; |
2749 | |
2750 | if (normalized_interval == 0) |
2751 | bw_table->interval0_esit_payload += ep_bw->max_esit_payload; |
2752 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
2753 | interval_bw->num_packets += ep_bw->num_packets; |
2754 | switch (udev->speed) { |
2755 | case USB_SPEED_LOW: |
2756 | interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; |
2757 | break; |
2758 | case USB_SPEED_FULL: |
2759 | interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; |
2760 | break; |
2761 | case USB_SPEED_HIGH: |
2762 | interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; |
2763 | break; |
2764 | default: |
2765 | /* Should never happen because only LS/FS/HS endpoints will get |
2766 | * added to the endpoint list. |
2767 | */ |
2768 | return; |
2769 | } |
2770 | |
2771 | if (tt_info) |
2772 | tt_info->active_eps += 1; |
2773 | /* Insert the endpoint into the list, largest max packet size first. */ |
2774 | list_for_each_entry(smaller_ep, &interval_bw->endpoints, |
2775 | bw_endpoint_list) { |
2776 | if (ep_bw->max_packet_size >= |
2777 | smaller_ep->bw_info.max_packet_size) { |
2778 | /* Add the new ep before the smaller endpoint */ |
2779 | list_add_tail(&virt_ep->bw_endpoint_list, |
2780 | &smaller_ep->bw_endpoint_list); |
2781 | return; |
2782 | } |
2783 | } |
2784 | /* Add the new endpoint at the end of the list. */ |
2785 | list_add_tail(&virt_ep->bw_endpoint_list, |
2786 | &interval_bw->endpoints); |
2787 | } |
2788 | |
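| /* |
|  * Keep the root port's TT accounting in sync: when this device's TT goes |
|  * from zero to some active periodic endpoints, count one more active TT |
|  * and charge TT_HS_OVERHEAD to the HS bus; when it drops back to zero, |
|  * release both again. |
|  */ |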
2789 | void xhci_update_tt_active_eps(struct xhci_hcd *xhci, |
2790 | struct xhci_virt_device *virt_dev, |
2791 | int old_active_eps) |
2792 | { |
2793 | struct xhci_root_port_bw_info *rh_bw_info; |
2794 | if (!virt_dev->tt_info) |
2795 | return; |
2796 | |
2797 | rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum]; |
2798 | if (old_active_eps == 0 && |
2799 | virt_dev->tt_info->active_eps != 0) { |
2800 | rh_bw_info->num_active_tts += 1; |
2801 | rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; |
2802 | } else if (old_active_eps != 0 && |
2803 | virt_dev->tt_info->active_eps == 0) { |
2804 | rh_bw_info->num_active_tts -= 1; |
2805 | rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; |
2806 | } |
2807 | } |
2808 | |
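| /* |
|  * Tentatively apply the add/drop flags in the input context to the interval |
|  * bandwidth tables, then run the schedulability check. On failure all table |
|  * changes are rolled back so the old configuration keeps its reservation. |
|  */ |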
2809 | static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, |
2810 | struct xhci_virt_device *virt_dev, |
2811 | struct xhci_container_ctx *in_ctx) |
2812 | { |
2813 | struct xhci_bw_info ep_bw_info[31]; |
2814 | int i; |
2815 | struct xhci_input_control_ctx *ctrl_ctx; |
2816 | int old_active_eps = 0; |
2817 | |
2818 | if (virt_dev->tt_info) |
2819 | old_active_eps = virt_dev->tt_info->active_eps; |
2820 | |
2821 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); |
2822 | if (!ctrl_ctx) { |
2823 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
2824 | __func__); |
2825 | return -ENOMEM; |
2826 | } |
2827 | |
2828 | for (i = 0; i < 31; i++) { |
2829 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
2830 | continue; |
2831 | |
2832 | /* Make a copy of the BW info in case we need to revert this */ |
2833 | memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, |
2834 | sizeof(ep_bw_info[i])); |
2835 | /* Drop the endpoint from the interval table if the endpoint is |
2836 | * being dropped or changed. |
2837 | */ |
2838 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
2839 | xhci_drop_ep_from_interval_table(xhci, |
2840 | &virt_dev->eps[i].bw_info, |
2841 | virt_dev->bw_table, |
2842 | virt_dev->udev, |
2843 | &virt_dev->eps[i], |
2844 | virt_dev->tt_info); |
2845 | } |
2846 | /* Overwrite the information stored in the endpoints' bw_info */ |
2847 | xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); |
2848 | for (i = 0; i < 31; i++) { |
2849 | /* Add any changed or added endpoints to the interval table */ |
2850 | if (EP_IS_ADDED(ctrl_ctx, i)) |
2851 | xhci_add_ep_to_interval_table(xhci, |
2852 | &virt_dev->eps[i].bw_info, |
2853 | virt_dev->bw_table, |
2854 | virt_dev->udev, |
2855 | &virt_dev->eps[i], |
2856 | virt_dev->tt_info); |
2857 | } |
2858 | |
2859 | if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { |
2860 | /* Ok, this fits in the bandwidth we have. |
2861 | * Update the number of active TTs. |
2862 | */ |
2863 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); |
2864 | return 0; |
2865 | } |
2866 | |
2867 | /* We don't have enough bandwidth for this, revert the stored info. */ |
2868 | for (i = 0; i < 31; i++) { |
2869 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
2870 | continue; |
2871 | |
2872 | /* Drop the new copies of any added or changed endpoints from |
2873 | * the interval table. |
2874 | */ |
2875 | if (EP_IS_ADDED(ctrl_ctx, i)) { |
2876 | xhci_drop_ep_from_interval_table(xhci, |
2877 | &virt_dev->eps[i].bw_info, |
2878 | virt_dev->bw_table, |
2879 | virt_dev->udev, |
2880 | &virt_dev->eps[i], |
2881 | virt_dev->tt_info); |
2882 | } |
2883 | /* Revert the endpoint back to its old information */ |
2884 | memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], |
2885 | sizeof(ep_bw_info[i])); |
2886 | /* Add any changed or dropped endpoints back into the table */ |
2887 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
2888 | xhci_add_ep_to_interval_table(xhci, |
2889 | &virt_dev->eps[i].bw_info, |
2890 | virt_dev->bw_table, |
2891 | virt_dev->udev, |
2892 | &virt_dev->eps[i], |
2893 | virt_dev->tt_info); |
2894 | } |
2895 | return -ENOMEM; |
2896 | } |
2897 | |
2898 | /* |
2899 | * Synchronous XHCI stop endpoint helper. Issues the stop endpoint command and |
2900 | * waits for the command completion before returning. This does not call |
2901 | * xhci_handle_cmd_stop_ep(), which has additional handling for 'context error' |
2902 | * cases, along with transfer ring cleanup. |
2903 | * |
2904 | * xhci_stop_endpoint_sync() is intended to be utilized by clients that manage |
2905 | * their own transfer ring, such as offload situations. |
2906 | */ |
2907 | int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend, |
2908 | gfp_t gfp_flags) |
2909 | { |
2910 | struct xhci_command *command; |
2911 | unsigned long flags; |
2912 | int ret; |
2913 | |
2914 | command = xhci_alloc_command(xhci, true, gfp_flags); |
2915 | if (!command) |
2916 | return -ENOMEM; |
2917 | |
2918 | spin_lock_irqsave(&xhci->lock, flags); |
2919 | ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id, |
2920 | ep->ep_index, suspend); |
2921 | if (ret < 0) { |
2922 | spin_unlock_irqrestore(&xhci->lock, flags); |
2923 | goto out; |
2924 | } |
2925 | |
2926 | xhci_ring_cmd_db(xhci); |
2927 | spin_unlock_irqrestore(&xhci->lock, flags); |
2928 | |
2929 | wait_for_completion(command->completion); |
2930 | |
2931 | /* No handling for COMP_CONTEXT_STATE_ERROR done at command completion*/ |
2932 | if (command->status == COMP_COMMAND_ABORTED || |
2933 | command->status == COMP_COMMAND_RING_STOPPED) { |
2934 | xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); |
2935 | ret = -ETIME; |
2936 | } |
2937 | out: |
2938 | xhci_free_command(xhci, command); |
2939 | |
2940 | return ret; |
2941 | } |
2942 | EXPORT_SYMBOL_GPL(xhci_stop_endpoint_sync); |
2943 | |
2944 | /* Issue a configure endpoint command or evaluate context command |
2945 | * and wait for it to finish. |
2946 | */ |
2947 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, |
2948 | struct usb_device *udev, |
2949 | struct xhci_command *command, |
2950 | bool ctx_change, bool must_succeed) |
2951 | { |
2952 | int ret; |
2953 | unsigned long flags; |
2954 | struct xhci_input_control_ctx *ctrl_ctx; |
2955 | struct xhci_virt_device *virt_dev; |
2956 | struct xhci_slot_ctx *slot_ctx; |
2957 | |
2958 | if (!command) |
2959 | return -EINVAL; |
2960 | |
2961 | spin_lock_irqsave(&xhci->lock, flags); |
2962 | |
2963 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
2964 | spin_unlock_irqrestore(&xhci->lock, flags); |
2965 | return -ESHUTDOWN; |
2966 | } |
2967 | |
2968 | virt_dev = xhci->devs[udev->slot_id]; |
2969 | |
2970 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
2971 | if (!ctrl_ctx) { |
2972 | spin_unlock_irqrestore(&xhci->lock, flags); |
2973 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
2974 | __func__); |
2975 | return -ENOMEM; |
2976 | } |
2977 | |
2978 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && |
2979 | xhci_reserve_host_resources(xhci, ctrl_ctx)) { |
2980 | spin_unlock_irqrestore(&xhci->lock, flags); |
2981 | xhci_warn(xhci, "Not enough host resources, " |
2982 | "active endpoint contexts = %u\n", |
2983 | xhci->num_active_eps); |
2984 | return -ENOMEM; |
2985 | } |
2986 | if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change && |
2987 | xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { |
2988 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
2989 | xhci_free_host_resources(xhci, ctrl_ctx); |
2990 | spin_unlock_irqrestore(&xhci->lock, flags); |
2991 | xhci_warn(xhci, "Not enough bandwidth\n"); |
2992 | return -ENOMEM; |
2993 | } |
2994 | |
2995 | slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); |
2996 | |
2997 | trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); |
2998 | trace_xhci_configure_endpoint(slot_ctx); |
2999 | |
3000 | if (!ctx_change) |
3001 | ret = xhci_queue_configure_endpoint(xhci, cmd: command, |
3002 | in_ctx_ptr: command->in_ctx->dma, |
3003 | slot_id: udev->slot_id, command_must_succeed: must_succeed); |
3004 | else |
3005 | ret = xhci_queue_evaluate_context(xhci, cmd: command, |
3006 | in_ctx_ptr: command->in_ctx->dma, |
3007 | slot_id: udev->slot_id, command_must_succeed: must_succeed); |
3008 | if (ret < 0) { |
3009 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
3010 | xhci_free_host_resources(xhci, ctrl_ctx); |
3011 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3012 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_context_change, |
3013 | fmt: "FIXME allocate a new ring segment"); |
3014 | return -ENOMEM; |
3015 | } |
3016 | xhci_ring_cmd_db(xhci); |
3017 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3018 | |
3019 | /* Wait for the configure endpoint command to complete */ |
3020 | wait_for_completion(command->completion); |
3021 | |
3022 | if (!ctx_change) |
3023 | ret = xhci_configure_endpoint_result(xhci, udev, |
3024 | cmd_status: &command->status); |
3025 | else |
3026 | ret = xhci_evaluate_context_result(xhci, udev, |
3027 | cmd_status: &command->status); |
3028 | |
3029 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
3030 | spin_lock_irqsave(&xhci->lock, flags); |
3031 | /* If the command failed, remove the reserved resources. |
3032 | * Otherwise, clean up the estimate to include dropped eps. |
3033 | */ |
3034 | if (ret) |
3035 | xhci_free_host_resources(xhci, ctrl_ctx); |
3036 | else |
3037 | xhci_finish_resource_reservation(xhci, ctrl_ctx); |
3038 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3039 | } |
3040 | return ret; |
3041 | } |
3042 | |
3043 | static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, |
3044 | struct xhci_virt_device *vdev, int i) |
3045 | { |
3046 | struct xhci_virt_ep *ep = &vdev->eps[i]; |
3047 | |
3048 | if (ep->ep_state & EP_HAS_STREAMS) { |
3049 | xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", |
3050 | xhci_get_endpoint_address(i)); |
3051 | xhci_free_stream_info(xhci, ep->stream_info); |
3052 | ep->stream_info = NULL; |
3053 | ep->ep_state &= ~EP_HAS_STREAMS; |
3054 | } |
3055 | } |
3056 | |
3057 | /* Called after one or more calls to xhci_add_endpoint() or |
3058 | * xhci_drop_endpoint(). If this call fails, the USB core is expected |
3059 | * to call xhci_reset_bandwidth(). |
3060 | * |
3061 | * Since we are in the middle of changing either configuration or |
3062 | * installing a new alt setting, the USB core won't allow URBs to be |
3063 | * enqueued for any endpoint on the old config or interface. Nothing |
3064 | * else should be touching the xhci->devs[slot_id] structure, so we |
3065 | * don't need to take the xhci->lock for manipulating that. |
3066 | */ |
3067 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
3068 | { |
3069 | int i; |
3070 | int ret = 0; |
3071 | struct xhci_hcd *xhci; |
3072 | struct xhci_virt_device *virt_dev; |
3073 | struct xhci_input_control_ctx *ctrl_ctx; |
3074 | struct xhci_slot_ctx *slot_ctx; |
3075 | struct xhci_command *command; |
3076 | |
3077 | ret = xhci_check_args(hcd, udev, NULL, check_ep: 0, check_virt_dev: true, func: __func__); |
3078 | if (ret <= 0) |
3079 | return ret; |
3080 | xhci = hcd_to_xhci(hcd); |
3081 | if ((xhci->xhc_state & XHCI_STATE_DYING) || |
3082 | (xhci->xhc_state & XHCI_STATE_REMOVING)) |
3083 | return -ENODEV; |
3084 | |
3085 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
3086 | virt_dev = xhci->devs[udev->slot_id]; |
3087 | |
3088 | command = xhci_alloc_command(xhci, allocate_completion: true, GFP_KERNEL); |
3089 | if (!command) |
3090 | return -ENOMEM; |
3091 | |
3092 | command->in_ctx = virt_dev->in_ctx; |
3093 | |
3094 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ |
3095 | ctrl_ctx = xhci_get_input_control_ctx(ctx: command->in_ctx); |
3096 | if (!ctrl_ctx) { |
3097 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
3098 | __func__); |
3099 | ret = -ENOMEM; |
3100 | goto command_cleanup; |
3101 | } |
3102 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
3103 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); |
3104 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); |
3105 | |
3106 | /* Don't issue the command if there are no endpoints to update. */ |
3107 | if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && |
3108 | ctrl_ctx->drop_flags == 0) { |
3109 | ret = 0; |
3110 | goto command_cleanup; |
3111 | } |
3112 | /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ |
3113 | slot_ctx = xhci_get_slot_ctx(xhci, ctx: virt_dev->in_ctx); |
3114 | for (i = 31; i >= 1; i--) { |
3115 | __le32 le32 = cpu_to_le32(BIT(i)); |
3116 | |
3117 | if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) |
3118 | || (ctrl_ctx->add_flags & le32) || i == 1) { |
3119 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
3120 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); |
3121 | break; |
3122 | } |
3123 | } |
3124 | |
3125 | ret = xhci_configure_endpoint(xhci, udev, command, |
3126 | ctx_change: false, must_succeed: false); |
3127 | if (ret) |
3128 | /* Caller is expected to call xhci_reset_bandwidth() */ |
3129 | goto command_cleanup; |
3130 | |
3131 | /* Free any rings that were dropped, but not changed. */ |
3132 | for (i = 1; i < 31; i++) { |
3133 | if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && |
3134 | !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { |
3135 | xhci_free_endpoint_ring(xhci, virt_dev, ep_index: i); |
3136 | xhci_check_bw_drop_ep_streams(xhci, vdev: virt_dev, i); |
3137 | } |
3138 | } |
3139 | xhci_zero_in_ctx(xhci, virt_dev); |
3140 | /* |
3141 | * Install any rings for completely new endpoints or changed endpoints, |
3142 | * and free any old rings from changed endpoints. |
3143 | */ |
3144 | for (i = 1; i < 31; i++) { |
3145 | if (!virt_dev->eps[i].new_ring) |
3146 | continue; |
3147 | /* Only free the old ring if it exists. |
3148 | * It may not if this is the first add of an endpoint. |
3149 | */ |
3150 | if (virt_dev->eps[i].ring) { |
3151 | xhci_free_endpoint_ring(xhci, virt_dev, ep_index: i); |
3152 | } |
3153 | xhci_check_bw_drop_ep_streams(xhci, vdev: virt_dev, i); |
3154 | virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; |
3155 | virt_dev->eps[i].new_ring = NULL; |
3156 | xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index: i); |
3157 | } |
3158 | command_cleanup: |
3159 | kfree(objp: command->completion); |
3160 | kfree(objp: command); |
3161 | |
3162 | return ret; |
3163 | } |
3164 | EXPORT_SYMBOL_GPL(xhci_check_bandwidth); |
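| |
| /* |
| * Editor's note: in normal operation this entry point is not called directly; |
| * the USB core drives it through the hc_driver ops, roughly in the order |
| * sketched below, when a configuration or alt setting changes. The helper |
| * name and the error handling shown here are illustrative only. |
| */ |
| #if 0 /* illustrative only, editor's sketch */ |
| static int example_install_alt_setting(struct usb_hcd *hcd, |
| struct usb_device *udev, |
| struct usb_host_endpoint *old_ep, |
| struct usb_host_endpoint *new_ep) |
| { |
| int ret; |
| |
| /* Stage the change in the input context... */ |
| ret = xhci_drop_endpoint(hcd, udev, old_ep); |
| if (ret) |
| return ret; |
| ret = xhci_add_endpoint(hcd, udev, new_ep); |
| if (ret) |
| return ret; |
| |
| /* ...then commit it with a single configure endpoint command. */ |
| ret = xhci_check_bandwidth(hcd, udev); |
| if (ret) |
| xhci_reset_bandwidth(hcd, udev); /* drop the staged change */ |
| return ret; |
| } |
| #endif |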
3165 | |
3166 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
3167 | { |
3168 | struct xhci_hcd *xhci; |
3169 | struct xhci_virt_device *virt_dev; |
3170 | int i, ret; |
3171 | |
3172 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
3173 | if (ret <= 0) |
3174 | return; |
3175 | xhci = hcd_to_xhci(hcd); |
3176 | |
3177 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
3178 | virt_dev = xhci->devs[udev->slot_id]; |
3179 | /* Free any rings allocated for added endpoints */ |
3180 | for (i = 0; i < 31; i++) { |
3181 | if (virt_dev->eps[i].new_ring) { |
3182 | xhci_debugfs_remove_endpoint(xhci, virt_dev, i); |
3183 | xhci_ring_free(xhci, virt_dev->eps[i].new_ring); |
3184 | virt_dev->eps[i].new_ring = NULL; |
3185 | } |
3186 | } |
3187 | xhci_zero_in_ctx(xhci, virt_dev); |
3188 | } |
3189 | EXPORT_SYMBOL_GPL(xhci_reset_bandwidth); |
3190 | |
3191 | /* Get the available bandwidth of the ports under the xhci roothub */ |
3192 | int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, |
3193 | u8 dev_speed) |
3194 | { |
3195 | struct xhci_command *cmd; |
3196 | unsigned long flags; |
3197 | int ret; |
3198 | |
3199 | if (!ctx || !xhci) |
3200 | return -EINVAL; |
3201 | |
3202 | cmd = xhci_alloc_command(xhci, true, GFP_KERNEL); |
3203 | if (!cmd) |
3204 | return -ENOMEM; |
3205 | |
3206 | cmd->in_ctx = ctx; |
3207 | |
3208 | /* get xhci port bandwidth, refer to xhci rev1_2 protocol 4.6.15 */ |
3209 | spin_lock_irqsave(&xhci->lock, flags); |
3210 | |
3211 | ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0); |
3212 | if (ret) { |
3213 | spin_unlock_irqrestore(&xhci->lock, flags); |
3214 | goto err_out; |
3215 | } |
3216 | xhci_ring_cmd_db(xhci); |
3217 | spin_unlock_irqrestore(&xhci->lock, flags); |
3218 | |
3219 | wait_for_completion(cmd->completion); |
3220 | err_out: |
3221 | kfree(cmd->completion); |
3222 | kfree(cmd); |
3223 | |
3224 | return ret; |
3225 | } |
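| |
| /* |
| * Editor's note: a hypothetical caller of the helper above. Allocating a |
| * container context large enough for the returned per-port bandwidth data |
| * is assumed and not shown. The dev_speed encoding is assumed to follow the |
| * Get Port Bandwidth command (xHCI rev1_2, 4.6.15), where 1 = full-speed. |
| */ |
| #if 0 /* illustrative only, editor's sketch */ |
| static int example_query_fs_port_bw(struct xhci_hcd *xhci, |
| struct xhci_container_ctx *bw_ctx) |
| { |
| /* bw_ctx receives one bandwidth value per root hub port. */ |
| return xhci_get_port_bandwidth(xhci, bw_ctx, 1); |
| } |
| #endif |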
3226 | |
3227 | static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, |
3228 | struct xhci_container_ctx *in_ctx, |
3229 | struct xhci_container_ctx *out_ctx, |
3230 | struct xhci_input_control_ctx *ctrl_ctx, |
3231 | u32 add_flags, u32 drop_flags) |
3232 | { |
3233 | ctrl_ctx->add_flags = cpu_to_le32(add_flags); |
3234 | ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); |
3235 | xhci_slot_copy(xhci, in_ctx, out_ctx); |
3236 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
3237 | } |
3238 | |
3239 | static void xhci_endpoint_disable(struct usb_hcd *hcd, |
3240 | struct usb_host_endpoint *host_ep) |
3241 | { |
3242 | struct xhci_hcd *xhci; |
3243 | struct xhci_virt_device *vdev; |
3244 | struct xhci_virt_ep *ep; |
3245 | struct usb_device *udev; |
3246 | unsigned long flags; |
3247 | unsigned int ep_index; |
3248 | |
3249 | xhci = hcd_to_xhci(hcd); |
3250 | rescan: |
3251 | spin_lock_irqsave(&xhci->lock, flags); |
3252 | |
3253 | udev = (struct usb_device *)host_ep->hcpriv; |
3254 | if (!udev || !udev->slot_id) |
3255 | goto done; |
3256 | |
3257 | vdev = xhci->devs[udev->slot_id]; |
3258 | if (!vdev) |
3259 | goto done; |
3260 | |
3261 | ep_index = xhci_get_endpoint_index(&host_ep->desc); |
3262 | ep = &vdev->eps[ep_index]; |
3263 | |
3264 | /* wait for hub_tt_work to finish clearing hub TT */ |
3265 | if (ep->ep_state & EP_CLEARING_TT) { |
3266 | spin_unlock_irqrestore(&xhci->lock, flags); |
3267 | schedule_timeout_uninterruptible(1); |
3268 | goto rescan; |
3269 | } |
3270 | |
3271 | if (ep->ep_state) |
3272 | xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", |
3273 | ep->ep_state); |
3274 | done: |
3275 | host_ep->hcpriv = NULL; |
3276 | spin_unlock_irqrestore(&xhci->lock, flags); |
3277 | } |
3278 | |
3279 | /* |
3280 | * Called after usb core issues a clear halt control message. |
3281 | * The host side of the halt should already be cleared by a reset endpoint |
3282 | * command issued when the STALL event was received. |
3283 | * |
3284 | * The reset endpoint command may only be issued to endpoints in the halted |
3285 | * state. For software that wishes to reset the data toggle or sequence number |
3286 | * of an endpoint that isn't in the halted state this function will issue a |
3287 | * configure endpoint command with the Drop and Add bits set for the target |
3288 | * endpoint. Refer to the additional note in the xHCI specification, section 4.6.8. |
3289 | * |
3290 | * vdev may be lost due to xHC restore error and re-initialization during S3/S4 |
3291 | * resume. A new vdev will be allocated later by xhci_discover_or_reset_device() |
3292 | */ |
3293 | |
3294 | static void xhci_endpoint_reset(struct usb_hcd *hcd, |
3295 | struct usb_host_endpoint *host_ep) |
3296 | { |
3297 | struct xhci_hcd *xhci; |
3298 | struct usb_device *udev; |
3299 | struct xhci_virt_device *vdev; |
3300 | struct xhci_virt_ep *ep; |
3301 | struct xhci_input_control_ctx *ctrl_ctx; |
3302 | struct xhci_command *stop_cmd, *cfg_cmd; |
3303 | unsigned int ep_index; |
3304 | unsigned long flags; |
3305 | u32 ep_flag; |
3306 | int err; |
3307 | |
3308 | xhci = hcd_to_xhci(hcd); |
3309 | ep_index = xhci_get_endpoint_index(&host_ep->desc); |
3310 | |
3311 | /* |
3312 | * The USB core assumes a max packet value for ep0 on FS devices until the |
3313 | * real value is read from the descriptor. The core resets ep0 if the values |
3314 | * mismatch, so reconfigure the xHCI ep0 endpoint context here in that case. |
3315 | */ |
3316 | if (usb_endpoint_xfer_control(epd: &host_ep->desc) && ep_index == 0) { |
3317 | |
3318 | udev = container_of(host_ep, struct usb_device, ep0); |
3319 | if (udev->speed != USB_SPEED_FULL || !udev->slot_id) |
3320 | return; |
3321 | |
3322 | vdev = xhci->devs[udev->slot_id]; |
3323 | if (!vdev || vdev->udev != udev) |
3324 | return; |
3325 | |
3326 | xhci_check_ep0_maxpacket(xhci, vdev); |
3327 | |
3328 | /* Nothing else should be done here for ep0 during ep reset */ |
3329 | return; |
3330 | } |
3331 | |
3332 | if (!host_ep->hcpriv) |
3333 | return; |
3334 | udev = (struct usb_device *) host_ep->hcpriv; |
3335 | vdev = xhci->devs[udev->slot_id]; |
3336 | |
3337 | if (!udev->slot_id || !vdev) |
3338 | return; |
3339 | |
3340 | ep = &vdev->eps[ep_index]; |
3341 | |
3342 | /* Bail out if toggle is already being cleared by an endpoint reset */ |
3343 | spin_lock_irqsave(&xhci->lock, flags); |
3344 | if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) { |
3345 | ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE; |
3346 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3347 | return; |
3348 | } |
3349 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3350 | /* Only interrupt and bulk eps use data toggle, see USB2 spec 5.5.4 */ |
3351 | if (usb_endpoint_xfer_control(epd: &host_ep->desc) || |
3352 | usb_endpoint_xfer_isoc(epd: &host_ep->desc)) |
3353 | return; |
3354 | |
3355 | ep_flag = xhci_get_endpoint_flag(desc: &host_ep->desc); |
3356 | |
3357 | if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG) |
3358 | return; |
3359 | |
3360 | stop_cmd = xhci_alloc_command(xhci, allocate_completion: true, GFP_NOWAIT); |
3361 | if (!stop_cmd) |
3362 | return; |
3363 | |
3364 | cfg_cmd = xhci_alloc_command_with_ctx(xhci, allocate_completion: true, GFP_NOWAIT); |
3365 | if (!cfg_cmd) |
3366 | goto cleanup; |
3367 | |
3368 | spin_lock_irqsave(&xhci->lock, flags); |
3369 | |
3370 | /* block queuing new trbs and ringing ep doorbell */ |
3371 | ep->ep_state |= EP_SOFT_CLEAR_TOGGLE; |
3372 | |
3373 | /* |
3374 | * Make sure endpoint ring is empty before resetting the toggle/seq. |
3375 | * The driver is required to synchronously cancel all transfer requests. |
3376 | * Stop the endpoint to force the xHC to update the output context. |
3377 | */ |
3378 | |
3379 | if (!list_empty(head: &ep->ring->td_list)) { |
3380 | dev_err(&udev->dev, "EP not empty, refuse reset\n"); |
3381 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3382 | xhci_free_command(xhci, command: cfg_cmd); |
3383 | goto cleanup; |
3384 | } |
3385 | |
3386 | err = xhci_queue_stop_endpoint(xhci, cmd: stop_cmd, slot_id: udev->slot_id, |
3387 | ep_index, suspend: 0); |
3388 | if (err < 0) { |
3389 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3390 | xhci_free_command(xhci, command: cfg_cmd); |
3391 | xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ", |
3392 | __func__, err); |
3393 | goto cleanup; |
3394 | } |
3395 | |
3396 | xhci_ring_cmd_db(xhci); |
3397 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3398 | |
3399 | wait_for_completion(stop_cmd->completion); |
3400 | |
3401 | spin_lock_irqsave(&xhci->lock, flags); |
3402 | |
3403 | /* config ep command clears toggle if add and drop ep flags are set */ |
3404 | ctrl_ctx = xhci_get_input_control_ctx(ctx: cfg_cmd->in_ctx); |
3405 | if (!ctrl_ctx) { |
3406 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3407 | xhci_free_command(xhci, command: cfg_cmd); |
3408 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
3409 | __func__); |
3410 | goto cleanup; |
3411 | } |
3412 | |
3413 | xhci_setup_input_ctx_for_config_ep(xhci, in_ctx: cfg_cmd->in_ctx, out_ctx: vdev->out_ctx, |
3414 | ctrl_ctx, add_flags: ep_flag, drop_flags: ep_flag); |
3415 | xhci_endpoint_copy(xhci, in_ctx: cfg_cmd->in_ctx, out_ctx: vdev->out_ctx, ep_index); |
3416 | |
3417 | err = xhci_queue_configure_endpoint(xhci, cmd: cfg_cmd, in_ctx_ptr: cfg_cmd->in_ctx->dma, |
3418 | slot_id: udev->slot_id, command_must_succeed: false); |
3419 | if (err < 0) { |
3420 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3421 | xhci_free_command(xhci, command: cfg_cmd); |
3422 | xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ", |
3423 | __func__, err); |
3424 | goto cleanup; |
3425 | } |
3426 | |
3427 | xhci_ring_cmd_db(xhci); |
3428 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3429 | |
3430 | wait_for_completion(cfg_cmd->completion); |
3431 | |
3432 | xhci_free_command(xhci, command: cfg_cmd); |
3433 | cleanup: |
3434 | xhci_free_command(xhci, command: stop_cmd); |
3435 | spin_lock_irqsave(&xhci->lock, flags); |
3436 | if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) |
3437 | ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; |
3438 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3439 | } |
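| |
| /* |
| * Editor's note: class drivers do not call this op directly; it is reached |
| * from usb_clear_halt() in the USB core. A hypothetical recovery path for a |
| * stalled bulk IN endpoint might look like this (names are illustrative): |
| */ |
| #if 0 /* illustrative only, editor's sketch */ |
| static int example_recover_stalled_bulk_in(struct usb_device *udev, |
| struct usb_host_endpoint *ep) |
| { |
| unsigned int pipe = usb_rcvbulkpipe(udev, usb_endpoint_num(&ep->desc)); |
| |
| /* Clears the device-side halt and ends up in xhci_endpoint_reset() above. */ |
| return usb_clear_halt(udev, pipe); |
| } |
| #endif |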
3440 | |
3441 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
3442 | struct usb_device *udev, struct usb_host_endpoint *ep, |
3443 | unsigned int slot_id) |
3444 | { |
3445 | int ret; |
3446 | unsigned int ep_index; |
3447 | unsigned int ep_state; |
3448 | |
3449 | if (!ep) |
3450 | return -EINVAL; |
3451 | ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); |
3452 | if (ret <= 0) |
3453 | return ret ? ret : -EINVAL; |
3454 | if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { |
3455 | xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" |
3456 | " descriptor for ep 0x%x does not support streams\n", |
3457 | ep->desc.bEndpointAddress); |
3458 | return -EINVAL; |
3459 | } |
3460 | |
3461 | ep_index = xhci_get_endpoint_index(&ep->desc); |
3462 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; |
3463 | if (ep_state & EP_HAS_STREAMS || |
3464 | ep_state & EP_GETTING_STREAMS) { |
3465 | xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " |
3466 | "already has streams set up.\n", |
3467 | ep->desc.bEndpointAddress); |
3468 | xhci_warn(xhci, "Send email to xHCI maintainer and ask for " |
3469 | "dynamic stream context array reallocation.\n"); |
3470 | return -EINVAL; |
3471 | } |
3472 | if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { |
3473 | xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " |
3474 | "endpoint 0x%x; URBs are pending.\n", |
3475 | ep->desc.bEndpointAddress); |
3476 | return -EINVAL; |
3477 | } |
3478 | return 0; |
3479 | } |
3480 | |
3481 | static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, |
3482 | unsigned int *num_streams, unsigned int *num_stream_ctxs) |
3483 | { |
3484 | unsigned int max_streams; |
3485 | |
3486 | /* The stream context array size must be a power of two */ |
3487 | *num_stream_ctxs = roundup_pow_of_two(*num_streams); |
3488 | /* |
3489 | * Find out how many primary stream array entries the host controller |
3490 | * supports. Later we may use secondary stream arrays (similar to 2nd |
3491 | * level page entries), but that's an optional feature for xHCI host |
3492 | * controllers. xHCs must support at least 4 stream IDs. |
3493 | */ |
3494 | max_streams = HCC_MAX_PSA(xhci->hcc_params); |
3495 | if (*num_stream_ctxs > max_streams) { |
3496 | xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", |
3497 | max_streams); |
3498 | *num_stream_ctxs = max_streams; |
3499 | *num_streams = max_streams; |
3500 | } |
3501 | } |
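| |
| /* |
| * Editor's note: a worked example of the sizing above, with made-up numbers. |
| * A request for 13 stream IDs arrives here as 14 (stream 0 included), so the |
| * context array is rounded up to 16 entries; if HCC_MAX_PSA() reports fewer, |
| * both values are clamped to the hardware limit. |
| */ |
| #if 0 /* illustrative only, editor's sketch */ |
| unsigned int num_streams = 14, num_stream_ctxs; |
| |
| xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); |
| /* num_stream_ctxs is now 16 unless the host supports fewer. */ |
| #endif |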
3502 | |
3503 | /* Returns an error code if one of the endpoints already has streams. |
3504 | * This does not change any data structures; it only checks and gathers |
3505 | * information. |
3506 | */ |
3507 | static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, |
3508 | struct usb_device *udev, |
3509 | struct usb_host_endpoint **eps, unsigned int num_eps, |
3510 | unsigned int *num_streams, u32 *changed_ep_bitmask) |
3511 | { |
3512 | unsigned int max_streams; |
3513 | unsigned int endpoint_flag; |
3514 | int i; |
3515 | int ret; |
3516 | |
3517 | for (i = 0; i < num_eps; i++) { |
3518 | ret = xhci_check_streams_endpoint(xhci, udev, |
3519 | eps[i], udev->slot_id); |
3520 | if (ret < 0) |
3521 | return ret; |
3522 | |
3523 | max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); |
3524 | if (max_streams < (*num_streams - 1)) { |
3525 | xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", |
3526 | eps[i]->desc.bEndpointAddress, |
3527 | max_streams); |
3528 | *num_streams = max_streams+1; |
3529 | } |
3530 | |
3531 | endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); |
3532 | if (*changed_ep_bitmask & endpoint_flag) |
3533 | return -EINVAL; |
3534 | *changed_ep_bitmask |= endpoint_flag; |
3535 | } |
3536 | return 0; |
3537 | } |
3538 | |
3539 | static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, |
3540 | struct usb_device *udev, |
3541 | struct usb_host_endpoint **eps, unsigned int num_eps) |
3542 | { |
3543 | u32 changed_ep_bitmask = 0; |
3544 | unsigned int slot_id; |
3545 | unsigned int ep_index; |
3546 | unsigned int ep_state; |
3547 | int i; |
3548 | |
3549 | slot_id = udev->slot_id; |
3550 | if (!xhci->devs[slot_id]) |
3551 | return 0; |
3552 | |
3553 | for (i = 0; i < num_eps; i++) { |
3554 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3555 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; |
3556 | /* Are streams already being freed for the endpoint? */ |
3557 | if (ep_state & EP_GETTING_NO_STREAMS) { |
3558 | xhci_warn(xhci, "WARN Can't disable streams for " |
3559 | "endpoint 0x%x, " |
3560 | "streams are being disabled already\n", |
3561 | eps[i]->desc.bEndpointAddress); |
3562 | return 0; |
3563 | } |
3564 | /* Are there actually any streams to free? */ |
3565 | if (!(ep_state & EP_HAS_STREAMS) && |
3566 | !(ep_state & EP_GETTING_STREAMS)) { |
3567 | xhci_warn(xhci, "WARN Can't disable streams for " |
3568 | "endpoint 0x%x, " |
3569 | "streams are already disabled!\n", |
3570 | eps[i]->desc.bEndpointAddress); |
3571 | xhci_warn(xhci, "WARN xhci_free_streams() called " |
3572 | "with non-streams endpoint\n"); |
3573 | return 0; |
3574 | } |
3575 | changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); |
3576 | } |
3577 | return changed_ep_bitmask; |
3578 | } |
3579 | |
3580 | /* |
3581 | * The USB device drivers use this function (through the HCD interface in USB |
3582 | * core) to prepare a set of bulk endpoints to use streams. Streams are used to |
3583 | * coordinate mass storage command queueing across multiple endpoints (basically |
3584 | * a stream ID == a task ID). |
3585 | * |
3586 | * Setting up streams involves allocating the same size stream context array |
3587 | * for each endpoint and issuing a configure endpoint command for all endpoints. |
3588 | * |
3589 | * Don't allow the call to succeed if one endpoint only supports one stream |
3590 | * (which means it doesn't support streams at all). |
3591 | * |
3592 | * Drivers may get less stream IDs than they asked for, if the host controller |
3593 | * hardware or endpoints claim they can't support the number of requested |
3594 | * stream IDs. |
3595 | */ |
3596 | static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, |
3597 | struct usb_host_endpoint **eps, unsigned int num_eps, |
3598 | unsigned int num_streams, gfp_t mem_flags) |
3599 | { |
3600 | int i, ret; |
3601 | struct xhci_hcd *xhci; |
3602 | struct xhci_virt_device *vdev; |
3603 | struct xhci_command *config_cmd; |
3604 | struct xhci_input_control_ctx *ctrl_ctx; |
3605 | unsigned int ep_index; |
3606 | unsigned int num_stream_ctxs; |
3607 | unsigned int max_packet; |
3608 | unsigned long flags; |
3609 | u32 changed_ep_bitmask = 0; |
3610 | |
3611 | if (!eps) |
3612 | return -EINVAL; |
3613 | |
3614 | /* Add one to the number of streams requested to account for |
3615 | * stream 0 that is reserved for xHCI usage. |
3616 | */ |
3617 | num_streams += 1; |
3618 | xhci = hcd_to_xhci(hcd); |
3619 | xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", |
3620 | num_streams); |
3621 | |
3622 | /* MaxPSASize value 0 (2 streams) means streams are not supported */ |
3623 | if ((xhci->quirks & XHCI_BROKEN_STREAMS) || |
3624 | HCC_MAX_PSA(xhci->hcc_params) < 4) { |
3625 | xhci_dbg(xhci, "xHCI controller does not support streams.\n"); |
3626 | return -ENOSYS; |
3627 | } |
3628 | |
3629 | config_cmd = xhci_alloc_command_with_ctx(xhci, allocate_completion: true, mem_flags); |
3630 | if (!config_cmd) |
3631 | return -ENOMEM; |
3632 | |
3633 | ctrl_ctx = xhci_get_input_control_ctx(ctx: config_cmd->in_ctx); |
3634 | if (!ctrl_ctx) { |
3635 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
3636 | __func__); |
3637 | xhci_free_command(xhci, command: config_cmd); |
3638 | return -ENOMEM; |
3639 | } |
3640 | |
3641 | /* Check to make sure all endpoints are not already configured for |
3642 | * streams. While we're at it, find the maximum number of streams that |
3643 | * all the endpoints will support and check for duplicate endpoints. |
3644 | */ |
3645 | spin_lock_irqsave(&xhci->lock, flags); |
3646 | ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, |
3647 | num_eps, num_streams: &num_streams, changed_ep_bitmask: &changed_ep_bitmask); |
3648 | if (ret < 0) { |
3649 | xhci_free_command(xhci, command: config_cmd); |
3650 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3651 | return ret; |
3652 | } |
3653 | if (num_streams <= 1) { |
3654 | xhci_warn(xhci, "WARN: endpoints can't handle " |
3655 | "more than one stream.\n"); |
3656 | xhci_free_command(xhci, command: config_cmd); |
3657 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3658 | return -EINVAL; |
3659 | } |
3660 | vdev = xhci->devs[udev->slot_id]; |
3661 | /* Mark each endpoint as being in transition, so |
3662 | * xhci_urb_enqueue() will reject all URBs. |
3663 | */ |
3664 | for (i = 0; i < num_eps; i++) { |
3665 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3666 | vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; |
3667 | } |
3668 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3669 | |
3670 | /* Setup internal data structures and allocate HW data structures for |
3671 | * streams (but don't install the HW structures in the input context |
3672 | * until we're sure all memory allocation succeeded). |
3673 | */ |
3674 | xhci_calculate_streams_entries(xhci, num_streams: &num_streams, num_stream_ctxs: &num_stream_ctxs); |
3675 | xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", |
3676 | num_stream_ctxs, num_streams); |
3677 | |
3678 | for (i = 0; i < num_eps; i++) { |
3679 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3680 | max_packet = usb_endpoint_maxp(epd: &eps[i]->desc); |
3681 | vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, |
3682 | num_stream_ctxs, |
3683 | num_streams, |
3684 | max_packet, flags: mem_flags); |
3685 | if (!vdev->eps[ep_index].stream_info) |
3686 | goto cleanup; |
3687 | /* Set maxPstreams in endpoint context and update deq ptr to |
3688 | * point to stream context array. FIXME |
3689 | */ |
3690 | } |
3691 | |
3692 | /* Set up the input context for a configure endpoint command. */ |
3693 | for (i = 0; i < num_eps; i++) { |
3694 | struct xhci_ep_ctx *ep_ctx; |
3695 | |
3696 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3697 | ep_ctx = xhci_get_ep_ctx(xhci, ctx: config_cmd->in_ctx, ep_index); |
3698 | |
3699 | xhci_endpoint_copy(xhci, in_ctx: config_cmd->in_ctx, |
3700 | out_ctx: vdev->out_ctx, ep_index); |
3701 | xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, |
3702 | stream_info: vdev->eps[ep_index].stream_info); |
3703 | } |
3704 | /* Tell the HW to drop its old copy of the endpoint context info |
3705 | * and add the updated copy from the input context. |
3706 | */ |
3707 | xhci_setup_input_ctx_for_config_ep(xhci, in_ctx: config_cmd->in_ctx, |
3708 | out_ctx: vdev->out_ctx, ctrl_ctx, |
3709 | add_flags: changed_ep_bitmask, drop_flags: changed_ep_bitmask); |
3710 | |
3711 | /* Issue and wait for the configure endpoint command */ |
3712 | ret = xhci_configure_endpoint(xhci, udev, command: config_cmd, |
3713 | ctx_change: false, must_succeed: false); |
3714 | |
3715 | /* xHC rejected the configure endpoint command for some reason, so we |
3716 | * leave the old ring intact and free our internal streams data |
3717 | * structure. |
3718 | */ |
3719 | if (ret < 0) |
3720 | goto cleanup; |
3721 | |
3722 | spin_lock_irqsave(&xhci->lock, flags); |
3723 | for (i = 0; i < num_eps; i++) { |
3724 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3725 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; |
3726 | xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", |
3727 | udev->slot_id, ep_index); |
3728 | vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; |
3729 | } |
3730 | xhci_free_command(xhci, command: config_cmd); |
3731 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3732 | |
3733 | for (i = 0; i < num_eps; i++) { |
3734 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3735 | xhci_debugfs_create_stream_files(xhci, virt_dev: vdev, ep_index); |
3736 | } |
3737 | /* Subtract 1 for stream 0, which drivers can't use */ |
3738 | return num_streams - 1; |
3739 | |
3740 | cleanup: |
3741 | /* If it didn't work, free the streams! */ |
3742 | for (i = 0; i < num_eps; i++) { |
3743 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3744 | xhci_free_stream_info(xhci, stream_info: vdev->eps[ep_index].stream_info); |
3745 | vdev->eps[ep_index].stream_info = NULL; |
3746 | /* FIXME Unset maxPstreams in endpoint context and |
3747 | * update deq ptr to point to the normal endpoint ring. |
3748 | */ |
3749 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; |
3750 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; |
3751 | xhci_endpoint_zero(xhci, virt_dev: vdev, ep: eps[i]); |
3752 | } |
3753 | xhci_free_command(xhci, command: config_cmd); |
3754 | return -ENOMEM; |
3755 | } |
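| |
| /* |
| * Editor's note: class drivers reach xhci_alloc_streams() through the USB |
| * core rather than calling it directly. A hypothetical driver-side sketch |
| * (the function name and the request for 16 streams are illustrative): |
| */ |
| #if 0 /* illustrative only, editor's sketch */ |
| static int example_enable_streams(struct usb_interface *intf, |
| struct usb_host_endpoint **eps, unsigned int num_eps) |
| { |
| /* Returns the number of usable stream IDs (stream 0 excluded) or an errno. */ |
| int streams = usb_alloc_streams(intf, eps, num_eps, 16, GFP_KERNEL); |
| |
| if (streams < 0) |
| return streams; |
| dev_info(&intf->dev, "example: %d stream IDs available\n", streams); |
| return 0; |
| } |
| #endif |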
3756 | |
3757 | /* Transition the endpoint from using streams to being a "normal" endpoint |
3758 | * without streams. |
3759 | * |
3760 | * Modify the endpoint context state, submit a configure endpoint command, |
3761 | * and free all endpoint rings for streams if that completes successfully. |
3762 | */ |
3763 | static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, |
3764 | struct usb_host_endpoint **eps, unsigned int num_eps, |
3765 | gfp_t mem_flags) |
3766 | { |
3767 | int i, ret; |
3768 | struct xhci_hcd *xhci; |
3769 | struct xhci_virt_device *vdev; |
3770 | struct xhci_command *command; |
3771 | struct xhci_input_control_ctx *ctrl_ctx; |
3772 | unsigned int ep_index; |
3773 | unsigned long flags; |
3774 | u32 changed_ep_bitmask; |
3775 | |
3776 | xhci = hcd_to_xhci(hcd); |
3777 | vdev = xhci->devs[udev->slot_id]; |
3778 | |
3779 | /* Set up a configure endpoint command to remove the streams rings */ |
3780 | spin_lock_irqsave(&xhci->lock, flags); |
3781 | changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, |
3782 | udev, eps, num_eps); |
3783 | if (changed_ep_bitmask == 0) { |
3784 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3785 | return -EINVAL; |
3786 | } |
3787 | |
3788 | /* Use the xhci_command structure from the first endpoint. We may have |
3789 | * allocated too many, but the driver may call xhci_free_streams() for |
3790 | * each endpoint it grouped into one call to xhci_alloc_streams(). |
3791 | */ |
3792 | ep_index = xhci_get_endpoint_index(&eps[0]->desc); |
3793 | command = vdev->eps[ep_index].stream_info->free_streams_command; |
3794 | ctrl_ctx = xhci_get_input_control_ctx(ctx: command->in_ctx); |
3795 | if (!ctrl_ctx) { |
3796 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3797 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
3798 | __func__); |
3799 | return -EINVAL; |
3800 | } |
3801 | |
3802 | for (i = 0; i < num_eps; i++) { |
3803 | struct xhci_ep_ctx *ep_ctx; |
3804 | |
3805 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3806 | ep_ctx = xhci_get_ep_ctx(xhci, ctx: command->in_ctx, ep_index); |
3807 | xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= |
3808 | EP_GETTING_NO_STREAMS; |
3809 | |
3810 | xhci_endpoint_copy(xhci, in_ctx: command->in_ctx, |
3811 | out_ctx: vdev->out_ctx, ep_index); |
3812 | xhci_setup_no_streams_ep_input_ctx(ep_ctx, |
3813 | ep: &vdev->eps[ep_index]); |
3814 | } |
3815 | xhci_setup_input_ctx_for_config_ep(xhci, in_ctx: command->in_ctx, |
3816 | out_ctx: vdev->out_ctx, ctrl_ctx, |
3817 | add_flags: changed_ep_bitmask, drop_flags: changed_ep_bitmask); |
3818 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3819 | |
3820 | /* Issue and wait for the configure endpoint command, |
3821 | * which must succeed. |
3822 | */ |
3823 | ret = xhci_configure_endpoint(xhci, udev, command, |
3824 | ctx_change: false, must_succeed: true); |
3825 | |
3826 | /* xHC rejected the configure endpoint command for some reason, so we |
3827 | * leave the streams rings intact. |
3828 | */ |
3829 | if (ret < 0) |
3830 | return ret; |
3831 | |
3832 | spin_lock_irqsave(&xhci->lock, flags); |
3833 | for (i = 0; i < num_eps; i++) { |
3834 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3835 | xhci_free_stream_info(xhci, stream_info: vdev->eps[ep_index].stream_info); |
3836 | vdev->eps[ep_index].stream_info = NULL; |
3837 | /* FIXME Unset maxPstreams in endpoint context and |
3838 | * update deq ptr to point to the normal endpoint ring. |
3839 | */ |
3840 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; |
3841 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; |
3842 | } |
3843 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3844 | |
3845 | return 0; |
3846 | } |
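| |
| /* |
| * Editor's note: the matching driver-side teardown, again via the USB core |
| * rather than by calling xhci_free_streams() directly. Hypothetical sketch. |
| */ |
| #if 0 /* illustrative only, editor's sketch */ |
| static void example_disable_streams(struct usb_interface *intf, |
| struct usb_host_endpoint **eps, unsigned int num_eps) |
| { |
| usb_free_streams(intf, eps, num_eps, GFP_KERNEL); |
| } |
| #endif |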
3847 | |
3848 | /* |
3849 | * Deletes endpoint resources for endpoints that were active before a Reset |
3850 | * Device command, or a Disable Slot command. The Reset Device command leaves |
3851 | * the control endpoint intact, whereas the Disable Slot command deletes it. |
3852 | * |
3853 | * Must be called with xhci->lock held. |
3854 | */ |
3855 | void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, |
3856 | struct xhci_virt_device *virt_dev, bool drop_control_ep) |
3857 | { |
3858 | int i; |
3859 | unsigned int num_dropped_eps = 0; |
3860 | unsigned int drop_flags = 0; |
3861 | |
3862 | for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { |
3863 | if (virt_dev->eps[i].ring) { |
3864 | drop_flags |= 1 << i; |
3865 | num_dropped_eps++; |
3866 | } |
3867 | } |
3868 | xhci->num_active_eps -= num_dropped_eps; |
3869 | if (num_dropped_eps) |
3870 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
3871 | "Dropped %u ep ctxs, flags = 0x%x, " |
3872 | "%u now active.", |
3873 | num_dropped_eps, drop_flags, |
3874 | xhci->num_active_eps); |
3875 | } |
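| |
| /* |
| * Editor's note: because the helper above requires xhci->lock, callers wrap |
| * it as in the sketch below (mirroring its use in the reset-device path |
| * later in this file); the surrounding variables are assumed. |
| */ |
| #if 0 /* illustrative only, editor's sketch */ |
| spin_lock_irqsave(&xhci->lock, flags); |
| /* Keep the default control endpoint's resources, drop the rest. */ |
| xhci_free_device_endpoint_resources(xhci, virt_dev, false); |
| spin_unlock_irqrestore(&xhci->lock, flags); |
| #endif |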
3876 | |
3877 | static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev); |
3878 | |
3879 | /* |
3880 | * This submits a Reset Device Command, which will set the device state to 0, |
3881 | * set the device address to 0, and disable all the endpoints except the default |
3882 | * control endpoint. The USB core should come back and call |
3883 | * xhci_address_device(), and then re-set up the configuration. If this is |
3884 | * called because of a usb_reset_and_verify_device(), then the old alternate |
3885 | * settings will be re-installed through the normal bandwidth allocation |
3886 | * functions. |
3887 | * |
3888 | * Wait for the Reset Device command to finish. Remove all structures |
3889 | * associated with the endpoints that were disabled. Clear the input device |
3890 | * structure? Reset the control endpoint 0 max packet size? |
3891 | * |
3892 | * If the virt_dev to be reset does not exist or does not match the udev, |
3893 | * it means the device is lost, possibly due to the xHC restore error and |
3894 | * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to |
3895 | * re-allocate the device. |
3896 | */ |
3897 | static int xhci_discover_or_reset_device(struct usb_hcd *hcd, |
3898 | struct usb_device *udev) |
3899 | { |
3900 | int ret, i; |
3901 | unsigned long flags; |
3902 | struct xhci_hcd *xhci; |
3903 | unsigned int slot_id; |
3904 | struct xhci_virt_device *virt_dev; |
3905 | struct xhci_command *reset_device_cmd; |
3906 | struct xhci_slot_ctx *slot_ctx; |
3907 | int old_active_eps = 0; |
3908 | |
3909 | ret = xhci_check_args(hcd, udev, NULL, check_ep: 0, check_virt_dev: false, func: __func__); |
3910 | if (ret <= 0) |
3911 | return ret; |
3912 | xhci = hcd_to_xhci(hcd); |
3913 | slot_id = udev->slot_id; |
3914 | virt_dev = xhci->devs[slot_id]; |
3915 | if (!virt_dev) { |
3916 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " |
3917 | "not exist. Re-allocate the device\n", slot_id); |
3918 | ret = xhci_alloc_dev(hcd, udev); |
3919 | if (ret == 1) |
3920 | return 0; |
3921 | else |
3922 | return -EINVAL; |
3923 | } |
3924 | |
3925 | if (virt_dev->tt_info) |
3926 | old_active_eps = virt_dev->tt_info->active_eps; |
3927 | |
3928 | if (virt_dev->udev != udev) { |
3929 | /* If the virt_dev and the udev do not match, this virt_dev |
3930 | * may belong to another udev. |
3931 | * Re-allocate the device. |
3932 | */ |
3933 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " |
3934 | "not match the udev. Re-allocate the device\n", |
3935 | slot_id); |
3936 | ret = xhci_alloc_dev(hcd, udev); |
3937 | if (ret == 1) |
3938 | return 0; |
3939 | else |
3940 | return -EINVAL; |
3941 | } |
3942 | |
3943 | /* If device is not setup, there is no point in resetting it */ |
3944 | slot_ctx = xhci_get_slot_ctx(xhci, ctx: virt_dev->out_ctx); |
3945 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == |
3946 | SLOT_STATE_DISABLED) |
3947 | return 0; |
3948 | |
3949 | if (xhci->quirks & XHCI_ETRON_HOST) { |
3950 | /* |
3951 | * Obtain a new device slot to inform the xHCI host that |
3952 | * the USB device has been reset. |
3953 | */ |
3954 | ret = xhci_disable_slot(xhci, slot_id: udev->slot_id); |
3955 | xhci_free_virt_device(xhci, slot_id: udev->slot_id); |
3956 | if (!ret) { |
3957 | ret = xhci_alloc_dev(hcd, udev); |
3958 | if (ret == 1) |
3959 | ret = 0; |
3960 | else |
3961 | ret = -EINVAL; |
3962 | } |
3963 | return ret; |
3964 | } |
3965 | |
3966 | trace_xhci_discover_or_reset_device(ctx: slot_ctx); |
3967 | |
3968 | xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); |
3969 | /* Allocate the command structure that holds the struct completion. |
3970 | * Assume we're in process context, since the normal device reset |
3971 | * process has to wait for the device anyway. Storage devices are |
3972 | * reset as part of error handling, so use GFP_NOIO instead of |
3973 | * GFP_KERNEL. |
3974 | */ |
3975 | reset_device_cmd = xhci_alloc_command(xhci, allocate_completion: true, GFP_NOIO); |
3976 | if (!reset_device_cmd) { |
3977 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); |
3978 | return -ENOMEM; |
3979 | } |
3980 | |
3981 | /* Attempt to submit the Reset Device command to the command ring */ |
3982 | spin_lock_irqsave(&xhci->lock, flags); |
3983 | |
3984 | ret = xhci_queue_reset_device(xhci, cmd: reset_device_cmd, slot_id); |
3985 | if (ret) { |
3986 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
3987 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3988 | goto command_cleanup; |
3989 | } |
3990 | xhci_ring_cmd_db(xhci); |
3991 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3992 | |
3993 | /* Wait for the Reset Device command to finish */ |
3994 | wait_for_completion(reset_device_cmd->completion); |
3995 | |
3996 | /* The Reset Device command can't fail, according to the 0.95/0.96 spec, |
3997 | * unless we tried to reset a slot ID that wasn't enabled, |
3998 | * or the device wasn't in the addressed or configured state. |
3999 | */ |
4000 | ret = reset_device_cmd->status; |
4001 | switch (ret) { |
4002 | case COMP_COMMAND_ABORTED: |
4003 | case COMP_COMMAND_RING_STOPPED: |
4004 | xhci_warn(xhci, "Timeout waiting for reset device command\n"); |
4005 | ret = -ETIME; |
4006 | goto command_cleanup; |
4007 | case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */ |
4008 | case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */ |
4009 | xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", |
4010 | slot_id, |
4011 | xhci_get_slot_state(xhci, virt_dev->out_ctx)); |
4012 | xhci_dbg(xhci, "Not freeing device rings.\n"); |
4013 | /* Don't treat this as an error. May change my mind later. */ |
4014 | ret = 0; |
4015 | goto command_cleanup; |
4016 | case COMP_SUCCESS: |
4017 | xhci_dbg(xhci, "Successful reset device command.\n"); |
4018 | break; |
4019 | default: |
4020 | if (xhci_is_vendor_info_code(xhci, trb_comp_code: ret)) |
4021 | break; |
4022 | xhci_warn(xhci, "Unknown completion code %u for " |
4023 | "reset device command.\n", ret); |
4024 | ret = -EINVAL; |
4025 | goto command_cleanup; |
4026 | } |
4027 | |
4028 | /* Free up host controller endpoint resources */ |
4029 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
4030 | spin_lock_irqsave(&xhci->lock, flags); |
4031 | /* Don't delete the default control endpoint resources */ |
4032 | xhci_free_device_endpoint_resources(xhci, virt_dev, drop_control_ep: false); |
4033 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
4034 | } |
4035 | |
4036 | /* Everything but endpoint 0 is disabled, so free the rings. */ |
4037 | for (i = 1; i < 31; i++) { |
4038 | struct xhci_virt_ep *ep = &virt_dev->eps[i]; |
4039 | |
4040 | if (ep->ep_state & EP_HAS_STREAMS) { |
4041 | xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", |
4042 | xhci_get_endpoint_address(i)); |
4043 | xhci_free_stream_info(xhci, stream_info: ep->stream_info); |
4044 | ep->stream_info = NULL; |
4045 | ep->ep_state &= ~EP_HAS_STREAMS; |
4046 | } |
4047 | |
4048 | if (ep->ring) { |
4049 | if (ep->sideband) |
4050 | xhci_sideband_notify_ep_ring_free(sb: ep->sideband, ep_index: i); |
4051 | xhci_debugfs_remove_endpoint(xhci, virt_dev, ep_index: i); |
4052 | xhci_free_endpoint_ring(xhci, virt_dev, ep_index: i); |
4053 | } |
4054 | if (!list_empty(head: &virt_dev->eps[i].bw_endpoint_list)) |
4055 | xhci_drop_ep_from_interval_table(xhci, |
4056 | ep_bw: &virt_dev->eps[i].bw_info, |
4057 | bw_table: virt_dev->bw_table, |
4058 | udev, |
4059 | virt_ep: &virt_dev->eps[i], |
4060 | tt_info: virt_dev->tt_info); |
4061 | xhci_clear_endpoint_bw_info(bw_info: &virt_dev->eps[i].bw_info); |
4062 | } |
4063 | /* If necessary, update the number of active TTs on this root port */ |
4064 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); |
4065 | virt_dev->flags = 0; |
4066 | ret = 0; |
4067 | |
4068 | command_cleanup: |
4069 | xhci_free_command(xhci, command: reset_device_cmd); |
4070 | return ret; |
4071 | } |
4072 | |
4073 | /* |
4074 | * At this point, the struct usb_device is about to go away, the device has |
4075 | * disconnected, and all traffic has been stopped and the endpoints have been |
4076 | * disabled. Free any HC data structures associated with that device. |
4077 | */ |
4078 | static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) |
4079 | { |
4080 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4081 | struct xhci_virt_device *virt_dev; |
4082 | struct xhci_slot_ctx *slot_ctx; |
4083 | unsigned long flags; |
4084 | int i, ret; |
4085 | |
4086 | /* |
4087 | * We called pm_runtime_get_noresume when the device was attached. |
4088 | * Decrement the counter here to allow the controller to runtime suspend |
4089 | * if no devices remain. |
4090 | */ |
4091 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
4092 | pm_runtime_put_noidle(hcd->self.controller); |
4093 | |
4094 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
4095 | /* If the host is halted due to driver unload, we still need to free the |
4096 | * device. |
4097 | */ |
4098 | if (ret <= 0 && ret != -ENODEV) |
4099 | return; |
4100 | |
4101 | virt_dev = xhci->devs[udev->slot_id]; |
4102 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
4103 | trace_xhci_free_dev(slot_ctx); |
4104 | |
4105 | /* Stop any wayward timer functions (which may grab the lock) */ |
4106 | for (i = 0; i < 31; i++) |
4107 | virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; |
4108 | virt_dev->udev = NULL; |
4109 | xhci_disable_slot(xhci, udev->slot_id); |
4110 | |
4111 | spin_lock_irqsave(&xhci->lock, flags); |
4112 | xhci_free_virt_device(xhci, udev->slot_id); |
4113 | spin_unlock_irqrestore(&xhci->lock, flags); |
4114 | |
4115 | } |
4116 | |
4117 | int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) |
4118 | { |
4119 | struct xhci_command *command; |
4120 | unsigned long flags; |
4121 | u32 state; |
4122 | int ret; |
4123 | |
4124 | command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
4125 | if (!command) |
4126 | return -ENOMEM; |
4127 | |
4128 | xhci_debugfs_remove_slot(xhci, slot_id); |
4129 | |
4130 | spin_lock_irqsave(&xhci->lock, flags); |
4131 | /* Don't disable the slot if the host controller is dead. */ |
4132 | state = readl(&xhci->op_regs->status); |
4133 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || |
4134 | (xhci->xhc_state & XHCI_STATE_HALTED)) { |
4135 | spin_unlock_irqrestore(&xhci->lock, flags); |
4136 | kfree(command); |
4137 | return -ENODEV; |
4138 | } |
4139 | |
4140 | ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, |
4141 | slot_id); |
4142 | if (ret) { |
4143 | spin_unlock_irqrestore(&xhci->lock, flags); |
4144 | kfree(command); |
4145 | return ret; |
4146 | } |
4147 | xhci_ring_cmd_db(xhci); |
4148 | spin_unlock_irqrestore(&xhci->lock, flags); |
4149 | |
4150 | wait_for_completion(command->completion); |
4151 | |
4152 | if (command->status != COMP_SUCCESS) |
4153 | xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n", |
4154 | slot_id, command->status); |
4155 | |
4156 | xhci_free_command(xhci, command); |
4157 | |
4158 | return 0; |
4159 | } |
4160 | |
4161 | /* |
4162 | * Checks if we have enough host controller resources for the default control |
4163 | * endpoint. |
4164 | * |
4165 | * Must be called with xhci->lock held. |
4166 | */ |
4167 | static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) |
4168 | { |
4169 | if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { |
4170 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
4171 | "Not enough ep ctxs: " |
4172 | "%u active, need to add 1, limit is %u.", |
4173 | xhci->num_active_eps, xhci->limit_active_eps); |
4174 | return -ENOMEM; |
4175 | } |
4176 | xhci->num_active_eps += 1; |
4177 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
4178 | "Adding 1 ep ctx, %u now active.", |
4179 | xhci->num_active_eps); |
4180 | return 0; |
4181 | } |
4182 | |
4183 | |
4184 | /* |
4185 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command |
4186 | * timed out, or allocating memory failed. Returns 1 on success. |
4187 | */ |
4188 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) |
4189 | { |
4190 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4191 | struct xhci_virt_device *vdev; |
4192 | struct xhci_slot_ctx *slot_ctx; |
4193 | unsigned long flags; |
4194 | int ret, slot_id; |
4195 | struct xhci_command *command; |
4196 | |
4197 | command = xhci_alloc_command(xhci, allocate_completion: true, GFP_KERNEL); |
4198 | if (!command) |
4199 | return 0; |
4200 | |
4201 | spin_lock_irqsave(&xhci->lock, flags); |
4202 | ret = xhci_queue_slot_control(xhci, cmd: command, TRB_ENABLE_SLOT, slot_id: 0); |
4203 | if (ret) { |
4204 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
4205 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
4206 | xhci_free_command(xhci, command); |
4207 | return 0; |
4208 | } |
4209 | xhci_ring_cmd_db(xhci); |
4210 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
4211 | |
4212 | wait_for_completion(command->completion); |
4213 | slot_id = command->slot_id; |
4214 | |
4215 | if (!slot_id || command->status != COMP_SUCCESS) { |
4216 | xhci_err(xhci, "Error while assigning device slot ID: %s\n", |
4217 | xhci_trb_comp_code_string(command->status)); |
4218 | xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", |
4219 | HCS_MAX_SLOTS( |
4220 | readl(&xhci->cap_regs->hcs_params1))); |
4221 | xhci_free_command(xhci, command); |
4222 | return 0; |
4223 | } |
4224 | |
4225 | xhci_free_command(xhci, command); |
4226 | |
4227 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
4228 | spin_lock_irqsave(&xhci->lock, flags); |
4229 | ret = xhci_reserve_host_control_ep_resources(xhci); |
4230 | if (ret) { |
4231 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
4232 | xhci_warn(xhci, "Not enough host resources, " |
4233 | "active endpoint contexts = %u\n", |
4234 | xhci->num_active_eps); |
4235 | goto disable_slot; |
4236 | } |
4237 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
4238 | } |
4239 | /* Use GFP_NOIO, since this function can be called from |
4240 | * xhci_discover_or_reset_device(), which may be called as part of |
4241 | * mass storage driver error handling. |
4242 | */ |
4243 | if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { |
4244 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); |
4245 | goto disable_slot; |
4246 | } |
4247 | vdev = xhci->devs[slot_id]; |
4248 | slot_ctx = xhci_get_slot_ctx(xhci, ctx: vdev->out_ctx); |
4249 | trace_xhci_alloc_dev(ctx: slot_ctx); |
4250 | |
4251 | udev->slot_id = slot_id; |
4252 | |
4253 | xhci_debugfs_create_slot(xhci, slot_id); |
4254 | |
4255 | /* |
4256 | * If resetting upon resume, we can't put the controller into runtime |
4257 | * suspend if there is a device attached. |
4258 | */ |
4259 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
4260 | pm_runtime_get_noresume(dev: hcd->self.controller); |
4261 | |
4262 | /* Is this a LS or FS device under a HS hub? */ |
4263 | /* Hub or peripheral? */ |
4264 | return 1; |
4265 | |
4266 | disable_slot: |
4267 | xhci_disable_slot(xhci, slot_id: udev->slot_id); |
4268 | xhci_free_virt_device(xhci, slot_id: udev->slot_id); |
4269 | |
4270 | return 0; |
4271 | } |
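| |
| /* |
| * Editor's note: note the unusual return convention (1 on success, 0 on any |
| * failure), which the USB core treats as a boolean. Callers inside this file |
| * follow the same pattern, e.g.: |
| */ |
| #if 0 /* illustrative only, editor's sketch */ |
| if (xhci_alloc_dev(hcd, udev) == 1) |
| ret = 0; /* slot successfully (re-)allocated */ |
| else |
| ret = -EINVAL; |
| #endif |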
4272 | |
4273 | /** |
4274 | * xhci_setup_device - issues an Address Device command to assign a unique |
4275 | * USB bus address. |
4276 | * @hcd: USB host controller data structure. |
4277 | * @udev: USB dev structure representing the connected device. |
4278 | * @setup: Enum specifying setup mode: address only or with context. |
4279 | * @timeout_ms: Max wait time (ms) for the command operation to complete. |
4280 | * |
4281 | * Return: 0 if successful; otherwise, negative error code. |
4282 | */ |
4283 | static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, |
4284 | enum xhci_setup_dev setup, unsigned int timeout_ms) |
4285 | { |
4286 | const char *act = setup == SETUP_CONTEXT_ONLY ? "context": "address"; |
4287 | unsigned long flags; |
4288 | struct xhci_virt_device *virt_dev; |
4289 | int ret = 0; |
4290 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4291 | struct xhci_slot_ctx *slot_ctx; |
4292 | struct xhci_input_control_ctx *ctrl_ctx; |
4293 | u64 temp_64; |
4294 | struct xhci_command *command = NULL; |
4295 | |
4296 | mutex_lock(&xhci->mutex); |
4297 | |
4298 | if (xhci->xhc_state) { /* dying, removing or halted */ |
4299 | ret = -ESHUTDOWN; |
4300 | goto out; |
4301 | } |
4302 | |
4303 | if (!udev->slot_id) { |
4304 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_address, |
4305 | fmt: "Bad Slot ID %d", udev->slot_id); |
4306 | ret = -EINVAL; |
4307 | goto out; |
4308 | } |
4309 | |
4310 | virt_dev = xhci->devs[udev->slot_id]; |
4311 | |
4312 | if (WARN_ON(!virt_dev)) { |
4313 | /* |
4314 | * In plug/unplug torture test with an NEC controller, |
4315 | * a zero-dereference was observed once due to virt_dev = 0. |
4316 | * Print useful debug rather than crash if it is observed again! |
4317 | */ |
4318 | xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", |
4319 | udev->slot_id); |
4320 | ret = -EINVAL; |
4321 | goto out; |
4322 | } |
4323 | slot_ctx = xhci_get_slot_ctx(xhci, ctx: virt_dev->out_ctx); |
4324 | trace_xhci_setup_device_slot(ctx: slot_ctx); |
4325 | |
4326 | if (setup == SETUP_CONTEXT_ONLY) { |
4327 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == |
4328 | SLOT_STATE_DEFAULT) { |
4329 | xhci_dbg(xhci, "Slot already in default state\n"); |
4330 | goto out; |
4331 | } |
4332 | } |
4333 | |
4334 | command = xhci_alloc_command(xhci, allocate_completion: true, GFP_KERNEL); |
4335 | if (!command) { |
4336 | ret = -ENOMEM; |
4337 | goto out; |
4338 | } |
4339 | |
4340 | command->in_ctx = virt_dev->in_ctx; |
4341 | command->timeout_ms = timeout_ms; |
4342 | |
4343 | slot_ctx = xhci_get_slot_ctx(xhci, ctx: virt_dev->in_ctx); |
4344 | ctrl_ctx = xhci_get_input_control_ctx(ctx: virt_dev->in_ctx); |
4345 | if (!ctrl_ctx) { |
4346 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
4347 | __func__); |
4348 | ret = -EINVAL; |
4349 | goto out; |
4350 | } |
4351 | /* |
4352 | * If this is the first Set Address since device plug-in or |
4353 | * virt_device reallocation after a resume with an xHCI power loss, |
4354 | * then set up the slot context. |
4355 | */ |
4356 | if (!slot_ctx->dev_info) |
4357 | xhci_setup_addressable_virt_dev(xhci, udev); |
4358 | /* Otherwise, update the control endpoint ring enqueue pointer. */ |
4359 | else |
4360 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); |
4361 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); |
4362 | ctrl_ctx->drop_flags = 0; |
4363 | |
4364 | trace_xhci_address_ctx(xhci, ctx: virt_dev->in_ctx, |
4365 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
4366 | |
4367 | trace_xhci_address_ctrl_ctx(ctrl_ctx); |
4368 | spin_lock_irqsave(&xhci->lock, flags); |
4369 | trace_xhci_setup_device(vdev: virt_dev); |
4370 | ret = xhci_queue_address_device(xhci, cmd: command, in_ctx_ptr: virt_dev->in_ctx->dma, |
4371 | slot_id: udev->slot_id, setup); |
4372 | if (ret) { |
4373 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
4374 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_address, |
4375 | fmt: "FIXME: allocate a command ring segment"); |
4376 | goto out; |
4377 | } |
4378 | xhci_ring_cmd_db(xhci); |
4379 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
4380 | |
4381 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ |
4382 | wait_for_completion(command->completion); |
4383 | |
4384 | /* FIXME: From section 4.3.4: "Software shall be responsible for timing |
4385 | * the SetAddress() "recovery interval" required by USB and aborting the |
4386 | * command on a timeout. |
4387 | */ |
4388 | switch (command->status) { |
4389 | case COMP_COMMAND_ABORTED: |
4390 | case COMP_COMMAND_RING_STOPPED: |
4391 | xhci_warn(xhci, "Timeout while waiting for setup device command\n"); |
4392 | ret = -ETIME; |
4393 | break; |
4394 | case COMP_CONTEXT_STATE_ERROR: |
4395 | case COMP_SLOT_NOT_ENABLED_ERROR: |
4396 | xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", |
4397 | act, udev->slot_id); |
4398 | ret = -EINVAL; |
4399 | break; |
4400 | case COMP_USB_TRANSACTION_ERROR: |
4401 | dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); |
4402 | |
4403 | mutex_unlock(lock: &xhci->mutex); |
4404 | ret = xhci_disable_slot(xhci, slot_id: udev->slot_id); |
4405 | xhci_free_virt_device(xhci, slot_id: udev->slot_id); |
4406 | if (!ret) { |
4407 | if (xhci_alloc_dev(hcd, udev) == 1) |
4408 | xhci_setup_addressable_virt_dev(xhci, udev); |
4409 | } |
4410 | kfree(objp: command->completion); |
4411 | kfree(objp: command); |
4412 | return -EPROTO; |
4413 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
4414 | dev_warn(&udev->dev, |
4415 | "ERROR: Incompatible device for setup %s command\n", act); |
4416 | ret = -ENODEV; |
4417 | break; |
4418 | case COMP_SUCCESS: |
4419 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_address, |
4420 | fmt: "Successful setup %s command", act); |
4421 | break; |
4422 | default: |
4423 | xhci_err(xhci, |
4424 | "ERROR: unexpected setup %s command completion code 0x%x.\n", |
4425 | act, command->status); |
4426 | trace_xhci_address_ctx(xhci, ctx: virt_dev->out_ctx, ep_num: 1); |
4427 | ret = -EINVAL; |
4428 | break; |
4429 | } |
4430 | if (ret) |
4431 | goto out; |
4432 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
4433 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4434 | "Op regs DCBAA ptr = %#016llx", temp_64); |
4435 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4436 | "Slot ID %d dcbaa entry @%p = %#016llx", |
4437 | udev->slot_id, |
4438 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], |
4439 | (unsigned long long) |
4440 | le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); |
4441 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4442 | "Output Context DMA address = %#08llx", |
4443 | (unsigned long long)virt_dev->out_ctx->dma); |
4444 | trace_xhci_address_ctx(xhci, virt_dev->in_ctx, |
4445 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
4446 | /* |
4447 | * USB core uses address 1 for the roothubs, so we add one to the |
4448 | * address given back to us by the HC. |
4449 | */ |
4450 | trace_xhci_address_ctx(xhci, virt_dev->out_ctx, |
4451 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
4452 | /* Zero the input context control for later use */ |
4453 | ctrl_ctx->add_flags = 0; |
4454 | ctrl_ctx->drop_flags = 0; |
4455 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
4456 | udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); |
4457 | |
4458 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4459 | "Internal device address = %d", |
4460 | le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); |
4461 | out: |
4462 | mutex_unlock(&xhci->mutex); |
4463 | if (command) { |
4464 | kfree(command->completion); |
4465 | kfree(command); |
4466 | } |
4467 | return ret; |
4468 | } |
4469 | |
4470 | static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev, |
4471 | unsigned int timeout_ms) |
4472 | { |
4473 | return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms); |
4474 | } |
4475 | |
4476 | static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) |
4477 | { |
4478 | return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY, |
4479 | XHCI_CMD_DEFAULT_TIMEOUT); |
4480 | } |
4481 | |
4482 | /* |
4483 | * Translate the logical port index into the real index in the HW port |
4484 | * status registers. Calculate the offset between the port's PORTSC |
4485 | * register and the port status base, then divide by the number of |
4486 | * registers per port to get the real index. The raw port number is 1-based. |
4487 | */ |
4488 | int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) |
4489 | { |
4490 | struct xhci_hub *rhub; |
4491 | |
4492 | rhub = xhci_get_rhub(hcd); |
4493 | return rhub->ports[port1 - 1]->hw_portnum + 1; |
4494 | } |
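| |
| /* |
| * Illustration (hypothetical port layout, not from this file): if the USB 3 |
| * root hub's ports happen to be backed by PORTSC registers with hw_portnum |
| * values 4..7, then port1 == 2 picks ports[1] (hw_portnum 5) and the helper |
| * above returns 6, i.e. the 1-based raw register index. |
| */ |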
4495 | |
4496 | /* |
4497 | * Issue an Evaluate Context command to change the Maximum Exit Latency in the |
4498 | * slot context. If that succeeds, store the new MEL in the xhci_virt_device. |
4499 | */ |
4500 | static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, |
4501 | struct usb_device *udev, u16 max_exit_latency) |
4502 | { |
4503 | struct xhci_virt_device *virt_dev; |
4504 | struct xhci_command *command; |
4505 | struct xhci_input_control_ctx *ctrl_ctx; |
4506 | struct xhci_slot_ctx *slot_ctx; |
4507 | unsigned long flags; |
4508 | int ret; |
4509 | |
4510 | command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL); |
4511 | if (!command) |
4512 | return -ENOMEM; |
4513 | |
4514 | spin_lock_irqsave(&xhci->lock, flags); |
4515 | |
4516 | virt_dev = xhci->devs[udev->slot_id]; |
4517 | |
4518 | /* |
4519 | * virt_dev might not exist yet if the xHC resumed from hibernate (S4) and |
4520 | * the xHC was re-initialized. The exit latency will be set later, after |
4521 | * hub_port_finish_reset() is done and xhci->devs[] is re-allocated. |
4522 | */ |
4523 | |
4524 | if (!virt_dev || max_exit_latency == virt_dev->current_mel) { |
4525 | spin_unlock_irqrestore(&xhci->lock, flags); |
4526 | xhci_free_command(xhci, command); |
4527 | return 0; |
4528 | } |
4529 | |
4530 | /* Attempt to issue an Evaluate Context command to change the MEL. */ |
4531 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
4532 | if (!ctrl_ctx) { |
4533 | spin_unlock_irqrestore(&xhci->lock, flags); |
4534 | xhci_free_command(xhci, command); |
4535 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
4536 | __func__); |
4537 | return -ENOMEM; |
4538 | } |
4539 | |
4540 | xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); |
4541 | spin_unlock_irqrestore(&xhci->lock, flags); |
4542 | |
4543 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
4544 | slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); |
4545 | slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); |
4546 | slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); |
4547 | slot_ctx->dev_state = 0; |
4548 | |
4549 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
4550 | "Set up evaluate context for LPM MEL change."); |
4551 | |
4552 | /* Issue and wait for the evaluate context command. */ |
4553 | ret = xhci_configure_endpoint(xhci, udev, command, |
4554 | true, true); |
4555 | |
4556 | if (!ret) { |
4557 | spin_lock_irqsave(&xhci->lock, flags); |
4558 | virt_dev->current_mel = max_exit_latency; |
4559 | spin_unlock_irqrestore(&xhci->lock, flags); |
4560 | } |
4561 | |
4562 | xhci_free_command(xhci, command); |
4563 | |
4564 | return ret; |
4565 | } |
4566 | |
4567 | #ifdef CONFIG_PM |
4568 | |
4569 | /* BESL to HIRD Encoding array for USB2 LPM */ |
4570 | static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, |
4571 | 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; |
4572 | |
4573 | /* Calculate HIRD/BESL for USB2 PORTPMSC */ |
4574 | static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, |
4575 | struct usb_device *udev) |
4576 | { |
4577 | int u2del, besl, besl_host; |
4578 | int besl_device = 0; |
4579 | u32 field; |
4580 | |
4581 | u2del = HCS_U2_LATENCY(xhci->hcs_params3); |
4582 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
4583 | |
4584 | if (field & USB_BESL_SUPPORT) { |
4585 | for (besl_host = 0; besl_host < 16; besl_host++) { |
4586 | if (xhci_besl_encoding[besl_host] >= u2del) |
4587 | break; |
4588 | } |
4589 | /* Use baseline BESL value as default */ |
4590 | if (field & USB_BESL_BASELINE_VALID) |
4591 | besl_device = USB_GET_BESL_BASELINE(field); |
4592 | else if (field & USB_BESL_DEEP_VALID) |
4593 | besl_device = USB_GET_BESL_DEEP(field); |
4594 | } else { |
4595 | if (u2del <= 50) |
4596 | besl_host = 0; |
4597 | else |
4598 | besl_host = (u2del - 51) / 75 + 1; |
4599 | } |
4600 | |
4601 | besl = besl_host + besl_device; |
4602 | if (besl > 15) |
4603 | besl = 15; |
4604 | |
4605 | return besl; |
4606 | } |
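| |
| /* |
| * Worked example of the calculation above (hypothetical latency): with |
| * u2del = 300 us, a BESL-capable device makes the host pick besl_host = 3 |
| * (xhci_besl_encoding[3] == 300) and add the device's baseline (or deep) |
| * BESL; a HIRD-only device gets besl_host = (300 - 51) / 75 + 1 = 4. The |
| * sum is clamped to 15. |
| */ |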
4607 | |
4608 | /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ |
4609 | static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) |
4610 | { |
4611 | u32 field; |
4612 | int l1; |
4613 | int besld = 0; |
4614 | int hirdm = 0; |
4615 | |
4616 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
4617 | |
4618 | /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ |
4619 | l1 = udev->l1_params.timeout / 256; |
4620 | |
4621 | /* device has preferred BESLD */ |
4622 | if (field & USB_BESL_DEEP_VALID) { |
4623 | besld = USB_GET_BESL_DEEP(field); |
4624 | hirdm = 1; |
4625 | } |
4626 | |
4627 | return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); |
4628 | } |
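| |
| /* |
| * Worked example (hypothetical L1 timeout): with udev->l1_params.timeout of |
| * 512 us the L1 timeout field becomes 512 / 256 = 2; if the device also |
| * advertises a deep BESL value, BESLD is taken from it and HIRDM is set to |
| * 1, and the three fields are merged into the PORTHLPMC value returned above. |
| */ |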
4629 | |
4630 | static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
4631 | struct usb_device *udev, int enable) |
4632 | { |
4633 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4634 | struct xhci_port **ports; |
4635 | __le32 __iomem *pm_addr, *hlpm_addr; |
4636 | u32 pm_val, hlpm_val, field; |
4637 | unsigned int port_num; |
4638 | unsigned long flags; |
4639 | int hird, exit_latency; |
4640 | int ret; |
4641 | |
4642 | if (xhci->quirks & XHCI_HW_LPM_DISABLE) |
4643 | return -EPERM; |
4644 | |
4645 | if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || |
4646 | !udev->lpm_capable) |
4647 | return -EPERM; |
4648 | |
4649 | if (!udev->parent || udev->parent->parent || |
4650 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) |
4651 | return -EPERM; |
4652 | |
4653 | if (udev->usb2_hw_lpm_capable != 1) |
4654 | return -EPERM; |
4655 | |
4656 | spin_lock_irqsave(&xhci->lock, flags); |
4657 | |
4658 | ports = xhci->usb2_rhub.ports; |
4659 | port_num = udev->portnum - 1; |
4660 | pm_addr = ports[port_num]->addr + PORTPMSC; |
4661 | pm_val = readl(pm_addr); |
4662 | hlpm_addr = ports[port_num]->addr + PORTHLPMC; |
4663 | |
4664 | xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", |
4665 | str_enable_disable(enable), port_num + 1); |
4666 | |
4667 | if (enable) { |
4668 | /* Host supports BESL timeout instead of HIRD */ |
4669 | if (udev->usb2_hw_lpm_besl_capable) { |
4670 | /* if device doesn't have a preferred BESL value use a |
4671 | * default one which works with mixed HIRD and BESL |
4672 | * systems. See XHCI_DEFAULT_BESL definition in xhci.h |
4673 | */ |
4674 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
4675 | if ((field & USB_BESL_SUPPORT) && |
4676 | (field & USB_BESL_BASELINE_VALID)) |
4677 | hird = USB_GET_BESL_BASELINE(field); |
4678 | else |
4679 | hird = udev->l1_params.besl; |
4680 | |
4681 | exit_latency = xhci_besl_encoding[hird]; |
4682 | spin_unlock_irqrestore(&xhci->lock, flags); |
4683 | |
4684 | ret = xhci_change_max_exit_latency(xhci, udev, |
4685 | exit_latency); |
4686 | if (ret < 0) |
4687 | return ret; |
4688 | spin_lock_irqsave(&xhci->lock, flags); |
4689 | |
4690 | hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); |
4691 | writel(hlpm_val, hlpm_addr); |
4692 | /* flush write */ |
4693 | readl(hlpm_addr); |
4694 | } else { |
4695 | hird = xhci_calculate_hird_besl(xhci, udev); |
4696 | } |
4697 | |
4698 | pm_val &= ~PORT_HIRD_MASK; |
4699 | pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); |
4700 | writel(pm_val, pm_addr); |
4701 | pm_val = readl(pm_addr); |
4702 | pm_val |= PORT_HLE; |
4703 | writel(pm_val, pm_addr); |
4704 | /* flush write */ |
4705 | readl(pm_addr); |
4706 | } else { |
4707 | pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); |
4708 | writel(pm_val, pm_addr); |
4709 | /* flush write */ |
4710 | readl(pm_addr); |
4711 | if (udev->usb2_hw_lpm_besl_capable) { |
4712 | spin_unlock_irqrestore(&xhci->lock, flags); |
4713 | xhci_change_max_exit_latency(xhci, udev, 0); |
4714 | readl_poll_timeout(ports[port_num]->addr, pm_val, |
4715 | (pm_val & PORT_PLS_MASK) == XDEV_U0, |
4716 | 100, 10000); |
4717 | return 0; |
4718 | } |
4719 | } |
4720 | |
4721 | spin_unlock_irqrestore(&xhci->lock, flags); |
4722 | return 0; |
4723 | } |
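| |
| /* |
| * Note on the sequence above: when enabling, HIRD/BESL, RWE and the L1 |
| * device slot are programmed into PORTPMSC first and HLE is set with a |
| * separate write; when disabling, those bits are cleared and, on |
| * BESL-capable ports, the max exit latency is dropped back to 0 and the |
| * port is polled until it returns to U0. |
| */ |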
4724 | |
4725 | static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
4726 | { |
4727 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4728 | struct xhci_port *port; |
4729 | u32 capability; |
4730 | |
4731 | /* Check if USB3 device at root port is tunneled over USB4 */ |
4732 | if (hcd->speed >= HCD_USB3 && !udev->parent->parent) { |
4733 | port = xhci->usb3_rhub.ports[udev->portnum - 1]; |
4734 | |
4735 | udev->tunnel_mode = xhci_port_is_tunneled(xhci, port); |
4736 | if (udev->tunnel_mode == USB_LINK_UNKNOWN) |
4737 | dev_dbg(&udev->dev, "link tunnel state unknown\n"); |
4738 | else if (udev->tunnel_mode == USB_LINK_TUNNELED) |
4739 | dev_dbg(&udev->dev, "tunneled over USB4 link\n"); |
4740 | else if (udev->tunnel_mode == USB_LINK_NATIVE) |
4741 | dev_dbg(&udev->dev, "native USB 3.x link\n"); |
4742 | return 0; |
4743 | } |
4744 | |
4745 | if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support) |
4746 | return 0; |
4747 | |
4748 | /* so far we only support LPM for non-hub devices connected to the root hub */ |
4749 | if (!udev->parent || udev->parent->parent || |
4750 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) |
4751 | return 0; |
4752 | |
4753 | port = xhci->usb2_rhub.ports[udev->portnum - 1]; |
4754 | capability = port->port_cap->protocol_caps; |
4755 | |
4756 | if (capability & XHCI_HLC) { |
4757 | udev->usb2_hw_lpm_capable = 1; |
4758 | udev->l1_params.timeout = XHCI_L1_TIMEOUT; |
4759 | udev->l1_params.besl = XHCI_DEFAULT_BESL; |
4760 | if (capability & XHCI_BLC) |
4761 | udev->usb2_hw_lpm_besl_capable = 1; |
4762 | } |
4763 | |
4764 | return 0; |
4765 | } |
4766 | |
4767 | /*---------------------- USB 3.0 Link PM functions ------------------------*/ |
4768 | |
4769 | /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */ |
4770 | static unsigned long long xhci_service_interval_to_ns( |
4771 | struct usb_endpoint_descriptor *desc) |
4772 | { |
4773 | return (1ULL << (desc->bInterval - 1)) * 125 * 1000; |
4774 | } |
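| |
| /* |
| * Example: an endpoint with bInterval = 4 has a service interval of |
| * 2^(4-1) * 125 us = 1 ms, so the helper above returns 1,000,000 ns. |
| */ |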
4775 | |
4776 | static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, |
4777 | enum usb3_link_state state) |
4778 | { |
4779 | unsigned long long sel; |
4780 | unsigned long long pel; |
4781 | unsigned int max_sel_pel; |
4782 | char *state_name; |
4783 | |
4784 | switch (state) { |
4785 | case USB3_LPM_U1: |
4786 | /* Convert SEL and PEL stored in nanoseconds to microseconds */ |
4787 | sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); |
4788 | pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); |
4789 | max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; |
4790 | state_name = "U1"; |
4791 | break; |
4792 | case USB3_LPM_U2: |
4793 | sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); |
4794 | pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); |
4795 | max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; |
4796 | state_name = "U2"; |
4797 | break; |
4798 | default: |
4799 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", |
4800 | __func__); |
4801 | return USB3_LPM_DISABLED; |
4802 | } |
4803 | |
4804 | if (sel <= max_sel_pel && pel <= max_sel_pel) |
4805 | return USB3_LPM_DEVICE_INITIATED; |
4806 | |
4807 | if (sel > max_sel_pel) |
4808 | dev_dbg(&udev->dev, "Device-initiated %s disabled " |
4809 | "due to long SEL %llu us\n", |
4810 | state_name, sel); |
4811 | else |
4812 | dev_dbg(&udev->dev, "Device-initiated %s disabled " |
4813 | "due to long PEL %llu us\n", |
4814 | state_name, pel); |
4815 | return USB3_LPM_DISABLED; |
4816 | } |
4817 | |
4818 | /* The U1 timeout should be the maximum of the following values: |
4819 | * - For control endpoints, U1 system exit latency (SEL) * 3 |
4820 | * - For bulk endpoints, U1 SEL * 5 |
4821 | * - For interrupt endpoints: |
4822 | * - Notification EPs, U1 SEL * 3 |
4823 | * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) |
4824 | * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) |
4825 | */ |
4826 | static unsigned long long xhci_calculate_intel_u1_timeout( |
4827 | struct usb_device *udev, |
4828 | struct usb_endpoint_descriptor *desc) |
4829 | { |
4830 | unsigned long long timeout_ns; |
4831 | int ep_type; |
4832 | int intr_type; |
4833 | |
4834 | ep_type = usb_endpoint_type(desc); |
4835 | switch (ep_type) { |
4836 | case USB_ENDPOINT_XFER_CONTROL: |
4837 | timeout_ns = udev->u1_params.sel * 3; |
4838 | break; |
4839 | case USB_ENDPOINT_XFER_BULK: |
4840 | timeout_ns = udev->u1_params.sel * 5; |
4841 | break; |
4842 | case USB_ENDPOINT_XFER_INT: |
4843 | intr_type = usb_endpoint_interrupt_type(desc); |
4844 | if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { |
4845 | timeout_ns = udev->u1_params.sel * 3; |
4846 | break; |
4847 | } |
4848 | /* Otherwise the calculation is the same as isoc eps */ |
4849 | fallthrough; |
4850 | case USB_ENDPOINT_XFER_ISOC: |
4851 | timeout_ns = xhci_service_interval_to_ns(desc); |
4852 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); |
4853 | if (timeout_ns < udev->u1_params.sel * 2) |
4854 | timeout_ns = udev->u1_params.sel * 2; |
4855 | break; |
4856 | default: |
4857 | return 0; |
4858 | } |
4859 | |
4860 | return timeout_ns; |
4861 | } |
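| |
| /* |
| * Worked example (hypothetical SEL): with u1_params.sel = 20,000 ns a bulk |
| * endpoint gets 5 * SEL = 100,000 ns, while an isochronous endpoint with a |
| * 1 ms service interval gets max(1,050,000 ns, 2 * SEL) = 1,050,000 ns. |
| */ |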
4862 | |
4863 | /* Returns the hub-encoded U1 timeout value. */ |
4864 | static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, |
4865 | struct usb_device *udev, |
4866 | struct usb_endpoint_descriptor *desc) |
4867 | { |
4868 | unsigned long long timeout_ns; |
4869 | |
4870 | /* Prevent U1 if service interval is shorter than U1 exit latency */ |
4871 | if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { |
4872 | if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) { |
4873 | dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); |
4874 | return USB3_LPM_DISABLED; |
4875 | } |
4876 | } |
4877 | |
4878 | if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) |
4879 | timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); |
4880 | else |
4881 | timeout_ns = udev->u1_params.sel; |
4882 | |
4883 | /* The U1 timeout is encoded in 1us intervals. |
4884 | * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. |
4885 | */ |
4886 | if (timeout_ns == USB3_LPM_DISABLED) |
4887 | timeout_ns = 1; |
4888 | else |
4889 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); |
4890 | |
4891 | /* If the necessary timeout value is bigger than what we can set in the |
4892 | * USB 3.0 hub, we have to disable hub-initiated U1. |
4893 | */ |
4894 | if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) |
4895 | return timeout_ns; |
4896 | dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %lluus\n", |
4897 | timeout_ns); |
4898 | return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); |
4899 | } |
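| |
| /* |
| * Continuing the hypothetical example above: 100,000 ns rounds up to a |
| * hub-encoded U1 timeout of 100 (1 us units), which fits within |
| * USB3_LPM_U1_MAX_TIMEOUT; the 1,050,000 ns case encodes to 1050, exceeds |
| * the limit, and falls back to xhci_get_timeout_no_hub_lpm(). |
| */ |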
4900 | |
4901 | /* The U2 timeout should be the maximum of: |
4902 | * - 10 ms (to avoid the bandwidth impact on the scheduler) |
4903 | * - largest bInterval of any active periodic endpoint (to avoid going |
4904 | * into lower power link states between intervals). |
4905 | * - the U2 Exit Latency of the device |
4906 | */ |
4907 | static unsigned long long xhci_calculate_intel_u2_timeout( |
4908 | struct usb_device *udev, |
4909 | struct usb_endpoint_descriptor *desc) |
4910 | { |
4911 | unsigned long long timeout_ns; |
4912 | unsigned long long u2_del_ns; |
4913 | |
4914 | timeout_ns = 10 * 1000 * 1000; |
4915 | |
4916 | if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && |
4917 | (xhci_service_interval_to_ns(desc) > timeout_ns)) |
4918 | timeout_ns = xhci_service_interval_to_ns(desc); |
4919 | |
4920 | u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; |
4921 | if (u2_del_ns > timeout_ns) |
4922 | timeout_ns = u2_del_ns; |
4923 | |
4924 | return timeout_ns; |
4925 | } |
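| |
| /* |
| * Worked example: the timeout starts at the 10 ms floor (10,000,000 ns); a |
| * periodic endpoint with a larger service interval, or a device whose |
| * bU2DevExitLat exceeds that, raises it further. |
| */ |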
4926 | |
4927 | /* Returns the hub-encoded U2 timeout value. */ |
4928 | static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, |
4929 | struct usb_device *udev, |
4930 | struct usb_endpoint_descriptor *desc) |
4931 | { |
4932 | unsigned long long timeout_ns; |
4933 | |
4934 | /* Prevent U2 if service interval is shorter than U2 exit latency */ |
4935 | if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { |
4936 | if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { |
4937 | dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); |
4938 | return USB3_LPM_DISABLED; |
4939 | } |
4940 | } |
4941 | |
4942 | if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) |
4943 | timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); |
4944 | else |
4945 | timeout_ns = udev->u2_params.sel; |
4946 | |
4947 | /* The U2 timeout is encoded in 256us intervals */ |
4948 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); |
4949 | /* If the necessary timeout value is bigger than what we can set in the |
4950 | * USB 3.0 hub, we have to disable hub-initiated U2. |
4951 | */ |
4952 | if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) |
4953 | return timeout_ns; |
4954 | dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %lluus\n", |
4955 | timeout_ns * 256); |
4956 | return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); |
4957 | } |
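| |
| /* |
| * Continuing the example above, the 10 ms floor encodes to |
| * DIV_ROUND_UP(10,000,000, 256,000) = 40 in 256 us units, comfortably |
| * within USB3_LPM_U2_MAX_TIMEOUT. |
| */ |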
4958 | |
4959 | static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, |
4960 | struct usb_device *udev, |
4961 | struct usb_endpoint_descriptor *desc, |
4962 | enum usb3_link_state state, |
4963 | u16 *timeout) |
4964 | { |
4965 | if (state == USB3_LPM_U1) |
4966 | return xhci_calculate_u1_timeout(xhci, udev, desc); |
4967 | else if (state == USB3_LPM_U2) |
4968 | return xhci_calculate_u2_timeout(xhci, udev, desc); |
4969 | |
4970 | return USB3_LPM_DISABLED; |
4971 | } |
4972 | |
4973 | static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, |
4974 | struct usb_device *udev, |
4975 | struct usb_endpoint_descriptor *desc, |
4976 | enum usb3_link_state state, |
4977 | u16 *timeout) |
4978 | { |
4979 | u16 alt_timeout; |
4980 | |
4981 | alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, |
4982 | desc, state, timeout); |
4983 | |
4984 | /* If we found we can't enable hub-initiated LPM, and |
4985 | * the U1 or U2 exit latency was too high to allow |
4986 | * device-initiated LPM as well, then we will disable LPM |
4987 | * for this device, so stop searching any further. |
4988 | */ |
4989 | if (alt_timeout == USB3_LPM_DISABLED) { |
4990 | *timeout = alt_timeout; |
4991 | return -E2BIG; |
4992 | } |
4993 | if (alt_timeout > *timeout) |
4994 | *timeout = alt_timeout; |
4995 | return 0; |
4996 | } |
4997 | |
4998 | static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, |
4999 | struct usb_device *udev, |
5000 | struct usb_host_interface *alt, |
5001 | enum usb3_link_state state, |
5002 | u16 *timeout) |
5003 | { |
5004 | int j; |
5005 | |
5006 | for (j = 0; j < alt->desc.bNumEndpoints; j++) { |
5007 | if (xhci_update_timeout_for_endpoint(xhci, udev, |
5008 | &alt->endpoint[j].desc, state, timeout)) |
5009 | return -E2BIG; |
5010 | } |
5011 | return 0; |
5012 | } |
5013 | |
5014 | static int xhci_check_tier_policy(struct xhci_hcd *xhci, |
5015 | struct usb_device *udev, |
5016 | enum usb3_link_state state) |
5017 | { |
5018 | struct usb_device *parent = udev->parent; |
5019 | int tier = 1; /* roothub is tier1 */ |
5020 | |
5021 | while (parent) { |
5022 | parent = parent->parent; |
5023 | tier++; |
5024 | } |
5025 | |
5026 | if (xhci->quirks & XHCI_INTEL_HOST && tier > 3) |
5027 | goto fail; |
5028 | if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2) |
5029 | goto fail; |
5030 | |
5031 | return 0; |
5032 | fail: |
5033 | dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n", |
5034 | tier); |
5035 | return -E2BIG; |
5036 | } |
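| |
| /* |
| * Example of the tier count above: a device plugged directly into a |
| * root-hub port is tier 2, and one external hub below that is tier 3. |
| * Intel hosts therefore allow U1/U2 down to one external hub, while |
| * Zhaoxin hosts only allow devices sitting directly on the root hub. |
| */ |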
5037 | |
5038 | /* Returns the U1 or U2 timeout that should be enabled. |
5039 | * If the tier check or timeout setting functions return with a non-zero exit |
5040 | * code, that means the timeout value has been finalized and we shouldn't look |
5041 | * at any more endpoints. |
5042 | */ |
5043 | static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, |
5044 | struct usb_device *udev, enum usb3_link_state state) |
5045 | { |
5046 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
5047 | struct usb_host_config *config; |
5048 | char *state_name; |
5049 | int i; |
5050 | u16 timeout = USB3_LPM_DISABLED; |
5051 | |
5052 | if (state == USB3_LPM_U1) |
5053 | state_name = "U1"; |
5054 | else if (state == USB3_LPM_U2) |
5055 | state_name = "U2"; |
5056 | else { |
5057 | dev_warn(&udev->dev, "Can't enable unknown link state %i\n", |
5058 | state); |
5059 | return timeout; |
5060 | } |
5061 | |
5062 | /* Gather some information about the currently installed configuration |
5063 | * and alternate interface settings. |
5064 | */ |
5065 | if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, |
5066 | state, &timeout)) |
5067 | return timeout; |
5068 | |
5069 | config = udev->actconfig; |
5070 | if (!config) |
5071 | return timeout; |
5072 | |
5073 | for (i = 0; i < config->desc.bNumInterfaces; i++) { |
5074 | struct usb_driver *driver; |
5075 | struct usb_interface *intf = config->interface[i]; |
5076 | |
5077 | if (!intf) |
5078 | continue; |
5079 | |
5080 | /* Check if any currently bound drivers want hub-initiated LPM |
5081 | * disabled. |
5082 | */ |
5083 | if (intf->dev.driver) { |
5084 | driver = to_usb_driver(intf->dev.driver); |
5085 | if (driver && driver->disable_hub_initiated_lpm) { |
5086 | dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n", |
5087 | state_name, driver->name); |
5088 | timeout = xhci_get_timeout_no_hub_lpm(udev, |
5089 | state); |
5090 | if (timeout == USB3_LPM_DISABLED) |
5091 | return timeout; |
5092 | } |
5093 | } |
5094 | |
5095 | /* Not sure how this could happen... */ |
5096 | if (!intf->cur_altsetting) |
5097 | continue; |
5098 | |
5099 | if (xhci_update_timeout_for_interface(xhci, udev, |
5100 | intf->cur_altsetting, |
5101 | state, &timeout)) |
5102 | return timeout; |
5103 | } |
5104 | return timeout; |
5105 | } |
5106 | |
5107 | static int calculate_max_exit_latency(struct usb_device *udev, |
5108 | enum usb3_link_state state_changed, |
5109 | u16 hub_encoded_timeout) |
5110 | { |
5111 | unsigned long long u1_mel_us = 0; |
5112 | unsigned long long u2_mel_us = 0; |
5113 | unsigned long long mel_us = 0; |
5114 | bool disabling_u1; |
5115 | bool disabling_u2; |
5116 | bool enabling_u1; |
5117 | bool enabling_u2; |
5118 | |
5119 | disabling_u1 = (state_changed == USB3_LPM_U1 && |
5120 | hub_encoded_timeout == USB3_LPM_DISABLED); |
5121 | disabling_u2 = (state_changed == USB3_LPM_U2 && |
5122 | hub_encoded_timeout == USB3_LPM_DISABLED); |
5123 | |
5124 | enabling_u1 = (state_changed == USB3_LPM_U1 && |
5125 | hub_encoded_timeout != USB3_LPM_DISABLED); |
5126 | enabling_u2 = (state_changed == USB3_LPM_U2 && |
5127 | hub_encoded_timeout != USB3_LPM_DISABLED); |
5128 | |
5129 | /* If U1 was already enabled and we're not disabling it, |
5130 | * or we're going to enable U1, account for the U1 max exit latency. |
5131 | */ |
5132 | if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || |
5133 | enabling_u1) |
5134 | u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); |
5135 | if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || |
5136 | enabling_u2) |
5137 | u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); |
5138 | |
5139 | mel_us = max(u1_mel_us, u2_mel_us); |
5140 | |
5141 | /* xHCI host controller max exit latency field is only 16 bits wide. */ |
5142 | if (mel_us > MAX_EXIT) { |
5143 | dev_warn(&udev->dev, "Link PM max exit latency of %lluus " |
5144 | "is too big.\n", mel_us); |
5145 | return -E2BIG; |
5146 | } |
5147 | return mel_us; |
5148 | } |
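| |
| /* |
| * Worked example (hypothetical exit latencies): with u1_params.mel = |
| * 3,000 ns and u2_params.mel = 200,000 ns, and both states staying |
| * enabled, u1_mel_us = 3 and u2_mel_us = 200, so the slot's max exit |
| * latency becomes 200 us. |
| */ |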
5149 | |
5150 | /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ |
5151 | static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
5152 | struct usb_device *udev, enum usb3_link_state state) |
5153 | { |
5154 | struct xhci_hcd *xhci; |
5155 | struct xhci_port *port; |
5156 | u16 hub_encoded_timeout; |
5157 | int mel; |
5158 | int ret; |
5159 | |
5160 | xhci = hcd_to_xhci(hcd); |
5161 | /* The LPM timeout values are pretty host-controller specific, so don't |
5162 | * enable hub-initiated timeouts unless the vendor has provided |
5163 | * information about their timeout algorithm. |
5164 | */ |
5165 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || |
5166 | !xhci->devs[udev->slot_id]) |
5167 | return USB3_LPM_DISABLED; |
5168 | |
5169 | if (xhci_check_tier_policy(xhci, udev, state) < 0) |
5170 | return USB3_LPM_DISABLED; |
5171 | |
5172 | /* If connected to root port then check port can handle lpm */ |
5173 | if (udev->parent && !udev->parent->parent) { |
5174 | port = xhci->usb3_rhub.ports[udev->portnum - 1]; |
5175 | if (port->lpm_incapable) |
5176 | return USB3_LPM_DISABLED; |
5177 | } |
5178 | |
5179 | hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); |
5180 | mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); |
5181 | if (mel < 0) { |
5182 | /* Max Exit Latency is too big, disable LPM. */ |
5183 | hub_encoded_timeout = USB3_LPM_DISABLED; |
5184 | mel = 0; |
5185 | } |
5186 | |
5187 | ret = xhci_change_max_exit_latency(xhci, udev, mel); |
5188 | if (ret) |
5189 | return ret; |
5190 | return hub_encoded_timeout; |
5191 | } |
5192 | |
5193 | static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
5194 | struct usb_device *udev, enum usb3_link_state state) |
5195 | { |
5196 | struct xhci_hcd *xhci; |
5197 | u16 mel; |
5198 | |
5199 | xhci = hcd_to_xhci(hcd); |
5200 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || |
5201 | !xhci->devs[udev->slot_id]) |
5202 | return 0; |
5203 | |
5204 | mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); |
5205 | return xhci_change_max_exit_latency(xhci, udev, mel); |
5206 | } |
5207 | #else /* CONFIG_PM */ |
5208 | |
5209 | static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
5210 | struct usb_device *udev, int enable) |
5211 | { |
5212 | return 0; |
5213 | } |
5214 | |
5215 | static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
5216 | { |
5217 | return 0; |
5218 | } |
5219 | |
5220 | static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
5221 | struct usb_device *udev, enum usb3_link_state state) |
5222 | { |
5223 | return USB3_LPM_DISABLED; |
5224 | } |
5225 | |
5226 | static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
5227 | struct usb_device *udev, enum usb3_link_state state) |
5228 | { |
5229 | return 0; |
5230 | } |
5231 | #endif /* CONFIG_PM */ |
5232 | |
5233 | /*-------------------------------------------------------------------------*/ |
5234 | |
5235 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's |
5236 | * internal data structures for the device. |
5237 | */ |
5238 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, |
5239 | struct usb_tt *tt, gfp_t mem_flags) |
5240 | { |
5241 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
5242 | struct xhci_virt_device *vdev; |
5243 | struct xhci_command *config_cmd; |
5244 | struct xhci_input_control_ctx *ctrl_ctx; |
5245 | struct xhci_slot_ctx *slot_ctx; |
5246 | unsigned long flags; |
5247 | unsigned think_time; |
5248 | int ret; |
5249 | |
5250 | /* Ignore root hubs */ |
5251 | if (!hdev->parent) |
5252 | return 0; |
5253 | |
5254 | vdev = xhci->devs[hdev->slot_id]; |
5255 | if (!vdev) { |
5256 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); |
5257 | return -EINVAL; |
5258 | } |
5259 | |
5260 | config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); |
5261 | if (!config_cmd) |
5262 | return -ENOMEM; |
5263 | |
5264 | ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); |
5265 | if (!ctrl_ctx) { |
5266 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
5267 | __func__); |
5268 | xhci_free_command(xhci, config_cmd); |
5269 | return -ENOMEM; |
5270 | } |
5271 | |
5272 | spin_lock_irqsave(&xhci->lock, flags); |
5273 | if (hdev->speed == USB_SPEED_HIGH && |
5274 | xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { |
5275 | xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); |
5276 | xhci_free_command(xhci, config_cmd); |
5277 | spin_unlock_irqrestore(&xhci->lock, flags); |
5278 | return -ENOMEM; |
5279 | } |
5280 | |
5281 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); |
5282 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
5283 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); |
5284 | slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); |
5285 | /* |
5286 | * Refer to section 6.2.2: MTT should be 0 for a full-speed hub, |
5287 | * but it may already have been set to 1 when the xHCI virtual |
5288 | * device was set up, so clear it anyway. |
5289 | */ |
5290 | if (tt->multi) |
5291 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); |
5292 | else if (hdev->speed == USB_SPEED_FULL) |
5293 | slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT); |
5294 | |
5295 | if (xhci->hci_version > 0x95) { |
5296 | xhci_dbg(xhci, "xHCI version %x needs hub " |
5297 | "TT think time and number of ports\n", |
5298 | (unsigned int) xhci->hci_version); |
5299 | slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); |
5300 | /* Set TT think time - convert from ns to FS bit times. |
5301 | * 0 = 8 FS bit times, 1 = 16 FS bit times, |
5302 | * 2 = 24 FS bit times, 3 = 32 FS bit times. |
5303 | * |
5304 | * xHCI 1.0: this field shall be 0 if the device is not a |
5305 | * High-speed hub. |
5306 | */ |
5307 | think_time = tt->think_time; |
5308 | if (think_time != 0) |
5309 | think_time = (think_time / 666) - 1; |
5310 | if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) |
5311 | slot_ctx->tt_info |= |
5312 | cpu_to_le32(TT_THINK_TIME(think_time)); |
5313 | } else { |
5314 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " |
5315 | "TT think time or number of ports\n", |
5316 | (unsigned int) xhci->hci_version); |
5317 | } |
5318 | slot_ctx->dev_state = 0; |
5319 | spin_unlock_irqrestore(&xhci->lock, flags); |
5320 | |
5321 | xhci_dbg(xhci, "Set up %s for hub device.\n", |
5322 | (xhci->hci_version > 0x95) ? |
5323 | "configure endpoint": "evaluate context"); |
5324 | |
5325 | /* Issue and wait for the configure endpoint or |
5326 | * evaluate context command. |
5327 | */ |
5328 | if (xhci->hci_version > 0x95) |
5329 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, |
5330 | false, false); |
5331 | else |
5332 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, |
5333 | true, false); |
5334 | |
5335 | xhci_free_command(xhci, config_cmd); |
5336 | return ret; |
5337 | } |
5338 | EXPORT_SYMBOL_GPL(xhci_update_hub_device); |
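| |
| /* |
| * Think-time note for the hub update above: the slot context encodes TT |
| * think time in units of 8 full-speed bit times (~666 ns), so a |
| * tt->think_time of 666 ns maps to encoding 0 and 1332 ns to encoding 1. |
| */ |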
5339 | |
5340 | static int xhci_get_frame(struct usb_hcd *hcd) |
5341 | { |
5342 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
5343 | /* EHCI mods by the periodic size. Why? */ |
5344 | return readl(&xhci->run_regs->microframe_index) >> 3; |
5345 | } |
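| |
| /* |
| * The microframe index counts 125 us microframes; shifting right by three |
| * divides by 8, so the value returned above advances once per 1 ms frame. |
| */ |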
5346 | |
5347 | static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd) |
5348 | { |
5349 | xhci->usb2_rhub.hcd = hcd; |
5350 | hcd->speed = HCD_USB2; |
5351 | hcd->self.root_hub->speed = USB_SPEED_HIGH; |
5352 | /* |
5353 | * A USB 2.0 roothub under xHCI has an integrated TT |
5354 | * (rate matching hub), as opposed to having an OHCI/UHCI |
5355 | * companion controller. |
5356 | */ |
5357 | hcd->has_tt = 1; |
5358 | } |
5359 | |
5360 | static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd) |
5361 | { |
5362 | unsigned int minor_rev; |
5363 | |
5364 | /* |
5365 | * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts |
5366 | * should return 0x31 for sbrn, or that the minor revision |
5367 | * is a two digit BCD containing minor and sub-minor numbers. |
5368 | * This was later clarified in xHCI 1.2. |
5369 | * |
5370 | * Some USB 3.1 capable hosts therefore have sbrn 0x30, and |
5371 | * minor revision set to 0x1 instead of 0x10. |
5372 | */ |
5373 | if (xhci->usb3_rhub.min_rev == 0x1) |
5374 | minor_rev = 1; |
5375 | else |
5376 | minor_rev = xhci->usb3_rhub.min_rev / 0x10; |
5377 | |
5378 | switch (minor_rev) { |
5379 | case 2: |
5380 | hcd->speed = HCD_USB32; |
5381 | hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; |
5382 | hcd->self.root_hub->rx_lanes = 2; |
5383 | hcd->self.root_hub->tx_lanes = 2; |
5384 | hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2; |
5385 | break; |
5386 | case 1: |
5387 | hcd->speed = HCD_USB31; |
5388 | hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; |
5389 | hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1; |
5390 | break; |
5391 | } |
5392 | xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", |
5393 | minor_rev, minor_rev ? "Enhanced ": ""); |
5394 | |
5395 | xhci->usb3_rhub.hcd = hcd; |
5396 | } |
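| |
| /* |
| * Example of the decoding above: a host reporting usb3_rhub.min_rev 0x10 |
| * yields minor_rev 1 (USB 3.1, Gen2x1), 0x20 yields minor_rev 2 |
| * (USB 3.2, Gen2x2 with two lanes), and 0x00 leaves the roothub at plain |
| * USB 3.0 SuperSpeed. |
| */ |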
5397 | |
5398 | int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) |
5399 | { |
5400 | struct xhci_hcd *xhci; |
5401 | /* |
5402 | * TODO: Check with DWC3 clients for sysdev according to |
5403 | * quirks |
5404 | */ |
5405 | struct device *dev = hcd->self.sysdev; |
5406 | int retval; |
5407 | |
5408 | /* Accept arbitrarily long scatter-gather lists */ |
5409 | hcd->self.sg_tablesize = ~0; |
5410 | |
5411 | /* support building packets from discontinuous buffers */ |
5412 | hcd->self.no_sg_constraint = 1; |
5413 | |
5414 | /* XHCI controllers don't stop the ep queue on short packets :| */ |
5415 | hcd->self.no_stop_on_short = 1; |
5416 | |
5417 | xhci = hcd_to_xhci(hcd); |
5418 | |
5419 | if (!usb_hcd_is_primary_hcd(hcd)) { |
5420 | xhci_hcd_init_usb3_data(xhci, hcd); |
5421 | return 0; |
5422 | } |
5423 | |
5424 | mutex_init(&xhci->mutex); |
5425 | xhci->main_hcd = hcd; |
5426 | xhci->cap_regs = hcd->regs; |
5427 | xhci->op_regs = hcd->regs + |
5428 | HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); |
5429 | xhci->run_regs = hcd->regs + |
5430 | (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); |
5431 | /* Cache read-only capability registers */ |
5432 | xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); |
5433 | xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); |
5434 | xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); |
5435 | xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase)); |
5436 | xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); |
5437 | if (xhci->hci_version > 0x100) |
5438 | xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); |
5439 | |
5440 | /* xhci-plat or xhci-pci might have set max_interrupters already */ |
5441 | if ((!xhci->max_interrupters) || |
5442 | xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1)) |
5443 | xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1); |
5444 | |
5445 | xhci->quirks |= quirks; |
5446 | |
5447 | if (get_quirks) |
5448 | get_quirks(dev, xhci); |
5449 | |
5450 | /* xHCI controllers that follow the xHCI 1.0 spec give a spurious |
5451 | * success event after a short transfer. This quirk ignores such |
5452 | * spurious events. |
5453 | */ |
5454 | if (xhci->hci_version > 0x96) |
5455 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; |
5456 | |
5457 | if (xhci->hci_version == 0x95 && link_quirk) { |
5458 | xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits"); |
5459 | xhci->quirks |= XHCI_LINK_TRB_QUIRK; |
5460 | } |
5461 | |
5462 | /* Make sure the HC is halted. */ |
5463 | retval = xhci_halt(xhci); |
5464 | if (retval) |
5465 | return retval; |
5466 | |
5467 | xhci_zero_64b_regs(xhci); |
5468 | |
5469 | xhci_dbg(xhci, "Resetting HCD\n"); |
5470 | /* Reset the internal HC memory state and registers. */ |
5471 | retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); |
5472 | if (retval) |
5473 | return retval; |
5474 | xhci_dbg(xhci, "Reset complete\n"); |
5475 | |
5476 | /* |
5477 | * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) |
5478 | * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit |
5479 | * address memory pointers actually. So, this driver clears the AC64 |
5480 | * bit of xhci->hcc_params to call dma_set_coherent_mask(dev, |
5481 | * DMA_BIT_MASK(32)) in this xhci_gen_setup(). |
5482 | */ |
5483 | if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) |
5484 | xhci->hcc_params &= ~BIT(0); |
5485 | |
5486 | /* Set dma_mask and coherent_dma_mask to 64-bits, |
5487 | * if xHC supports 64-bit addressing */ |
5488 | if (HCC_64BIT_ADDR(xhci->hcc_params) && |
5489 | !dma_set_mask(dev, DMA_BIT_MASK(64))) { |
5490 | xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); |
5491 | dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); |
5492 | } else { |
5493 | /* |
5494 | * This is to avoid error in cases where a 32-bit USB |
5495 | * controller is used on a 64-bit capable system. |
5496 | */ |
5497 | retval = dma_set_mask(dev, DMA_BIT_MASK(32)); |
5498 | if (retval) |
5499 | return retval; |
5500 | xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); |
5501 | dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); |
5502 | } |
5503 | |
5504 | xhci_dbg(xhci, "Calling HCD init\n"); |
5505 | /* Initialize HCD and host controller data structures. */ |
5506 | retval = xhci_init(hcd); |
5507 | if (retval) |
5508 | return retval; |
5509 | xhci_dbg(xhci, "Called HCD init\n"); |
5510 | |
5511 | if (xhci_hcd_is_usb3(hcd)) |
5512 | xhci_hcd_init_usb3_data(xhci, hcd); |
5513 | else |
5514 | xhci_hcd_init_usb2_data(xhci, hcd); |
5515 | |
5516 | xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n", |
5517 | xhci->hcc_params, xhci->hci_version, xhci->quirks); |
5518 | |
5519 | return 0; |
5520 | } |
5521 | EXPORT_SYMBOL_GPL(xhci_gen_setup); |
5522 | |
5523 | static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd, |
5524 | struct usb_host_endpoint *ep) |
5525 | { |
5526 | struct xhci_hcd *xhci; |
5527 | struct usb_device *udev; |
5528 | unsigned int slot_id; |
5529 | unsigned int ep_index; |
5530 | unsigned long flags; |
5531 | |
5532 | xhci = hcd_to_xhci(hcd); |
5533 | |
5534 | spin_lock_irqsave(&xhci->lock, flags); |
5535 | udev = (struct usb_device *)ep->hcpriv; |
5536 | slot_id = udev->slot_id; |
5537 | ep_index = xhci_get_endpoint_index(&ep->desc); |
5538 | |
5539 | xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT; |
5540 | xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
5541 | spin_unlock_irqrestore(&xhci->lock, flags); |
5542 | } |
5543 | |
5544 | static const struct hc_driver xhci_hc_driver = { |
5545 | .description = "xhci-hcd", |
5546 | .product_desc = "xHCI Host Controller", |
5547 | .hcd_priv_size = sizeof(struct xhci_hcd), |
5548 | |
5549 | /* |
5550 | * generic hardware linkage |
5551 | */ |
5552 | .irq = xhci_irq, |
5553 | .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED | |
5554 | HCD_BH, |
5555 | |
5556 | /* |
5557 | * basic lifecycle operations |
5558 | */ |
5559 | .reset = NULL, /* set in xhci_init_driver() */ |
5560 | .start = xhci_run, |
5561 | .stop = xhci_stop, |
5562 | .shutdown = xhci_shutdown, |
5563 | |
5564 | /* |
5565 | * managing i/o requests and associated device resources |
5566 | */ |
5567 | .map_urb_for_dma = xhci_map_urb_for_dma, |
5568 | .unmap_urb_for_dma = xhci_unmap_urb_for_dma, |
5569 | .urb_enqueue = xhci_urb_enqueue, |
5570 | .urb_dequeue = xhci_urb_dequeue, |
5571 | .alloc_dev = xhci_alloc_dev, |
5572 | .free_dev = xhci_free_dev, |
5573 | .alloc_streams = xhci_alloc_streams, |
5574 | .free_streams = xhci_free_streams, |
5575 | .add_endpoint = xhci_add_endpoint, |
5576 | .drop_endpoint = xhci_drop_endpoint, |
5577 | .endpoint_disable = xhci_endpoint_disable, |
5578 | .endpoint_reset = xhci_endpoint_reset, |
5579 | .check_bandwidth = xhci_check_bandwidth, |
5580 | .reset_bandwidth = xhci_reset_bandwidth, |
5581 | .address_device = xhci_address_device, |
5582 | .enable_device = xhci_enable_device, |
5583 | .update_hub_device = xhci_update_hub_device, |
5584 | .reset_device = xhci_discover_or_reset_device, |
5585 | |
5586 | /* |
5587 | * scheduling support |
5588 | */ |
5589 | .get_frame_number = xhci_get_frame, |
5590 | |
5591 | /* |
5592 | * root hub support |
5593 | */ |
5594 | .hub_control = xhci_hub_control, |
5595 | .hub_status_data = xhci_hub_status_data, |
5596 | .bus_suspend = xhci_bus_suspend, |
5597 | .bus_resume = xhci_bus_resume, |
5598 | .get_resuming_ports = xhci_get_resuming_ports, |
5599 | |
5600 | /* |
5601 | * call back when device connected and addressed |
5602 | */ |
5603 | .update_device = xhci_update_device, |
5604 | .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, |
5605 | .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, |
5606 | .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, |
5607 | .find_raw_port_number = xhci_find_raw_port_number, |
5608 | .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete, |
5609 | }; |
5610 | |
5611 | void xhci_init_driver(struct hc_driver *drv, |
5612 | const struct xhci_driver_overrides *over) |
5613 | { |
5614 | BUG_ON(!over); |
5615 | |
5616 | /* Copy the generic table to drv then apply the overrides */ |
5617 | *drv = xhci_hc_driver; |
5618 | |
5619 | if (over) { |
5620 | drv->hcd_priv_size += over->extra_priv_size; |
5621 | if (over->reset) |
5622 | drv->reset = over->reset; |
5623 | if (over->start) |
5624 | drv->start = over->start; |
5625 | if (over->add_endpoint) |
5626 | drv->add_endpoint = over->add_endpoint; |
5627 | if (over->drop_endpoint) |
5628 | drv->drop_endpoint = over->drop_endpoint; |
5629 | if (over->check_bandwidth) |
5630 | drv->check_bandwidth = over->check_bandwidth; |
5631 | if (over->reset_bandwidth) |
5632 | drv->reset_bandwidth = over->reset_bandwidth; |
5633 | if (over->update_hub_device) |
5634 | drv->update_hub_device = over->update_hub_device; |
5635 | if (over->hub_control) |
5636 | drv->hub_control = over->hub_control; |
5637 | } |
5638 | } |
5639 | EXPORT_SYMBOL_GPL(xhci_init_driver); |
5640 | |
5641 | MODULE_DESCRIPTION(DRIVER_DESC); |
5642 | MODULE_AUTHOR(DRIVER_AUTHOR); |
5643 | MODULE_LICENSE("GPL"); |
5644 | |
5645 | static int __init xhci_hcd_init(void) |
5646 | { |
5647 | /* |
5648 | * Check the compiler generated sizes of structures that must be laid |
5649 | * out in specific ways for hardware access. |
5650 | */ |
5651 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); |
5652 | BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); |
5653 | BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); |
5654 | /* xhci_device_control has eight fields, and also |
5655 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx |
5656 | */ |
5657 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); |
5658 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); |
5659 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); |
5660 | BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8); |
5661 | BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); |
5662 | /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ |
5663 | BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); |
5664 | |
5665 | if (usb_disabled()) |
5666 | return -ENODEV; |
5667 | |
5668 | xhci_debugfs_create_root(); |
5669 | xhci_dbc_init(); |
5670 | |
5671 | return 0; |
5672 | } |
5673 | |
5674 | /* |
5675 | * If an init function is provided, an exit function must also be provided |
5676 | * to allow module unload. |
5677 | */ |
5678 | static void __exit xhci_hcd_fini(void) |
5679 | { |
5680 | xhci_debugfs_remove_root(); |
5681 | xhci_dbc_exit(); |
5682 | } |
5683 | |
5684 | module_init(xhci_hcd_init); |
5685 | module_exit(xhci_hcd_fini); |
5686 |