// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	if (!td || !td->start_seg)
		return false;
	do {
		if (seg == td->start_seg)
			return true;
		seg = seg->next;
	} while (seg && seg != ring->first_seg);

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: the timeout has passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, timeout_us);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}
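/*
 * Typical use, as in xhci_halt() below: wait up to XHCI_MAX_HALT_USEC for
 * the HCHalted status bit to be set:
 *
 *	ret = xhci_handshake(&xhci->op_regs->status,
 *			     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */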

/*
 * xhci_handshake_check_state - same as xhci_handshake but takes an additional
 * exit_state parameter, and bails out with an error immediately when xhc_state
 * has exit_state flag set.
 */
int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
			       u32 mask, u32 done, int usec, unsigned int exit_state)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX ||
					xhci->xhc_state & exit_state,
					1, usec);

	if (result == U32_MAX || xhci->xhc_state & exit_state)
		return -ENODEV;

	return ret;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}

	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
		       temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
			 "waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret) {
		/* Clear state flags, including dying, halted, and removing */
		xhci->xhc_state = 0;
		xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
	}

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms,
	 * after setting the CMD_RESET bit, and before accessing any
	 * HC registers. This allows the HC to complete the
	 * reset operation and be ready for HC register access.
	 * Without this delay, the subsequent HC register access
	 * may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
					 CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct iommu_domain *domain;
	int err, i;
	u64 val;
	u32 intrs;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non-visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
		      ARRAY_SIZE(xhci->run_regs->ir_set));

	for (i = 0; i < intrs; i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			     STS_FATAL, STS_FATAL,
			     XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

static int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

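	/*
	 * Per xHCI spec 5.5.2.1, IMAN bit 0 is the write-1-to-clear
	 * Interrupt Pending (IP) flag and bit 1 is Interrupt Enable (IE);
	 * ER_IRQ_ENABLE() sets IE while leaving IP unasserted in the
	 * written value, so a pending interrupt should not be cleared here.
	 */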
	iman = readl(&ir->ir_set->irq_pending);
	writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);

	return 0;
}

static int xhci_disable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);

	return 0;
}

/* interrupt moderation interval imod_interval in nanoseconds */
static int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
					   u32 imod_interval)
{
	u32 imod;

	if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250)
		return -EINVAL;

	imod = readl(&ir->ir_set->irq_control);
	imod &= ~ER_IRQ_INTERVAL_MASK;
	imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(imod, &ir->ir_set->irq_control);

	return 0;
}
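/*
 * Example: the hardware interval field counts in 250 ns units, so a
 * requested imod_interval of 40000 ns is programmed as 40000 / 250 = 160,
 * i.e. at most one interrupt roughly every 40 us from this interrupter.
 */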

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;
	hcd = rhub->hcd;

	if (!hcd)
		return;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance mode detected->port %d",
				       i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Attempting compliance mode recovery");

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 * re-driver that sometimes causes ports behind that hardware to enter
 * compliance mode. The quirk creates a timer that polls the link state of
 * each host controller port every 2 seconds and recovers the port by issuing
 * a Warm Reset if Compliance mode is detected; otherwise the port becomes
 * "dead" (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * xhci spec), this quirk is needed on systems that have the failing hardware
 * installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		       "Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
	    strstr(dmi_product_name, "Z620") ||
	    strstr(dmi_product_name, "Z820") ||
	    strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

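/*
 * port_status_u0 keeps one bit per USB3 root hub port; this returns true
 * once every port has been seen in U0, after which the compliance mode
 * recovery timer is no longer rearmed.
 */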
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupters[0];
	unsigned long flags;
	u32 temp;

	/*
	 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
	 * Protect the short window before host is running with a lock
	 */
	spin_lock_irqsave(&xhci->lock, flags);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	writel(temp, &xhci->op_regs->command);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
	xhci_enable_interrupter(ir);

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENODEV;
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];
	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (hcd->msi_enabled)
		ir->ip_autoclear = true;

	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	temp_64 &= ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_set_interrupter_moderation(ir, xhci->imod_interval);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
						TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Finished %s for main hcd", __func__);

	xhci_create_dbc_dev(xhci);

	xhci_debugfs_init(xhci);

	if (xhci_has_one_roothub(xhci))
		return xhci_run_finished(xhci);

	set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_remove_dbc_dev(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	spin_unlock_irq(&xhci->lock);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	xhci_disable_interrupter(ir);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_stop completed - status = %x",
		       readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
EXPORT_SYMBOL_GPL(xhci_stop);

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	/* Don't poll the roothubs after shutdown. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);

	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		del_timer_sync(&xhci->shared_hcd->rh_timer);
	}

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/*
	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
	 */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
	    xhci->quirks & XHCI_RESET_TO_DEFAULT)
		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);

	spin_unlock_irq(&xhci->lock);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_shutdown completed - status = %x",
		       readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);

	/* save both primary and all secondary interrupters */
	/* FIXME: should we lock to prevent a race with removing a secondary interrupter? */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		ir->s3_erst_size = readl(&ir->ir_set->erst_size);
		ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
		ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
		ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
		ir->s3_irq_control = readl(&ir->ir_set->irq_control);
	}
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);

	/* FIXME should we lock to protect against freeing of interrupters */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		writel(ir->s3_erst_size, &ir->ir_set->erst_size);
		xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
		xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
		writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
		writel(ir->s3_irq_control, &ir->ir_set->irq_control);
	}
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		  (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Setting command ring address to 0x%llx",
		       (unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
		       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	xhci_initialize_ring_info(ring, 1);
	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to 0U).
 * Internal wake causes immediate xHCI wake after suspend. A PORT_CSC write
 * done at enumeration clears this wake; force one here as well for
 * unconnected ports.
 */

static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
				       struct xhci_hub *rhub,
				       bool do_wakeup)
{
	unsigned long flags;
	u32 t1, t2, portsc;
	int i;

	spin_lock_irqsave(&xhci->lock, flags);

	for (i = 0; i < rhub->num_ports; i++) {
		portsc = readl(rhub->ports[i]->addr);
		t1 = xhci_port_state_to_neutral(portsc);
		t2 = t1;

		/* clear wake bits if do_wake is not set */
		if (!do_wakeup)
			t2 &= ~PORT_WAKE_BITS;

		/* Don't touch csc bit if connected or connect change is set */
		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
			t2 |= PORT_CSC;

		if (t1 != t2) {
			writel(t2, rhub->ports[i]->addr);
			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
				 rhub->hcd->self.busnum, i + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC * 2;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	u32 res;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
	    (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

	if (!HCD_HW_ACCESSIBLE(hcd))
		return 0;

	xhci_dbc_suspend(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		del_timer_sync(&xhci->shared_hcd->rh_timer);
	}

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			   STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	xhci->broken_suspend = 0;
	if (xhci_handshake(&xhci->op_regs->status,
			   STS_SAVE, 0, 20 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the
		 * SSS bit of USBSTS, so when the driver polls for the
		 * xHC to clear BIT(8) it never happens, and the driver
		 * assumes that the controller is not responding and
		 * times out. To work around this, it is good to check
		 * that the SRE and HCE bits are not set (as per xhci
		 * Section 5.4.2) and bypass the timeout.
		 */
		res = readl(&xhci->op_regs->status);
		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
		    (((res & STS_SRE) == 0) &&
		     ((res & STS_HCE) == 0))) {
			xhci->broken_suspend = 1;
		} else {
			xhci_warn(xhci, "WARN: xHC save state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine resumes from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
{
	bool hibernated = (msg.event == PM_EVENT_RESTORE);
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	int retval = 0;
	bool comp_timer_running = false;
	bool pending_portevent = false;
	bool suspended_usb3_devs = false;
	bool reinit_xhc = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */

	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);

	if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
		reinit_xhc = true;

	if (!reinit_xhc) {
		/*
		 * Some controllers might lose power during suspend, so wait
		 * for controller not ready bit to clear, just as in xHC init.
		 */
		retval = xhci_handshake(&xhci->op_regs->status,
					STS_CNR, 0, 10 * 1000 * 1000);
		if (retval) {
			xhci_warn(xhci, "Controller not ready at resume %d\n",
				  retval);
			spin_unlock_irq(&xhci->lock);
			return retval;
		}
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state*/
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the
		 * controller restore, so set the timeout to 100 ms. The xHCI
		 * specification doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
				   STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}

	temp = readl(&xhci->op_regs->status);

	/* re-initialize the HC on Restore Error, or Host Controller Error */
	if ((temp & (STS_SRE | STS_HCE)) &&
	    !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
		reinit_xhc = true;
		if (!xhci->broken_suspend)
			xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
	}

	if (reinit_xhc) {
		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
		    !(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		if (xhci->shared_hcd)
			usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		xhci_disable_interrupter(xhci->interrupters[0]);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			 readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd);
		if (!retval && xhci->shared_hcd) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(xhci->shared_hcd);
		}

		hcd->state = HC_STATE_SUSPENDED;
		if (xhci->shared_hcd)
			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		       0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

done:
	if (retval == 0) {
		/*
		 * Resume roothubs only if there are pending events.
		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
		 * the first wake signalling failed, give it that chance if
		 * there are suspended USB 3 devices.
		 */
		if (xhci->usb3_rhub.bus_state.suspended_ports ||
		    xhci->usb3_rhub.bus_state.bus_suspended)
			suspended_usb3_devs = true;

		pending_portevent = xhci_pending_portevent(xhci);

		if (suspended_usb3_devs && !pending_portevent &&
		    msg.event == PM_EVENT_AUTO_RESUME) {
			msleep(120);
			pending_portevent = xhci_pending_portevent(xhci);
		}

		if (pending_portevent) {
			if (xhci->shared_hcd)
				usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}
	/*
	 * If the system is subject to the Quirk, the Compliance Mode Timer
	 * must always be re-initialized after a system resume, since ports
	 * may suffer the Compliance Mode issue again regardless of whether
	 * they entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	if (xhci->shared_hcd) {
		set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		usb_hcd_poll_rh_status(xhci->shared_hcd);
	}
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
	void *temp;
	int ret = 0;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf_len = urb->transfer_buffer_length;

	temp = kzalloc_node(buf_len, GFP_ATOMIC,
			    dev_to_node(hcd->self.sysdev));
	if (!temp)
		return -ENOMEM;

	if (usb_urb_dir_out(urb))
		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
				   temp, buf_len, 0);

	urb->transfer_buffer = temp;
	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   dir);

	if (dma_mapping_error(hcd->self.sysdev,
			      urb->transfer_dma)) {
		ret = -EAGAIN;
		kfree(temp);
	} else {
		urb->transfer_flags |= URB_DMA_MAP_SINGLE;
	}

	return ret;
}

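/*
 * For hosts with XHCI_SG_TRB_CACHE_SIZE_QUIRK: check whether the scatterlist
 * entries beyond the host's TRB cache depth (TRB_CACHE_SIZE_SS/HS) would add
 * up to less than one max packet; if so, the URB is bounced through the
 * single contiguous temporary buffer set up in xhci_map_temp_buffer() above
 * instead of being mapped from the scatterlist.
 */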
static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
					  struct urb *urb)
{
	bool ret = false;
	unsigned int i;
	unsigned int len = 0;
	unsigned int trb_size;
	unsigned int max_pkt;
	struct scatterlist *sg;
	struct scatterlist *tail_sg;

	tail_sg = urb->sg;
	max_pkt = usb_endpoint_maxp(&urb->ep->desc);

	if (!urb->num_sgs)
		return ret;

	if (urb->dev->speed >= USB_SPEED_SUPER)
		trb_size = TRB_CACHE_SIZE_SS;
	else
		trb_size = TRB_CACHE_SIZE_HS;

	if (urb->transfer_buffer_length != 0 &&
	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			len = len + sg->length;
			if (i > trb_size - 2) {
				len = len - tail_sg->length;
				if (len < max_pkt) {
					ret = true;
					break;
				}

				tail_sg = sg_next(tail_sg);
			}
		}
	}
	return ret;
}

static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
	unsigned int len;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_len = urb->transfer_buffer_length;

	if (IS_ENABLED(CONFIG_HAS_DMA) &&
	    (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		dma_unmap_single(hcd->self.sysdev,
				 urb->transfer_dma,
				 urb->transfer_buffer_length,
				 dir);

	if (usb_urb_dir_in(urb)) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
					   urb->transfer_buffer,
					   buf_len,
					   0);
		if (len != buf_len) {
			xhci_dbg(hcd_to_xhci(hcd),
				 "Copy from tmp buf to urb sg list failed\n");
			urb->actual_length = len;
		}
	}
	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;
}

/*
 * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT),
 * we'll copy the actual data into the TRB address register. This is limited to
 * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
 * >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed.
 */
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);

	if (xhci_urb_suitable_for_idt(urb))
		return 0;

	if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
		if (xhci_urb_temp_buffer_required(hcd, urb))
			return xhci_map_temp_buffer(hcd, urb);
	}
	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct xhci_hcd *xhci;
	bool unmap_temp_buf = false;

	xhci = hcd_to_xhci(hcd);

	if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		unmap_temp_buf = true;

	if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
		xhci_unmap_temp_buf(hcd, urb);
	else
		usb_hcd_unmap_urb_for_dma(hcd, urb);
}

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
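/* Example: ep 1 IN (bEndpointAddress 0x81): index = (1 * 2) + 1 - 1 = 2. */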
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc) * 2);
	else
		index = (unsigned int) (usb_endpoint_num(desc) * 2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);

/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the XHCI endpoint index.
 */
static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}
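/* Example: ep_index 2 -> number = DIV_ROUND_UP(2, 2) = 1; an even index is
 * IN, giving address 0x81 and inverting xhci_get_endpoint_index() above.
 */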

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
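/* Example: ep 1 IN has index 2, so its control context flag is
 * 1 << 3 = 0b1000, matching the added_ctxs example below.
 */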

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
			   struct usb_host_endpoint *ep, int check_ep,
			   bool check_virt_dev, const char *func)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
				 func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and "
				 "virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&vdev->udev->ep0.desc);

	if (hw_max_packet_size == max_packet_size)
		return 0;

	switch (max_packet_size) {
	case 8: case 16: case 32: case 64: case 9:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max packet size in usb_device = %d",
			       max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max packet size in xHCI HW = %d",
			       hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Issuing evaluate context command.");

		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = vdev->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				  __func__);
			ret = -ENOMEM;
			break;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0);
		ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);	/* must clear */
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, vdev->udev, command,
					      true, false);
		/* Clean up the input context for later use by bandwidth functions */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
		break;
	default:
		dev_dbg(&vdev->udev->dev, "incorrect max packet size %d for ep0\n",
			max_packet_size);
		return -EINVAL;
	}

	kfree(command->completion);
	kfree(command);

	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	unsigned int *ep_state;
	struct urb_priv *urb_priv;
	int num_tds;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

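	/*
	 * One TD per isochronous packet; a bulk OUT URB with URB_ZERO_PACKET
	 * and a max-packet-aligned length needs a second, zero-length TD to
	 * terminate the transfer; everything else is a single TD.
	 */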
if (usb_endpoint_xfer_isoc(&urb->ep->desc))
num_tds = urb->number_of_packets;
else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
urb->transfer_buffer_length > 0 &&
urb->transfer_flags & URB_ZERO_PACKET &&
!(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
1551 | num_tds = 2; |
1552 | else |
1553 | num_tds = 1; |
1554 | |
urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
1556 | if (!urb_priv) |
1557 | return -ENOMEM; |
1558 | |
1559 | urb_priv->num_tds = num_tds; |
1560 | urb_priv->num_tds_done = 0; |
1561 | urb->hcpriv = urb_priv; |
1562 | |
1563 | trace_xhci_urb_enqueue(urb); |
1564 | |
1565 | spin_lock_irqsave(&xhci->lock, flags); |
1566 | |
ret = xhci_check_args(hcd, urb->dev, urb->ep,
true, true, __func__);
1569 | if (ret <= 0) { |
1570 | ret = ret ? ret : -EINVAL; |
1571 | goto free_priv; |
1572 | } |
1573 | |
1574 | slot_id = urb->dev->slot_id; |
1575 | |
1576 | if (!HCD_HW_ACCESSIBLE(hcd)) { |
1577 | ret = -ESHUTDOWN; |
1578 | goto free_priv; |
1579 | } |
1580 | |
1581 | if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { |
xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
1583 | ret = -ENODEV; |
1584 | goto free_priv; |
1585 | } |
1586 | |
1587 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1589 | urb->ep->desc.bEndpointAddress, urb); |
1590 | ret = -ESHUTDOWN; |
1591 | goto free_priv; |
1592 | } |
1593 | |
1594 | ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; |
1595 | |
1596 | if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) { |
xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1598 | *ep_state); |
1599 | ret = -EINVAL; |
1600 | goto free_priv; |
1601 | } |
1602 | if (*ep_state & EP_SOFT_CLEAR_TOGGLE) { |
xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1604 | ret = -EINVAL; |
1605 | goto free_priv; |
1606 | } |
1607 | |
switch (usb_endpoint_type(&urb->ep->desc)) {
1609 | |
1610 | case USB_ENDPOINT_XFER_CONTROL: |
1611 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, |
1612 | slot_id, ep_index); |
1613 | break; |
1614 | case USB_ENDPOINT_XFER_BULK: |
1615 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, |
1616 | slot_id, ep_index); |
1617 | break; |
1618 | case USB_ENDPOINT_XFER_INT: |
1619 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, |
1620 | slot_id, ep_index); |
1621 | break; |
1622 | case USB_ENDPOINT_XFER_ISOC: |
1623 | ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, |
1624 | slot_id, ep_index); |
1625 | } |
1626 | |
1627 | if (ret) { |
1628 | free_priv: |
1629 | xhci_urb_free_priv(urb_priv); |
1630 | urb->hcpriv = NULL; |
1631 | } |
spin_unlock_irqrestore(&xhci->lock, flags);
1633 | return ret; |
1634 | } |
1635 | |
1636 | /* |
1637 | * Remove the URB's TD from the endpoint ring. This may cause the HC to stop |
1638 | * USB transfers, potentially stopping in the middle of a TRB buffer. The HC |
1639 | * should pick up where it left off in the TD, unless a Set Transfer Ring |
1640 | * Dequeue Pointer is issued. |
1641 | * |
1642 | * The TRBs that make up the buffers for the canceled URB will be "removed" from |
1643 | * the ring. Since the ring is a contiguous structure, they can't be physically |
1644 | * removed. Instead, there are two options: |
1645 | * |
1646 | * 1) If the HC is in the middle of processing the URB to be canceled, we |
1647 | * simply move the ring's dequeue pointer past those TRBs using the Set |
1648 | * Transfer Ring Dequeue Pointer command. This will be the common case, |
1649 | * when drivers timeout on the last submitted URB and attempt to cancel. |
1650 | * |
1651 | * 2) If the HC is in the middle of a different TD, we turn the TRBs into a |
1652 | * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The |
 *    HC will need to invalidate any TRBs it has cached after the stop
1654 | * endpoint command, as noted in the xHCI 0.95 errata. |
1655 | * |
1656 | * 3) The TD may have completed by the time the Stop Endpoint Command |
1657 | * completes, so software needs to handle that case too. |
1658 | * |
1659 | * This function should protect against the TD enqueueing code ringing the |
1660 | * doorbell while this code is waiting for a Stop Endpoint command to complete. |
 * It also needs to account for multiple cancellations happening at the same
1662 | * time for the same endpoint. |
1663 | * |
1664 | * Note that this function can be called in any context, or so says |
1665 | * usb_hcd_unlink_urb() |
1666 | */ |
1667 | static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) |
1668 | { |
1669 | unsigned long flags; |
1670 | int ret, i; |
1671 | u32 temp; |
1672 | struct xhci_hcd *xhci; |
1673 | struct urb_priv *urb_priv; |
1674 | struct xhci_td *td; |
1675 | unsigned int ep_index; |
1676 | struct xhci_ring *ep_ring; |
1677 | struct xhci_virt_ep *ep; |
1678 | struct xhci_command *command; |
1679 | struct xhci_virt_device *vdev; |
1680 | |
1681 | xhci = hcd_to_xhci(hcd); |
1682 | spin_lock_irqsave(&xhci->lock, flags); |
1683 | |
1684 | trace_xhci_urb_dequeue(urb); |
1685 | |
1686 | /* Make sure the URB hasn't completed or been unlinked already */ |
1687 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); |
1688 | if (ret) |
1689 | goto done; |
1690 | |
1691 | /* give back URB now if we can't queue it for cancel */ |
1692 | vdev = xhci->devs[urb->dev->slot_id]; |
1693 | urb_priv = urb->hcpriv; |
1694 | if (!vdev || !urb_priv) |
1695 | goto err_giveback; |
1696 | |
1697 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
1698 | ep = &vdev->eps[ep_index]; |
1699 | ep_ring = xhci_urb_to_transfer_ring(xhci, urb); |
1700 | if (!ep || !ep_ring) |
1701 | goto err_giveback; |
1702 | |
1703 | /* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */ |
temp = readl(&xhci->op_regs->status);
1705 | if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) { |
1706 | xhci_hc_died(xhci); |
1707 | goto done; |
1708 | } |
1709 | |
1710 | /* |
1711 | * check ring is not re-allocated since URB was enqueued. If it is, then |
1712 | * make sure none of the ring related pointers in this URB private data |
1713 | * are touched, such as td_list, otherwise we overwrite freed data |
1714 | */ |
if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1717 | for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) { |
1718 | td = &urb_priv->td[i]; |
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
1721 | } |
1722 | goto err_giveback; |
1723 | } |
1724 | |
1725 | if (xhci->xhc_state & XHCI_STATE_HALTED) { |
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"HC halted, freeing TD manually.");
1728 | for (i = urb_priv->num_tds_done; |
1729 | i < urb_priv->num_tds; |
1730 | i++) { |
1731 | td = &urb_priv->td[i]; |
if (!list_empty(&td->td_list))
list_del_init(&td->td_list);
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
1736 | } |
1737 | goto err_giveback; |
1738 | } |
1739 | |
1740 | i = urb_priv->num_tds_done; |
1741 | if (i < urb_priv->num_tds) |
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx",
urb, urb->dev->devpath,
urb->ep->desc.bEndpointAddress,
(unsigned long long) xhci_trb_virt_to_dma(
urb_priv->td[i].start_seg,
urb_priv->td[i].first_trb));
1750 | |
1751 | for (; i < urb_priv->num_tds; i++) { |
1752 | td = &urb_priv->td[i]; |
1753 | /* TD can already be on cancelled list if ep halted on it */ |
if (list_empty(&td->cancelled_td_list)) {
td->cancel_status = TD_DIRTY;
list_add_tail(&td->cancelled_td_list,
&ep->cancelled_td_list);
1758 | } |
1759 | } |
1760 | |
1761 | /* Queue a stop endpoint command, but only if this is |
1762 | * the first cancellation to be handled. |
1763 | */ |
1764 | if (!(ep->ep_state & EP_STOP_CMD_PENDING)) { |
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1766 | if (!command) { |
1767 | ret = -ENOMEM; |
1768 | goto done; |
1769 | } |
1770 | ep->ep_state |= EP_STOP_CMD_PENDING; |
xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
ep_index, 0);
1773 | xhci_ring_cmd_db(xhci); |
1774 | } |
1775 | done: |
spin_unlock_irqrestore(&xhci->lock, flags);
1777 | return ret; |
1778 | |
1779 | err_giveback: |
1780 | if (urb_priv) |
1781 | xhci_urb_free_priv(urb_priv); |
1782 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
spin_unlock_irqrestore(&xhci->lock, flags);
usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1785 | return ret; |
1786 | } |
1787 | |
1788 | /* Drop an endpoint from a new bandwidth configuration for this device. |
1789 | * Only one call to this function is allowed per endpoint before |
1790 | * check_bandwidth() or reset_bandwidth() must be called. |
1791 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
1792 | * add the endpoint to the schedule with possibly new parameters denoted by a |
1793 | * different endpoint descriptor in usb_host_endpoint. |
1794 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
1795 | * not allowed. |
1796 | * |
1797 | * The USB core will not allow URBs to be queued to an endpoint that is being |
1798 | * disabled, so there's no need for mutual exclusion to protect |
1799 | * the xhci->devs[slot_id] structure. |
1800 | */ |
1801 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
1802 | struct usb_host_endpoint *ep) |
1803 | { |
1804 | struct xhci_hcd *xhci; |
1805 | struct xhci_container_ctx *in_ctx, *out_ctx; |
1806 | struct xhci_input_control_ctx *ctrl_ctx; |
1807 | unsigned int ep_index; |
1808 | struct xhci_ep_ctx *ep_ctx; |
1809 | u32 drop_flag; |
1810 | u32 new_add_flags, new_drop_flags; |
1811 | int ret; |
1812 | |
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1814 | if (ret <= 0) |
1815 | return ret; |
1816 | xhci = hcd_to_xhci(hcd); |
1817 | if (xhci->xhc_state & XHCI_STATE_DYING) |
1818 | return -ENODEV; |
1819 | |
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
drop_flag = xhci_get_endpoint_flag(&ep->desc);
1822 | if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { |
xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1824 | __func__, drop_flag); |
1825 | return 0; |
1826 | } |
1827 | |
1828 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
1829 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; |
ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
1834 | return 0; |
1835 | } |
1836 | |
1837 | ep_index = xhci_get_endpoint_index(&ep->desc); |
ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1839 | /* If the HC already knows the endpoint is disabled, |
1840 | * or the HCD has noted it is disabled, ignore this request |
1841 | */ |
1842 | if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) || |
1843 | le32_to_cpu(ctrl_ctx->drop_flags) & |
xhci_get_endpoint_flag(&ep->desc)) {
1845 | /* Do not warn when called after a usb_device_reset */ |
1846 | if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) |
xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1848 | __func__, ep); |
1849 | return 0; |
1850 | } |
1851 | |
1852 | ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); |
1853 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
1854 | |
1855 | ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); |
1856 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
1857 | |
xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);

xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1861 | |
xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1863 | (unsigned int) ep->desc.bEndpointAddress, |
1864 | udev->slot_id, |
1865 | (unsigned int) new_drop_flags, |
1866 | (unsigned int) new_add_flags); |
1867 | return 0; |
1868 | } |
1869 | EXPORT_SYMBOL_GPL(xhci_drop_endpoint); |
1870 | |
1871 | /* Add an endpoint to a new possible bandwidth configuration for this device. |
1872 | * Only one call to this function is allowed per endpoint before |
1873 | * check_bandwidth() or reset_bandwidth() must be called. |
1874 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
1875 | * add the endpoint to the schedule with possibly new parameters denoted by a |
1876 | * different endpoint descriptor in usb_host_endpoint. |
1877 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
1878 | * not allowed. |
1879 | * |
1880 | * The USB core will not allow URBs to be queued to an endpoint until the |
1881 | * configuration or alt setting is installed in the device, so there's no need |
1882 | * for mutual exclusion to protect the xhci->devs[slot_id] structure. |
1883 | */ |
1884 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
1885 | struct usb_host_endpoint *ep) |
1886 | { |
1887 | struct xhci_hcd *xhci; |
1888 | struct xhci_container_ctx *in_ctx; |
1889 | unsigned int ep_index; |
1890 | struct xhci_input_control_ctx *ctrl_ctx; |
1891 | struct xhci_ep_ctx *ep_ctx; |
1892 | u32 added_ctxs; |
1893 | u32 new_add_flags, new_drop_flags; |
1894 | struct xhci_virt_device *virt_dev; |
1895 | int ret = 0; |
1896 | |
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1898 | if (ret <= 0) { |
1899 | /* So we won't queue a reset ep command for a root hub */ |
1900 | ep->hcpriv = NULL; |
1901 | return ret; |
1902 | } |
1903 | xhci = hcd_to_xhci(hcd); |
1904 | if (xhci->xhc_state & XHCI_STATE_DYING) |
1905 | return -ENODEV; |
1906 | |
added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1908 | if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { |
1909 | /* FIXME when we have to issue an evaluate endpoint command to |
1910 | * deal with ep0 max packet size changing once we get the |
1911 | * descriptors |
1912 | */ |
xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1914 | __func__, added_ctxs); |
1915 | return 0; |
1916 | } |
1917 | |
1918 | virt_dev = xhci->devs[udev->slot_id]; |
1919 | in_ctx = virt_dev->in_ctx; |
ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
1924 | return 0; |
1925 | } |
1926 | |
1927 | ep_index = xhci_get_endpoint_index(&ep->desc); |
1928 | /* If this endpoint is already in use, and the upper layers are trying |
1929 | * to add it again without dropping it, reject the addition. |
1930 | */ |
1931 | if (virt_dev->eps[ep_index].ring && |
1932 | !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { |
xhci_warn(xhci, "Trying to add endpoint 0x%x without dropping it.\n",
1935 | (unsigned int) ep->desc.bEndpointAddress); |
1936 | return -EINVAL; |
1937 | } |
1938 | |
1939 | /* If the HCD has already noted the endpoint is enabled, |
1940 | * ignore this request. |
1941 | */ |
1942 | if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) { |
xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1944 | __func__, ep); |
1945 | return 0; |
1946 | } |
1947 | |
1948 | /* |
1949 | * Configuration and alternate setting changes must be done in |
 * process context, not interrupt context (or so the documentation
 * for usb_set_interface() and usb_set_configuration() claims).
1952 | */ |
1953 | if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { |
dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1955 | __func__, ep->desc.bEndpointAddress); |
1956 | return -ENOMEM; |
1957 | } |
1958 | |
1959 | ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); |
1960 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
1961 | |
1962 | /* If xhci_endpoint_disable() was called for this endpoint, but the |
1963 | * xHC hasn't been notified yet through the check_bandwidth() call, |
1964 | * this re-adds a new state for the endpoint from the new endpoint |
1965 | * descriptors. We must drop and re-add this endpoint, so we leave the |
1966 | * drop flags alone. |
1967 | */ |
1968 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
1969 | |
1970 | /* Store the usb_device pointer for later use */ |
1971 | ep->hcpriv = udev; |
1972 | |
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
trace_xhci_add_endpoint(ep_ctx);
1975 | |
xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1977 | (unsigned int) ep->desc.bEndpointAddress, |
1978 | udev->slot_id, |
1979 | (unsigned int) new_drop_flags, |
1980 | (unsigned int) new_add_flags); |
1981 | return 0; |
1982 | } |
1983 | EXPORT_SYMBOL_GPL(xhci_add_endpoint); |
1984 | |
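/*
 * Scrub the input context so a later configure endpoint command starts
 * from a clean slate: clear every add/drop flag, shrink Context Entries
 * back to just EP0, and zero all input endpoint contexts.
 */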
1985 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
1986 | { |
1987 | struct xhci_input_control_ctx *ctrl_ctx; |
1988 | struct xhci_ep_ctx *ep_ctx; |
1989 | struct xhci_slot_ctx *slot_ctx; |
1990 | int i; |
1991 | |
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
1996 | return; |
1997 | } |
1998 | |
1999 | /* When a device's add flag and drop flag are zero, any subsequent |
2000 | * configure endpoint command will leave that endpoint's state |
2001 | * untouched. Make sure we don't leave any old state in the input |
2002 | * endpoint contexts. |
2003 | */ |
2004 | ctrl_ctx->drop_flags = 0; |
2005 | ctrl_ctx->add_flags = 0; |
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2007 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
2008 | /* Endpoint 0 is always valid */ |
2009 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); |
2010 | for (i = 1; i < 31; i++) { |
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2012 | ep_ctx->ep_info = 0; |
2013 | ep_ctx->ep_info2 = 0; |
2014 | ep_ctx->deq = 0; |
2015 | ep_ctx->tx_info = 0; |
2016 | } |
2017 | } |
2018 | |
2019 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, |
2020 | struct usb_device *udev, u32 *cmd_status) |
2021 | { |
2022 | int ret; |
2023 | |
2024 | switch (*cmd_status) { |
2025 | case COMP_COMMAND_ABORTED: |
2026 | case COMP_COMMAND_RING_STOPPED: |
xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2028 | ret = -ETIME; |
2029 | break; |
2030 | case COMP_RESOURCE_ERROR: |
2031 | dev_warn(&udev->dev, |
2032 | "Not enough host controller resources for new device state.\n" ); |
2033 | ret = -ENOMEM; |
2034 | /* FIXME: can we allocate more resources for the HC? */ |
2035 | break; |
2036 | case COMP_BANDWIDTH_ERROR: |
2037 | case COMP_SECONDARY_BANDWIDTH_ERROR: |
2038 | dev_warn(&udev->dev, |
2039 | "Not enough bandwidth for new device state.\n" ); |
2040 | ret = -ENOSPC; |
2041 | /* FIXME: can we go back to the old state? */ |
2042 | break; |
2043 | case COMP_TRB_ERROR: |
2044 | /* the HCD set up something wrong */ |
dev_warn(&udev->dev,
"ERROR: Endpoint drop flag = 0, add flag = 1, and endpoint is not disabled.\n");
2048 | ret = -EINVAL; |
2049 | break; |
2050 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
2051 | dev_warn(&udev->dev, |
2052 | "ERROR: Incompatible device for endpoint configure command.\n" ); |
2053 | ret = -ENODEV; |
2054 | break; |
2055 | case COMP_SUCCESS: |
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Successful Endpoint Configure command");
2058 | ret = 0; |
2059 | break; |
2060 | default: |
xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2062 | *cmd_status); |
2063 | ret = -EINVAL; |
2064 | break; |
2065 | } |
2066 | return ret; |
2067 | } |
2068 | |
2069 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, |
2070 | struct usb_device *udev, u32 *cmd_status) |
2071 | { |
2072 | int ret; |
2073 | |
2074 | switch (*cmd_status) { |
2075 | case COMP_COMMAND_ABORTED: |
2076 | case COMP_COMMAND_RING_STOPPED: |
xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2078 | ret = -ETIME; |
2079 | break; |
2080 | case COMP_PARAMETER_ERROR: |
2081 | dev_warn(&udev->dev, |
2082 | "WARN: xHCI driver setup invalid evaluate context command.\n" ); |
2083 | ret = -EINVAL; |
2084 | break; |
2085 | case COMP_SLOT_NOT_ENABLED_ERROR: |
2086 | dev_warn(&udev->dev, |
2087 | "WARN: slot not enabled for evaluate context command.\n" ); |
2088 | ret = -EINVAL; |
2089 | break; |
2090 | case COMP_CONTEXT_STATE_ERROR: |
2091 | dev_warn(&udev->dev, |
2092 | "WARN: invalid context state for evaluate context command.\n" ); |
2093 | ret = -EINVAL; |
2094 | break; |
2095 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
2096 | dev_warn(&udev->dev, |
2097 | "ERROR: Incompatible device for evaluate context command.\n" ); |
2098 | ret = -ENODEV; |
2099 | break; |
2100 | case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: |
2101 | /* Max Exit Latency too large error */ |
dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2103 | ret = -EINVAL; |
2104 | break; |
2105 | case COMP_SUCCESS: |
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Successful evaluate context command");
2108 | ret = 0; |
2109 | break; |
2110 | default: |
xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2112 | *cmd_status); |
2113 | ret = -EINVAL; |
2114 | break; |
2115 | } |
2116 | return ret; |
2117 | } |
2118 | |
2119 | static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, |
2120 | struct xhci_input_control_ctx *ctrl_ctx) |
2121 | { |
2122 | u32 valid_add_flags; |
2123 | u32 valid_drop_flags; |
2124 | |
2125 | /* Ignore the slot flag (bit 0), and the default control endpoint flag |
2126 | * (bit 1). The default control endpoint is added during the Address |
2127 | * Device command and is never removed until the slot is disabled. |
2128 | */ |
2129 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
2130 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
2131 | |
2132 | /* Use hweight32 to count the number of ones in the add flags, or |
2133 | * number of endpoints added. Don't count endpoints that are changed |
2134 | * (both added and dropped). |
2135 | */ |
2136 | return hweight32(valid_add_flags) - |
2137 | hweight32(valid_add_flags & valid_drop_flags); |
2138 | } |
2139 | |
2140 | static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, |
2141 | struct xhci_input_control_ctx *ctrl_ctx) |
2142 | { |
2143 | u32 valid_add_flags; |
2144 | u32 valid_drop_flags; |
2145 | |
2146 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
2147 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
2148 | |
2149 | return hweight32(valid_drop_flags) - |
2150 | hweight32(valid_add_flags & valid_drop_flags); |
2151 | } |
2152 | |
2153 | /* |
2154 | * We need to reserve the new number of endpoints before the configure endpoint |
2155 | * command completes. We can't subtract the dropped endpoints from the number |
2156 | * of active endpoints until the command completes because we can oversubscribe |
2157 | * the host in this case: |
2158 | * |
2159 | * - the first configure endpoint command drops more endpoints than it adds |
2160 | * - a second configure endpoint command that adds more endpoints is queued |
2161 | * - the first configure endpoint command fails, so the config is unchanged |
 * - the second command may succeed, even though there aren't enough resources
2163 | * |
2164 | * Must be called with xhci->lock held. |
2165 | */ |
2166 | static int xhci_reserve_host_resources(struct xhci_hcd *xhci, |
2167 | struct xhci_input_control_ctx *ctrl_ctx) |
2168 | { |
2169 | u32 added_eps; |
2170 | |
2171 | added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); |
2172 | if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { |
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Not enough ep ctxs: %u active, need to add %u, limit is %u.",
2176 | xhci->num_active_eps, added_eps, |
2177 | xhci->limit_active_eps); |
2178 | return -ENOMEM; |
2179 | } |
2180 | xhci->num_active_eps += added_eps; |
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Adding %u ep ctxs, %u now active.", added_eps,
2183 | xhci->num_active_eps); |
2184 | return 0; |
2185 | } |
2186 | |
2187 | /* |
 * The configure endpoint command failed in the xHC for some other reason, so
 * we need to revert the resources that the failed configuration would have
 * used.
2190 | * |
2191 | * Must be called with xhci->lock held. |
2192 | */ |
2193 | static void xhci_free_host_resources(struct xhci_hcd *xhci, |
2194 | struct xhci_input_control_ctx *ctrl_ctx) |
2195 | { |
2196 | u32 num_failed_eps; |
2197 | |
2198 | num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); |
2199 | xhci->num_active_eps -= num_failed_eps; |
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Removing %u failed ep ctxs, %u now active.",
2202 | num_failed_eps, |
2203 | xhci->num_active_eps); |
2204 | } |
2205 | |
2206 | /* |
2207 | * Now that the command has completed, clean up the active endpoint count by |
2208 | * subtracting out the endpoints that were dropped (but not changed). |
2209 | * |
2210 | * Must be called with xhci->lock held. |
2211 | */ |
2212 | static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, |
2213 | struct xhci_input_control_ctx *ctrl_ctx) |
2214 | { |
2215 | u32 num_dropped_eps; |
2216 | |
2217 | num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); |
2218 | xhci->num_active_eps -= num_dropped_eps; |
2219 | if (num_dropped_eps) |
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Removing %u dropped ep ctxs, %u now active.",
2222 | num_dropped_eps, |
2223 | xhci->num_active_eps); |
2224 | } |
2225 | |
2226 | static unsigned int xhci_get_block_size(struct usb_device *udev) |
2227 | { |
2228 | switch (udev->speed) { |
2229 | case USB_SPEED_LOW: |
2230 | case USB_SPEED_FULL: |
2231 | return FS_BLOCK; |
2232 | case USB_SPEED_HIGH: |
2233 | return HS_BLOCK; |
2234 | case USB_SPEED_SUPER: |
2235 | case USB_SPEED_SUPER_PLUS: |
2236 | return SS_BLOCK; |
2237 | case USB_SPEED_UNKNOWN: |
2238 | default: |
2239 | /* Should never happen */ |
2240 | return 1; |
2241 | } |
2242 | } |
2243 | |
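/*
 * Return the per-packet overhead to charge against this interval, using
 * the costliest endpoint type present (checked from low-speed down to
 * high-speed).
 */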
2244 | static unsigned int |
2245 | xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) |
2246 | { |
2247 | if (interval_bw->overhead[LS_OVERHEAD_TYPE]) |
2248 | return LS_OVERHEAD; |
2249 | if (interval_bw->overhead[FS_OVERHEAD_TYPE]) |
2250 | return FS_OVERHEAD; |
2251 | return HS_OVERHEAD; |
2252 | } |
2253 | |
2254 | /* If we are changing a LS/FS device under a HS hub, |
2255 | * make sure (if we are activating a new TT) that the HS bus has enough |
2256 | * bandwidth for this new TT. |
2257 | */ |
2258 | static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, |
2259 | struct xhci_virt_device *virt_dev, |
2260 | int old_active_eps) |
2261 | { |
2262 | struct xhci_interval_bw_table *bw_table; |
2263 | struct xhci_tt_bw_info *tt_info; |
2264 | |
2265 | /* Find the bandwidth table for the root port this TT is attached to. */ |
2266 | bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table; |
2267 | tt_info = virt_dev->tt_info; |
2268 | /* If this TT already had active endpoints, the bandwidth for this TT |
2269 | * has already been added. Removing all periodic endpoints (and thus |
 * making the TT inactive) will only decrease the bandwidth used.
2271 | */ |
2272 | if (old_active_eps) |
2273 | return 0; |
2274 | if (old_active_eps == 0 && tt_info->active_eps != 0) { |
2275 | if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) |
2276 | return -ENOMEM; |
2277 | return 0; |
2278 | } |
2279 | /* Not sure why we would have no new active endpoints... |
2280 | * |
2281 | * Maybe because of an Evaluate Context change for a hub update or a |
2282 | * control endpoint 0 max packet size change? |
2283 | * FIXME: skip the bandwidth calculation in that case. |
2284 | */ |
2285 | return 0; |
2286 | } |
2287 | |
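/*
 * SuperSpeed bandwidth is tracked per direction: check that the IN and
 * OUT totals each fit under their bus limit once the reserved
 * percentage is set aside.
 */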
2288 | static int xhci_check_ss_bw(struct xhci_hcd *xhci, |
2289 | struct xhci_virt_device *virt_dev) |
2290 | { |
2291 | unsigned int bw_reserved; |
2292 | |
2293 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); |
2294 | if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) |
2295 | return -ENOMEM; |
2296 | |
2297 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); |
2298 | if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) |
2299 | return -ENOMEM; |
2300 | |
2301 | return 0; |
2302 | } |
2303 | |
2304 | /* |
2305 | * This algorithm is a very conservative estimate of the worst-case scheduling |
2306 | * scenario for any one interval. The hardware dynamically schedules the |
2307 | * packets, so we can't tell which microframe could be the limiting factor in |
2308 | * the bandwidth scheduling. This only takes into account periodic endpoints. |
2309 | * |
2310 | * Obviously, we can't solve an NP complete problem to find the minimum worst |
2311 | * case scenario. Instead, we come up with an estimate that is no less than |
2312 | * the worst case bandwidth used for any one microframe, but may be an |
2313 | * over-estimate. |
2314 | * |
2315 | * We walk the requirements for each endpoint by interval, starting with the |
2316 | * smallest interval, and place packets in the schedule where there is only one |
2317 | * possible way to schedule packets for that interval. In order to simplify |
2318 | * this algorithm, we record the largest max packet size for each interval, and |
2319 | * assume all packets will be that size. |
2320 | * |
2321 | * For interval 0, we obviously must schedule all packets for each interval. |
2322 | * The bandwidth for interval 0 is just the amount of data to be transmitted |
2323 | * (the sum of all max ESIT payload sizes, plus any overhead per packet times |
2324 | * the number of packets). |
2325 | * |
2326 | * For interval 1, we have two possible microframes to schedule those packets |
2327 | * in. For this algorithm, if we can schedule the same number of packets for |
2328 | * each possible scheduling opportunity (each microframe), we will do so. The |
2329 | * remaining number of packets will be saved to be transmitted in the gaps in |
2330 | * the next interval's scheduling sequence. |
2331 | * |
2332 | * As we move those remaining packets to be scheduled with interval 2 packets, |
2333 | * we have to double the number of remaining packets to transmit. This is |
2334 | * because the intervals are actually powers of 2, and we would be transmitting |
2335 | * the previous interval's packets twice in this interval. We also have to be |
2336 | * sure that when we look at the largest max packet size for this interval, we |
2337 | * also look at the largest max packet size for the remaining packets and take |
2338 | * the greater of the two. |
2339 | * |
2340 | * The algorithm continues to evenly distribute packets in each scheduling |
2341 | * opportunity, and push the remaining packets out, until we get to the last |
2342 | * interval. Then those packets and their associated overhead are just added |
2343 | * to the bandwidth used. |
2344 | */ |
2345 | static int xhci_check_bw_table(struct xhci_hcd *xhci, |
2346 | struct xhci_virt_device *virt_dev, |
2347 | int old_active_eps) |
2348 | { |
2349 | unsigned int bw_reserved; |
2350 | unsigned int max_bandwidth; |
2351 | unsigned int bw_used; |
2352 | unsigned int block_size; |
2353 | struct xhci_interval_bw_table *bw_table; |
2354 | unsigned int packet_size = 0; |
2355 | unsigned int overhead = 0; |
2356 | unsigned int packets_transmitted = 0; |
2357 | unsigned int packets_remaining = 0; |
2358 | unsigned int i; |
2359 | |
2360 | if (virt_dev->udev->speed >= USB_SPEED_SUPER) |
2361 | return xhci_check_ss_bw(xhci, virt_dev); |
2362 | |
2363 | if (virt_dev->udev->speed == USB_SPEED_HIGH) { |
2364 | max_bandwidth = HS_BW_LIMIT; |
2365 | /* Convert percent of bus BW reserved to blocks reserved */ |
2366 | bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); |
2367 | } else { |
2368 | max_bandwidth = FS_BW_LIMIT; |
2369 | bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); |
2370 | } |
2371 | |
2372 | bw_table = virt_dev->bw_table; |
2373 | /* We need to translate the max packet size and max ESIT payloads into |
2374 | * the units the hardware uses. |
2375 | */ |
block_size = xhci_get_block_size(virt_dev->udev);
2377 | |
2378 | /* If we are manipulating a LS/FS device under a HS hub, double check |
 * that the HS bus has enough bandwidth if we are activating a new TT.
2380 | */ |
2381 | if (virt_dev->tt_info) { |
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
2384 | virt_dev->rhub_port->hw_portnum + 1); |
2385 | if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { |
xhci_warn(xhci, "Not enough bandwidth on HS bus for newly activated TT.\n");
2388 | return -ENOMEM; |
2389 | } |
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for TT slot %u port %u",
2392 | virt_dev->tt_info->slot_id, |
2393 | virt_dev->tt_info->ttport); |
2394 | } else { |
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
2397 | virt_dev->rhub_port->hw_portnum + 1); |
2398 | } |
2399 | |
2400 | /* Add in how much bandwidth will be used for interval zero, or the |
2401 | * rounded max ESIT payload + number of packets * largest overhead. |
2402 | */ |
2403 | bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + |
2404 | bw_table->interval_bw[0].num_packets * |
xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2406 | |
2407 | for (i = 1; i < XHCI_MAX_INTERVAL; i++) { |
2408 | unsigned int bw_added; |
2409 | unsigned int largest_mps; |
2410 | unsigned int interval_overhead; |
2411 | |
2412 | /* |
2413 | * How many packets could we transmit in this interval? |
2414 | * If packets didn't fit in the previous interval, we will need |
2415 | * to transmit that many packets twice within this interval. |
2416 | */ |
2417 | packets_remaining = 2 * packets_remaining + |
2418 | bw_table->interval_bw[i].num_packets; |
2419 | |
2420 | /* Find the largest max packet size of this or the previous |
2421 | * interval. |
2422 | */ |
if (list_empty(&bw_table->interval_bw[i].endpoints))
2424 | largest_mps = 0; |
2425 | else { |
2426 | struct xhci_virt_ep *virt_ep; |
2427 | struct list_head *ep_entry; |
2428 | |
2429 | ep_entry = bw_table->interval_bw[i].endpoints.next; |
2430 | virt_ep = list_entry(ep_entry, |
2431 | struct xhci_virt_ep, bw_endpoint_list); |
2432 | /* Convert to blocks, rounding up */ |
2433 | largest_mps = DIV_ROUND_UP( |
2434 | virt_ep->bw_info.max_packet_size, |
2435 | block_size); |
2436 | } |
2437 | if (largest_mps > packet_size) |
2438 | packet_size = largest_mps; |
2439 | |
2440 | /* Use the larger overhead of this or the previous interval. */ |
interval_overhead = xhci_get_largest_overhead(
&bw_table->interval_bw[i]);
2443 | if (interval_overhead > overhead) |
2444 | overhead = interval_overhead; |
2445 | |
2446 | /* How many packets can we evenly distribute across |
2447 | * (1 << (i + 1)) possible scheduling opportunities? |
2448 | */ |
2449 | packets_transmitted = packets_remaining >> (i + 1); |
2450 | |
2451 | /* Add in the bandwidth used for those scheduled packets */ |
2452 | bw_added = packets_transmitted * (overhead + packet_size); |
2453 | |
2454 | /* How many packets do we have remaining to transmit? */ |
2455 | packets_remaining = packets_remaining % (1 << (i + 1)); |
2456 | |
2457 | /* What largest max packet size should those packets have? */ |
2458 | /* If we've transmitted all packets, don't carry over the |
2459 | * largest packet size. |
2460 | */ |
2461 | if (packets_remaining == 0) { |
2462 | packet_size = 0; |
2463 | overhead = 0; |
2464 | } else if (packets_transmitted > 0) { |
2465 | /* Otherwise if we do have remaining packets, and we've |
2466 | * scheduled some packets in this interval, take the |
2467 | * largest max packet size from endpoints with this |
2468 | * interval. |
2469 | */ |
2470 | packet_size = largest_mps; |
2471 | overhead = interval_overhead; |
2472 | } |
2473 | /* Otherwise carry over packet_size and overhead from the last |
2474 | * time we had a remainder. |
2475 | */ |
2476 | bw_used += bw_added; |
2477 | if (bw_used > max_bandwidth) { |
xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2480 | bw_used, max_bandwidth); |
2481 | return -ENOMEM; |
2482 | } |
2483 | } |
2484 | /* |
2485 | * Ok, we know we have some packets left over after even-handedly |
2486 | * scheduling interval 15. We don't know which microframes they will |
2487 | * fit into, so we over-schedule and say they will be scheduled every |
2488 | * microframe. |
2489 | */ |
2490 | if (packets_remaining > 0) |
2491 | bw_used += overhead + packet_size; |
2492 | |
2493 | if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { |
2494 | /* OK, we're manipulating a HS device attached to a |
2495 | * root port bandwidth domain. Include the number of active TTs |
2496 | * in the bandwidth used. |
2497 | */ |
2498 | bw_used += TT_HS_OVERHEAD * |
2499 | xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts; |
2500 | } |
2501 | |
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Final bandwidth: %u, Limit: %u, Reserved: %u, Available: %u percent",
2505 | bw_used, max_bandwidth, bw_reserved, |
2506 | (max_bandwidth - bw_used - bw_reserved) * 100 / |
2507 | max_bandwidth); |
2508 | |
2509 | bw_used += bw_reserved; |
2510 | if (bw_used > max_bandwidth) { |
xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2512 | bw_used, max_bandwidth); |
2513 | return -ENOMEM; |
2514 | } |
2515 | |
2516 | bw_table->bw_used = bw_used; |
2517 | return 0; |
2518 | } |
2519 | |
2520 | static bool xhci_is_async_ep(unsigned int ep_type) |
2521 | { |
2522 | return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && |
2523 | ep_type != ISOC_IN_EP && |
2524 | ep_type != INT_IN_EP); |
2525 | } |
2526 | |
2527 | static bool xhci_is_sync_in_ep(unsigned int ep_type) |
2528 | { |
2529 | return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); |
2530 | } |
2531 | |
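/*
 * Estimate the SuperSpeed bandwidth an endpoint consumes: payload plus
 * per-packet overhead for every packet in one service interval. For
 * interval 0 the burst overhead is charged once on top; for longer
 * intervals the whole cost is averaged over 2^ep_interval microframes.
 */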
2532 | static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) |
2533 | { |
2534 | unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); |
2535 | |
2536 | if (ep_bw->ep_interval == 0) |
2537 | return SS_OVERHEAD_BURST + |
2538 | (ep_bw->mult * ep_bw->num_packets * |
2539 | (SS_OVERHEAD + mps)); |
2540 | return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * |
2541 | (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), |
2542 | 1 << ep_bw->ep_interval); |
2543 | |
2544 | } |
2545 | |
2546 | static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, |
2547 | struct xhci_bw_info *ep_bw, |
2548 | struct xhci_interval_bw_table *bw_table, |
2549 | struct usb_device *udev, |
2550 | struct xhci_virt_ep *virt_ep, |
2551 | struct xhci_tt_bw_info *tt_info) |
2552 | { |
2553 | struct xhci_interval_bw *interval_bw; |
2554 | int normalized_interval; |
2555 | |
if (xhci_is_async_ep(ep_bw->type))
2557 | return; |
2558 | |
2559 | if (udev->speed >= USB_SPEED_SUPER) { |
if (xhci_is_sync_in_ep(ep_bw->type))
2561 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= |
2562 | xhci_get_ss_bw_consumed(ep_bw); |
2563 | else |
2564 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= |
2565 | xhci_get_ss_bw_consumed(ep_bw); |
2566 | return; |
2567 | } |
2568 | |
2569 | /* SuperSpeed endpoints never get added to intervals in the table, so |
2570 | * this check is only valid for HS/FS/LS devices. |
2571 | */ |
if (list_empty(&virt_ep->bw_endpoint_list))
2573 | return; |
/* For LS/FS devices, we need to translate the interval expressed in
 * microframes to frames; ep_interval is a power-of-two exponent, so
 * subtracting 3 turns 2^n microframes into 2^(n-3) frames.
 */
2577 | if (udev->speed == USB_SPEED_HIGH) |
2578 | normalized_interval = ep_bw->ep_interval; |
2579 | else |
2580 | normalized_interval = ep_bw->ep_interval - 3; |
2581 | |
2582 | if (normalized_interval == 0) |
2583 | bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; |
2584 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
2585 | interval_bw->num_packets -= ep_bw->num_packets; |
2586 | switch (udev->speed) { |
2587 | case USB_SPEED_LOW: |
2588 | interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; |
2589 | break; |
2590 | case USB_SPEED_FULL: |
2591 | interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; |
2592 | break; |
2593 | case USB_SPEED_HIGH: |
2594 | interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; |
2595 | break; |
2596 | default: |
2597 | /* Should never happen because only LS/FS/HS endpoints will get |
2598 | * added to the endpoint list. |
2599 | */ |
2600 | return; |
2601 | } |
2602 | if (tt_info) |
2603 | tt_info->active_eps -= 1; |
list_del_init(&virt_ep->bw_endpoint_list);
2605 | } |
2606 | |
2607 | static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, |
2608 | struct xhci_bw_info *ep_bw, |
2609 | struct xhci_interval_bw_table *bw_table, |
2610 | struct usb_device *udev, |
2611 | struct xhci_virt_ep *virt_ep, |
2612 | struct xhci_tt_bw_info *tt_info) |
2613 | { |
2614 | struct xhci_interval_bw *interval_bw; |
2615 | struct xhci_virt_ep *smaller_ep; |
2616 | int normalized_interval; |
2617 | |
if (xhci_is_async_ep(ep_bw->type))
2619 | return; |
2620 | |
2621 | if (udev->speed == USB_SPEED_SUPER) { |
if (xhci_is_sync_in_ep(ep_bw->type))
2623 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in += |
2624 | xhci_get_ss_bw_consumed(ep_bw); |
2625 | else |
2626 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out += |
2627 | xhci_get_ss_bw_consumed(ep_bw); |
2628 | return; |
2629 | } |
2630 | |
2631 | /* For LS/FS devices, we need to translate the interval expressed in |
2632 | * microframes to frames. |
2633 | */ |
2634 | if (udev->speed == USB_SPEED_HIGH) |
2635 | normalized_interval = ep_bw->ep_interval; |
2636 | else |
2637 | normalized_interval = ep_bw->ep_interval - 3; |
2638 | |
2639 | if (normalized_interval == 0) |
2640 | bw_table->interval0_esit_payload += ep_bw->max_esit_payload; |
2641 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
2642 | interval_bw->num_packets += ep_bw->num_packets; |
2643 | switch (udev->speed) { |
2644 | case USB_SPEED_LOW: |
2645 | interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; |
2646 | break; |
2647 | case USB_SPEED_FULL: |
2648 | interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; |
2649 | break; |
2650 | case USB_SPEED_HIGH: |
2651 | interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; |
2652 | break; |
2653 | default: |
2654 | /* Should never happen because only LS/FS/HS endpoints will get |
2655 | * added to the endpoint list. |
2656 | */ |
2657 | return; |
2658 | } |
2659 | |
2660 | if (tt_info) |
2661 | tt_info->active_eps += 1; |
2662 | /* Insert the endpoint into the list, largest max packet size first. */ |
2663 | list_for_each_entry(smaller_ep, &interval_bw->endpoints, |
2664 | bw_endpoint_list) { |
2665 | if (ep_bw->max_packet_size >= |
2666 | smaller_ep->bw_info.max_packet_size) { |
2667 | /* Add the new ep before the smaller endpoint */ |
list_add_tail(&virt_ep->bw_endpoint_list,
&smaller_ep->bw_endpoint_list);
2670 | return; |
2671 | } |
2672 | } |
2673 | /* Add the new endpoint at the end of the list. */ |
list_add_tail(&virt_ep->bw_endpoint_list,
&interval_bw->endpoints);
2676 | } |
2677 | |
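/*
 * Keep the per-root-port count of active TTs in sync: a TT that gains
 * its first active periodic endpoint charges TT_HS_OVERHEAD against the
 * shared HS bus budget, and one that loses its last gives it back.
 */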
2678 | void xhci_update_tt_active_eps(struct xhci_hcd *xhci, |
2679 | struct xhci_virt_device *virt_dev, |
2680 | int old_active_eps) |
2681 | { |
2682 | struct xhci_root_port_bw_info *rh_bw_info; |
2683 | if (!virt_dev->tt_info) |
2684 | return; |
2685 | |
2686 | rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum]; |
2687 | if (old_active_eps == 0 && |
2688 | virt_dev->tt_info->active_eps != 0) { |
2689 | rh_bw_info->num_active_tts += 1; |
2690 | rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; |
2691 | } else if (old_active_eps != 0 && |
2692 | virt_dev->tt_info->active_eps == 0) { |
2693 | rh_bw_info->num_active_tts -= 1; |
2694 | rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; |
2695 | } |
2696 | } |
2697 | |
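/*
 * Tentatively apply the bandwidth changes described by the input
 * context: pull changed or dropped endpoints out of the interval table,
 * add the new versions, and run the bandwidth check. On failure, every
 * endpoint's bw_info and table entry is rolled back. Called with
 * xhci->lock held by xhci_configure_endpoint().
 */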
2698 | static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, |
2699 | struct xhci_virt_device *virt_dev, |
2700 | struct xhci_container_ctx *in_ctx) |
2701 | { |
2702 | struct xhci_bw_info ep_bw_info[31]; |
2703 | int i; |
2704 | struct xhci_input_control_ctx *ctrl_ctx; |
2705 | int old_active_eps = 0; |
2706 | |
2707 | if (virt_dev->tt_info) |
2708 | old_active_eps = virt_dev->tt_info->active_eps; |
2709 | |
ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
2714 | return -ENOMEM; |
2715 | } |
2716 | |
2717 | for (i = 0; i < 31; i++) { |
2718 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
2719 | continue; |
2720 | |
2721 | /* Make a copy of the BW info in case we need to revert this */ |
2722 | memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, |
2723 | sizeof(ep_bw_info[i])); |
2724 | /* Drop the endpoint from the interval table if the endpoint is |
2725 | * being dropped or changed. |
2726 | */ |
2727 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
xhci_drop_ep_from_interval_table(xhci,
&virt_dev->eps[i].bw_info,
virt_dev->bw_table,
virt_dev->udev,
&virt_dev->eps[i],
virt_dev->tt_info);
2734 | } |
2735 | /* Overwrite the information stored in the endpoints' bw_info */ |
xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2737 | for (i = 0; i < 31; i++) { |
2738 | /* Add any changed or added endpoints to the interval table */ |
2739 | if (EP_IS_ADDED(ctrl_ctx, i)) |
xhci_add_ep_to_interval_table(xhci,
&virt_dev->eps[i].bw_info,
virt_dev->bw_table,
virt_dev->udev,
&virt_dev->eps[i],
virt_dev->tt_info);
2746 | } |
2747 | |
2748 | if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { |
2749 | /* Ok, this fits in the bandwidth we have. |
2750 | * Update the number of active TTs. |
2751 | */ |
2752 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); |
2753 | return 0; |
2754 | } |
2755 | |
2756 | /* We don't have enough bandwidth for this, revert the stored info. */ |
2757 | for (i = 0; i < 31; i++) { |
2758 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
2759 | continue; |
2760 | |
2761 | /* Drop the new copies of any added or changed endpoints from |
2762 | * the interval table. |
2763 | */ |
2764 | if (EP_IS_ADDED(ctrl_ctx, i)) { |
xhci_drop_ep_from_interval_table(xhci,
&virt_dev->eps[i].bw_info,
virt_dev->bw_table,
virt_dev->udev,
&virt_dev->eps[i],
virt_dev->tt_info);
2771 | } |
2772 | /* Revert the endpoint back to its old information */ |
2773 | memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], |
2774 | sizeof(ep_bw_info[i])); |
2775 | /* Add any changed or dropped endpoints back into the table */ |
2776 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
xhci_add_ep_to_interval_table(xhci,
&virt_dev->eps[i].bw_info,
virt_dev->bw_table,
virt_dev->udev,
&virt_dev->eps[i],
virt_dev->tt_info);
2783 | } |
2784 | return -ENOMEM; |
2785 | } |
2786 | |
/* Issue a configure endpoint command or evaluate context command
2789 | * and wait for it to finish. |
2790 | */ |
2791 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, |
2792 | struct usb_device *udev, |
2793 | struct xhci_command *command, |
2794 | bool ctx_change, bool must_succeed) |
2795 | { |
2796 | int ret; |
2797 | unsigned long flags; |
2798 | struct xhci_input_control_ctx *ctrl_ctx; |
2799 | struct xhci_virt_device *virt_dev; |
2800 | struct xhci_slot_ctx *slot_ctx; |
2801 | |
2802 | if (!command) |
2803 | return -EINVAL; |
2804 | |
2805 | spin_lock_irqsave(&xhci->lock, flags); |
2806 | |
2807 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
spin_unlock_irqrestore(&xhci->lock, flags);
2809 | return -ESHUTDOWN; |
2810 | } |
2811 | |
2812 | virt_dev = xhci->devs[udev->slot_id]; |
2813 | |
ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
2819 | return -ENOMEM; |
2820 | } |
2821 | |
2822 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && |
2823 | xhci_reserve_host_resources(xhci, ctrl_ctx)) { |
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "Not enough host resources, active endpoint contexts = %u\n",
2827 | xhci->num_active_eps); |
2828 | return -ENOMEM; |
2829 | } |
2830 | if ((xhci->quirks & XHCI_SW_BW_CHECKING) && |
xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2832 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
2833 | xhci_free_host_resources(xhci, ctrl_ctx); |
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "Not enough bandwidth\n");
2836 | return -ENOMEM; |
2837 | } |
2838 | |
slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2840 | |
2841 | trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); |
trace_xhci_configure_endpoint(slot_ctx);
2843 | |
2844 | if (!ctx_change) |
ret = xhci_queue_configure_endpoint(xhci, command,
command->in_ctx->dma,
udev->slot_id, must_succeed);
else
ret = xhci_queue_evaluate_context(xhci, command,
command->in_ctx->dma,
udev->slot_id, must_succeed);
2852 | if (ret < 0) { |
2853 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
2854 | xhci_free_host_resources(xhci, ctrl_ctx); |
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"FIXME allocate a new ring segment");
2858 | return -ENOMEM; |
2859 | } |
2860 | xhci_ring_cmd_db(xhci); |
spin_unlock_irqrestore(&xhci->lock, flags);
2862 | |
2863 | /* Wait for the configure endpoint command to complete */ |
2864 | wait_for_completion(command->completion); |
2865 | |
2866 | if (!ctx_change) |
2867 | ret = xhci_configure_endpoint_result(xhci, udev, |
&command->status);
else
ret = xhci_evaluate_context_result(xhci, udev,
&command->status);
2872 | |
2873 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
2874 | spin_lock_irqsave(&xhci->lock, flags); |
2875 | /* If the command failed, remove the reserved resources. |
2876 | * Otherwise, clean up the estimate to include dropped eps. |
2877 | */ |
2878 | if (ret) |
2879 | xhci_free_host_resources(xhci, ctrl_ctx); |
2880 | else |
2881 | xhci_finish_resource_reservation(xhci, ctrl_ctx); |
spin_unlock_irqrestore(&xhci->lock, flags);
2883 | } |
2884 | return ret; |
2885 | } |
2886 | |
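/*
 * Streams configured on an endpoint don't survive an interface change;
 * warn and release the stream info so the endpoint reverts to an
 * ordinary transfer ring.
 */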
2887 | static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, |
2888 | struct xhci_virt_device *vdev, int i) |
2889 | { |
2890 | struct xhci_virt_ep *ep = &vdev->eps[i]; |
2891 | |
2892 | if (ep->ep_state & EP_HAS_STREAMS) { |
xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
xhci_get_endpoint_address(i));
xhci_free_stream_info(xhci, ep->stream_info);
2896 | ep->stream_info = NULL; |
2897 | ep->ep_state &= ~EP_HAS_STREAMS; |
2898 | } |
2899 | } |
2900 | |
2901 | /* Called after one or more calls to xhci_add_endpoint() or |
2902 | * xhci_drop_endpoint(). If this call fails, the USB core is expected |
2903 | * to call xhci_reset_bandwidth(). |
2904 | * |
2905 | * Since we are in the middle of changing either configuration or |
2906 | * installing a new alt setting, the USB core won't allow URBs to be |
2907 | * enqueued for any endpoint on the old config or interface. Nothing |
2908 | * else should be touching the xhci->devs[slot_id] structure, so we |
2909 | * don't need to take the xhci->lock for manipulating that. |
2910 | */ |
2911 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
2912 | { |
2913 | int i; |
2914 | int ret = 0; |
2915 | struct xhci_hcd *xhci; |
2916 | struct xhci_virt_device *virt_dev; |
2917 | struct xhci_input_control_ctx *ctrl_ctx; |
2918 | struct xhci_slot_ctx *slot_ctx; |
2919 | struct xhci_command *command; |
2920 | |
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2922 | if (ret <= 0) |
2923 | return ret; |
2924 | xhci = hcd_to_xhci(hcd); |
2925 | if ((xhci->xhc_state & XHCI_STATE_DYING) || |
2926 | (xhci->xhc_state & XHCI_STATE_REMOVING)) |
2927 | return -ENODEV; |
2928 | |
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2930 | virt_dev = xhci->devs[udev->slot_id]; |
2931 | |
command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2933 | if (!command) |
2934 | return -ENOMEM; |
2935 | |
2936 | command->in_ctx = virt_dev->in_ctx; |
2937 | |
2938 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ |
ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
2943 | ret = -ENOMEM; |
2944 | goto command_cleanup; |
2945 | } |
2946 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
2947 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); |
2948 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); |
2949 | |
2950 | /* Don't issue the command if there's no endpoints to update. */ |
2951 | if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && |
2952 | ctrl_ctx->drop_flags == 0) { |
2953 | ret = 0; |
2954 | goto command_cleanup; |
2955 | } |
2956 | /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ |
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2958 | for (i = 31; i >= 1; i--) { |
2959 | __le32 le32 = cpu_to_le32(BIT(i)); |
2960 | |
2961 | if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) |
2962 | || (ctrl_ctx->add_flags & le32) || i == 1) { |
2963 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
2964 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); |
2965 | break; |
2966 | } |
2967 | } |
2968 | |
ret = xhci_configure_endpoint(xhci, udev, command,
false, false);
2971 | if (ret) |
2972 | /* Callee should call reset_bandwidth() */ |
2973 | goto command_cleanup; |
2974 | |
2975 | /* Free any rings that were dropped, but not changed. */ |
2976 | for (i = 1; i < 31; i++) { |
2977 | if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && |
2978 | !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { |
xhci_free_endpoint_ring(xhci, virt_dev, i);
xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2981 | } |
2982 | } |
2983 | xhci_zero_in_ctx(xhci, virt_dev); |
2984 | /* |
2985 | * Install any rings for completely new endpoints or changed endpoints, |
2986 | * and free any old rings from changed endpoints. |
2987 | */ |
2988 | for (i = 1; i < 31; i++) { |
2989 | if (!virt_dev->eps[i].new_ring) |
2990 | continue; |
2991 | /* Only free the old ring if it exists. |
2992 | * It may not if this is the first add of an endpoint. |
2993 | */ |
		if (virt_dev->eps[i].ring)
			xhci_free_endpoint_ring(xhci, virt_dev, i);
		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
		xhci_debugfs_create_endpoint(xhci, virt_dev, i);
	}
command_cleanup:
	kfree(command->completion);
	kfree(command);
3005 | |
3006 | return ret; |
3007 | } |
3008 | EXPORT_SYMBOL_GPL(xhci_check_bandwidth); |
3009 | |
3010 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
3011 | { |
3012 | struct xhci_hcd *xhci; |
3013 | struct xhci_virt_device *virt_dev; |
3014 | int i, ret; |
3015 | |
	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3017 | if (ret <= 0) |
3018 | return; |
3019 | xhci = hcd_to_xhci(hcd); |
3020 | |
3021 | xhci_dbg(xhci, "%s called for udev %p\n" , __func__, udev); |
3022 | virt_dev = xhci->devs[udev->slot_id]; |
3023 | /* Free any rings allocated for added endpoints */ |
3024 | for (i = 0; i < 31; i++) { |
3025 | if (virt_dev->eps[i].new_ring) { |
			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3028 | virt_dev->eps[i].new_ring = NULL; |
3029 | } |
3030 | } |
3031 | xhci_zero_in_ctx(xhci, virt_dev); |
3032 | } |
3033 | EXPORT_SYMBOL_GPL(xhci_reset_bandwidth); |
3034 | |
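/*
 * Prepare the input control context for a Configure Endpoint command:
 * store the caller's add/drop flags, copy the current slot context into
 * the input context, and always mark the slot context itself as added.
 */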
3035 | static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, |
3036 | struct xhci_container_ctx *in_ctx, |
3037 | struct xhci_container_ctx *out_ctx, |
3038 | struct xhci_input_control_ctx *ctrl_ctx, |
3039 | u32 add_flags, u32 drop_flags) |
3040 | { |
3041 | ctrl_ctx->add_flags = cpu_to_le32(add_flags); |
3042 | ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); |
3043 | xhci_slot_copy(xhci, in_ctx, out_ctx); |
3044 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
3045 | } |
3046 | |
3047 | static void xhci_endpoint_disable(struct usb_hcd *hcd, |
3048 | struct usb_host_endpoint *host_ep) |
3049 | { |
3050 | struct xhci_hcd *xhci; |
3051 | struct xhci_virt_device *vdev; |
3052 | struct xhci_virt_ep *ep; |
3053 | struct usb_device *udev; |
3054 | unsigned long flags; |
3055 | unsigned int ep_index; |
3056 | |
3057 | xhci = hcd_to_xhci(hcd); |
3058 | rescan: |
3059 | spin_lock_irqsave(&xhci->lock, flags); |
3060 | |
3061 | udev = (struct usb_device *)host_ep->hcpriv; |
3062 | if (!udev || !udev->slot_id) |
3063 | goto done; |
3064 | |
3065 | vdev = xhci->devs[udev->slot_id]; |
3066 | if (!vdev) |
3067 | goto done; |
3068 | |
3069 | ep_index = xhci_get_endpoint_index(&host_ep->desc); |
3070 | ep = &vdev->eps[ep_index]; |
3071 | |
3072 | /* wait for hub_tt_work to finish clearing hub TT */ |
3073 | if (ep->ep_state & EP_CLEARING_TT) { |
		spin_unlock_irqrestore(&xhci->lock, flags);
		schedule_timeout_uninterruptible(1);
3076 | goto rescan; |
3077 | } |
3078 | |
3079 | if (ep->ep_state) |
3080 | xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n" , |
3081 | ep->ep_state); |
3082 | done: |
3083 | host_ep->hcpriv = NULL; |
	spin_unlock_irqrestore(&xhci->lock, flags);
3085 | } |
3086 | |
3087 | /* |
3088 | * Called after usb core issues a clear halt control message. |
3089 | * The host side of the halt should already be cleared by a reset endpoint |
3090 | * command issued when the STALL event was received. |
3091 | * |
 * The reset endpoint command may only be issued to endpoints in the halted
 * state. For software that wishes to reset the data toggle or sequence number
 * of an endpoint that isn't in the halted state, this function issues a
 * configure endpoint command with the Drop and Add bits set for the target
 * endpoint. Refer to the additional note in xHCI specification section 4.6.8.
3097 | * |
3098 | * vdev may be lost due to xHC restore error and re-initialization during S3/S4 |
3099 | * resume. A new vdev will be allocated later by xhci_discover_or_reset_device() |
3100 | */ |
3101 | |
3102 | static void xhci_endpoint_reset(struct usb_hcd *hcd, |
3103 | struct usb_host_endpoint *host_ep) |
3104 | { |
3105 | struct xhci_hcd *xhci; |
3106 | struct usb_device *udev; |
3107 | struct xhci_virt_device *vdev; |
3108 | struct xhci_virt_ep *ep; |
3109 | struct xhci_input_control_ctx *ctrl_ctx; |
3110 | struct xhci_command *stop_cmd, *cfg_cmd; |
3111 | unsigned int ep_index; |
3112 | unsigned long flags; |
3113 | u32 ep_flag; |
3114 | int err; |
3115 | |
3116 | xhci = hcd_to_xhci(hcd); |
3117 | ep_index = xhci_get_endpoint_index(&host_ep->desc); |
3118 | |
	/*
	 * USB core assumes a max packet value for ep0 on FS devices until the
	 * real value is read from the descriptor. Core resets ep0 if the
	 * values mismatch. Reconfigure the xHCI ep0 endpoint context here in
	 * that case.
	 */
	if (usb_endpoint_xfer_control(&host_ep->desc) && ep_index == 0) {
3125 | |
3126 | udev = container_of(host_ep, struct usb_device, ep0); |
3127 | if (udev->speed != USB_SPEED_FULL || !udev->slot_id) |
3128 | return; |
3129 | |
3130 | vdev = xhci->devs[udev->slot_id]; |
3131 | if (!vdev || vdev->udev != udev) |
3132 | return; |
3133 | |
3134 | xhci_check_ep0_maxpacket(xhci, vdev); |
3135 | |
3136 | /* Nothing else should be done here for ep0 during ep reset */ |
3137 | return; |
3138 | } |
3139 | |
3140 | if (!host_ep->hcpriv) |
3141 | return; |
3142 | udev = (struct usb_device *) host_ep->hcpriv; |
3143 | vdev = xhci->devs[udev->slot_id]; |
3144 | |
3145 | if (!udev->slot_id || !vdev) |
3146 | return; |
3147 | |
3148 | ep = &vdev->eps[ep_index]; |
3149 | |
	/* Bail out if toggle is already being cleared by an endpoint reset */
	spin_lock_irqsave(&xhci->lock, flags);
	if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
		ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	/* Only interrupt and bulk endpoints use data toggle, see USB 2.0 spec 5.5.4 */
	if (usb_endpoint_xfer_control(&host_ep->desc) ||
	    usb_endpoint_xfer_isoc(&host_ep->desc))
		return;

	ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3164 | |
3165 | if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG) |
3166 | return; |
3167 | |
	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3169 | if (!stop_cmd) |
3170 | return; |
3171 | |
	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3173 | if (!cfg_cmd) |
3174 | goto cleanup; |
3175 | |
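	/*
	 * Soft reset sequence: a Stop Endpoint command first forces the xHC
	 * to flush its cached endpoint state to the output context, then a
	 * Configure Endpoint command that drops and re-adds the same
	 * endpoint reinitializes it with a cleared data toggle and sequence
	 * number (see the note in xHCI spec section 4.6.8).
	 */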
3176 | spin_lock_irqsave(&xhci->lock, flags); |
3177 | |
3178 | /* block queuing new trbs and ringing ep doorbell */ |
3179 | ep->ep_state |= EP_SOFT_CLEAR_TOGGLE; |
3180 | |
3181 | /* |
3182 | * Make sure endpoint ring is empty before resetting the toggle/seq. |
	 * Driver is required to synchronously cancel all transfer requests.
	 * Stop the endpoint to force the xHC to update the output context.
3185 | */ |
3186 | |
	if (!list_empty(&ep->ring->td_list)) {
		dev_err(&udev->dev, "EP not empty, refuse reset\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cfg_cmd);
3191 | goto cleanup; |
3192 | } |
3193 | |
	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
				       ep_index, 0);
	if (err < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cfg_cmd);
		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d\n",
			 __func__, err);
3201 | goto cleanup; |
3202 | } |
3203 | |
3204 | xhci_ring_cmd_db(xhci); |
	spin_unlock_irqrestore(&xhci->lock, flags);
3206 | |
3207 | wait_for_completion(stop_cmd->completion); |
3208 | |
3209 | spin_lock_irqsave(&xhci->lock, flags); |
3210 | |
3211 | /* config ep command clears toggle if add and drop ep flags are set */ |
	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cfg_cmd);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3217 | __func__); |
3218 | goto cleanup; |
3219 | } |
3220 | |
	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
					   ctrl_ctx, ep_flag, ep_flag);
	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3224 | |
	err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
					    udev->slot_id, false);
	if (err < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cfg_cmd);
		xhci_dbg(xhci, "%s: Failed to queue config ep command, %d\n",
			 __func__, err);
3232 | goto cleanup; |
3233 | } |
3234 | |
3235 | xhci_ring_cmd_db(xhci); |
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(cfg_cmd->completion);

	xhci_free_command(xhci, cfg_cmd);
cleanup:
	xhci_free_command(xhci, stop_cmd);
	spin_lock_irqsave(&xhci->lock, flags);
	if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
		ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
	spin_unlock_irqrestore(&xhci->lock, flags);
3247 | } |
3248 | |
3249 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
3250 | struct usb_device *udev, struct usb_host_endpoint *ep, |
3251 | unsigned int slot_id) |
3252 | { |
3253 | int ret; |
3254 | unsigned int ep_index; |
3255 | unsigned int ep_state; |
3256 | |
3257 | if (!ep) |
3258 | return -EINVAL; |
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret ? ret : -EINVAL;
	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
3266 | return -EINVAL; |
3267 | } |
3268 | |
3269 | ep_index = xhci_get_endpoint_index(&ep->desc); |
3270 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; |
3271 | if (ep_state & EP_HAS_STREAMS || |
3272 | ep_state & EP_GETTING_STREAMS) { |
3273 | xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " |
3274 | "already has streams set up.\n" , |
3275 | ep->desc.bEndpointAddress); |
3276 | xhci_warn(xhci, "Send email to xHCI maintainer and ask for " |
3277 | "dynamic stream context array reallocation.\n" ); |
3278 | return -EINVAL; |
3279 | } |
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
3283 | ep->desc.bEndpointAddress); |
3284 | return -EINVAL; |
3285 | } |
3286 | return 0; |
3287 | } |
3288 | |
3289 | static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, |
3290 | unsigned int *num_streams, unsigned int *num_stream_ctxs) |
3291 | { |
3292 | unsigned int max_streams; |
3293 | |
3294 | /* The stream context array size must be a power of two */ |
3295 | *num_stream_ctxs = roundup_pow_of_two(*num_streams); |
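	/* e.g. a request for 13 stream IDs (including stream 0) rounds up to
	 * a 16-entry stream context array.
	 */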
3296 | /* |
3297 | * Find out how many primary stream array entries the host controller |
3298 | * supports. Later we may use secondary stream arrays (similar to 2nd |
3299 | * level page entries), but that's an optional feature for xHCI host |
3300 | * controllers. xHCs must support at least 4 stream IDs. |
3301 | */ |
3302 | max_streams = HCC_MAX_PSA(xhci->hcc_params); |
3303 | if (*num_stream_ctxs > max_streams) { |
3304 | xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n" , |
3305 | max_streams); |
3306 | *num_stream_ctxs = max_streams; |
3307 | *num_streams = max_streams; |
3308 | } |
3309 | } |
3310 | |
/* Returns an error code if one of the endpoints already has streams.
3312 | * This does not change any data structures, it only checks and gathers |
3313 | * information. |
3314 | */ |
3315 | static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, |
3316 | struct usb_device *udev, |
3317 | struct usb_host_endpoint **eps, unsigned int num_eps, |
3318 | unsigned int *num_streams, u32 *changed_ep_bitmask) |
3319 | { |
3320 | unsigned int max_streams; |
3321 | unsigned int endpoint_flag; |
3322 | int i; |
3323 | int ret; |
3324 | |
3325 | for (i = 0; i < num_eps; i++) { |
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
3328 | if (ret < 0) |
3329 | return ret; |
3330 | |
		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams + 1;
3337 | } |
3338 | |
		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3340 | if (*changed_ep_bitmask & endpoint_flag) |
3341 | return -EINVAL; |
3342 | *changed_ep_bitmask |= endpoint_flag; |
3343 | } |
3344 | return 0; |
3345 | } |
3346 | |
3347 | static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, |
3348 | struct usb_device *udev, |
3349 | struct usb_host_endpoint **eps, unsigned int num_eps) |
3350 | { |
3351 | u32 changed_ep_bitmask = 0; |
3352 | unsigned int slot_id; |
3353 | unsigned int ep_index; |
3354 | unsigned int ep_state; |
3355 | int i; |
3356 | |
3357 | slot_id = udev->slot_id; |
3358 | if (!xhci->devs[slot_id]) |
3359 | return 0; |
3360 | |
3361 | for (i = 0; i < num_eps; i++) { |
3362 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3363 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; |
3364 | /* Are streams already being freed for the endpoint? */ |
3365 | if (ep_state & EP_GETTING_NO_STREAMS) { |
3366 | xhci_warn(xhci, "WARN Can't disable streams for " |
3367 | "endpoint 0x%x, " |
3368 | "streams are being disabled already\n" , |
3369 | eps[i]->desc.bEndpointAddress); |
3370 | return 0; |
3371 | } |
3372 | /* Are there actually any streams to free? */ |
3373 | if (!(ep_state & EP_HAS_STREAMS) && |
3374 | !(ep_state & EP_GETTING_STREAMS)) { |
3375 | xhci_warn(xhci, "WARN Can't disable streams for " |
3376 | "endpoint 0x%x, " |
3377 | "streams are already disabled!\n" , |
3378 | eps[i]->desc.bEndpointAddress); |
3379 | xhci_warn(xhci, "WARN xhci_free_streams() called " |
3380 | "with non-streams endpoint\n" ); |
3381 | return 0; |
3382 | } |
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3384 | } |
3385 | return changed_ep_bitmask; |
3386 | } |
3387 | |
3388 | /* |
3389 | * The USB device drivers use this function (through the HCD interface in USB |
3390 | * core) to prepare a set of bulk endpoints to use streams. Streams are used to |
3391 | * coordinate mass storage command queueing across multiple endpoints (basically |
3392 | * a stream ID == a task ID). |
3393 | * |
3394 | * Setting up streams involves allocating the same size stream context array |
3395 | * for each endpoint and issuing a configure endpoint command for all endpoints. |
3396 | * |
3397 | * Don't allow the call to succeed if one endpoint only supports one stream |
3398 | * (which means it doesn't support streams at all). |
3399 | * |
3400 | * Drivers may get less stream IDs than they asked for, if the host controller |
3401 | * hardware or endpoints claim they can't support the number of requested |
3402 | * stream IDs. |
3403 | */ |
3404 | static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, |
3405 | struct usb_host_endpoint **eps, unsigned int num_eps, |
3406 | unsigned int num_streams, gfp_t mem_flags) |
3407 | { |
3408 | int i, ret; |
3409 | struct xhci_hcd *xhci; |
3410 | struct xhci_virt_device *vdev; |
3411 | struct xhci_command *config_cmd; |
3412 | struct xhci_input_control_ctx *ctrl_ctx; |
3413 | unsigned int ep_index; |
3414 | unsigned int num_stream_ctxs; |
3415 | unsigned int max_packet; |
3416 | unsigned long flags; |
3417 | u32 changed_ep_bitmask = 0; |
3418 | |
3419 | if (!eps) |
3420 | return -EINVAL; |
3421 | |
3422 | /* Add one to the number of streams requested to account for |
3423 | * stream 0 that is reserved for xHCI usage. |
3424 | */ |
3425 | num_streams += 1; |
3426 | xhci = hcd_to_xhci(hcd); |
3427 | xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n" , |
3428 | num_streams); |
3429 | |
3430 | /* MaxPSASize value 0 (2 streams) means streams are not supported */ |
3431 | if ((xhci->quirks & XHCI_BROKEN_STREAMS) || |
3432 | HCC_MAX_PSA(xhci->hcc_params) < 4) { |
3433 | xhci_dbg(xhci, "xHCI controller does not support streams.\n" ); |
3434 | return -ENOSYS; |
3435 | } |
3436 | |
	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3438 | if (!config_cmd) |
3439 | return -ENOMEM; |
3440 | |
	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
3446 | return -ENOMEM; |
3447 | } |
3448 | |
3449 | /* Check to make sure all endpoints are not already configured for |
3450 | * streams. While we're at it, find the maximum number of streams that |
3451 | * all the endpoints will support and check for duplicate endpoints. |
3452 | */ |
3453 | spin_lock_irqsave(&xhci->lock, flags); |
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
3459 | return ret; |
3460 | } |
3461 | if (num_streams <= 1) { |
3462 | xhci_warn(xhci, "WARN: endpoints can't handle " |
3463 | "more than one stream.\n" ); |
3464 | xhci_free_command(xhci, command: config_cmd); |
3465 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3466 | return -EINVAL; |
3467 | } |
3468 | vdev = xhci->devs[udev->slot_id]; |
3469 | /* Mark each endpoint as being in transition, so |
3470 | * xhci_urb_enqueue() will reject all URBs. |
3471 | */ |
3472 | for (i = 0; i < num_eps; i++) { |
3473 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3474 | vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; |
3475 | } |
	spin_unlock_irqrestore(&xhci->lock, flags);
3477 | |
3478 | /* Setup internal data structures and allocate HW data structures for |
3479 | * streams (but don't install the HW structures in the input context |
3480 | * until we're sure all memory allocation succeeded). |
3481 | */ |
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);
3485 | |
3486 | for (i = 0; i < num_eps; i++) { |
3487 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
		max_packet = usb_endpoint_maxp(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams,
				max_packet, mem_flags);
3493 | if (!vdev->eps[ep_index].stream_info) |
3494 | goto cleanup; |
3495 | /* Set maxPstreams in endpoint context and update deq ptr to |
3496 | * point to stream context array. FIXME |
3497 | */ |
3498 | } |
3499 | |
3500 | /* Set up the input context for a configure endpoint command. */ |
3501 | for (i = 0; i < num_eps; i++) { |
3502 | struct xhci_ep_ctx *ep_ctx; |
3503 | |
3504 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
3511 | } |
3512 | /* Tell the HW to drop its old copy of the endpoint context info |
3513 | * and add the updated copy from the input context. |
3514 | */ |
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);
3522 | |
3523 | /* xHC rejected the configure endpoint command for some reason, so we |
3524 | * leave the old ring intact and free our internal streams data |
3525 | * structure. |
3526 | */ |
3527 | if (ret < 0) |
3528 | goto cleanup; |
3529 | |
3530 | spin_lock_irqsave(&xhci->lock, flags); |
3531 | for (i = 0; i < num_eps; i++) { |
3532 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3533 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; |
3534 | xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n" , |
3535 | udev->slot_id, ep_index); |
3536 | vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; |
3537 | } |
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);
3540 | |
3541 | for (i = 0; i < num_eps; i++) { |
3542 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
		xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3544 | } |
3545 | /* Subtract 1 for stream 0, which drivers can't use */ |
3546 | return num_streams - 1; |
3547 | |
3548 | cleanup: |
3549 | /* If it didn't work, free the streams! */ |
3550 | for (i = 0; i < num_eps; i++) { |
3551 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
3560 | } |
	xhci_free_command(xhci, config_cmd);
3562 | return -ENOMEM; |
3563 | } |
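
/*
 * Typical caller flow (sketch): a class driver such as uas requests
 * streams on its bulk endpoints through the USB core, e.g.
 *
 *	num = usb_alloc_streams(intf, eps, num_eps, 256, GFP_NOIO);
 *
 * and on success tags each URB with a stream_id below the returned
 * count. The core routes such requests here via the HCD alloc_streams op.
 */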
3564 | |
3565 | /* Transition the endpoint from using streams to being a "normal" endpoint |
3566 | * without streams. |
3567 | * |
3568 | * Modify the endpoint context state, submit a configure endpoint command, |
3569 | * and free all endpoint rings for streams if that completes successfully. |
3570 | */ |
3571 | static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, |
3572 | struct usb_host_endpoint **eps, unsigned int num_eps, |
3573 | gfp_t mem_flags) |
3574 | { |
3575 | int i, ret; |
3576 | struct xhci_hcd *xhci; |
3577 | struct xhci_virt_device *vdev; |
3578 | struct xhci_command *command; |
3579 | struct xhci_input_control_ctx *ctrl_ctx; |
3580 | unsigned int ep_index; |
3581 | unsigned long flags; |
3582 | u32 changed_ep_bitmask; |
3583 | |
3584 | xhci = hcd_to_xhci(hcd); |
3585 | vdev = xhci->devs[udev->slot_id]; |
3586 | |
3587 | /* Set up a configure endpoint command to remove the streams rings */ |
3588 | spin_lock_irqsave(&xhci->lock, flags); |
3589 | changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, |
3590 | udev, eps, num_eps); |
3591 | if (changed_ep_bitmask == 0) { |
		spin_unlock_irqrestore(&xhci->lock, flags);
3593 | return -EINVAL; |
3594 | } |
3595 | |
3596 | /* Use the xhci_command structure from the first endpoint. We may have |
3597 | * allocated too many, but the driver may call xhci_free_streams() for |
3598 | * each endpoint it grouped into one call to xhci_alloc_streams(). |
3599 | */ |
3600 | ep_index = xhci_get_endpoint_index(&eps[0]->desc); |
3601 | command = vdev->eps[ep_index].stream_info->free_streams_command; |
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3606 | __func__); |
3607 | return -EINVAL; |
3608 | } |
3609 | |
3610 | for (i = 0; i < num_eps; i++) { |
3611 | struct xhci_ep_ctx *ep_ctx; |
3612 | |
3613 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);
3627 | |
3628 | /* Issue and wait for the configure endpoint command, |
3629 | * which must succeed. |
3630 | */ |
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);
3633 | |
3634 | /* xHC rejected the configure endpoint command for some reason, so we |
3635 | * leave the streams rings intact. |
3636 | */ |
3637 | if (ret < 0) |
3638 | return ret; |
3639 | |
3640 | spin_lock_irqsave(&xhci->lock, flags); |
3641 | for (i = 0; i < num_eps; i++) { |
3642 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
3652 | |
3653 | return 0; |
3654 | } |
3655 | |
3656 | /* |
3657 | * Deletes endpoint resources for endpoints that were active before a Reset |
3658 | * Device command, or a Disable Slot command. The Reset Device command leaves |
3659 | * the control endpoint intact, whereas the Disable Slot command deletes it. |
3660 | * |
3661 | * Must be called with xhci->lock held. |
3662 | */ |
3663 | void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, |
3664 | struct xhci_virt_device *virt_dev, bool drop_control_ep) |
3665 | { |
3666 | int i; |
3667 | unsigned int num_dropped_eps = 0; |
3668 | unsigned int drop_flags = 0; |
3669 | |
3670 | for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { |
3671 | if (virt_dev->eps[i].ring) { |
3672 | drop_flags |= 1 << i; |
3673 | num_dropped_eps++; |
3674 | } |
3675 | } |
3676 | xhci->num_active_eps -= num_dropped_eps; |
3677 | if (num_dropped_eps) |
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
3683 | } |
3684 | |
3685 | /* |
3686 | * This submits a Reset Device Command, which will set the device state to 0, |
3687 | * set the device address to 0, and disable all the endpoints except the default |
3688 | * control endpoint. The USB core should come back and call |
3689 | * xhci_address_device(), and then re-set up the configuration. If this is |
3690 | * called because of a usb_reset_and_verify_device(), then the old alternate |
3691 | * settings will be re-installed through the normal bandwidth allocation |
3692 | * functions. |
3693 | * |
3694 | * Wait for the Reset Device command to finish. Remove all structures |
3695 | * associated with the endpoints that were disabled. Clear the input device |
3696 | * structure? Reset the control endpoint 0 max packet size? |
3697 | * |
3698 | * If the virt_dev to be reset does not exist or does not match the udev, |
3699 | * it means the device is lost, possibly due to the xHC restore error and |
3700 | * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to |
3701 | * re-allocate the device. |
3702 | */ |
3703 | static int xhci_discover_or_reset_device(struct usb_hcd *hcd, |
3704 | struct usb_device *udev) |
3705 | { |
3706 | int ret, i; |
3707 | unsigned long flags; |
3708 | struct xhci_hcd *xhci; |
3709 | unsigned int slot_id; |
3710 | struct xhci_virt_device *virt_dev; |
3711 | struct xhci_command *reset_device_cmd; |
3712 | struct xhci_slot_ctx *slot_ctx; |
3713 | int old_active_eps = 0; |
3714 | |
	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3716 | if (ret <= 0) |
3717 | return ret; |
3718 | xhci = hcd_to_xhci(hcd); |
3719 | slot_id = udev->slot_id; |
3720 | virt_dev = xhci->devs[slot_id]; |
3721 | if (!virt_dev) { |
3722 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " |
3723 | "not exist. Re-allocate the device\n" , slot_id); |
3724 | ret = xhci_alloc_dev(hcd, udev); |
3725 | if (ret == 1) |
3726 | return 0; |
3727 | else |
3728 | return -EINVAL; |
3729 | } |
3730 | |
3731 | if (virt_dev->tt_info) |
3732 | old_active_eps = virt_dev->tt_info->active_eps; |
3733 | |
3734 | if (virt_dev->udev != udev) { |
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
3737 | * Re-allocate the device. |
3738 | */ |
3739 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " |
3740 | "not match the udev. Re-allocate the device\n" , |
3741 | slot_id); |
3742 | ret = xhci_alloc_dev(hcd, udev); |
3743 | if (ret == 1) |
3744 | return 0; |
3745 | else |
3746 | return -EINVAL; |
3747 | } |
3748 | |
3749 | /* If device is not setup, there is no point in resetting it */ |
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3751 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == |
3752 | SLOT_STATE_DISABLED) |
3753 | return 0; |
3754 | |
	trace_xhci_discover_or_reset_device(slot_ctx);

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3758 | /* Allocate the command structure that holds the struct completion. |
3759 | * Assume we're in process context, since the normal device reset |
3760 | * process has to wait for the device anyway. Storage devices are |
3761 | * reset as part of error handling, so use GFP_NOIO instead of |
3762 | * GFP_KERNEL. |
3763 | */ |
	reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3767 | return -ENOMEM; |
3768 | } |
3769 | |
3770 | /* Attempt to submit the Reset Device command to the command ring */ |
3771 | spin_lock_irqsave(&xhci->lock, flags); |
3772 | |
	ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3774 | if (ret) { |
3775 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n" ); |
		spin_unlock_irqrestore(&xhci->lock, flags);
3777 | goto command_cleanup; |
3778 | } |
3779 | xhci_ring_cmd_db(xhci); |
	spin_unlock_irqrestore(&xhci->lock, flags);
3781 | |
3782 | /* Wait for the Reset Device command to finish */ |
3783 | wait_for_completion(reset_device_cmd->completion); |
3784 | |
3785 | /* The Reset Device command can't fail, according to the 0.95/0.96 spec, |
3786 | * unless we tried to reset a slot ID that wasn't enabled, |
3787 | * or the device wasn't in the addressed or configured state. |
3788 | */ |
3789 | ret = reset_device_cmd->status; |
3790 | switch (ret) { |
3791 | case COMP_COMMAND_ABORTED: |
3792 | case COMP_COMMAND_RING_STOPPED: |
3793 | xhci_warn(xhci, "Timeout waiting for reset device command\n" ); |
3794 | ret = -ETIME; |
3795 | goto command_cleanup; |
3796 | case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */ |
3797 | case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */ |
3798 | xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n" , |
3799 | slot_id, |
3800 | xhci_get_slot_state(xhci, virt_dev->out_ctx)); |
3801 | xhci_dbg(xhci, "Not freeing device rings.\n" ); |
3802 | /* Don't treat this as an error. May change my mind later. */ |
3803 | ret = 0; |
3804 | goto command_cleanup; |
3805 | case COMP_SUCCESS: |
3806 | xhci_dbg(xhci, "Successful reset device command.\n" ); |
3807 | break; |
3808 | default: |
		if (xhci_is_vendor_info_code(xhci, ret))
3810 | break; |
3811 | xhci_warn(xhci, "Unknown completion code %u for " |
3812 | "reset device command.\n" , ret); |
3813 | ret = -EINVAL; |
3814 | goto command_cleanup; |
3815 | } |
3816 | |
3817 | /* Free up host controller endpoint resources */ |
3818 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
3819 | spin_lock_irqsave(&xhci->lock, flags); |
3820 | /* Don't delete the default control endpoint resources */ |
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
3823 | } |
3824 | |
3825 | /* Everything but endpoint 0 is disabled, so free the rings. */ |
3826 | for (i = 1; i < 31; i++) { |
3827 | struct xhci_virt_ep *ep = &virt_dev->eps[i]; |
3828 | |
3829 | if (ep->ep_state & EP_HAS_STREAMS) { |
3830 | xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n" , |
3831 | xhci_get_endpoint_address(i)); |
3832 | xhci_free_stream_info(xhci, stream_info: ep->stream_info); |
3833 | ep->stream_info = NULL; |
3834 | ep->ep_state &= ~EP_HAS_STREAMS; |
3835 | } |
3836 | |
3837 | if (ep->ring) { |
			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
			xhci_free_endpoint_ring(xhci, virt_dev, i);
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3849 | } |
3850 | /* If necessary, update the number of active TTs on this root port */ |
3851 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); |
3852 | virt_dev->flags = 0; |
3853 | ret = 0; |
3854 | |
3855 | command_cleanup: |
	xhci_free_command(xhci, reset_device_cmd);
3857 | return ret; |
3858 | } |
3859 | |
3860 | /* |
3861 | * At this point, the struct usb_device is about to go away, the device has |
3862 | * disconnected, and all traffic has been stopped and the endpoints have been |
3863 | * disabled. Free any HC data structures associated with that device. |
3864 | */ |
3865 | static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) |
3866 | { |
3867 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
3868 | struct xhci_virt_device *virt_dev; |
3869 | struct xhci_slot_ctx *slot_ctx; |
3870 | unsigned long flags; |
3871 | int i, ret; |
3872 | |
3873 | /* |
3874 | * We called pm_runtime_get_noresume when the device was attached. |
3875 | * Decrement the counter here to allow controller to runtime suspend |
3876 | * if no devices remain. |
3877 | */ |
3878 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
		pm_runtime_put_noidle(hcd->self.controller);
3880 | |
	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3882 | /* If the host is halted due to driver unload, we still need to free the |
3883 | * device. |
3884 | */ |
3885 | if (ret <= 0 && ret != -ENODEV) |
3886 | return; |
3887 | |
3888 | virt_dev = xhci->devs[udev->slot_id]; |
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_free_dev(slot_ctx);
3891 | |
3892 | /* Stop any wayward timer functions (which may grab the lock) */ |
3893 | for (i = 0; i < 31; i++) |
3894 | virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; |
3895 | virt_dev->udev = NULL; |
	xhci_disable_slot(xhci, udev->slot_id);

	spin_lock_irqsave(&xhci->lock, flags);
	xhci_free_virt_device(xhci, udev->slot_id);
	spin_unlock_irqrestore(&xhci->lock, flags);
3902 | } |
3903 | |
3904 | int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) |
3905 | { |
3906 | struct xhci_command *command; |
3907 | unsigned long flags; |
3908 | u32 state; |
3909 | int ret; |
3910 | |
	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3912 | if (!command) |
3913 | return -ENOMEM; |
3914 | |
3915 | xhci_debugfs_remove_slot(xhci, slot_id); |
3916 | |
3917 | spin_lock_irqsave(&xhci->lock, flags); |
3918 | /* Don't disable the slot if the host controller is dead. */ |
	state = readl(&xhci->op_regs->status);
3920 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || |
3921 | (xhci->xhc_state & XHCI_STATE_HALTED)) { |
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(command);
3924 | return -ENODEV; |
3925 | } |
3926 | |
	ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
				      slot_id);
3929 | if (ret) { |
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(command);
3932 | return ret; |
3933 | } |
3934 | xhci_ring_cmd_db(xhci); |
	spin_unlock_irqrestore(&xhci->lock, flags);
3936 | |
3937 | wait_for_completion(command->completion); |
3938 | |
3939 | if (command->status != COMP_SUCCESS) |
3940 | xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n" , |
3941 | slot_id, command->status); |
3942 | |
3943 | xhci_free_command(xhci, command); |
3944 | |
3945 | return 0; |
3946 | } |
3947 | |
3948 | /* |
3949 | * Checks if we have enough host controller resources for the default control |
3950 | * endpoint. |
3951 | * |
3952 | * Must be called with xhci->lock held. |
3953 | */ |
3954 | static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) |
3955 | { |
3956 | if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { |
3957 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_quirks, |
3958 | fmt: "Not enough ep ctxs: " |
3959 | "%u active, need to add 1, limit is %u." , |
3960 | xhci->num_active_eps, xhci->limit_active_eps); |
3961 | return -ENOMEM; |
3962 | } |
3963 | xhci->num_active_eps += 1; |
3964 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_quirks, |
3965 | fmt: "Adding 1 ep ctx, %u now active." , |
3966 | xhci->num_active_eps); |
3967 | return 0; |
3968 | } |
3969 | |
3970 | |
3971 | /* |
3972 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command |
3973 | * timed out, or allocating memory failed. Returns 1 on success. |
3974 | */ |
3975 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) |
3976 | { |
3977 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
3978 | struct xhci_virt_device *vdev; |
3979 | struct xhci_slot_ctx *slot_ctx; |
3980 | unsigned long flags; |
3981 | int ret, slot_id; |
3982 | struct xhci_command *command; |
3983 | |
	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3985 | if (!command) |
3986 | return 0; |
3987 | |
3988 | spin_lock_irqsave(&xhci->lock, flags); |
	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3993 | xhci_free_command(xhci, command); |
3994 | return 0; |
3995 | } |
3996 | xhci_ring_cmd_db(xhci); |
3997 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3998 | |
3999 | wait_for_completion(command->completion); |
4000 | slot_id = command->slot_id; |
4001 | |
4002 | if (!slot_id || command->status != COMP_SUCCESS) { |
4003 | xhci_err(xhci, "Error while assigning device slot ID: %s\n" , |
4004 | xhci_trb_comp_code_string(command->status)); |
4005 | xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n" , |
4006 | HCS_MAX_SLOTS( |
4007 | readl(&xhci->cap_regs->hcs_params1))); |
4008 | xhci_free_command(xhci, command); |
4009 | return 0; |
4010 | } |
4011 | |
4012 | xhci_free_command(xhci, command); |
4013 | |
4014 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
4015 | spin_lock_irqsave(&xhci->lock, flags); |
4016 | ret = xhci_reserve_host_control_ep_resources(xhci); |
4017 | if (ret) { |
			spin_unlock_irqrestore(&xhci->lock, flags);
4019 | xhci_warn(xhci, "Not enough host resources, " |
4020 | "active endpoint contexts = %u\n" , |
4021 | xhci->num_active_eps); |
4022 | goto disable_slot; |
4023 | } |
		spin_unlock_irqrestore(&xhci->lock, flags);
4025 | } |
4026 | /* Use GFP_NOIO, since this function can be called from |
4027 | * xhci_discover_or_reset_device(), which may be called as part of |
4028 | * mass storage driver error handling. |
4029 | */ |
4030 | if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { |
4031 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n" ); |
4032 | goto disable_slot; |
4033 | } |
4034 | vdev = xhci->devs[slot_id]; |
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_alloc_dev(slot_ctx);
4037 | |
4038 | udev->slot_id = slot_id; |
4039 | |
4040 | xhci_debugfs_create_slot(xhci, slot_id); |
4041 | |
4042 | /* |
4043 | * If resetting upon resume, we can't put the controller into runtime |
4044 | * suspend if there is a device attached. |
4045 | */ |
4046 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
		pm_runtime_get_noresume(hcd->self.controller);

	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
4051 | return 1; |
4052 | |
4053 | disable_slot: |
	xhci_disable_slot(xhci, udev->slot_id);
	xhci_free_virt_device(xhci, udev->slot_id);
4056 | |
4057 | return 0; |
4058 | } |
4059 | |
4060 | /** |
4061 | * xhci_setup_device - issues an Address Device command to assign a unique |
4062 | * USB bus address. |
4063 | * @hcd: USB host controller data structure. |
4064 | * @udev: USB dev structure representing the connected device. |
4065 | * @setup: Enum specifying setup mode: address only or with context. |
4066 | * @timeout_ms: Max wait time (ms) for the command operation to complete. |
4067 | * |
4068 | * Return: 0 if successful; otherwise, negative error code. |
4069 | */ |
4070 | static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, |
4071 | enum xhci_setup_dev setup, unsigned int timeout_ms) |
4072 | { |
	const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4074 | unsigned long flags; |
4075 | struct xhci_virt_device *virt_dev; |
4076 | int ret = 0; |
4077 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4078 | struct xhci_slot_ctx *slot_ctx; |
4079 | struct xhci_input_control_ctx *ctrl_ctx; |
4080 | u64 temp_64; |
4081 | struct xhci_command *command = NULL; |
4082 | |
4083 | mutex_lock(&xhci->mutex); |
4084 | |
4085 | if (xhci->xhc_state) { /* dying, removing or halted */ |
4086 | ret = -ESHUTDOWN; |
4087 | goto out; |
4088 | } |
4089 | |
4090 | if (!udev->slot_id) { |
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"Bad Slot ID %d", udev->slot_id);
4093 | ret = -EINVAL; |
4094 | goto out; |
4095 | } |
4096 | |
4097 | virt_dev = xhci->devs[udev->slot_id]; |
4098 | |
4099 | if (WARN_ON(!virt_dev)) { |
4100 | /* |
4101 | * In plug/unplug torture test with an NEC controller, |
4102 | * a zero-dereference was observed once due to virt_dev = 0. |
4103 | * Print useful debug rather than crash if it is observed again! |
4104 | */ |
4105 | xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n" , |
4106 | udev->slot_id); |
4107 | ret = -EINVAL; |
4108 | goto out; |
4109 | } |
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_setup_device_slot(slot_ctx);
4112 | |
4113 | if (setup == SETUP_CONTEXT_ONLY) { |
4114 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == |
4115 | SLOT_STATE_DEFAULT) { |
4116 | xhci_dbg(xhci, "Slot already in default state\n" ); |
4117 | goto out; |
4118 | } |
4119 | } |
4120 | |
	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4122 | if (!command) { |
4123 | ret = -ENOMEM; |
4124 | goto out; |
4125 | } |
4126 | |
4127 | command->in_ctx = virt_dev->in_ctx; |
4128 | command->timeout_ms = timeout_ms; |
4129 | |
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4134 | __func__); |
4135 | ret = -EINVAL; |
4136 | goto out; |
4137 | } |
4138 | /* |
4139 | * If this is the first Set Address since device plug-in or |
	 * virt_device reallocation after a resume with an xHCI power loss,
4141 | * then set up the slot context. |
4142 | */ |
4143 | if (!slot_ctx->dev_info) |
4144 | xhci_setup_addressable_virt_dev(xhci, udev); |
4145 | /* Otherwise, update the control endpoint ring enqueue pointer. */ |
4146 | else |
4147 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); |
4148 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); |
4149 | ctrl_ctx->drop_flags = 0; |
4150 | |
	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4152 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
4153 | |
4154 | trace_xhci_address_ctrl_ctx(ctrl_ctx); |
4155 | spin_lock_irqsave(&xhci->lock, flags); |
	trace_xhci_setup_device(virt_dev);
	ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
					udev->slot_id, setup);
4159 | if (ret) { |
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"FIXME: allocate a command ring segment");
4163 | goto out; |
4164 | } |
4165 | xhci_ring_cmd_db(xhci); |
	spin_unlock_irqrestore(&xhci->lock, flags);
4167 | |
4168 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ |
4169 | wait_for_completion(command->completion); |
4170 | |
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() 'recovery interval' required by USB and aborting
	 * the command on a timeout."
	 */
4175 | switch (command->status) { |
4176 | case COMP_COMMAND_ABORTED: |
4177 | case COMP_COMMAND_RING_STOPPED: |
4178 | xhci_warn(xhci, "Timeout while waiting for setup device command\n" ); |
4179 | ret = -ETIME; |
4180 | break; |
4181 | case COMP_CONTEXT_STATE_ERROR: |
4182 | case COMP_SLOT_NOT_ENABLED_ERROR: |
4183 | xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n" , |
4184 | act, udev->slot_id); |
4185 | ret = -EINVAL; |
4186 | break; |
4187 | case COMP_USB_TRANSACTION_ERROR: |
4188 | dev_warn(&udev->dev, "Device not responding to setup %s.\n" , act); |
4189 | |
4190 | mutex_unlock(lock: &xhci->mutex); |
4191 | ret = xhci_disable_slot(xhci, slot_id: udev->slot_id); |
4192 | xhci_free_virt_device(xhci, slot_id: udev->slot_id); |
4193 | if (!ret) |
4194 | xhci_alloc_dev(hcd, udev); |
		kfree(command->completion);
		kfree(command);
4197 | return -EPROTO; |
4198 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
4199 | dev_warn(&udev->dev, |
4200 | "ERROR: Incompatible device for setup %s command\n" , act); |
4201 | ret = -ENODEV; |
4202 | break; |
4203 | case COMP_SUCCESS: |
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"Successful setup %s command", act);
4206 | break; |
4207 | default: |
4208 | xhci_err(xhci, |
4209 | "ERROR: unexpected setup %s command completion code 0x%x.\n" , |
4210 | act, command->status); |
4211 | trace_xhci_address_ctx(xhci, ctx: virt_dev->out_ctx, ep_num: 1); |
4212 | ret = -EINVAL; |
4213 | break; |
4214 | } |
4215 | if (ret) |
4216 | goto out; |
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Op regs DCBAA ptr = %#016llx", temp_64);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Slot ID %d dcbaa entry @%p = %#016llx",
4222 | udev->slot_id, |
4223 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], |
4224 | (unsigned long long) |
4225 | le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); |
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Output Context DMA address = %#08llx",
			(unsigned long long)virt_dev->out_ctx->dma);
	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4230 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
4231 | /* |
4232 | * USB core uses address 1 for the roothubs, so we add one to the |
4233 | * address given back to us by the HC. |
4234 | */ |
	trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4236 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
4237 | /* Zero the input context control for later use */ |
4238 | ctrl_ctx->add_flags = 0; |
4239 | ctrl_ctx->drop_flags = 0; |
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4241 | udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); |
4242 | |
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Internal device address = %d",
4245 | le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); |
4246 | out: |
	mutex_unlock(&xhci->mutex);
	if (command) {
		kfree(command->completion);
		kfree(command);
4251 | } |
4252 | return ret; |
4253 | } |
4254 | |
4255 | static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev, |
4256 | unsigned int timeout_ms) |
4257 | { |
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
4259 | } |
4260 | |
4261 | static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) |
4262 | { |
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
4264 | XHCI_CMD_DEFAULT_TIMEOUT); |
4265 | } |
4266 | |
/*
 * Translate a root hub port number into the real index into the HW port
 * status registers: take the offset between the port's PORTSC register and
 * the port register base, divided by the per-port register stride. Raw
 * port numbers are 1-based; e.g. if the hub's first port is backed by the
 * controller's fifth PORTSC register (hw_portnum 4), port1 == 1 returns 5.
 */
4273 | int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) |
4274 | { |
4275 | struct xhci_hub *rhub; |
4276 | |
4277 | rhub = xhci_get_rhub(hcd); |
4278 | return rhub->ports[port1 - 1]->hw_portnum + 1; |
4279 | } |
4280 | |
4281 | /* |
4282 | * Issue an Evaluate Context command to change the Maximum Exit Latency in the |
4283 | * slot context. If that succeeds, store the new MEL in the xhci_virt_device. |
4284 | */ |
4285 | static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, |
4286 | struct usb_device *udev, u16 max_exit_latency) |
4287 | { |
4288 | struct xhci_virt_device *virt_dev; |
4289 | struct xhci_command *command; |
4290 | struct xhci_input_control_ctx *ctrl_ctx; |
4291 | struct xhci_slot_ctx *slot_ctx; |
4292 | unsigned long flags; |
4293 | int ret; |
4294 | |
	command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
4296 | if (!command) |
4297 | return -ENOMEM; |
4298 | |
4299 | spin_lock_irqsave(&xhci->lock, flags); |
4300 | |
4301 | virt_dev = xhci->devs[udev->slot_id]; |
4302 | |
4303 | /* |
	 * virt_dev might not exist yet if the xHC resumed from hibernate (S4) and
4305 | * xHC was re-initialized. Exit latency will be set later after |
4306 | * hub_port_finish_reset() is done and xhci->devs[] are re-allocated |
4307 | */ |
4308 | |
4309 | if (!virt_dev || max_exit_latency == virt_dev->current_mel) { |
		spin_unlock_irqrestore(&xhci->lock, flags);
4311 | xhci_free_command(xhci, command); |
4312 | return 0; |
4313 | } |
4314 | |
4315 | /* Attempt to issue an Evaluate Context command to change the MEL. */ |
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, command);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4321 | __func__); |
4322 | return -ENOMEM; |
4323 | } |
4324 | |
	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
	spin_unlock_irqrestore(&xhci->lock, flags);
4327 | |
4328 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4330 | slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); |
4331 | slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); |
4332 | slot_ctx->dev_state = 0; |
4333 | |
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Set up evaluate context for LPM MEL change.");
4336 | |
4337 | /* Issue and wait for the evaluate context command. */ |
	ret = xhci_configure_endpoint(xhci, udev, command,
			true, true);
4340 | |
4341 | if (!ret) { |
4342 | spin_lock_irqsave(&xhci->lock, flags); |
4343 | virt_dev->current_mel = max_exit_latency; |
		spin_unlock_irqrestore(&xhci->lock, flags);
4345 | } |
4346 | |
4347 | xhci_free_command(xhci, command); |
4348 | |
4349 | return ret; |
4350 | } |
4351 | |
4352 | #ifdef CONFIG_PM |
4353 | |
4354 | /* BESL to HIRD Encoding array for USB2 LPM */ |
4355 | static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, |
4356 | 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; |
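
/* e.g. BESL index 0 encodes 125 us of latency, index 6 encodes 1000 us */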
4357 | |
/* Calculate HIRD/BESL for USB2 PORTPMSC */
4359 | static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, |
4360 | struct usb_device *udev) |
4361 | { |
4362 | int u2del, besl, besl_host; |
4363 | int besl_device = 0; |
4364 | u32 field; |
4365 | |
4366 | u2del = HCS_U2_LATENCY(xhci->hcs_params3); |
4367 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
4368 | |
4369 | if (field & USB_BESL_SUPPORT) { |
4370 | for (besl_host = 0; besl_host < 16; besl_host++) { |
4371 | if (xhci_besl_encoding[besl_host] >= u2del) |
4372 | break; |
4373 | } |
4374 | /* Use baseline BESL value as default */ |
4375 | if (field & USB_BESL_BASELINE_VALID) |
4376 | besl_device = USB_GET_BESL_BASELINE(field); |
4377 | else if (field & USB_BESL_DEEP_VALID) |
4378 | besl_device = USB_GET_BESL_DEEP(field); |
4379 | } else { |
4380 | if (u2del <= 50) |
4381 | besl_host = 0; |
4382 | else |
4383 | besl_host = (u2del - 51) / 75 + 1; |
4384 | } |
4385 | |
4386 | besl = besl_host + besl_device; |
4387 | if (besl > 15) |
4388 | besl = 15; |
4389 | |
4390 | return besl; |
4391 | } |
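
/*
 * Worked example for xhci_calculate_hird_besl(): with a host U2 exit
 * latency (u2del) of 200 us, the loop picks besl_host = 2, since
 * xhci_besl_encoding[2] == 200. A device baseline BESL of 4 then yields
 * besl = 6; values above 15 are clamped to 15.
 */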
4392 | |
4393 | /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ |
4394 | static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) |
4395 | { |
4396 | u32 field; |
4397 | int l1; |
4398 | int besld = 0; |
4399 | int hirdm = 0; |
4400 | |
4401 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
4402 | |
4403 | /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ |
4404 | l1 = udev->l1_params.timeout / 256; |
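	/* e.g. a udev->l1_params.timeout of 512 us gives l1 = 2 */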
4405 | |
4406 | /* device has preferred BESLD */ |
4407 | if (field & USB_BESL_DEEP_VALID) { |
4408 | besld = USB_GET_BESL_DEEP(field); |
4409 | hirdm = 1; |
4410 | } |
4411 | |
4412 | return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); |
4413 | } |
4414 | |
4415 | static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
4416 | struct usb_device *udev, int enable) |
4417 | { |
4418 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4419 | struct xhci_port **ports; |
4420 | __le32 __iomem *pm_addr, *hlpm_addr; |
4421 | u32 pm_val, hlpm_val, field; |
4422 | unsigned int port_num; |
4423 | unsigned long flags; |
4424 | int hird, exit_latency; |
4425 | int ret; |
4426 | |
4427 | if (xhci->quirks & XHCI_HW_LPM_DISABLE) |
4428 | return -EPERM; |
4429 | |
4430 | if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || |
4431 | !udev->lpm_capable) |
4432 | return -EPERM; |
4433 | |
4434 | if (!udev->parent || udev->parent->parent || |
4435 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) |
4436 | return -EPERM; |
4437 | |
4438 | if (udev->usb2_hw_lpm_capable != 1) |
4439 | return -EPERM; |
4440 | |
4441 | spin_lock_irqsave(&xhci->lock, flags); |
4442 | |
4443 | ports = xhci->usb2_rhub.ports; |
4444 | port_num = udev->portnum - 1; |
4445 | pm_addr = ports[port_num]->addr + PORTPMSC; |
	pm_val = readl(pm_addr);
4447 | hlpm_addr = ports[port_num]->addr + PORTHLPMC; |
4448 | |
	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num + 1);
4451 | |
4452 | if (enable) { |
4453 | /* Host supports BESL timeout instead of HIRD */ |
4454 | if (udev->usb2_hw_lpm_besl_capable) { |
4455 | /* if device doesn't have a preferred BESL value use a |
4456 | * default one which works with mixed HIRD and BESL |
4457 | * systems. See XHCI_DEFAULT_BESL definition in xhci.h |
4458 | */ |
4459 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
4460 | if ((field & USB_BESL_SUPPORT) && |
4461 | (field & USB_BESL_BASELINE_VALID)) |
4462 | hird = USB_GET_BESL_BASELINE(field); |
4463 | else |
4464 | hird = udev->l1_params.besl; |
4465 | |
4466 | exit_latency = xhci_besl_encoding[hird]; |
			spin_unlock_irqrestore(&xhci->lock, flags);
4468 | |
4469 | ret = xhci_change_max_exit_latency(xhci, udev, |
					exit_latency);
4471 | if (ret < 0) |
4472 | return ret; |
4473 | spin_lock_irqsave(&xhci->lock, flags); |
4474 | |
4475 | hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); |
			writel(hlpm_val, hlpm_addr);
			/* flush write */
			readl(hlpm_addr);
4479 | } else { |
4480 | hird = xhci_calculate_hird_besl(xhci, udev); |
4481 | } |
4482 | |
4483 | pm_val &= ~PORT_HIRD_MASK; |
4484 | pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); |
		writel(pm_val, pm_addr);
		pm_val = readl(pm_addr);
		pm_val |= PORT_HLE;
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
4491 | } else { |
4492 | pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); |
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
		if (udev->usb2_hw_lpm_besl_capable) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_change_max_exit_latency(xhci, udev, 0);
4499 | readl_poll_timeout(ports[port_num]->addr, pm_val, |
4500 | (pm_val & PORT_PLS_MASK) == XDEV_U0, |
4501 | 100, 10000); |
4502 | return 0; |
4503 | } |
4504 | } |
4505 | |
	spin_unlock_irqrestore(&xhci->lock, flags);
4507 | return 0; |
4508 | } |
4509 | |
/* Check if a USB2 port supports a given extended capability protocol.
 * Only USB2 ports' extended protocol capability values are cached.
 * Return 1 if the capability is supported.
 */
4514 | static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, |
4515 | unsigned capability) |
4516 | { |
4517 | u32 port_offset, port_count; |
4518 | int i; |
4519 | |
4520 | for (i = 0; i < xhci->num_ext_caps; i++) { |
4521 | if (xhci->ext_caps[i] & capability) { |
			/* port offsets start at 1 */
4523 | port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; |
4524 | port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); |
4525 | if (port >= port_offset && |
4526 | port < port_offset + port_count) |
4527 | return 1; |
4528 | } |
4529 | } |
4530 | return 0; |
4531 | } |
4532 | |
4533 | static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
4534 | { |
4535 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4536 | int portnum = udev->portnum - 1; |
4537 | |
4538 | if (hcd->speed >= HCD_USB3 || !udev->lpm_capable) |
4539 | return 0; |
4540 | |
	/* so far we only support LPM for non-hub devices connected to the root hub */
4542 | if (!udev->parent || udev->parent->parent || |
4543 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) |
4544 | return 0; |
4545 | |
4546 | if (xhci->hw_lpm_support == 1 && |
4547 | xhci_check_usb2_port_capability( |
				xhci, portnum, XHCI_HLC)) {
4549 | udev->usb2_hw_lpm_capable = 1; |
4550 | udev->l1_params.timeout = XHCI_L1_TIMEOUT; |
4551 | udev->l1_params.besl = XHCI_DEFAULT_BESL; |
		if (xhci_check_usb2_port_capability(xhci, portnum,
4553 | XHCI_BLC)) |
4554 | udev->usb2_hw_lpm_besl_capable = 1; |
4555 | } |
4556 | |
4557 | return 0; |
4558 | } |
4559 | |
4560 | /*---------------------- USB 3.0 Link PM functions ------------------------*/ |
4561 | |
4562 | /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */ |
4563 | static unsigned long long xhci_service_interval_to_ns( |
4564 | struct usb_endpoint_descriptor *desc) |
4565 | { |
4566 | return (1ULL << (desc->bInterval - 1)) * 125 * 1000; |
4567 | } |
4568 | |
4569 | static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, |
4570 | enum usb3_link_state state) |
4571 | { |
4572 | unsigned long long sel; |
4573 | unsigned long long pel; |
4574 | unsigned int max_sel_pel; |
4575 | char *state_name; |
4576 | |
4577 | switch (state) { |
4578 | case USB3_LPM_U1: |
4579 | /* Convert SEL and PEL stored in nanoseconds to microseconds */ |
4580 | sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); |
4581 | pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); |
4582 | max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; |
4583 | state_name = "U1" ; |
4584 | break; |
4585 | case USB3_LPM_U2: |
4586 | sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); |
4587 | pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); |
4588 | max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; |
4589 | state_name = "U2" ; |
4590 | break; |
4591 | default: |
		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
			 __func__);
4594 | return USB3_LPM_DISABLED; |
4595 | } |
4596 | |
4597 | if (sel <= max_sel_pel && pel <= max_sel_pel) |
4598 | return USB3_LPM_DEVICE_INITIATED; |
4599 | |
4600 | if (sel > max_sel_pel) |
4601 | dev_dbg(&udev->dev, "Device-initiated %s disabled " |
4602 | "due to long SEL %llu ms\n" , |
4603 | state_name, sel); |
4604 | else |
4605 | dev_dbg(&udev->dev, "Device-initiated %s disabled " |
4606 | "due to long PEL %llu ms\n" , |
4607 | state_name, pel); |
4608 | return USB3_LPM_DISABLED; |
4609 | } |
4610 | |
4611 | /* The U1 timeout should be the maximum of the following values: |
4612 | * - For control endpoints, U1 system exit latency (SEL) * 3 |
4613 | * - For bulk endpoints, U1 SEL * 5 |
4614 | * - For interrupt endpoints: |
4615 | * - Notification EPs, U1 SEL * 3 |
4616 | * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) |
4617 | * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) |
4618 | */ |
4619 | static unsigned long long xhci_calculate_intel_u1_timeout( |
4620 | struct usb_device *udev, |
4621 | struct usb_endpoint_descriptor *desc) |
4622 | { |
4623 | unsigned long long timeout_ns; |
4624 | int ep_type; |
4625 | int intr_type; |
4626 | |
	ep_type = usb_endpoint_type(desc);
4628 | switch (ep_type) { |
4629 | case USB_ENDPOINT_XFER_CONTROL: |
4630 | timeout_ns = udev->u1_params.sel * 3; |
4631 | break; |
4632 | case USB_ENDPOINT_XFER_BULK: |
4633 | timeout_ns = udev->u1_params.sel * 5; |
4634 | break; |
4635 | case USB_ENDPOINT_XFER_INT: |
		intr_type = usb_endpoint_interrupt_type(desc);
4637 | if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { |
4638 | timeout_ns = udev->u1_params.sel * 3; |
4639 | break; |
4640 | } |
4641 | /* Otherwise the calculation is the same as isoc eps */ |
4642 | fallthrough; |
4643 | case USB_ENDPOINT_XFER_ISOC: |
4644 | timeout_ns = xhci_service_interval_to_ns(desc); |
4645 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); |
4646 | if (timeout_ns < udev->u1_params.sel * 2) |
4647 | timeout_ns = udev->u1_params.sel * 2; |
4648 | break; |
4649 | default: |
4650 | return 0; |
4651 | } |
4652 | |
4653 | return timeout_ns; |
4654 | } |
4655 | |
4656 | /* Returns the hub-encoded U1 timeout value. */ |
4657 | static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, |
4658 | struct usb_device *udev, |
4659 | struct usb_endpoint_descriptor *desc) |
4660 | { |
4661 | unsigned long long timeout_ns; |
4662 | |
4663 | /* Prevent U1 if service interval is shorter than U1 exit latency */ |
	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
		if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
			dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4667 | return USB3_LPM_DISABLED; |
4668 | } |
4669 | } |
4670 | |
4671 | if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) |
4672 | timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); |
4673 | else |
4674 | timeout_ns = udev->u1_params.sel; |
4675 | |
4676 | /* The U1 timeout is encoded in 1us intervals. |
4677 | * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. |
4678 | */ |
4679 | if (timeout_ns == USB3_LPM_DISABLED) |
4680 | timeout_ns = 1; |
4681 | else |
4682 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); |
4683 | |
4684 | /* If the necessary timeout value is bigger than what we can set in the |
4685 | * USB 3.0 hub, we have to disable hub-initiated U1. |
4686 | */ |
4687 | if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) |
4688 | return timeout_ns; |
4689 | dev_dbg(&udev->dev, "Hub-initiated U1 disabled " |
4690 | "due to long timeout %llu ms\n" , timeout_ns); |
4691 | return xhci_get_timeout_no_hub_lpm(udev, state: USB3_LPM_U1); |
4692 | } |
4693 | |
4694 | /* The U2 timeout should be the maximum of: |
4695 | * - 10 ms (to avoid the bandwidth impact on the scheduler) |
4696 | * - largest bInterval of any active periodic endpoint (to avoid going |
4697 | * into lower power link states between intervals). |
4698 | * - the U2 Exit Latency of the device |
4699 | */ |
4700 | static unsigned long long xhci_calculate_intel_u2_timeout( |
4701 | struct usb_device *udev, |
4702 | struct usb_endpoint_descriptor *desc) |
4703 | { |
4704 | unsigned long long timeout_ns; |
4705 | unsigned long long u2_del_ns; |
4706 | |
4707 | timeout_ns = 10 * 1000 * 1000; |
4708 | |
	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4710 | (xhci_service_interval_to_ns(desc) > timeout_ns)) |
4711 | timeout_ns = xhci_service_interval_to_ns(desc); |
4712 | |
4713 | u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; |
4714 | if (u2_del_ns > timeout_ns) |
4715 | timeout_ns = u2_del_ns; |
4716 | |
4717 | return timeout_ns; |
4718 | } |
4719 | |
4720 | /* Returns the hub-encoded U2 timeout value. */ |
4721 | static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, |
4722 | struct usb_device *udev, |
4723 | struct usb_endpoint_descriptor *desc) |
4724 | { |
4725 | unsigned long long timeout_ns; |
4726 | |
4727 | /* Prevent U2 if service interval is shorter than U2 exit latency */ |
	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
		if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
			dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4731 | return USB3_LPM_DISABLED; |
4732 | } |
4733 | } |
4734 | |
4735 | if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) |
4736 | timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); |
4737 | else |
4738 | timeout_ns = udev->u2_params.sel; |
4739 | |
4740 | /* The U2 timeout is encoded in 256us intervals */ |
4741 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); |
4742 | /* If the necessary timeout value is bigger than what we can set in the |
4743 | * USB 3.0 hub, we have to disable hub-initiated U2. |
4744 | */ |
4745 | if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) |
4746 | return timeout_ns; |
4747 | dev_dbg(&udev->dev, "Hub-initiated U2 disabled " |
4748 | "due to long timeout %llu ms\n" , timeout_ns); |
4749 | return xhci_get_timeout_no_hub_lpm(udev, state: USB3_LPM_U2); |
4750 | } |
4751 | |
4752 | static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, |
4753 | struct usb_device *udev, |
4754 | struct usb_endpoint_descriptor *desc, |
4755 | enum usb3_link_state state, |
4756 | u16 *timeout) |
4757 | { |
4758 | if (state == USB3_LPM_U1) |
4759 | return xhci_calculate_u1_timeout(xhci, udev, desc); |
4760 | else if (state == USB3_LPM_U2) |
4761 | return xhci_calculate_u2_timeout(xhci, udev, desc); |
4762 | |
4763 | return USB3_LPM_DISABLED; |
4764 | } |
4765 | |
4766 | static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, |
4767 | struct usb_device *udev, |
4768 | struct usb_endpoint_descriptor *desc, |
4769 | enum usb3_link_state state, |
4770 | u16 *timeout) |
4771 | { |
4772 | u16 alt_timeout; |
4773 | |
4774 | alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, |
4775 | desc, state, timeout); |
4776 | |
4777 | /* If we found we can't enable hub-initiated LPM, and |
4778 | * the U1 or U2 exit latency was too high to allow |
4779 | * device-initiated LPM as well, then we will disable LPM |
4780 | * for this device, so stop searching any further. |
4781 | */ |
4782 | if (alt_timeout == USB3_LPM_DISABLED) { |
4783 | *timeout = alt_timeout; |
4784 | return -E2BIG; |
4785 | } |
4786 | if (alt_timeout > *timeout) |
4787 | *timeout = alt_timeout; |
4788 | return 0; |
4789 | } |
4790 | |
4791 | static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, |
4792 | struct usb_device *udev, |
4793 | struct usb_host_interface *alt, |
4794 | enum usb3_link_state state, |
4795 | u16 *timeout) |
4796 | { |
4797 | int j; |
4798 | |
4799 | for (j = 0; j < alt->desc.bNumEndpoints; j++) { |
4800 | if (xhci_update_timeout_for_endpoint(xhci, udev, |
				&alt->endpoint[j].desc, state, timeout))
4802 | return -E2BIG; |
4803 | } |
4804 | return 0; |
4805 | } |
4806 | |
4807 | static int xhci_check_tier_policy(struct xhci_hcd *xhci, |
4808 | struct usb_device *udev, |
4809 | enum usb3_link_state state) |
4810 | { |
4811 | struct usb_device *parent = udev->parent; |
4812 | int tier = 1; /* roothub is tier1 */ |
4813 | |
4814 | while (parent) { |
4815 | parent = parent->parent; |
4816 | tier++; |
4817 | } |
4818 | |
4819 | if (xhci->quirks & XHCI_INTEL_HOST && tier > 3) |
4820 | goto fail; |
4821 | if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2) |
4822 | goto fail; |
4823 | |
4824 | return 0; |
4825 | fail: |
4826 | dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n" , |
4827 | tier); |
4828 | return -E2BIG; |
4829 | } |
4830 | |
4831 | /* Returns the U1 or U2 timeout that should be enabled. |
4832 | * If the tier check or timeout setting functions return with a non-zero exit |
4833 | * code, that means the timeout value has been finalized and we shouldn't look |
4834 | * at any more endpoints. |
4835 | */ |
4836 | static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, |
4837 | struct usb_device *udev, enum usb3_link_state state) |
4838 | { |
4839 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4840 | struct usb_host_config *config; |
4841 | char *state_name; |
4842 | int i; |
4843 | u16 timeout = USB3_LPM_DISABLED; |
4844 | |
4845 | if (state == USB3_LPM_U1) |
4846 | state_name = "U1" ; |
4847 | else if (state == USB3_LPM_U2) |
4848 | state_name = "U2" ; |
4849 | else { |
4850 | dev_warn(&udev->dev, "Can't enable unknown link state %i\n" , |
4851 | state); |
4852 | return timeout; |
4853 | } |
4854 | |
4855 | /* Gather some information about the currently installed configuration |
4856 | * and alternate interface settings. |
4857 | */ |
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
4860 | return timeout; |
4861 | |
4862 | config = udev->actconfig; |
4863 | if (!config) |
4864 | return timeout; |
4865 | |
4866 | for (i = 0; i < config->desc.bNumInterfaces; i++) { |
4867 | struct usb_driver *driver; |
4868 | struct usb_interface *intf = config->interface[i]; |
4869 | |
4870 | if (!intf) |
4871 | continue; |
4872 | |
4873 | /* Check if any currently bound drivers want hub-initiated LPM |
4874 | * disabled. |
4875 | */ |
4876 | if (intf->dev.driver) { |
4877 | driver = to_usb_driver(intf->dev.driver); |
4878 | if (driver && driver->disable_hub_initiated_lpm) { |
4879 | dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n" , |
4880 | state_name, driver->name); |
4881 | timeout = xhci_get_timeout_no_hub_lpm(udev, |
4882 | state); |
4883 | if (timeout == USB3_LPM_DISABLED) |
4884 | return timeout; |
4885 | } |
4886 | } |
4887 | |
4888 | /* Not sure how this could happen... */ |
4889 | if (!intf->cur_altsetting) |
4890 | continue; |
4891 | |
4892 | if (xhci_update_timeout_for_interface(xhci, udev, |
				intf->cur_altsetting,
				state, &timeout))
4895 | return timeout; |
4896 | } |
4897 | return timeout; |
4898 | } |
4899 | |
4900 | static int calculate_max_exit_latency(struct usb_device *udev, |
4901 | enum usb3_link_state state_changed, |
4902 | u16 hub_encoded_timeout) |
4903 | { |
4904 | unsigned long long u1_mel_us = 0; |
4905 | unsigned long long u2_mel_us = 0; |
4906 | unsigned long long mel_us = 0; |
4907 | bool disabling_u1; |
4908 | bool disabling_u2; |
4909 | bool enabling_u1; |
4910 | bool enabling_u2; |
4911 | |
4912 | disabling_u1 = (state_changed == USB3_LPM_U1 && |
4913 | hub_encoded_timeout == USB3_LPM_DISABLED); |
4914 | disabling_u2 = (state_changed == USB3_LPM_U2 && |
4915 | hub_encoded_timeout == USB3_LPM_DISABLED); |
4916 | |
4917 | enabling_u1 = (state_changed == USB3_LPM_U1 && |
4918 | hub_encoded_timeout != USB3_LPM_DISABLED); |
4919 | enabling_u2 = (state_changed == USB3_LPM_U2 && |
4920 | hub_encoded_timeout != USB3_LPM_DISABLED); |
4921 | |
4922 | /* If U1 was already enabled and we're not disabling it, |
4923 | * or we're going to enable U1, account for the U1 max exit latency. |
4924 | */ |
4925 | if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || |
4926 | enabling_u1) |
4927 | u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); |
4928 | if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || |
4929 | enabling_u2) |
4930 | u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); |
4931 | |
4932 | mel_us = max(u1_mel_us, u2_mel_us); |
4933 | |
4934 | /* xHCI host controller max exit latency field is only 16 bits wide. */ |
4935 | if (mel_us > MAX_EXIT) { |
4936 | dev_warn(&udev->dev, "Link PM max exit latency of %lluus " |
4937 | "is too big.\n" , mel_us); |
4938 | return -E2BIG; |
4939 | } |
4940 | return mel_us; |
4941 | } |
4942 | |
4943 | /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ |
4944 | static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
4945 | struct usb_device *udev, enum usb3_link_state state) |
4946 | { |
4947 | struct xhci_hcd *xhci; |
4948 | struct xhci_port *port; |
4949 | u16 hub_encoded_timeout; |
4950 | int mel; |
4951 | int ret; |
4952 | |
4953 | xhci = hcd_to_xhci(hcd); |
4954 | /* The LPM timeout values are pretty host-controller specific, so don't |
4955 | * enable hub-initiated timeouts unless the vendor has provided |
4956 | * information about their timeout algorithm. |
4957 | */ |
4958 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || |
4959 | !xhci->devs[udev->slot_id]) |
4960 | return USB3_LPM_DISABLED; |
4961 | |
4962 | if (xhci_check_tier_policy(xhci, udev, state) < 0) |
4963 | return USB3_LPM_DISABLED; |
4964 | |
4965 | /* If connected to root port then check port can handle lpm */ |
4966 | if (udev->parent && !udev->parent->parent) { |
4967 | port = xhci->usb3_rhub.ports[udev->portnum - 1]; |
4968 | if (port->lpm_incapable) |
4969 | return USB3_LPM_DISABLED; |
4970 | } |
4971 | |
4972 | hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); |
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4974 | if (mel < 0) { |
4975 | /* Max Exit Latency is too big, disable LPM. */ |
4976 | hub_encoded_timeout = USB3_LPM_DISABLED; |
4977 | mel = 0; |
4978 | } |
4979 | |
	ret = xhci_change_max_exit_latency(xhci, udev, mel);
4981 | if (ret) |
4982 | return ret; |
4983 | return hub_encoded_timeout; |
4984 | } |
4985 | |
4986 | static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
4987 | struct usb_device *udev, enum usb3_link_state state) |
4988 | { |
4989 | struct xhci_hcd *xhci; |
4990 | u16 mel; |
4991 | |
4992 | xhci = hcd_to_xhci(hcd); |
4993 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || |
4994 | !xhci->devs[udev->slot_id]) |
4995 | return 0; |
4996 | |
	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
4999 | } |
5000 | #else /* CONFIG_PM */ |
5001 | |
5002 | static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
5003 | struct usb_device *udev, int enable) |
5004 | { |
5005 | return 0; |
5006 | } |
5007 | |
5008 | static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
5009 | { |
5010 | return 0; |
5011 | } |
5012 | |
5013 | static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
5014 | struct usb_device *udev, enum usb3_link_state state) |
5015 | { |
5016 | return USB3_LPM_DISABLED; |
5017 | } |
5018 | |
5019 | static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
5020 | struct usb_device *udev, enum usb3_link_state state) |
5021 | { |
5022 | return 0; |
5023 | } |
5024 | #endif /* CONFIG_PM */ |
5025 | |
5026 | /*-------------------------------------------------------------------------*/ |
5027 | |
5028 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's |
5029 | * internal data structures for the device. |
5030 | */ |
5031 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, |
5032 | struct usb_tt *tt, gfp_t mem_flags) |
5033 | { |
5034 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
5035 | struct xhci_virt_device *vdev; |
5036 | struct xhci_command *config_cmd; |
5037 | struct xhci_input_control_ctx *ctrl_ctx; |
5038 | struct xhci_slot_ctx *slot_ctx; |
5039 | unsigned long flags; |
5040 | unsigned think_time; |
5041 | int ret; |
5042 | |
5043 | /* Ignore root hubs */ |
5044 | if (!hdev->parent) |
5045 | return 0; |
5046 | |
5047 | vdev = xhci->devs[hdev->slot_id]; |
5048 | if (!vdev) { |
5049 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n" ); |
5050 | return -EINVAL; |
5051 | } |
5052 | |
	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5054 | if (!config_cmd) |
5055 | return -ENOMEM; |
5056 | |
	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
5062 | return -ENOMEM; |
5063 | } |
5064 | |
5065 | spin_lock_irqsave(&xhci->lock, flags); |
5066 | if (hdev->speed == USB_SPEED_HIGH && |
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
5071 | return -ENOMEM; |
5072 | } |
5073 | |
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5075 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5077 | slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); |
5078 | /* |
5079 | * refer to section 6.2.2: MTT should be 0 for full speed hub, |
5080 | * but it may be already set to 1 when setup an xHCI virtual |
5081 | * device, so clear it anyway. |
5082 | */ |
5083 | if (tt->multi) |
5084 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); |
5085 | else if (hdev->speed == USB_SPEED_FULL) |
5086 | slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT); |
5087 | |
5088 | if (xhci->hci_version > 0x95) { |
5089 | xhci_dbg(xhci, "xHCI version %x needs hub " |
5090 | "TT think time and number of ports\n" , |
5091 | (unsigned int) xhci->hci_version); |
5092 | slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); |
5093 | /* Set TT think time - convert from ns to FS bit times. |
5094 | * 0 = 8 FS bit times, 1 = 16 FS bit times, |
5095 | * 2 = 24 FS bit times, 3 = 32 FS bit times. |
5096 | * |
5097 | * xHCI 1.0: this field shall be 0 if the device is not a |
		 * High-speed hub.
5099 | */ |
5100 | think_time = tt->think_time; |
5101 | if (think_time != 0) |
5102 | think_time = (think_time / 666) - 1; |
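		/*
		 * Worked example (illustrative only): a hub reporting
		 * think_time = 2000ns becomes (2000 / 666) - 1 = 2, i.e.
		 * the "24 FS bit times" encoding (one FS bit time is
		 * ~83ns, so 666ns is roughly 8 bit times).
		 */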
5103 | if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) |
5104 | slot_ctx->tt_info |= |
5105 | cpu_to_le32(TT_THINK_TIME(think_time)); |
5106 | } else { |
5107 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " |
5108 | "TT think time or number of ports\n" , |
5109 | (unsigned int) xhci->hci_version); |
5110 | } |
5111 | slot_ctx->dev_state = 0; |
	spin_unlock_irqrestore(&xhci->lock, flags);
5113 | |
5114 | xhci_dbg(xhci, "Set up %s for hub device.\n" , |
5115 | (xhci->hci_version > 0x95) ? |
5116 | "configure endpoint" : "evaluate context" ); |
5117 | |
5118 | /* Issue and wait for the configure endpoint or |
5119 | * evaluate context command. |
5120 | */ |
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);
5127 | |
	xhci_free_command(xhci, config_cmd);
5129 | return ret; |
5130 | } |
5131 | EXPORT_SYMBOL_GPL(xhci_update_hub_device); |
5132 | |
5133 | static int xhci_get_frame(struct usb_hcd *hcd) |
5134 | { |
5135 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
5136 | /* EHCI mods by the periodic size. Why? */ |
	return readl(&xhci->run_regs->microframe_index) >> 3;
5138 | } |
5139 | |
5140 | static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd) |
5141 | { |
5142 | xhci->usb2_rhub.hcd = hcd; |
5143 | hcd->speed = HCD_USB2; |
5144 | hcd->self.root_hub->speed = USB_SPEED_HIGH; |
5145 | /* |
5146 | * USB 2.0 roothub under xHCI has an integrated TT, |
5147 | * (rate matching hub) as opposed to having an OHCI/UHCI |
5148 | * companion controller. |
5149 | */ |
5150 | hcd->has_tt = 1; |
5151 | } |
5152 | |
5153 | static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd) |
5154 | { |
5155 | unsigned int minor_rev; |
5156 | |
5157 | /* |
5158 | * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts |
5159 | * should return 0x31 for sbrn, or that the minor revision |
	 * is a two digit BCD containing minor and sub-minor numbers.
5161 | * This was later clarified in xHCI 1.2. |
5162 | * |
5163 | * Some USB 3.1 capable hosts therefore have sbrn 0x30, and |
5164 | * minor revision set to 0x1 instead of 0x10. |
5165 | */ |
5166 | if (xhci->usb3_rhub.min_rev == 0x1) |
5167 | minor_rev = 1; |
5168 | else |
5169 | minor_rev = xhci->usb3_rhub.min_rev / 0x10; |
5170 | |
5171 | switch (minor_rev) { |
5172 | case 2: |
5173 | hcd->speed = HCD_USB32; |
5174 | hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; |
5175 | hcd->self.root_hub->rx_lanes = 2; |
5176 | hcd->self.root_hub->tx_lanes = 2; |
5177 | hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2; |
5178 | break; |
5179 | case 1: |
5180 | hcd->speed = HCD_USB31; |
5181 | hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; |
5182 | hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1; |
5183 | break; |
5184 | } |
5185 | xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n" , |
5186 | minor_rev, minor_rev ? "Enhanced " : "" ); |
5187 | |
5188 | xhci->usb3_rhub.hcd = hcd; |
5189 | } |
5190 | |
5191 | int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) |
5192 | { |
5193 | struct xhci_hcd *xhci; |
5194 | /* |
5195 | * TODO: Check with DWC3 clients for sysdev according to |
5196 | * quirks |
5197 | */ |
5198 | struct device *dev = hcd->self.sysdev; |
5199 | int retval; |
5200 | |
5201 | /* Accept arbitrarily long scatter-gather lists */ |
5202 | hcd->self.sg_tablesize = ~0; |
5203 | |
5204 | /* support to build packet from discontinuous buffers */ |
5205 | hcd->self.no_sg_constraint = 1; |
5206 | |
5207 | /* XHCI controllers don't stop the ep queue on short packets :| */ |
5208 | hcd->self.no_stop_on_short = 1; |
5209 | |
5210 | xhci = hcd_to_xhci(hcd); |
5211 | |
5212 | if (!usb_hcd_is_primary_hcd(hcd)) { |
5213 | xhci_hcd_init_usb3_data(xhci, hcd); |
5214 | return 0; |
5215 | } |
5216 | |
5217 | mutex_init(&xhci->mutex); |
5218 | xhci->main_hcd = hcd; |
5219 | xhci->cap_regs = hcd->regs; |
5220 | xhci->op_regs = hcd->regs + |
5221 | HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); |
5222 | xhci->run_regs = hcd->regs + |
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5224 | /* Cache read-only capability registers */ |
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5232 | |
5233 | /* xhci-plat or xhci-pci might have set max_interrupters already */ |
5234 | if ((!xhci->max_interrupters) || |
5235 | xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1)) |
5236 | xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1); |
5237 | |
5238 | xhci->quirks |= quirks; |
5239 | |
5240 | if (get_quirks) |
5241 | get_quirks(dev, xhci); |
5242 | |
	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk makes the driver
	 * ignore such spurious events.
	 */
5247 | if (xhci->hci_version > 0x96) |
5248 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; |
5249 | |
5250 | /* Make sure the HC is halted. */ |
5251 | retval = xhci_halt(xhci); |
5252 | if (retval) |
5253 | return retval; |
5254 | |
5255 | xhci_zero_64b_regs(xhci); |
5256 | |
5257 | xhci_dbg(xhci, "Resetting HCD\n" ); |
5258 | /* Reset the internal HC memory state and registers. */ |
5259 | retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); |
5260 | if (retval) |
5261 | return retval; |
5262 | xhci_dbg(xhci, "Reset complete\n" ); |
5263 | |
5264 | /* |
5265 | * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) |
5266 | * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit |
5267 | * address memory pointers actually. So, this driver clears the AC64 |
5268 | * bit of xhci->hcc_params to call dma_set_coherent_mask(dev, |
5269 | * DMA_BIT_MASK(32)) in this xhci_gen_setup(). |
5270 | */ |
5271 | if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) |
5272 | xhci->hcc_params &= ~BIT(0); |
5273 | |
5274 | /* Set dma_mask and coherent_dma_mask to 64-bits, |
5275 | * if xHC supports 64-bit addressing */ |
5276 | if (HCC_64BIT_ADDR(xhci->hcc_params) && |
5277 | !dma_set_mask(dev, DMA_BIT_MASK(64))) { |
5278 | xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n" ); |
5279 | dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); |
5280 | } else { |
5281 | /* |
5282 | * This is to avoid error in cases where a 32-bit USB |
5283 | * controller is used on a 64-bit capable system. |
5284 | */ |
5285 | retval = dma_set_mask(dev, DMA_BIT_MASK(32)); |
5286 | if (retval) |
5287 | return retval; |
5288 | xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n" ); |
5289 | dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); |
5290 | } |
5291 | |
5292 | xhci_dbg(xhci, "Calling HCD init\n" ); |
5293 | /* Initialize HCD and host controller data structures. */ |
5294 | retval = xhci_init(hcd); |
5295 | if (retval) |
5296 | return retval; |
5297 | xhci_dbg(xhci, "Called HCD init\n" ); |
5298 | |
5299 | if (xhci_hcd_is_usb3(hcd)) |
5300 | xhci_hcd_init_usb3_data(xhci, hcd); |
5301 | else |
5302 | xhci_hcd_init_usb2_data(xhci, hcd); |
5303 | |
5304 | xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n" , |
5305 | xhci->hcc_params, xhci->hci_version, xhci->quirks); |
5306 | |
5307 | return 0; |
5308 | } |
5309 | EXPORT_SYMBOL_GPL(xhci_gen_setup); |
5310 | |
5311 | static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd, |
5312 | struct usb_host_endpoint *ep) |
5313 | { |
5314 | struct xhci_hcd *xhci; |
5315 | struct usb_device *udev; |
5316 | unsigned int slot_id; |
5317 | unsigned int ep_index; |
5318 | unsigned long flags; |
5319 | |
5320 | xhci = hcd_to_xhci(hcd); |
5321 | |
5322 | spin_lock_irqsave(&xhci->lock, flags); |
5323 | udev = (struct usb_device *)ep->hcpriv; |
5324 | slot_id = udev->slot_id; |
5325 | ep_index = xhci_get_endpoint_index(&ep->desc); |
5326 | |
5327 | xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT; |
5328 | xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
	spin_unlock_irqrestore(&xhci->lock, flags);
5330 | } |
5331 | |
5332 | static const struct hc_driver xhci_hc_driver = { |
	.description = "xhci-hcd",
	.product_desc = "xHCI Host Controller",
5335 | .hcd_priv_size = sizeof(struct xhci_hcd), |
5336 | |
5337 | /* |
5338 | * generic hardware linkage |
5339 | */ |
5340 | .irq = xhci_irq, |
5341 | .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED | |
5342 | HCD_BH, |
5343 | |
5344 | /* |
5345 | * basic lifecycle operations |
5346 | */ |
5347 | .reset = NULL, /* set in xhci_init_driver() */ |
5348 | .start = xhci_run, |
5349 | .stop = xhci_stop, |
5350 | .shutdown = xhci_shutdown, |
5351 | |
5352 | /* |
5353 | * managing i/o requests and associated device resources |
5354 | */ |
5355 | .map_urb_for_dma = xhci_map_urb_for_dma, |
5356 | .unmap_urb_for_dma = xhci_unmap_urb_for_dma, |
5357 | .urb_enqueue = xhci_urb_enqueue, |
5358 | .urb_dequeue = xhci_urb_dequeue, |
5359 | .alloc_dev = xhci_alloc_dev, |
5360 | .free_dev = xhci_free_dev, |
5361 | .alloc_streams = xhci_alloc_streams, |
5362 | .free_streams = xhci_free_streams, |
5363 | .add_endpoint = xhci_add_endpoint, |
5364 | .drop_endpoint = xhci_drop_endpoint, |
5365 | .endpoint_disable = xhci_endpoint_disable, |
5366 | .endpoint_reset = xhci_endpoint_reset, |
5367 | .check_bandwidth = xhci_check_bandwidth, |
5368 | .reset_bandwidth = xhci_reset_bandwidth, |
5369 | .address_device = xhci_address_device, |
5370 | .enable_device = xhci_enable_device, |
5371 | .update_hub_device = xhci_update_hub_device, |
5372 | .reset_device = xhci_discover_or_reset_device, |
5373 | |
5374 | /* |
5375 | * scheduling support |
5376 | */ |
5377 | .get_frame_number = xhci_get_frame, |
5378 | |
5379 | /* |
5380 | * root hub support |
5381 | */ |
5382 | .hub_control = xhci_hub_control, |
5383 | .hub_status_data = xhci_hub_status_data, |
5384 | .bus_suspend = xhci_bus_suspend, |
5385 | .bus_resume = xhci_bus_resume, |
5386 | .get_resuming_ports = xhci_get_resuming_ports, |
5387 | |
5388 | /* |
5389 | * call back when device connected and addressed |
5390 | */ |
5391 | .update_device = xhci_update_device, |
5392 | .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, |
5393 | .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, |
5394 | .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, |
5395 | .find_raw_port_number = xhci_find_raw_port_number, |
5396 | .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete, |
5397 | }; |
5398 | |
5399 | void xhci_init_driver(struct hc_driver *drv, |
5400 | const struct xhci_driver_overrides *over) |
5401 | { |
5402 | BUG_ON(!over); |
5403 | |
5404 | /* Copy the generic table to drv then apply the overrides */ |
5405 | *drv = xhci_hc_driver; |
5406 | |
5407 | if (over) { |
5408 | drv->hcd_priv_size += over->extra_priv_size; |
5409 | if (over->reset) |
5410 | drv->reset = over->reset; |
5411 | if (over->start) |
5412 | drv->start = over->start; |
5413 | if (over->add_endpoint) |
5414 | drv->add_endpoint = over->add_endpoint; |
5415 | if (over->drop_endpoint) |
5416 | drv->drop_endpoint = over->drop_endpoint; |
5417 | if (over->check_bandwidth) |
5418 | drv->check_bandwidth = over->check_bandwidth; |
5419 | if (over->reset_bandwidth) |
5420 | drv->reset_bandwidth = over->reset_bandwidth; |
5421 | if (over->update_hub_device) |
5422 | drv->update_hub_device = over->update_hub_device; |
5423 | if (over->hub_control) |
5424 | drv->hub_control = over->hub_control; |
5425 | } |
5426 | } |
5427 | EXPORT_SYMBOL_GPL(xhci_init_driver); |
5428 | |
5429 | MODULE_DESCRIPTION(DRIVER_DESC); |
5430 | MODULE_AUTHOR(DRIVER_AUTHOR); |
5431 | MODULE_LICENSE("GPL" ); |
5432 | |
5433 | static int __init xhci_hcd_init(void) |
5434 | { |
5435 | /* |
5436 | * Check the compiler generated sizes of structures that must be laid |
5437 | * out in specific ways for hardware access. |
5438 | */ |
5439 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); |
5440 | BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); |
5441 | BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); |
5442 | /* xhci_device_control has eight fields, and also |
5443 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx |
5444 | */ |
5445 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); |
5446 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); |
5447 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); |
5448 | BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8); |
5449 | BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); |
5450 | /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ |
5451 | BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); |
5452 | |
5453 | if (usb_disabled()) |
5454 | return -ENODEV; |
5455 | |
5456 | xhci_debugfs_create_root(); |
5457 | xhci_dbc_init(); |
5458 | |
5459 | return 0; |
5460 | } |
5461 | |
5462 | /* |
5463 | * If an init function is provided, an exit function must also be provided |
5464 | * to allow module unload. |
5465 | */ |
5466 | static void __exit xhci_hcd_fini(void) |
5467 | { |
5468 | xhci_debugfs_remove_root(); |
5469 | xhci_dbc_exit(); |
5470 | } |
5471 | |
5472 | module_init(xhci_hcd_init); |
5473 | module_exit(xhci_hcd_fini); |
5474 | |