// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify the consumer. If SW is the producer, it rings the doorbell for
 *    command and endpoint rings. If the HC is the producer, as for the event
 *    ring, it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer. HC is the consumer for the command and
 *    endpoint rings; it generates events on the event ring for these.
 */
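
/*
 * Illustrative sketch only (not used by the driver): how the cycle bit and
 * pointer rules above combine. process_trb() is a hypothetical placeholder;
 * the fields mirror struct xhci_ring as used throughout this file.
 *
 *	// Consumer: the TRB at dequeue is ours while its cycle bit matches
 *	// our ring cycle state.
 *	while ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
 *	       ring->cycle_state) {
 *		process_trb(ring->dequeue);	// hypothetical consumer work
 *		inc_deq(xhci, ring);		// may toggle ring->cycle_state
 *	}
 *
 *	// Producer side: a ring is full when advancing enqueue (skipping any
 *	// link TRBs) would land on dequeue, so one TRB always remains unused.
 */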
54 | |
55 | #include <linux/scatterlist.h> |
56 | #include <linux/slab.h> |
57 | #include <linux/dma-mapping.h> |
58 | #include "xhci.h" |
59 | #include "xhci-trace.h" |
60 | |
61 | static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, |
62 | u32 field1, u32 field2, |
63 | u32 field3, u32 field4, bool command_must_succeed); |
64 | |
65 | /* |
66 | * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA |
67 | * address of the TRB. |
68 | */ |
69 | dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, |
70 | union xhci_trb *trb) |
71 | { |
72 | unsigned long segment_offset; |
73 | |
74 | if (!seg || !trb || trb < seg->trbs) |
75 | return 0; |
76 | /* offset in TRBs */ |
77 | segment_offset = trb - seg->trbs; |
78 | if (segment_offset >= TRBS_PER_SEGMENT) |
79 | return 0; |
80 | return seg->dma + (segment_offset * sizeof(*trb)); |
81 | } |
82 | |
83 | static bool trb_is_noop(union xhci_trb *trb) |
84 | { |
85 | return TRB_TYPE_NOOP_LE32(trb->generic.field[3]); |
86 | } |
87 | |
88 | static bool trb_is_link(union xhci_trb *trb) |
89 | { |
90 | return TRB_TYPE_LINK_LE32(trb->link.control); |
91 | } |
92 | |
93 | static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb) |
94 | { |
95 | return trb == &seg->trbs[TRBS_PER_SEGMENT - 1]; |
96 | } |
97 | |
98 | static bool last_trb_on_ring(struct xhci_ring *ring, |
99 | struct xhci_segment *seg, union xhci_trb *trb) |
100 | { |
101 | return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg); |
102 | } |
103 | |
104 | static bool link_trb_toggles_cycle(union xhci_trb *trb) |
105 | { |
106 | return le32_to_cpu(trb->link.control) & LINK_TOGGLE; |
107 | } |
108 | |
109 | static bool last_td_in_urb(struct xhci_td *td) |
110 | { |
111 | struct urb_priv *urb_priv = td->urb->hcpriv; |
112 | |
113 | return urb_priv->num_tds_done == urb_priv->num_tds; |
114 | } |
115 | |
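/*
 * An event TRB at the ring dequeue is still owned by software (unhandled)
 * while its cycle bit matches the driver's consumer cycle state.
 */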
116 | static bool unhandled_event_trb(struct xhci_ring *ring) |
117 | { |
118 | return ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) == |
119 | ring->cycle_state); |
120 | } |
121 | |
122 | static void inc_td_cnt(struct urb *urb) |
123 | { |
124 | struct urb_priv *urb_priv = urb->hcpriv; |
125 | |
126 | urb_priv->num_tds_done++; |
127 | } |
128 | |
129 | static void trb_to_noop(union xhci_trb *trb, u32 noop_type) |
130 | { |
131 | if (trb_is_link(trb)) { |
132 | /* unchain chained link TRBs */ |
133 | trb->link.control &= cpu_to_le32(~TRB_CHAIN); |
134 | } else { |
135 | trb->generic.field[0] = 0; |
136 | trb->generic.field[1] = 0; |
137 | trb->generic.field[2] = 0; |
138 | /* Preserve only the cycle bit of this TRB */ |
139 | trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); |
140 | trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type)); |
141 | } |
142 | } |
143 | |
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment. This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		     struct xhci_ring *ring,
		     struct xhci_segment **seg,
		     union xhci_trb **trb)
{
	if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}
160 | |
161 | /* |
162 | * See Cycle bit rules. SW is the consumer for the event ring only. |
163 | */ |
164 | void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) |
165 | { |
166 | unsigned int link_trb_count = 0; |
167 | |
168 | /* event ring doesn't have link trbs, check for last trb */ |
169 | if (ring->type == TYPE_EVENT) { |
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
171 | ring->dequeue++; |
172 | goto out; |
173 | } |
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
175 | ring->cycle_state ^= 1; |
176 | ring->deq_seg = ring->deq_seg->next; |
177 | ring->dequeue = ring->deq_seg->trbs; |
178 | goto out; |
179 | } |
180 | |
181 | /* All other rings have link trbs */ |
	if (!trb_is_link(ring->dequeue)) {
		if (last_trb_on_seg(ring->deq_seg, ring->dequeue))
184 | xhci_warn(xhci, "Missing link TRB at end of segment\n"); |
185 | else |
186 | ring->dequeue++; |
187 | } |
188 | |
	while (trb_is_link(ring->dequeue)) {
190 | ring->deq_seg = ring->deq_seg->next; |
191 | ring->dequeue = ring->deq_seg->trbs; |
192 | |
193 | if (link_trb_count++ > ring->num_segs) { |
194 | xhci_warn(xhci, "Ring is an endless link TRB loop\n"); |
195 | break; |
196 | } |
197 | } |
198 | out: |
199 | trace_xhci_inc_deq(ring); |
200 | |
201 | return; |
202 | } |
203 | |
204 | /* |
205 | * See Cycle bit rules. SW is the consumer for the event ring only. |
206 | * |
207 | * If we've just enqueued a TRB that is in the middle of a TD (meaning the |
208 | * chain bit is set), then set the chain bit in all the following link TRBs. |
209 | * If we've enqueued the last TRB in a TD, make sure the following link TRBs |
210 | * have their chain bit cleared (so that each Link TRB is a separate TD). |
211 | * |
212 | * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit |
213 | * set, but other sections talk about dealing with the chain bit set. This was |
214 | * fixed in the 0.96 specification errata, but we have to assume that all 0.95 |
215 | * xHCI hardware can't handle the chain bit being cleared on a link TRB. |
216 | * |
217 | * @more_trbs_coming: Will you enqueue more TRBs before calling |
218 | * prepare_transfer()? |
219 | */ |
220 | static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, |
221 | bool more_trbs_coming) |
222 | { |
223 | u32 chain; |
224 | union xhci_trb *next; |
225 | unsigned int link_trb_count = 0; |
226 | |
227 | chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN; |
228 | |
	if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
230 | xhci_err(xhci, "Tried to move enqueue past ring segment\n"); |
231 | return; |
232 | } |
233 | |
234 | next = ++(ring->enqueue); |
235 | |
236 | /* Update the dequeue pointer further if that was a link TRB */ |
	while (trb_is_link(next)) {
238 | |
239 | /* |
240 | * If the caller doesn't plan on enqueueing more TDs before |
241 | * ringing the doorbell, then we don't want to give the link TRB |
242 | * to the hardware just yet. We'll give the link TRB back in |
243 | * prepare_ring() just before we enqueue the TD at the top of |
244 | * the ring. |
245 | */ |
246 | if (!chain && !more_trbs_coming) |
247 | break; |
248 | |
249 | /* If we're not dealing with 0.95 hardware or isoc rings on |
250 | * AMD 0.96 host, carry over the chain bit of the previous TRB |
251 | * (which may mean the chain bit is cleared). |
252 | */ |
253 | if (!(ring->type == TYPE_ISOC && |
254 | (xhci->quirks & XHCI_AMD_0x96_HOST)) && |
255 | !xhci_link_trb_quirk(xhci)) { |
256 | next->link.control &= cpu_to_le32(~TRB_CHAIN); |
257 | next->link.control |= cpu_to_le32(chain); |
258 | } |
259 | /* Give this link TRB to the hardware */ |
260 | wmb(); |
261 | next->link.control ^= cpu_to_le32(TRB_CYCLE); |
262 | |
263 | /* Toggle the cycle bit after the last ring segment. */ |
		if (link_trb_toggles_cycle(next))
265 | ring->cycle_state ^= 1; |
266 | |
267 | ring->enq_seg = ring->enq_seg->next; |
268 | ring->enqueue = ring->enq_seg->trbs; |
269 | next = ring->enqueue; |
270 | |
271 | if (link_trb_count++ > ring->num_segs) { |
272 | xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__); |
273 | break; |
274 | } |
275 | } |
276 | |
277 | trace_xhci_inc_enq(ring); |
278 | } |
279 | |
280 | /* |
281 | * Return number of free normal TRBs from enqueue to dequeue pointer on ring. |
282 | * Not counting an assumed link TRB at end of each TRBS_PER_SEGMENT sized segment. |
283 | * Only for transfer and command rings where driver is the producer, not for |
284 | * event rings. |
285 | */ |
286 | static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring *ring) |
287 | { |
288 | struct xhci_segment *enq_seg = ring->enq_seg; |
289 | union xhci_trb *enq = ring->enqueue; |
290 | union xhci_trb *last_on_seg; |
291 | unsigned int free = 0; |
292 | int i = 0; |
293 | |
294 | /* Ring might be empty even if enq != deq if enq is left on a link trb */ |
	if (trb_is_link(enq)) {
296 | enq_seg = enq_seg->next; |
297 | enq = enq_seg->trbs; |
298 | } |
299 | |
300 | /* Empty ring, common case, don't walk the segments */ |
301 | if (enq == ring->dequeue) |
302 | return ring->num_segs * (TRBS_PER_SEGMENT - 1); |
303 | |
304 | do { |
305 | if (ring->deq_seg == enq_seg && ring->dequeue >= enq) |
306 | return free + (ring->dequeue - enq); |
307 | last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1]; |
308 | free += last_on_seg - enq; |
309 | enq_seg = enq_seg->next; |
310 | enq = enq_seg->trbs; |
311 | } while (i++ <= ring->num_segs); |
312 | |
313 | return free; |
314 | } |
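
/*
 * Worked example of the count above (assuming TRBS_PER_SEGMENT == 256): on an
 * empty two-segment ring the function returns 2 * 255 = 510 usable TRBs,
 * because the link TRB at the end of each segment is never counted as free.
 */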
315 | |
316 | /* |
317 | * Check to see if there's room to enqueue num_trbs on the ring and make sure |
318 | * enqueue pointer will not advance into dequeue segment. See rules above. |
319 | * return number of new segments needed to ensure this. |
320 | */ |
321 | |
322 | static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhci_ring *ring, |
323 | unsigned int num_trbs) |
324 | { |
325 | struct xhci_segment *seg; |
326 | int trbs_past_seg; |
327 | int enq_used; |
328 | int new_segs; |
329 | |
330 | enq_used = ring->enqueue - ring->enq_seg->trbs; |
331 | |
332 | /* how many trbs will be queued past the enqueue segment? */ |
333 | trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1); |
334 | |
335 | /* |
336 | * Consider expanding the ring already if num_trbs fills the current |
337 | * segment (i.e. trbs_past_seg == 0), not only when num_trbs goes into |
338 | * the next segment. Avoids confusing full ring with special empty ring |
339 | * case below |
340 | */ |
341 | if (trbs_past_seg < 0) |
342 | return 0; |
343 | |
344 | /* Empty ring special case, enqueue stuck on link trb while dequeue advanced */ |
	if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue)
346 | return 0; |
347 | |
348 | new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1)); |
349 | seg = ring->enq_seg; |
350 | |
351 | while (new_segs > 0) { |
352 | seg = seg->next; |
353 | if (seg == ring->deq_seg) { |
354 | xhci_dbg(xhci, "Ring expansion by %d segments needed\n", |
355 | new_segs); |
356 | xhci_dbg(xhci, "Adding %d trbs moves enq %d trbs into deq seg\n", |
357 | num_trbs, trbs_past_seg % TRBS_PER_SEGMENT); |
358 | return new_segs; |
359 | } |
360 | new_segs--; |
361 | } |
362 | |
363 | return 0; |
364 | } |
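
/*
 * Worked example of the arithmetic above (assuming TRBS_PER_SEGMENT == 256):
 * with enqueue 200 TRBs into its segment and a 70-TRB request,
 * trbs_past_seg = 200 + 70 - 255 = 15, so new_segs = 1 + (15 / 255) = 1;
 * expansion is requested only if that next segment is already the dequeue
 * segment.
 */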
365 | |
366 | /* Ring the host controller doorbell after placing a command on the ring */ |
367 | void xhci_ring_cmd_db(struct xhci_hcd *xhci) |
368 | { |
369 | if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) |
370 | return; |
371 | |
372 | xhci_dbg(xhci, "// Ding dong!\n"); |
373 | |
	trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);

	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
379 | } |
380 | |
381 | static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci) |
382 | { |
	return mod_delayed_work(system_wq, &xhci->cmd_timer,
			msecs_to_jiffies(xhci->current_cmd->timeout_ms));
385 | } |
386 | |
387 | static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci) |
388 | { |
389 | return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command, |
390 | cmd_list); |
391 | } |
392 | |
393 | /* |
394 | * Turn all commands on command ring with status set to "aborted" to no-op trbs. |
395 | * If there are other commands waiting then restart the ring and kick the timer. |
396 | * This must be called with command ring stopped and xhci->lock held. |
397 | */ |
398 | static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, |
399 | struct xhci_command *cur_cmd) |
400 | { |
401 | struct xhci_command *i_cmd; |
402 | |
403 | /* Turn all aborted commands in list to no-ops, then restart */ |
404 | list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) { |
405 | |
406 | if (i_cmd->status != COMP_COMMAND_ABORTED) |
407 | continue; |
408 | |
409 | i_cmd->status = COMP_COMMAND_RING_STOPPED; |
410 | |
411 | xhci_dbg(xhci, "Turn aborted command %p to no-op\n", |
412 | i_cmd->command_trb); |
413 | |
		trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);
415 | |
416 | /* |
417 | * caller waiting for completion is called when command |
418 | * completion event is received for these no-op commands |
419 | */ |
420 | } |
421 | |
422 | xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; |
423 | |
424 | /* ring command ring doorbell to restart the command ring */ |
425 | if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) && |
426 | !(xhci->xhc_state & XHCI_STATE_DYING)) { |
427 | xhci->current_cmd = cur_cmd; |
428 | xhci_mod_cmd_timer(xhci); |
429 | xhci_ring_cmd_db(xhci); |
430 | } |
431 | } |
432 | |
/* Must be called with xhci->lock held; releases and re-acquires the lock */
434 | static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) |
435 | { |
436 | struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg; |
437 | union xhci_trb *new_deq = xhci->cmd_ring->dequeue; |
438 | u64 crcr; |
439 | int ret; |
440 | |
441 | xhci_dbg(xhci, "Abort command ring\n"); |
442 | |
	reinit_completion(&xhci->cmd_ring_stop_completion);
444 | |
445 | /* |
446 | * The control bits like command stop, abort are located in lower |
447 | * dword of the command ring control register. |
448 | * Some controllers require all 64 bits to be written to abort the ring. |
449 | * Make sure the upper dword is valid, pointing to the next command, |
450 | * avoiding corrupting the command ring pointer in case the command ring |
451 | * is stopped by the time the upper dword is written. |
452 | */ |
	next_trb(xhci, NULL, &new_seg, &new_deq);
	if (trb_is_link(new_deq))
		next_trb(xhci, NULL, &new_seg, &new_deq);

	crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
	xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
459 | |
460 | /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the |
461 | * completion of the Command Abort operation. If CRR is not negated in 5 |
462 | * seconds then driver handles it as if host died (-ENODEV). |
463 | * In the future we should distinguish between -ENODEV and -ETIMEDOUT |
464 | * and try to recover a -ETIMEDOUT with a host controller reset. |
465 | */ |
	ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000,
468 | XHCI_STATE_REMOVING); |
469 | if (ret < 0) { |
470 | xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret); |
471 | xhci_halt(xhci); |
472 | xhci_hc_died(xhci); |
473 | return ret; |
474 | } |
	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Wait 2 secs (arbitrary
	 * number) to handle those cases after negation of CMD_RING_RUNNING.
	 */
	spin_unlock_irqrestore(&xhci->lock, flags);
	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
					  msecs_to_jiffies(2000));
	spin_lock_irqsave(&xhci->lock, flags);
485 | if (!ret) { |
486 | xhci_dbg(xhci, "No stop event for abort, ring start fail?\n"); |
487 | xhci_cleanup_command_queue(xhci); |
488 | } else { |
		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
490 | } |
491 | return 0; |
492 | } |
493 | |
494 | void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, |
495 | unsigned int slot_id, |
496 | unsigned int ep_index, |
497 | unsigned int stream_id) |
498 | { |
499 | __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; |
500 | struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; |
501 | unsigned int ep_state = ep->ep_state; |
502 | |
503 | /* Don't ring the doorbell for this endpoint if there are pending |
504 | * cancellations because we don't want to interrupt processing. |
505 | * We don't want to restart any stream rings if there's a set dequeue |
506 | * pointer command pending because the device can choose to start any |
507 | * stream once the endpoint is on the HW schedule. |
508 | */ |
509 | if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) || |
510 | (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT)) |
511 | return; |
512 | |
	trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));

	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* flush the write */
	readl(db_addr);
518 | } |
519 | |
520 | /* Ring the doorbell for any rings with pending URBs */ |
521 | static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, |
522 | unsigned int slot_id, |
523 | unsigned int ep_index) |
524 | { |
525 | unsigned int stream_id; |
526 | struct xhci_virt_ep *ep; |
527 | |
528 | ep = &xhci->devs[slot_id]->eps[ep_index]; |
529 | |
530 | /* A ring has pending URBs if its TD list is not empty */ |
531 | if (!(ep->ep_state & EP_HAS_STREAMS)) { |
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
534 | return; |
535 | } |
536 | |
537 | for (stream_id = 1; stream_id < ep->stream_info->num_streams; |
538 | stream_id++) { |
539 | struct xhci_stream_info *stream_info = ep->stream_info; |
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					      stream_id);
543 | } |
544 | } |
545 | |
546 | void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci, |
547 | unsigned int slot_id, |
548 | unsigned int ep_index) |
549 | { |
550 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
551 | } |
552 | |
553 | static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci, |
554 | unsigned int slot_id, |
555 | unsigned int ep_index) |
556 | { |
557 | if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) { |
558 | xhci_warn(xhci, "Invalid slot_id %u\n", slot_id); |
559 | return NULL; |
560 | } |
561 | if (ep_index >= EP_CTX_PER_DEV) { |
562 | xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index); |
563 | return NULL; |
564 | } |
565 | if (!xhci->devs[slot_id]) { |
566 | xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id); |
567 | return NULL; |
568 | } |
569 | |
570 | return &xhci->devs[slot_id]->eps[ep_index]; |
571 | } |
572 | |
573 | static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci, |
574 | struct xhci_virt_ep *ep, |
575 | unsigned int stream_id) |
576 | { |
577 | /* common case, no streams */ |
578 | if (!(ep->ep_state & EP_HAS_STREAMS)) |
579 | return ep->ring; |
580 | |
581 | if (!ep->stream_info) |
582 | return NULL; |
583 | |
584 | if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) { |
585 | xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n", |
586 | stream_id, ep->vdev->slot_id, ep->ep_index); |
587 | return NULL; |
588 | } |
589 | |
590 | return ep->stream_info->stream_rings[stream_id]; |
591 | } |
592 | |
593 | /* Get the right ring for the given slot_id, ep_index and stream_id. |
594 | * If the endpoint supports streams, boundary check the URB's stream ID. |
595 | * If the endpoint doesn't support streams, return the singular endpoint ring. |
596 | */ |
597 | struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, |
598 | unsigned int slot_id, unsigned int ep_index, |
599 | unsigned int stream_id) |
600 | { |
601 | struct xhci_virt_ep *ep; |
602 | |
603 | ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
604 | if (!ep) |
605 | return NULL; |
606 | |
607 | return xhci_virt_ep_to_ring(xhci, ep, stream_id); |
608 | } |
609 | |
610 | |
611 | /* |
612 | * Get the hw dequeue pointer xHC stopped on, either directly from the |
613 | * endpoint context, or if streams are in use from the stream context. |
614 | * The returned hw_dequeue contains the lowest four bits with cycle state |
 * and possible stream context type.
616 | */ |
617 | static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev, |
618 | unsigned int ep_index, unsigned int stream_id) |
619 | { |
620 | struct xhci_ep_ctx *ep_ctx; |
621 | struct xhci_stream_ctx *st_ctx; |
622 | struct xhci_virt_ep *ep; |
623 | |
624 | ep = &vdev->eps[ep_index]; |
625 | |
626 | if (ep->ep_state & EP_HAS_STREAMS) { |
627 | st_ctx = &ep->stream_info->stream_ctx_array[stream_id]; |
628 | return le64_to_cpu(st_ctx->stream_ring); |
629 | } |
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
631 | return le64_to_cpu(ep_ctx->deq); |
632 | } |
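
/*
 * Callers typically split the returned value into the DMA dequeue address and
 * the low control bits; a sketch of the pattern used later in this file
 * (deq_dma is a hypothetical local name):
 *
 *	hw_dequeue = xhci_get_hw_deq(xhci, vdev, ep_index, stream_id);
 *	new_cycle  = hw_dequeue & 0x1;		// consumer cycle state
 *	deq_dma    = hw_dequeue & ~0xf;		// 16-byte aligned TRB address
 */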
633 | |
634 | static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, |
635 | unsigned int slot_id, unsigned int ep_index, |
636 | unsigned int stream_id, struct xhci_td *td) |
637 | { |
638 | struct xhci_virt_device *dev = xhci->devs[slot_id]; |
639 | struct xhci_virt_ep *ep = &dev->eps[ep_index]; |
640 | struct xhci_ring *ep_ring; |
641 | struct xhci_command *cmd; |
642 | struct xhci_segment *new_seg; |
643 | union xhci_trb *new_deq; |
644 | int new_cycle; |
645 | dma_addr_t addr; |
646 | u64 hw_dequeue; |
647 | bool cycle_found = false; |
648 | bool td_last_trb_found = false; |
649 | u32 trb_sct = 0; |
650 | int ret; |
651 | |
652 | ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, |
653 | ep_index, stream_id); |
654 | if (!ep_ring) { |
655 | xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n", |
656 | stream_id); |
657 | return -ENODEV; |
658 | } |
659 | /* |
660 | * A cancelled TD can complete with a stall if HW cached the trb. |
661 | * In this case driver can't find td, but if the ring is empty we |
662 | * can move the dequeue pointer to the current enqueue position. |
663 | * We shouldn't hit this anymore as cached cancelled TRBs are given back |
664 | * after clearing the cache, but be on the safe side and keep it anyway |
665 | */ |
666 | if (!td) { |
		if (list_empty(&ep_ring->td_list)) {
668 | new_seg = ep_ring->enq_seg; |
669 | new_deq = ep_ring->enqueue; |
670 | new_cycle = ep_ring->cycle_state; |
671 | xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue"); |
672 | goto deq_found; |
673 | } else { |
674 | xhci_warn(xhci, "Can't find new dequeue state, missing td\n"); |
675 | return -EINVAL; |
676 | } |
677 | } |
678 | |
	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
680 | new_seg = ep_ring->deq_seg; |
681 | new_deq = ep_ring->dequeue; |
682 | new_cycle = hw_dequeue & 0x1; |
683 | |
684 | /* |
685 | * We want to find the pointer, segment and cycle state of the new trb |
686 | * (the one after current TD's last_trb). We know the cycle state at |
687 | * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are |
688 | * found. |
689 | */ |
690 | do { |
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
693 | cycle_found = true; |
694 | if (td_last_trb_found) |
695 | break; |
696 | } |
697 | if (new_deq == td->last_trb) |
698 | td_last_trb_found = true; |
699 | |
		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
702 | new_cycle ^= 0x1; |
703 | |
		next_trb(xhci, ep_ring, &new_seg, &new_deq);
705 | |
706 | /* Search wrapped around, bail out */ |
707 | if (new_deq == ep->ring->dequeue) { |
708 | xhci_err(xhci, "Error: Failed finding new dequeue state\n"); |
709 | return -EINVAL; |
710 | } |
711 | |
712 | } while (!cycle_found || !td_last_trb_found); |
713 | |
714 | deq_found: |
715 | |
716 | /* Don't update the ring cycle state for the producer (us). */ |
717 | addr = xhci_trb_virt_to_dma(seg: new_seg, trb: new_deq); |
718 | if (addr == 0) { |
719 | xhci_warn(xhci, "Can't find dma of new dequeue ptr\n"); |
720 | xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq); |
721 | return -EINVAL; |
722 | } |
723 | |
724 | if ((ep->ep_state & SET_DEQ_PENDING)) { |
725 | xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n", |
726 | &addr); |
727 | return -EBUSY; |
728 | } |
729 | |
730 | /* This function gets called from contexts where it cannot sleep */ |
	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
732 | if (!cmd) { |
733 | xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr); |
734 | return -ENOMEM; |
735 | } |
736 | |
737 | if (stream_id) |
738 | trb_sct = SCT_FOR_TRB(SCT_PRI_TR); |
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | new_cycle,
		upper_32_bits(addr),
		STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
		EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
744 | if (ret < 0) { |
		xhci_free_command(xhci, cmd);
746 | return ret; |
747 | } |
748 | ep->queued_deq_seg = new_seg; |
749 | ep->queued_deq_ptr = new_deq; |
750 | |
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);
753 | |
754 | /* Stop the TD queueing code from ringing the doorbell until |
755 | * this command completes. The HC won't set the dequeue pointer |
756 | * if the ring is running, and ringing the doorbell starts the |
757 | * ring running. |
758 | */ |
759 | ep->ep_state |= SET_DEQ_PENDING; |
760 | xhci_ring_cmd_db(xhci); |
761 | return 0; |
762 | } |
763 | |
764 | /* flip_cycle means flip the cycle bit of all but the first and last TRB. |
765 | * (The last TRB actually points to the ring enqueue pointer, which is not part |
766 | * of this TD.) This is used to remove partially enqueued isoc TDs from a ring. |
767 | */ |
768 | static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, |
769 | struct xhci_td *td, bool flip_cycle) |
770 | { |
771 | struct xhci_segment *seg = td->start_seg; |
772 | union xhci_trb *trb = td->first_trb; |
773 | |
774 | while (1) { |
775 | trb_to_noop(trb, TRB_TR_NOOP); |
776 | |
777 | /* flip cycle if asked to */ |
778 | if (flip_cycle && trb != td->first_trb && trb != td->last_trb) |
779 | trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); |
780 | |
781 | if (trb == td->last_trb) |
782 | break; |
783 | |
		next_trb(xhci, ep_ring, &seg, &trb);
785 | } |
786 | } |
787 | |
788 | /* |
789 | * Must be called with xhci->lock held in interrupt context, |
790 | * releases and re-acquires xhci->lock |
791 | */ |
792 | static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, |
793 | struct xhci_td *cur_td, int status) |
794 | { |
795 | struct urb *urb = cur_td->urb; |
796 | struct urb_priv *urb_priv = urb->hcpriv; |
	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
798 | |
799 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { |
800 | xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; |
801 | if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { |
802 | if (xhci->quirks & XHCI_AMD_PLL_FIX) |
803 | usb_amd_quirk_pll_enable(); |
804 | } |
805 | } |
806 | xhci_urb_free_priv(urb_priv); |
807 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
808 | trace_xhci_urb_giveback(urb); |
809 | usb_hcd_giveback_urb(hcd, urb, status); |
810 | } |
811 | |
812 | static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, |
813 | struct xhci_ring *ring, struct xhci_td *td) |
814 | { |
815 | struct device *dev = xhci_to_hcd(xhci)->self.sysdev; |
816 | struct xhci_segment *seg = td->bounce_seg; |
817 | struct urb *urb = td->urb; |
818 | size_t len; |
819 | |
820 | if (!ring || !seg || !urb) |
821 | return; |
822 | |
823 | if (usb_urb_dir_out(urb)) { |
824 | dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, |
825 | DMA_TO_DEVICE); |
826 | return; |
827 | } |
828 | |
829 | dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, |
830 | DMA_FROM_DEVICE); |
	/* for in transfers we need to copy the data from bounce to sg */
	if (urb->num_sgs) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
					   seg->bounce_len, seg->bounce_offs);
835 | if (len != seg->bounce_len) |
836 | xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", |
837 | len, seg->bounce_len); |
838 | } else { |
839 | memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, |
840 | seg->bounce_len); |
841 | } |
842 | seg->bounce_len = 0; |
843 | seg->bounce_offs = 0; |
844 | } |
845 | |
846 | static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, |
847 | struct xhci_ring *ep_ring, int status) |
848 | { |
849 | struct urb *urb = NULL; |
850 | |
851 | /* Clean up the endpoint's TD list */ |
852 | urb = td->urb; |
853 | |
854 | /* if a bounce buffer was used to align this td then unmap it */ |
	xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
856 | |
857 | /* Do one last check of the actual transfer length. |
858 | * If the host controller said we transferred more data than the buffer |
859 | * length, urb->actual_length will be a very big number (since it's |
860 | * unsigned). Play it safe and say we didn't transfer anything. |
861 | */ |
862 | if (urb->actual_length > urb->transfer_buffer_length) { |
863 | xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", |
864 | urb->transfer_buffer_length, urb->actual_length); |
865 | urb->actual_length = 0; |
866 | status = 0; |
867 | } |
868 | /* TD might be removed from td_list if we are giving back a cancelled URB */ |
	if (!list_empty(&td->td_list))
		list_del_init(&td->td_list);
	/* Giving back a cancelled URB, or if a slated TD completed anyway */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);
874 | |
875 | inc_td_cnt(urb); |
876 | /* Giveback the urb when all the tds are completed */ |
877 | if (last_td_in_urb(td)) { |
878 | if ((urb->actual_length != urb->transfer_buffer_length && |
879 | (urb->transfer_flags & URB_SHORT_NOT_OK)) || |
		    (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
881 | xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", |
882 | urb, urb->actual_length, |
883 | urb->transfer_buffer_length, status); |
884 | |
885 | /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ |
886 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) |
887 | status = 0; |
		xhci_giveback_urb_in_irq(xhci, td, status);
889 | } |
890 | |
891 | return 0; |
892 | } |
893 | |
894 | |
895 | /* Complete the cancelled URBs we unlinked from td_list. */ |
896 | static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep) |
897 | { |
898 | struct xhci_ring *ring; |
899 | struct xhci_td *td, *tmp_td; |
900 | |
901 | list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, |
902 | cancelled_td_list) { |
903 | |
		ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
905 | |
906 | if (td->cancel_status == TD_CLEARED) { |
907 | xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", |
908 | __func__, td->urb); |
			xhci_td_cleanup(ep->xhci, td, ring, td->status);
910 | } else { |
911 | xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", |
912 | __func__, td->urb, td->cancel_status); |
913 | } |
914 | if (ep->xhci->xhc_state & XHCI_STATE_DYING) |
915 | return; |
916 | } |
917 | } |
918 | |
919 | static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id, |
920 | unsigned int ep_index, enum xhci_ep_reset_type reset_type) |
921 | { |
922 | struct xhci_command *command; |
923 | int ret = 0; |
924 | |
	command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
926 | if (!command) { |
927 | ret = -ENOMEM; |
928 | goto done; |
929 | } |
930 | |
931 | xhci_dbg(xhci, "%s-reset ep %u, slot %u\n", |
932 | (reset_type == EP_HARD_RESET) ? "Hard": "Soft", |
933 | ep_index, slot_id); |
934 | |
	ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
936 | done: |
937 | if (ret) |
938 | xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n", |
939 | slot_id, ep_index, ret); |
940 | return ret; |
941 | } |
942 | |
943 | static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci, |
944 | struct xhci_virt_ep *ep, |
945 | struct xhci_td *td, |
946 | enum xhci_ep_reset_type reset_type) |
947 | { |
948 | unsigned int slot_id = ep->vdev->slot_id; |
949 | int err; |
950 | |
951 | /* |
952 | * Avoid resetting endpoint if link is inactive. Can cause host hang. |
953 | * Device will be reset soon to recover the link so don't do anything |
954 | */ |
955 | if (ep->vdev->flags & VDEV_PORT_ERROR) |
956 | return -ENODEV; |
957 | |
958 | /* add td to cancelled list and let reset ep handler take care of it */ |
959 | if (reset_type == EP_HARD_RESET) { |
960 | ep->ep_state |= EP_HARD_CLEAR_TOGGLE; |
		if (td && list_empty(&td->cancelled_td_list)) {
			list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
963 | td->cancel_status = TD_HALTED; |
964 | } |
965 | } |
966 | |
967 | if (ep->ep_state & EP_HALTED) { |
968 | xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n", |
969 | ep->ep_index); |
970 | return 0; |
971 | } |
972 | |
	err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
974 | if (err) |
975 | return err; |
976 | |
977 | ep->ep_state |= EP_HALTED; |
978 | |
979 | xhci_ring_cmd_db(xhci); |
980 | |
981 | return 0; |
982 | } |
983 | |
984 | /* |
985 | * Fix up the ep ring first, so HW stops executing cancelled TDs. |
986 | * We have the xHCI lock, so nothing can modify this list until we drop it. |
987 | * We're also in the event handler, so we can't get re-interrupted if another |
988 | * Stop Endpoint command completes. |
989 | * |
990 | * only call this when ring is not in a running state |
991 | */ |
992 | |
993 | static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) |
994 | { |
995 | struct xhci_hcd *xhci; |
996 | struct xhci_td *td = NULL; |
997 | struct xhci_td *tmp_td = NULL; |
998 | struct xhci_td *cached_td = NULL; |
999 | struct xhci_ring *ring; |
1000 | u64 hw_deq; |
1001 | unsigned int slot_id = ep->vdev->slot_id; |
1002 | int err; |
1003 | |
1004 | xhci = ep->xhci; |
1005 | |
1006 | list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { |
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
			       (unsigned long long)xhci_trb_virt_to_dma(
				       td->start_seg, td->first_trb),
1011 | td->urb->stream_id, td->urb); |
		list_del_init(&td->td_list);
		ring = xhci_urb_to_transfer_ring(xhci, td->urb);
1014 | if (!ring) { |
1015 | xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n", |
1016 | td->urb, td->urb->stream_id); |
1017 | continue; |
1018 | } |
1019 | /* |
1020 | * If a ring stopped on the TD we need to cancel then we have to |
1021 | * move the xHC endpoint ring dequeue pointer past this TD. |
1022 | * Rings halted due to STALL may show hw_deq is past the stalled |
1023 | * TD, but still require a set TR Deq command to flush xHC cache. |
1024 | */ |
		hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
					 td->urb->stream_id);
1027 | hw_deq &= ~0xf; |
1028 | |
1029 | if (td->cancel_status == TD_HALTED || |
		    trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
1031 | switch (td->cancel_status) { |
1032 | case TD_CLEARED: /* TD is already no-op */ |
1033 | case TD_CLEARING_CACHE: /* set TR deq command already queued */ |
1034 | break; |
1035 | case TD_DIRTY: /* TD is cached, clear it */ |
1036 | case TD_HALTED: |
1037 | td->cancel_status = TD_CLEARING_CACHE; |
1038 | if (cached_td) |
1039 | /* FIXME stream case, several stopped rings */ |
1040 | xhci_dbg(xhci, |
1041 | "Move dq past stream %u URB %p instead of stream %u URB %p\n", |
1042 | td->urb->stream_id, td->urb, |
1043 | cached_td->urb->stream_id, cached_td->urb); |
1044 | cached_td = td; |
1045 | break; |
1046 | } |
1047 | } else { |
			td_to_noop(xhci, ring, td, false);
1049 | td->cancel_status = TD_CLEARED; |
1050 | } |
1051 | } |
1052 | |
1053 | /* If there's no need to move the dequeue pointer then we're done */ |
1054 | if (!cached_td) |
1055 | return 0; |
1056 | |
	err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
					cached_td->urb->stream_id,
					cached_td);
1060 | if (err) { |
1061 | /* Failed to move past cached td, just set cached TDs to no-op */ |
1062 | list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { |
1063 | if (td->cancel_status != TD_CLEARING_CACHE) |
1064 | continue; |
1065 | xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n", |
1066 | td->urb); |
			td_to_noop(xhci, ring, td, false);
1068 | td->cancel_status = TD_CLEARED; |
1069 | } |
1070 | } |
1071 | return 0; |
1072 | } |
1073 | |
1074 | /* |
1075 | * Returns the TD the endpoint ring halted on. |
1076 | * Only call for non-running rings without streams. |
1077 | */ |
1078 | static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep) |
1079 | { |
1080 | struct xhci_td *td; |
1081 | u64 hw_deq; |
1082 | |
	if (!list_empty(&ep->ring->td_list)) {	/* Not streams compatible */
		hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
		hw_deq &= ~0xf;
		td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
		if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
			      td->last_trb, hw_deq, false))
1089 | return td; |
1090 | } |
1091 | return NULL; |
1092 | } |
1093 | |
1094 | /* |
1095 | * When we get a command completion for a Stop Endpoint Command, we need to |
1096 | * unlink any cancelled TDs from the ring. There are two ways to do that: |
1097 | * |
1098 | * 1. If the HW was in the middle of processing the TD that needs to be |
1099 | * cancelled, then we must move the ring's dequeue pointer past the last TRB |
1100 | * in the TD with a Set Dequeue Pointer Command. |
1101 | * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain |
1102 | * bit cleared) so that the HW will skip over them. |
1103 | */ |
1104 | static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, |
1105 | union xhci_trb *trb, u32 comp_code) |
1106 | { |
1107 | unsigned int ep_index; |
1108 | struct xhci_virt_ep *ep; |
1109 | struct xhci_ep_ctx *ep_ctx; |
1110 | struct xhci_td *td = NULL; |
1111 | enum xhci_ep_reset_type reset_type; |
1112 | struct xhci_command *command; |
1113 | int err; |
1114 | |
1115 | if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { |
1116 | if (!xhci->devs[slot_id]) |
1117 | xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n", |
1118 | slot_id); |
1119 | return; |
1120 | } |
1121 | |
1122 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
1123 | ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
1124 | if (!ep) |
1125 | return; |
1126 | |
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1128 | |
	trace_xhci_handle_cmd_stop_ep(ep_ctx);
1130 | |
1131 | if (comp_code == COMP_CONTEXT_STATE_ERROR) { |
1132 | /* |
1133 | * If stop endpoint command raced with a halting endpoint we need to |
1134 | * reset the host side endpoint first. |
1135 | * If the TD we halted on isn't cancelled the TD should be given back |
1136 | * with a proper error code, and the ring dequeue moved past the TD. |
	 * In the streams case we can't find hw_deq, or the TD we halted on,
	 * so do a soft reset.
	 *
	 * The proper error code is unknown here; it would be -EPIPE if the
	 * device side of the endpoint halted (aka STALL), and -EPROTO if not
	 * (transaction error). We use -EPROTO: if the device is stalled it
	 * should return a stall error on the next transfer, which then returns
	 * -EPIPE, and the device-side stall is noted and cleared by the class
	 * driver.
1145 | */ |
1146 | switch (GET_EP_CTX_STATE(ep_ctx)) { |
1147 | case EP_STATE_HALTED: |
1148 | xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n"); |
1149 | if (ep->ep_state & EP_HAS_STREAMS) { |
1150 | reset_type = EP_SOFT_RESET; |
1151 | } else { |
1152 | reset_type = EP_HARD_RESET; |
1153 | td = find_halted_td(ep); |
1154 | if (td) |
1155 | td->status = -EPROTO; |
1156 | } |
1157 | /* reset ep, reset handler cleans up cancelled tds */ |
1158 | err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type); |
1159 | if (err) |
1160 | break; |
1161 | ep->ep_state &= ~EP_STOP_CMD_PENDING; |
1162 | return; |
1163 | case EP_STATE_STOPPED: |
1164 | /* |
1165 | * NEC uPD720200 sometimes sets this state and fails with |
1166 | * Context Error while continuing to process TRBs. |
1167 | * Be conservative and trust EP_CTX_STATE on other chips. |
1168 | */ |
1169 | if (!(xhci->quirks & XHCI_NEC_HOST)) |
1170 | break; |
1171 | fallthrough; |
1172 | case EP_STATE_RUNNING: |
1173 | /* Race, HW handled stop ep cmd before ep was running */ |
1174 | xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n"); |
1175 | |
			command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1177 | if (!command) { |
1178 | ep->ep_state &= ~EP_STOP_CMD_PENDING; |
1179 | return; |
1180 | } |
			xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
1182 | xhci_ring_cmd_db(xhci); |
1183 | |
1184 | return; |
1185 | default: |
1186 | break; |
1187 | } |
1188 | } |
1189 | |
1190 | /* will queue a set TR deq if stopped on a cancelled, uncleared TD */ |
1191 | xhci_invalidate_cancelled_tds(ep); |
1192 | ep->ep_state &= ~EP_STOP_CMD_PENDING; |
1193 | |
1194 | /* Otherwise ring the doorbell(s) to restart queued transfers */ |
1195 | xhci_giveback_invalidated_tds(ep); |
1196 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
1197 | } |
1198 | |
1199 | static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring) |
1200 | { |
1201 | struct xhci_td *cur_td; |
1202 | struct xhci_td *tmp; |
1203 | |
	list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
		list_del_init(&cur_td->td_list);

		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
1216 | } |
1217 | |
1218 | static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, |
1219 | int slot_id, int ep_index) |
1220 | { |
1221 | struct xhci_td *cur_td; |
1222 | struct xhci_td *tmp; |
1223 | struct xhci_virt_ep *ep; |
1224 | struct xhci_ring *ring; |
1225 | |
1226 | ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
1227 | if (!ep) |
1228 | return; |
1229 | |
1230 | if ((ep->ep_state & EP_HAS_STREAMS) || |
1231 | (ep->ep_state & EP_GETTING_NO_STREAMS)) { |
1232 | int stream_id; |
1233 | |
1234 | for (stream_id = 1; stream_id < ep->stream_info->num_streams; |
1235 | stream_id++) { |
1236 | ring = ep->stream_info->stream_rings[stream_id]; |
1237 | if (!ring) |
1238 | continue; |
1239 | |
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				       "Killing URBs for slot ID %u, ep index %u, stream %u",
				       slot_id, ep_index, stream_id);
1243 | xhci_kill_ring_urbs(xhci, ring); |
1244 | } |
1245 | } else { |
1246 | ring = ep->ring; |
1247 | if (!ring) |
1248 | return; |
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Killing URBs for slot ID %u, ep index %u",
			       slot_id, ep_index);
1252 | xhci_kill_ring_urbs(xhci, ring); |
1253 | } |
1254 | |
	list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
				 cancelled_td_list) {
		list_del_init(&cur_td->cancelled_td_list);
		inc_td_cnt(cur_td->urb);

		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
1263 | } |
1264 | |
1265 | /* |
1266 | * host controller died, register read returns 0xffffffff |
1267 | * Complete pending commands, mark them ABORTED. |
1268 | * URBs need to be given back as usb core might be waiting with device locks |
1269 | * held for the URBs to finish during device disconnect, blocking host remove. |
1270 | * |
1271 | * Call with xhci->lock held. |
 * The lock is released and re-acquired while giving back URBs.
1273 | */ |
1274 | void xhci_hc_died(struct xhci_hcd *xhci) |
1275 | { |
1276 | int i, j; |
1277 | |
1278 | if (xhci->xhc_state & XHCI_STATE_DYING) |
1279 | return; |
1280 | |
1281 | xhci_err(xhci, "xHCI host controller not responding, assume dead\n"); |
1282 | xhci->xhc_state |= XHCI_STATE_DYING; |
1283 | |
1284 | xhci_cleanup_command_queue(xhci); |
1285 | |
1286 | /* return any pending urbs, remove may be waiting for them */ |
1287 | for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) { |
1288 | if (!xhci->devs[i]) |
1289 | continue; |
1290 | for (j = 0; j < 31; j++) |
			xhci_kill_endpoint_urbs(xhci, i, j);
1292 | } |
1293 | |
1294 | /* inform usb core hc died if PCI remove isn't already handling it */ |
1295 | if (!(xhci->xhc_state & XHCI_STATE_REMOVING)) |
		usb_hc_died(xhci_to_hcd(xhci));
1297 | } |
1298 | |
1299 | static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci, |
1300 | struct xhci_virt_device *dev, |
1301 | struct xhci_ring *ep_ring, |
1302 | unsigned int ep_index) |
1303 | { |
1304 | union xhci_trb *dequeue_temp; |
1305 | |
1306 | dequeue_temp = ep_ring->dequeue; |
1307 | |
1308 | /* If we get two back-to-back stalls, and the first stalled transfer |
1309 | * ends just before a link TRB, the dequeue pointer will be left on |
1310 | * the link TRB by the code in the while loop. So we have to update |
1311 | * the dequeue pointer one segment further, or we'll jump off |
1312 | * the segment into la-la-land. |
1313 | */ |
	if (trb_is_link(ep_ring->dequeue)) {
1315 | ep_ring->deq_seg = ep_ring->deq_seg->next; |
1316 | ep_ring->dequeue = ep_ring->deq_seg->trbs; |
1317 | } |
1318 | |
1319 | while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) { |
1320 | /* We have more usable TRBs */ |
1321 | ep_ring->dequeue++; |
		if (trb_is_link(ep_ring->dequeue)) {
1323 | if (ep_ring->dequeue == |
1324 | dev->eps[ep_index].queued_deq_ptr) |
1325 | break; |
1326 | ep_ring->deq_seg = ep_ring->deq_seg->next; |
1327 | ep_ring->dequeue = ep_ring->deq_seg->trbs; |
1328 | } |
1329 | if (ep_ring->dequeue == dequeue_temp) { |
1330 | xhci_dbg(xhci, "Unable to find new dequeue pointer\n"); |
1331 | break; |
1332 | } |
1333 | } |
1334 | } |
1335 | |
1336 | /* |
1337 | * When we get a completion for a Set Transfer Ring Dequeue Pointer command, |
1338 | * we need to clear the set deq pending flag in the endpoint ring state, so that |
1339 | * the TD queueing code can ring the doorbell again. We also need to ring the |
1340 | * endpoint doorbell to restart the ring, but only if there aren't more |
1341 | * cancellations pending. |
1342 | */ |
1343 | static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, |
1344 | union xhci_trb *trb, u32 cmd_comp_code) |
1345 | { |
1346 | unsigned int ep_index; |
1347 | unsigned int stream_id; |
1348 | struct xhci_ring *ep_ring; |
1349 | struct xhci_virt_ep *ep; |
1350 | struct xhci_ep_ctx *ep_ctx; |
1351 | struct xhci_slot_ctx *slot_ctx; |
1352 | struct xhci_td *td, *tmp_td; |
1353 | |
1354 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
1355 | stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); |
1356 | ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
1357 | if (!ep) |
1358 | return; |
1359 | |
1360 | ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id); |
1361 | if (!ep_ring) { |
1362 | xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n", |
1363 | stream_id); |
1364 | /* XXX: Harmless??? */ |
1365 | goto cleanup; |
1366 | } |
1367 | |
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
	trace_xhci_handle_cmd_set_deq(slot_ctx);
	trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
1372 | |
1373 | if (cmd_comp_code != COMP_SUCCESS) { |
1374 | unsigned int ep_state; |
1375 | unsigned int slot_state; |
1376 | |
1377 | switch (cmd_comp_code) { |
1378 | case COMP_TRB_ERROR: |
1379 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n"); |
1380 | break; |
1381 | case COMP_CONTEXT_STATE_ERROR: |
1382 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n"); |
1383 | ep_state = GET_EP_CTX_STATE(ep_ctx); |
1384 | slot_state = le32_to_cpu(slot_ctx->dev_state); |
1385 | slot_state = GET_SLOT_STATE(slot_state); |
1386 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_cancel_urb, |
1387 | fmt: "Slot state = %u, EP state = %u", |
1388 | slot_state, ep_state); |
1389 | break; |
1390 | case COMP_SLOT_NOT_ENABLED_ERROR: |
1391 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n", |
1392 | slot_id); |
1393 | break; |
1394 | default: |
1395 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n", |
1396 | cmd_comp_code); |
1397 | break; |
1398 | } |
1399 | /* OK what do we do now? The endpoint state is hosed, and we |
1400 | * should never get to this point if the synchronization between |
1401 | * queueing, and endpoint state are correct. This might happen |
1402 | * if the device gets disconnected after we've finished |
1403 | * cancelling URBs, which might not be an error... |
1404 | */ |
1405 | } else { |
1406 | u64 deq; |
1407 | /* 4.6.10 deq ptr is written to the stream ctx for streams */ |
1408 | if (ep->ep_state & EP_HAS_STREAMS) { |
1409 | struct xhci_stream_ctx *ctx = |
1410 | &ep->stream_info->stream_ctx_array[stream_id]; |
1411 | deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK; |
1412 | } else { |
1413 | deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; |
1414 | } |
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
1419 | /* Update the ring's dequeue segment and dequeue pointer |
1420 | * to reflect the new position. |
1421 | */ |
			update_ring_for_set_deq_completion(xhci, ep->vdev,
							   ep_ring, ep_index);
1424 | } else { |
1425 | xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n"); |
1426 | xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", |
1427 | ep->queued_deq_seg, ep->queued_deq_ptr); |
1428 | } |
1429 | } |
1430 | /* HW cached TDs cleared from cache, give them back */ |
1431 | list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, |
1432 | cancelled_td_list) { |
		ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
1434 | if (td->cancel_status == TD_CLEARING_CACHE) { |
1435 | td->cancel_status = TD_CLEARED; |
1436 | xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", |
1437 | __func__, td->urb); |
			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
1439 | } else { |
1440 | xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", |
1441 | __func__, td->urb, td->cancel_status); |
1442 | } |
1443 | } |
1444 | cleanup: |
1445 | ep->ep_state &= ~SET_DEQ_PENDING; |
1446 | ep->queued_deq_seg = NULL; |
1447 | ep->queued_deq_ptr = NULL; |
1448 | /* Restart any rings with pending URBs */ |
1449 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
1450 | } |
1451 | |
1452 | static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, |
1453 | union xhci_trb *trb, u32 cmd_comp_code) |
1454 | { |
1455 | struct xhci_virt_ep *ep; |
1456 | struct xhci_ep_ctx *ep_ctx; |
1457 | unsigned int ep_index; |
1458 | |
1459 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
1460 | ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
1461 | if (!ep) |
1462 | return; |
1463 | |
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
	trace_xhci_handle_cmd_reset_ep(ep_ctx);
1466 | |
1467 | /* This command will only fail if the endpoint wasn't halted, |
1468 | * but we don't care. |
1469 | */ |
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		       "Ignoring reset ep completion code of %u", cmd_comp_code);
1472 | |
1473 | /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */ |
1474 | xhci_invalidate_cancelled_tds(ep); |
1475 | |
1476 | /* Clear our internal halted state */ |
1477 | ep->ep_state &= ~EP_HALTED; |
1478 | |
1479 | xhci_giveback_invalidated_tds(ep); |
1480 | |
1481 | /* if this was a soft reset, then restart */ |
1482 | if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP) |
1483 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
1484 | } |
1485 | |
1486 | static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, |
1487 | struct xhci_command *command, u32 cmd_comp_code) |
1488 | { |
1489 | if (cmd_comp_code == COMP_SUCCESS) |
1490 | command->slot_id = slot_id; |
1491 | else |
1492 | command->slot_id = 0; |
1493 | } |
1494 | |
1495 | static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) |
1496 | { |
1497 | struct xhci_virt_device *virt_dev; |
1498 | struct xhci_slot_ctx *slot_ctx; |
1499 | |
1500 | virt_dev = xhci->devs[slot_id]; |
1501 | if (!virt_dev) |
1502 | return; |
1503 | |
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_handle_cmd_disable_slot(slot_ctx);
1506 | |
1507 | if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) |
1508 | /* Delete default control endpoint resources */ |
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1510 | } |
1511 | |
1512 | static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, |
1513 | u32 cmd_comp_code) |
1514 | { |
1515 | struct xhci_virt_device *virt_dev; |
1516 | struct xhci_input_control_ctx *ctrl_ctx; |
1517 | struct xhci_ep_ctx *ep_ctx; |
1518 | unsigned int ep_index; |
1519 | u32 add_flags; |
1520 | |
	/*
	 * Configure Endpoint commands can come from the USB core's
	 * configuration or alt setting changes, or from streams being
	 * configured.
	 */
1525 | |
1526 | virt_dev = xhci->devs[slot_id]; |
1527 | if (!virt_dev) |
1528 | return; |
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1530 | if (!ctrl_ctx) { |
1531 | xhci_warn(xhci, "Could not get input context, bad type.\n"); |
1532 | return; |
1533 | } |
1534 | |
1535 | add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
1536 | |
1537 | /* Input ctx add_flags are the endpoint index plus one */ |
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;
1539 | |
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
	trace_xhci_handle_cmd_config_ep(ep_ctx);
1542 | |
1543 | return; |
1544 | } |
1545 | |
1546 | static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id) |
1547 | { |
1548 | struct xhci_virt_device *vdev; |
1549 | struct xhci_slot_ctx *slot_ctx; |
1550 | |
1551 | vdev = xhci->devs[slot_id]; |
1552 | if (!vdev) |
1553 | return; |
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_addr_dev(slot_ctx);
1556 | } |
1557 | |
1558 | static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id) |
1559 | { |
1560 | struct xhci_virt_device *vdev; |
1561 | struct xhci_slot_ctx *slot_ctx; |
1562 | |
1563 | vdev = xhci->devs[slot_id]; |
1564 | if (!vdev) { |
1565 | xhci_warn(xhci, "Reset device command completion for disabled slot %u\n", |
1566 | slot_id); |
1567 | return; |
1568 | } |
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_reset_dev(slot_ctx);
1571 | |
1572 | xhci_dbg(xhci, "Completed reset device command.\n"); |
1573 | } |
1574 | |
1575 | static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, |
1576 | struct xhci_event_cmd *event) |
1577 | { |
1578 | if (!(xhci->quirks & XHCI_NEC_HOST)) { |
1579 | xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n"); |
1580 | return; |
1581 | } |
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		"NEC firmware version %2x.%02x",
1584 | NEC_FW_MAJOR(le32_to_cpu(event->status)), |
1585 | NEC_FW_MINOR(le32_to_cpu(event->status))); |
1586 | } |
1587 | |
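/*
 * Remove a command from the command list and let its issuer know the outcome:
 * if somebody is waiting on the command, record the status and complete it,
 * otherwise the command was fire-and-forget, so just free it.
 */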
1588 | static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status) |
1589 | { |
	list_del(&cmd->cmd_list);
1591 | |
1592 | if (cmd->completion) { |
1593 | cmd->status = status; |
1594 | complete(cmd->completion); |
1595 | } else { |
		kfree(cmd);
1597 | } |
1598 | } |
1599 | |
1600 | void xhci_cleanup_command_queue(struct xhci_hcd *xhci) |
1601 | { |
1602 | struct xhci_command *cur_cmd, *tmp_cmd; |
1603 | xhci->current_cmd = NULL; |
1604 | list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) |
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
1606 | } |
1607 | |
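/*
 * Command timeout handling, run from the cmd_timer delayed work:
 * - if the command already completed (timeout work pending or no current_cmd)
 *   there is nothing to do;
 * - if a Stop Endpoint command timed out, assume the host is dead and halt it;
 * - if the command ring is still running, mark the command aborted and abort
 *   the ring;
 * - otherwise handle the stopped command ring (host removed or ring stopped).
 */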
1608 | void xhci_handle_command_timeout(struct work_struct *work) |
1609 | { |
1610 | struct xhci_hcd *xhci; |
1611 | unsigned long flags; |
1612 | char str[XHCI_MSG_MAX]; |
1613 | u64 hw_ring_state; |
1614 | u32 cmd_field3; |
1615 | u32 usbsts; |
1616 | |
1617 | xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer); |
1618 | |
1619 | spin_lock_irqsave(&xhci->lock, flags); |
1620 | |
1621 | /* |
1622 | * If timeout work is pending, or current_cmd is NULL, it means we |
1623 | * raced with command completion. Command is handled so just return. |
1624 | */ |
1625 | if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) { |
		spin_unlock_irqrestore(&xhci->lock, flags);
1627 | return; |
1628 | } |
1629 | |
1630 | cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]); |
	usbsts = readl(&xhci->op_regs->status);
1632 | xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts)); |
1633 | |
1634 | /* Bail out and tear down xhci if a stop endpoint command failed */ |
1635 | if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) { |
1636 | struct xhci_virt_ep *ep; |
1637 | |
1638 | xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n"); |
1639 | |
1640 | ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3), |
1641 | TRB_TO_EP_INDEX(cmd_field3)); |
1642 | if (ep) |
1643 | ep->ep_state &= ~EP_STOP_CMD_PENDING; |
1644 | |
1645 | xhci_halt(xhci); |
1646 | xhci_hc_died(xhci); |
1647 | goto time_out_completed; |
1648 | } |
1649 | |
1650 | /* mark this command to be cancelled */ |
1651 | xhci->current_cmd->status = COMP_COMMAND_ABORTED; |
1652 | |
1653 | /* Make sure command ring is running before aborting it */ |
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1655 | if (hw_ring_state == ~(u64)0) { |
1656 | xhci_hc_died(xhci); |
1657 | goto time_out_completed; |
1658 | } |
1659 | |
1660 | if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && |
1661 | (hw_ring_state & CMD_RING_RUNNING)) { |
1662 | /* Prevent new doorbell, and start command abort */ |
1663 | xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; |
1664 | xhci_dbg(xhci, "Command timeout\n"); |
1665 | xhci_abort_cmd_ring(xhci, flags); |
1666 | goto time_out_completed; |
1667 | } |
1668 | |
1669 | /* host removed. Bail out */ |
1670 | if (xhci->xhc_state & XHCI_STATE_REMOVING) { |
1671 | xhci_dbg(xhci, "host removed, ring start fail?\n"); |
1672 | xhci_cleanup_command_queue(xhci); |
1673 | |
1674 | goto time_out_completed; |
1675 | } |
1676 | |
1677 | /* command timeout on stopped ring, ring can't be aborted */ |
1678 | xhci_dbg(xhci, "Command timeout on stopped ring\n"); |
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
1680 | |
1681 | time_out_completed: |
	spin_unlock_irqrestore(&xhci->lock, flags);
1683 | return; |
1684 | } |
1685 | |
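/*
 * Process a command completion event: check that it points at the TRB at our
 * command ring dequeue, match it against the first command on cmd_list,
 * dispatch by command type, then advance the command ring dequeue pointer and
 * restart the command timer if more commands are still pending.
 */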
1686 | static void handle_cmd_completion(struct xhci_hcd *xhci, |
1687 | struct xhci_event_cmd *event) |
1688 | { |
1689 | unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); |
1690 | u64 cmd_dma; |
1691 | dma_addr_t cmd_dequeue_dma; |
1692 | u32 cmd_comp_code; |
1693 | union xhci_trb *cmd_trb; |
1694 | struct xhci_command *cmd; |
1695 | u32 cmd_type; |
1696 | |
1697 | if (slot_id >= MAX_HC_SLOTS) { |
1698 | xhci_warn(xhci, "Invalid slot_id %u\n", slot_id); |
1699 | return; |
1700 | } |
1701 | |
1702 | cmd_dma = le64_to_cpu(event->cmd_trb); |
1703 | cmd_trb = xhci->cmd_ring->dequeue; |
1704 | |
	trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
1706 | |
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/*
	 * Check whether the completion event is for our internally kept
	 * command.
	 */
1713 | if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) { |
1714 | xhci_warn(xhci, |
1715 | "ERROR mismatched command completion event\n"); |
1716 | return; |
1717 | } |
1718 | |
1719 | cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list); |
1720 | |
	cancel_delayed_work(&xhci->cmd_timer);
1722 | |
1723 | cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); |
1724 | |
1725 | /* If CMD ring stopped we own the trbs between enqueue and dequeue */ |
1726 | if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { |
1727 | complete_all(&xhci->cmd_ring_stop_completion); |
1728 | return; |
1729 | } |
1730 | |
1731 | if (cmd->command_trb != xhci->cmd_ring->dequeue) { |
1732 | xhci_err(xhci, |
1733 | "Command completion event does not match command\n"); |
1734 | return; |
1735 | } |
1736 | |
1737 | /* |
1738 | * Host aborted the command ring, check if the current command was |
1739 | * supposed to be aborted, otherwise continue normally. |
1740 | * The command ring is stopped now, but the xHC will issue a Command |
1741 | * Ring Stopped event which will cause us to restart it. |
1742 | */ |
1743 | if (cmd_comp_code == COMP_COMMAND_ABORTED) { |
1744 | xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; |
1745 | if (cmd->status == COMP_COMMAND_ABORTED) { |
1746 | if (xhci->current_cmd == cmd) |
1747 | xhci->current_cmd = NULL; |
1748 | goto event_handled; |
1749 | } |
1750 | } |
1751 | |
1752 | cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); |
1753 | switch (cmd_type) { |
1754 | case TRB_ENABLE_SLOT: |
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
1756 | break; |
1757 | case TRB_DISABLE_SLOT: |
1758 | xhci_handle_cmd_disable_slot(xhci, slot_id); |
1759 | break; |
1760 | case TRB_CONFIG_EP: |
1761 | if (!cmd->completion) |
1762 | xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code); |
1763 | break; |
1764 | case TRB_EVAL_CONTEXT: |
1765 | break; |
1766 | case TRB_ADDR_DEV: |
1767 | xhci_handle_cmd_addr_dev(xhci, slot_id); |
1768 | break; |
1769 | case TRB_STOP_RING: |
1770 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
1771 | le32_to_cpu(cmd_trb->generic.field[3]))); |
1772 | if (!cmd->completion) |
			xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
						cmd_comp_code);
1775 | break; |
1776 | case TRB_SET_DEQ: |
1777 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
1778 | le32_to_cpu(cmd_trb->generic.field[3]))); |
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1780 | break; |
1781 | case TRB_CMD_NOOP: |
1782 | /* Is this an aborted command turned to NO-OP? */ |
1783 | if (cmd->status == COMP_COMMAND_RING_STOPPED) |
1784 | cmd_comp_code = COMP_COMMAND_RING_STOPPED; |
1785 | break; |
1786 | case TRB_RESET_EP: |
1787 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
1788 | le32_to_cpu(cmd_trb->generic.field[3]))); |
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1790 | break; |
1791 | case TRB_RESET_DEV: |
1792 | /* SLOT_ID field in reset device cmd completion event TRB is 0. |
1793 | * Use the SLOT_ID from the command TRB instead (xhci 4.6.11) |
1794 | */ |
1795 | slot_id = TRB_TO_SLOT_ID( |
1796 | le32_to_cpu(cmd_trb->generic.field[3])); |
1797 | xhci_handle_cmd_reset_dev(xhci, slot_id); |
1798 | break; |
1799 | case TRB_NEC_GET_FW: |
1800 | xhci_handle_cmd_nec_get_fw(xhci, event); |
1801 | break; |
1802 | default: |
1803 | /* Skip over unknown commands on the event ring */ |
1804 | xhci_info(xhci, "INFO unknown command type %d\n", cmd_type); |
1805 | break; |
1806 | } |
1807 | |
1808 | /* restart timer if this wasn't the last command */ |
	if (!list_is_singular(&xhci->cmd_list)) {
1810 | xhci->current_cmd = list_first_entry(&cmd->cmd_list, |
1811 | struct xhci_command, cmd_list); |
1812 | xhci_mod_cmd_timer(xhci); |
1813 | } else if (xhci->current_cmd == cmd) { |
1814 | xhci->current_cmd = NULL; |
1815 | } |
1816 | |
1817 | event_handled: |
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
1819 | |
	inc_deq(xhci, xhci->cmd_ring);
1821 | } |
1822 | |
1823 | static void handle_vendor_event(struct xhci_hcd *xhci, |
1824 | union xhci_trb *event, u32 trb_type) |
1825 | { |
1826 | xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); |
1827 | if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) |
		handle_cmd_completion(xhci, &event->event_cmd);
1829 | } |
1830 | |
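/*
 * Device Notification events come from the device itself, e.g. a Function
 * Wake notification from a USB 3.x device signalling remote wakeup. Pass the
 * wakeup up to the hub core for the device's parent port.
 */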
1831 | static void handle_device_notification(struct xhci_hcd *xhci, |
1832 | union xhci_trb *event) |
1833 | { |
1834 | u32 slot_id; |
1835 | struct usb_device *udev; |
1836 | |
1837 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3])); |
1838 | if (!xhci->devs[slot_id]) { |
1839 | xhci_warn(xhci, "Device Notification event for " |
1840 | "unused slot %u\n", slot_id); |
1841 | return; |
1842 | } |
1843 | |
1844 | xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n", |
1845 | slot_id); |
1846 | udev = xhci->devs[slot_id]->udev; |
1847 | if (udev && udev->parent) |
		usb_wakeup_notification(udev->parent, udev->portnum);
1849 | } |
1850 | |
/*
 * Quirk handler for errata seen on the Cavium ThunderX2 processor xHCI
 * controller.
 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1 if a
 * connection to a USB 1 device is followed by another connection to a
 * USB 2 device.
 *
 * Reset the PHY after the USB device is disconnected if device speed
 * is less than HCD_USB3.
 * Retry the reset sequence a maximum of 4 times, checking the PLL lock status.
 */
1863 | static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci) |
1864 | { |
1865 | struct usb_hcd *hcd = xhci_to_hcd(xhci); |
1866 | u32 pll_lock_check; |
1867 | u32 retry_count = 4; |
1868 | |
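	/*
	 * Toggle the vendor-specific PHY reset and poll the PLL lock status
	 * bit (bit 0), retrying the sequence up to 4 times.
	 */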
1869 | do { |
1870 | /* Assert PHY reset */ |
		writel(0x6F, hcd->regs + 0x1048);
		udelay(10);
		/* De-assert the PHY reset */
		writel(0x7F, hcd->regs + 0x1048);
		udelay(200);
		pll_lock_check = readl(hcd->regs + 0x1070);
1877 | } while (!(pll_lock_check & 0x1) && --retry_count); |
1878 | } |
1879 | |
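/*
 * Handle a Port Status Change event: look up the xhci_port for the port ID,
 * read PORTSC, deal with resume signalling (remote wakeup vs. host-initiated
 * resume) and then kick roothub polling unless the event turned out bogus.
 */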
1880 | static void handle_port_status(struct xhci_hcd *xhci, |
1881 | struct xhci_interrupter *ir, |
1882 | union xhci_trb *event) |
1883 | { |
1884 | struct usb_hcd *hcd; |
1885 | u32 port_id; |
1886 | u32 portsc, cmd_reg; |
1887 | int max_ports; |
1888 | unsigned int hcd_portnum; |
1889 | struct xhci_bus_state *bus_state; |
1890 | bool bogus_port_status = false; |
1891 | struct xhci_port *port; |
1892 | |
1893 | /* Port status change events always have a successful completion code */ |
1894 | if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) |
1895 | xhci_warn(xhci, |
1896 | "WARN: xHC returned failed port status event\n"); |
1897 | |
1898 | port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); |
1899 | max_ports = HCS_MAX_PORTS(xhci->hcs_params1); |
1900 | |
1901 | if ((port_id <= 0) || (port_id > max_ports)) { |
1902 | xhci_warn(xhci, "Port change event with invalid port ID %d\n", |
1903 | port_id); |
1904 | return; |
1905 | } |
1906 | |
1907 | port = &xhci->hw_ports[port_id - 1]; |
1908 | if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) { |
1909 | xhci_warn(xhci, "Port change event, no port for port ID %u\n", |
1910 | port_id); |
1911 | bogus_port_status = true; |
1912 | goto cleanup; |
1913 | } |
1914 | |
1915 | /* We might get interrupts after shared_hcd is removed */ |
1916 | if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) { |
1917 | xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n"); |
1918 | bogus_port_status = true; |
1919 | goto cleanup; |
1920 | } |
1921 | |
1922 | hcd = port->rhub->hcd; |
1923 | bus_state = &port->rhub->bus_state; |
1924 | hcd_portnum = port->hcd_portnum; |
	portsc = readl(port->addr);
1926 | |
1927 | xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n", |
1928 | hcd->self.busnum, hcd_portnum + 1, port_id, portsc); |
1929 | |
1930 | trace_xhci_handle_port_status(port, portsc); |
1931 | |
1932 | if (hcd->state == HC_STATE_SUSPENDED) { |
1933 | xhci_dbg(xhci, "resume root hub\n"); |
1934 | usb_hcd_resume_root_hub(hcd); |
1935 | } |
1936 | |
1937 | if (hcd->speed >= HCD_USB3 && |
1938 | (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) { |
1939 | if (port->slot_id && xhci->devs[port->slot_id]) |
1940 | xhci->devs[port->slot_id]->flags |= VDEV_PORT_ERROR; |
1941 | } |
1942 | |
1943 | if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) { |
1944 | xhci_dbg(xhci, "port resume event for port %d\n", port_id); |
1945 | |
		cmd_reg = readl(&xhci->op_regs->command);
1947 | if (!(cmd_reg & CMD_RUN)) { |
1948 | xhci_warn(xhci, "xHC is not running.\n"); |
1949 | goto cleanup; |
1950 | } |
1951 | |
1952 | if (DEV_SUPERSPEED_ANY(portsc)) { |
1953 | xhci_dbg(xhci, "remote wake SS port %d\n", port_id); |
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of a
			 * device-initiated and a host-initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << hcd_portnum;
			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1961 | xhci_set_link_state(xhci, port, XDEV_U0); |
1962 | /* Need to wait until the next link state change |
1963 | * indicates the device is actually in U0. |
1964 | */ |
1965 | bogus_port_status = true; |
1966 | goto cleanup; |
1967 | } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) { |
1968 | xhci_dbg(xhci, "resume HS port %d\n", port_id); |
1969 | port->resume_timestamp = jiffies + |
1970 | msecs_to_jiffies(USB_RESUME_TIMEOUT); |
			set_bit(hcd_portnum, &bus_state->resuming_ports);
			/* Do the rest in GetPortStatus after the resume time
			 * delay. Avoid polling roothub status before that so
			 * that USB device auto-resume latency stays around
			 * ~40ms.
			 */
			set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
			mod_timer(&hcd->rh_timer,
				  port->resume_timestamp);
			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1980 | bogus_port_status = true; |
1981 | } |
1982 | } |
1983 | |
1984 | if ((portsc & PORT_PLC) && |
1985 | DEV_SUPERSPEED_ANY(portsc) && |
1986 | ((portsc & PORT_PLS_MASK) == XDEV_U0 || |
1987 | (portsc & PORT_PLS_MASK) == XDEV_U1 || |
1988 | (portsc & PORT_PLS_MASK) == XDEV_U2)) { |
1989 | xhci_dbg(xhci, "resume SS port %d finished\n", port_id); |
1990 | complete(&port->u3exit_done); |
1991 | /* We've just brought the device into U0/1/2 through either the |
1992 | * Resume state after a device remote wakeup, or through the |
1993 | * U3Exit state after a host-initiated resume. If it's a device |
1994 | * initiated remote wake, don't pass up the link state change, |
1995 | * so the roothub behavior is consistent with external |
1996 | * USB 3.0 hub behavior. |
1997 | */ |
1998 | if (port->slot_id && xhci->devs[port->slot_id]) |
			xhci_ring_device(xhci, port->slot_id);
2000 | if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) { |
2001 | xhci_test_and_clear_bit(xhci, port, PORT_PLC); |
			usb_wakeup_notification(hcd->self.root_hub,
					hcd_portnum + 1);
2004 | bogus_port_status = true; |
2005 | goto cleanup; |
2006 | } |
2007 | } |
2008 | |
2009 | /* |
2010 | * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or |
2011 | * RExit to a disconnect state). If so, let the driver know it's |
2012 | * out of the RExit state. |
2013 | */ |
2014 | if (hcd->speed < HCD_USB3 && port->rexit_active) { |
2015 | complete(&port->rexit_done); |
2016 | port->rexit_active = false; |
2017 | bogus_port_status = true; |
2018 | goto cleanup; |
2019 | } |
2020 | |
2021 | if (hcd->speed < HCD_USB3) { |
2022 | xhci_test_and_clear_bit(xhci, port, PORT_PLC); |
2023 | if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) && |
2024 | (portsc & PORT_CSC) && !(portsc & PORT_CONNECT)) |
2025 | xhci_cavium_reset_phy_quirk(xhci); |
2026 | } |
2027 | |
2028 | cleanup: |
2029 | |
2030 | /* Don't make the USB core poll the roothub if we got a bad port status |
2031 | * change event. Besides, at that point we can't tell which roothub |
2032 | * (USB 2.0 or USB 3.0) to kick. |
2033 | */ |
2034 | if (bogus_port_status) |
2035 | return; |
2036 | |
2037 | /* |
2038 | * xHCI port-status-change events occur when the "or" of all the |
2039 | * status-change bits in the portsc register changes from 0 to 1. |
2040 | * New status changes won't cause an event if any other change |
2041 | * bits are still set. When an event occurs, switch over to |
2042 | * polling to avoid losing status changes. |
2043 | */ |
2044 | xhci_dbg(xhci, "%s: starting usb%d port polling.\n", |
2045 | __func__, hcd->self.busnum); |
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
2051 | } |
2052 | |
/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment. If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment. Otherwise it
 * returns NULL.
 */
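/*
 * The search walks the segment list circularly from start_seg. In each
 * segment it checks whether end_trb lives there, and it handles the case of
 * a single-segment ring where the TD wraps past the link TRB back to the top.
 */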
2059 | struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, |
2060 | struct xhci_segment *start_seg, |
2061 | union xhci_trb *start_trb, |
2062 | union xhci_trb *end_trb, |
2063 | dma_addr_t suspect_dma, |
2064 | bool debug) |
2065 | { |
2066 | dma_addr_t start_dma; |
2067 | dma_addr_t end_seg_dma; |
2068 | dma_addr_t end_trb_dma; |
2069 | struct xhci_segment *cur_seg; |
2070 | |
	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
2072 | cur_seg = start_seg; |
2073 | |
2074 | do { |
2075 | if (start_dma == 0) |
2076 | return NULL; |
2077 | /* We may get an event for a Link TRB in the middle of a TD */ |
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
2082 | |
2083 | if (debug) |
2084 | xhci_warn(xhci, |
2085 | "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n", |
2086 | (unsigned long long)suspect_dma, |
2087 | (unsigned long long)start_dma, |
2088 | (unsigned long long)end_trb_dma, |
2089 | (unsigned long long)cur_seg->dma, |
2090 | (unsigned long long)end_seg_dma); |
2091 | |
2092 | if (end_trb_dma > 0) { |
2093 | /* The end TRB is in this segment, so suspect should be here */ |
2094 | if (start_dma <= end_trb_dma) { |
2095 | if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) |
2096 | return cur_seg; |
2097 | } else { |
2098 | /* Case for one segment with |
2099 | * a TD wrapped around to the top |
2100 | */ |
2101 | if ((suspect_dma >= start_dma && |
2102 | suspect_dma <= end_seg_dma) || |
2103 | (suspect_dma >= cur_seg->dma && |
2104 | suspect_dma <= end_trb_dma)) |
2105 | return cur_seg; |
2106 | } |
2107 | return NULL; |
2108 | } else { |
2109 | /* Might still be somewhere in this segment */ |
2110 | if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) |
2111 | return cur_seg; |
2112 | } |
2113 | cur_seg = cur_seg->next; |
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
2115 | } while (cur_seg != start_seg); |
2116 | |
2117 | return NULL; |
2118 | } |
2119 | |
2120 | static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td, |
2121 | struct xhci_virt_ep *ep) |
2122 | { |
2123 | /* |
2124 | * As part of low/full-speed endpoint-halt processing |
2125 | * we must clear the TT buffer (USB 2.0 specification 11.17.5). |
2126 | */ |
2127 | if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) && |
2128 | (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) && |
2129 | !(ep->ep_state & EP_CLEARING_TT)) { |
2130 | ep->ep_state |= EP_CLEARING_TT; |
2131 | td->urb->ep->hcpriv = td->urb->dev; |
		if (usb_hub_clear_tt_buffer(td->urb))
2133 | ep->ep_state &= ~EP_CLEARING_TT; |
2134 | } |
2135 | } |
2136 | |
/* Check if an error has halted the endpoint ring. The class driver will
 * clean up the halt for a non-default control endpoint if we indicate a stall.
 * However, babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
2143 | static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, |
2144 | struct xhci_ep_ctx *ep_ctx, |
2145 | unsigned int trb_comp_code) |
2146 | { |
2147 | /* TRB completion codes that may require a manual halt cleanup */ |
2148 | if (trb_comp_code == COMP_USB_TRANSACTION_ERROR || |
2149 | trb_comp_code == COMP_BABBLE_DETECTED_ERROR || |
2150 | trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR) |
2151 | /* The 0.95 spec says a babbling control endpoint |
2152 | * is not halted. The 0.96 spec says it is. Some HW |
2153 | * claims to be 0.95 compliant, but it halts the control |
2154 | * endpoint anyway. Check if a babble halted the |
2155 | * endpoint. |
2156 | */ |
2157 | if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) |
2158 | return 1; |
2159 | |
2160 | return 0; |
2161 | } |
2162 | |
2163 | int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) |
2164 | { |
2165 | if (trb_comp_code >= 224 && trb_comp_code <= 255) { |
2166 | /* Vendor defined "informational" completion code, |
2167 | * treat as not-an-error. |
2168 | */ |
2169 | xhci_dbg(xhci, "Vendor defined info completion code %u\n", |
2170 | trb_comp_code); |
2171 | xhci_dbg(xhci, "Treating code as success.\n"); |
2172 | return 1; |
2173 | } |
2174 | return 0; |
2175 | } |
2176 | |
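/*
 * Finish off a TD once the xHC is done with it: handle completion codes that
 * leave the endpoint stopped or halted without giving back the TD yet, and
 * for everything else advance the ring dequeue pointer past the TD and give
 * the URB back.
 */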
2177 | static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, |
2178 | struct xhci_ring *ep_ring, struct xhci_td *td, |
2179 | u32 trb_comp_code) |
2180 | { |
2181 | struct xhci_ep_ctx *ep_ctx; |
2182 | |
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2184 | |
2185 | switch (trb_comp_code) { |
2186 | case COMP_STOPPED_LENGTH_INVALID: |
2187 | case COMP_STOPPED_SHORT_PACKET: |
2188 | case COMP_STOPPED: |
2189 | /* |
2190 | * The "Stop Endpoint" completion will take care of any |
2191 | * stopped TDs. A stopped TD may be restarted, so don't update |
2192 | * the ring dequeue pointer or take this TD off any lists yet. |
2193 | */ |
2194 | return 0; |
2195 | case COMP_USB_TRANSACTION_ERROR: |
2196 | case COMP_BABBLE_DETECTED_ERROR: |
2197 | case COMP_SPLIT_TRANSACTION_ERROR: |
		/*
		 * If the endpoint context state is not halted we might be
		 * racing with a reset endpoint command issued by an
		 * unsuccessful stop endpoint completion (context error). In
		 * that case the TD should be on the cancelled list, and the
		 * EP_HALTED flag set.
		 *
		 * Otherwise it's not halted because the 0.95 spec states that
		 * a babbling control endpoint should not halt. The 0.96 spec
		 * again says it should. Some HW claims to be 0.95 compliant,
		 * but it halts the control endpoint anyway.
		 */
2209 | if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) { |
2210 | /* |
2211 | * If EP_HALTED is set and TD is on the cancelled list |
2212 | * the TD and dequeue pointer will be handled by reset |
2213 | * ep command completion |
2214 | */ |
2215 | if ((ep->ep_state & EP_HALTED) && |
			    !list_empty(&td->cancelled_td_list)) {
2217 | xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n", |
2218 | (unsigned long long)xhci_trb_virt_to_dma( |
2219 | td->start_seg, td->first_trb)); |
2220 | return 0; |
2221 | } |
2222 | /* endpoint not halted, don't reset it */ |
2223 | break; |
2224 | } |
2225 | /* Almost same procedure as for STALL_ERROR below */ |
2226 | xhci_clear_hub_tt_buffer(xhci, td, ep); |
		xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
2228 | return 0; |
2229 | case COMP_STALL_ERROR: |
2230 | /* |
2231 | * xhci internal endpoint state will go to a "halt" state for |
2232 | * any stall, including default control pipe protocol stall. |
2233 | * To clear the host side halt we need to issue a reset endpoint |
2234 | * command, followed by a set dequeue command to move past the |
2235 | * TD. |
2236 | * Class drivers clear the device side halt from a functional |
2237 | * stall later. Hub TT buffer should only be cleared for FS/LS |
2238 | * devices behind HS hubs for functional stalls. |
2239 | */ |
2240 | if (ep->ep_index != 0) |
2241 | xhci_clear_hub_tt_buffer(xhci, td, ep); |
2242 | |
		xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
2244 | |
2245 | return 0; /* xhci_handle_halted_endpoint marked td cancelled */ |
2246 | default: |
2247 | break; |
2248 | } |
2249 | |
2250 | /* Update ring dequeue pointer */ |
2251 | ep_ring->dequeue = td->last_trb; |
2252 | ep_ring->deq_seg = td->last_trb_seg; |
	inc_deq(xhci, ep_ring);
2254 | |
	return xhci_td_cleanup(xhci, td, ep_ring, td->status);
2256 | } |
2257 | |
2258 | /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ |
2259 | static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, |
2260 | union xhci_trb *stop_trb) |
2261 | { |
2262 | u32 sum; |
2263 | union xhci_trb *trb = ring->dequeue; |
2264 | struct xhci_segment *seg = ring->deq_seg; |
2265 | |
	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
2267 | if (!trb_is_noop(trb) && !trb_is_link(trb)) |
2268 | sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); |
2269 | } |
2270 | return sum; |
2271 | } |
2272 | |
2273 | /* |
2274 | * Process control tds, update urb status and actual_length. |
2275 | */ |
2276 | static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, |
2277 | struct xhci_ring *ep_ring, struct xhci_td *td, |
2278 | union xhci_trb *ep_trb, struct xhci_transfer_event *event) |
2279 | { |
2280 | struct xhci_ep_ctx *ep_ctx; |
2281 | u32 trb_comp_code; |
2282 | u32 remaining, requested; |
2283 | u32 trb_type; |
2284 | |
2285 | trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3])); |
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2287 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
2288 | requested = td->urb->transfer_buffer_length; |
2289 | remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
2290 | |
2291 | switch (trb_comp_code) { |
2292 | case COMP_SUCCESS: |
2293 | if (trb_type != TRB_STATUS) { |
2294 | xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", |
2295 | (trb_type == TRB_DATA) ? "data": "setup"); |
2296 | td->status = -ESHUTDOWN; |
2297 | break; |
2298 | } |
2299 | td->status = 0; |
2300 | break; |
2301 | case COMP_SHORT_PACKET: |
2302 | td->status = 0; |
2303 | break; |
2304 | case COMP_STOPPED_SHORT_PACKET: |
2305 | if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) |
2306 | td->urb->actual_length = remaining; |
2307 | else |
2308 | xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); |
2309 | goto finish_td; |
2310 | case COMP_STOPPED: |
2311 | switch (trb_type) { |
2312 | case TRB_SETUP: |
2313 | td->urb->actual_length = 0; |
2314 | goto finish_td; |
2315 | case TRB_DATA: |
2316 | case TRB_NORMAL: |
2317 | td->urb->actual_length = requested - remaining; |
2318 | goto finish_td; |
2319 | case TRB_STATUS: |
2320 | td->urb->actual_length = requested; |
2321 | goto finish_td; |
2322 | default: |
2323 | xhci_warn(xhci, "WARN: unexpected TRB Type %d\n", |
2324 | trb_type); |
2325 | goto finish_td; |
2326 | } |
2327 | case COMP_STOPPED_LENGTH_INVALID: |
2328 | goto finish_td; |
2329 | default: |
2330 | if (!xhci_requires_manual_halt_cleanup(xhci, |
2331 | ep_ctx, trb_comp_code)) |
2332 | break; |
2333 | xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", |
2334 | trb_comp_code, ep->ep_index); |
2335 | fallthrough; |
2336 | case COMP_STALL_ERROR: |
2337 | /* Did we transfer part of the data (middle) phase? */ |
2338 | if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) |
2339 | td->urb->actual_length = requested - remaining; |
2340 | else if (!td->urb_length_set) |
2341 | td->urb->actual_length = 0; |
2342 | goto finish_td; |
2343 | } |
2344 | |
2345 | /* stopped at setup stage, no data transferred */ |
2346 | if (trb_type == TRB_SETUP) |
2347 | goto finish_td; |
2348 | |
	/*
	 * If we're in the data stage, update the URB's actual_length and flag
	 * it as set, so it won't be overwritten in the event for the last TRB.
	 */
2353 | if (trb_type == TRB_DATA || |
2354 | trb_type == TRB_NORMAL) { |
2355 | td->urb_length_set = true; |
2356 | td->urb->actual_length = requested - remaining; |
2357 | xhci_dbg(xhci, "Waiting for status stage event\n"); |
2358 | return 0; |
2359 | } |
2360 | |
2361 | /* at status stage */ |
2362 | if (!td->urb_length_set) |
2363 | td->urb->actual_length = requested; |
2364 | |
2365 | finish_td: |
2366 | return finish_td(xhci, ep, ep_ring, td, trb_comp_code); |
2367 | } |
2368 | |
2369 | /* |
2370 | * Process isochronous tds, update urb packet status and actual_length. |
2371 | */ |
2372 | static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, |
2373 | struct xhci_ring *ep_ring, struct xhci_td *td, |
2374 | union xhci_trb *ep_trb, struct xhci_transfer_event *event) |
2375 | { |
2376 | struct urb_priv *urb_priv; |
2377 | int idx; |
2378 | struct usb_iso_packet_descriptor *frame; |
2379 | u32 trb_comp_code; |
2380 | bool sum_trbs_for_length = false; |
2381 | u32 remaining, requested, ep_trb_len; |
2382 | int short_framestatus; |
2383 | |
2384 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
2385 | urb_priv = td->urb->hcpriv; |
2386 | idx = urb_priv->num_tds_done; |
2387 | frame = &td->urb->iso_frame_desc[idx]; |
2388 | requested = frame->length; |
2389 | remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
2390 | ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); |
2391 | short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? |
2392 | -EREMOTEIO : 0; |
2393 | |
2394 | /* handle completion code */ |
2395 | switch (trb_comp_code) { |
2396 | case COMP_SUCCESS: |
2397 | /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */ |
2398 | if (td->error_mid_td) |
2399 | break; |
2400 | if (remaining) { |
2401 | frame->status = short_framestatus; |
2402 | if (xhci->quirks & XHCI_TRUST_TX_LENGTH) |
2403 | sum_trbs_for_length = true; |
2404 | break; |
2405 | } |
2406 | frame->status = 0; |
2407 | break; |
2408 | case COMP_SHORT_PACKET: |
2409 | frame->status = short_framestatus; |
2410 | sum_trbs_for_length = true; |
2411 | break; |
2412 | case COMP_BANDWIDTH_OVERRUN_ERROR: |
2413 | frame->status = -ECOMM; |
2414 | break; |
2415 | case COMP_BABBLE_DETECTED_ERROR: |
2416 | sum_trbs_for_length = true; |
2417 | fallthrough; |
2418 | case COMP_ISOCH_BUFFER_OVERRUN: |
2419 | frame->status = -EOVERFLOW; |
2420 | if (ep_trb != td->last_trb) |
2421 | td->error_mid_td = true; |
2422 | break; |
2423 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
2424 | case COMP_STALL_ERROR: |
2425 | frame->status = -EPROTO; |
2426 | break; |
2427 | case COMP_USB_TRANSACTION_ERROR: |
2428 | frame->status = -EPROTO; |
2429 | sum_trbs_for_length = true; |
2430 | if (ep_trb != td->last_trb) |
2431 | td->error_mid_td = true; |
2432 | break; |
2433 | case COMP_STOPPED: |
2434 | sum_trbs_for_length = true; |
2435 | break; |
2436 | case COMP_STOPPED_SHORT_PACKET: |
		/* field normally containing residue now contains transferred */
2438 | frame->status = short_framestatus; |
2439 | requested = remaining; |
2440 | break; |
2441 | case COMP_STOPPED_LENGTH_INVALID: |
2442 | requested = 0; |
2443 | remaining = 0; |
2444 | break; |
2445 | default: |
2446 | sum_trbs_for_length = true; |
2447 | frame->status = -1; |
2448 | break; |
2449 | } |
2450 | |
2451 | if (td->urb_length_set) |
2452 | goto finish_td; |
2453 | |
2454 | if (sum_trbs_for_length) |
		frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
2456 | ep_trb_len - remaining; |
2457 | else |
2458 | frame->actual_length = requested; |
2459 | |
2460 | td->urb->actual_length += frame->actual_length; |
2461 | |
2462 | finish_td: |
2463 | /* Don't give back TD yet if we encountered an error mid TD */ |
2464 | if (td->error_mid_td && ep_trb != td->last_trb) { |
2465 | xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n"); |
2466 | td->urb_length_set = true; |
2467 | return 0; |
2468 | } |
2469 | |
2470 | return finish_td(xhci, ep, ep_ring, td, trb_comp_code); |
2471 | } |
2472 | |
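/*
 * Give back an isoc TD that the xHC skipped (e.g. after a Missed Service
 * Error): mark the frame as -EXDEV with zero actual length and move the ring
 * dequeue pointer past the TD.
 */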
2473 | static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, |
2474 | struct xhci_virt_ep *ep, int status) |
2475 | { |
2476 | struct urb_priv *urb_priv; |
2477 | struct usb_iso_packet_descriptor *frame; |
2478 | int idx; |
2479 | |
2480 | urb_priv = td->urb->hcpriv; |
2481 | idx = urb_priv->num_tds_done; |
2482 | frame = &td->urb->iso_frame_desc[idx]; |
2483 | |
2484 | /* The transfer is partly done. */ |
2485 | frame->status = -EXDEV; |
2486 | |
2487 | /* calc actual length */ |
2488 | frame->actual_length = 0; |
2489 | |
2490 | /* Update ring dequeue pointer */ |
2491 | ep->ring->dequeue = td->last_trb; |
2492 | ep->ring->deq_seg = td->last_trb_seg; |
	inc_deq(xhci, ep->ring);

	return xhci_td_cleanup(xhci, td, ep->ring, status);
2496 | } |
2497 | |
2498 | /* |
2499 | * Process bulk and interrupt tds, update urb status and actual_length. |
2500 | */ |
2501 | static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, |
2502 | struct xhci_ring *ep_ring, struct xhci_td *td, |
2503 | union xhci_trb *ep_trb, struct xhci_transfer_event *event) |
2504 | { |
2505 | struct xhci_slot_ctx *slot_ctx; |
2506 | u32 trb_comp_code; |
2507 | u32 remaining, requested, ep_trb_len; |
2508 | |
	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
2510 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
2511 | remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
2512 | ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); |
2513 | requested = td->urb->transfer_buffer_length; |
2514 | |
2515 | switch (trb_comp_code) { |
2516 | case COMP_SUCCESS: |
2517 | ep->err_count = 0; |
2518 | /* handle success with untransferred data as short packet */ |
2519 | if (ep_trb != td->last_trb || remaining) { |
2520 | xhci_warn(xhci, "WARN Successful completion on short TX\n"); |
2521 | xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", |
2522 | td->urb->ep->desc.bEndpointAddress, |
2523 | requested, remaining); |
2524 | } |
2525 | td->status = 0; |
2526 | break; |
2527 | case COMP_SHORT_PACKET: |
2528 | xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", |
2529 | td->urb->ep->desc.bEndpointAddress, |
2530 | requested, remaining); |
2531 | td->status = 0; |
2532 | break; |
2533 | case COMP_STOPPED_SHORT_PACKET: |
2534 | td->urb->actual_length = remaining; |
2535 | goto finish_td; |
2536 | case COMP_STOPPED_LENGTH_INVALID: |
2537 | /* stopped on ep trb with invalid length, exclude it */ |
2538 | ep_trb_len = 0; |
2539 | remaining = 0; |
2540 | break; |
2541 | case COMP_USB_TRANSACTION_ERROR: |
2542 | if (xhci->quirks & XHCI_NO_SOFT_RETRY || |
2543 | (ep->err_count++ > MAX_SOFT_RETRY) || |
2544 | le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) |
2545 | break; |
2546 | |
2547 | td->status = 0; |
2548 | |
		xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
2550 | return 0; |
2551 | default: |
2552 | /* do nothing */ |
2553 | break; |
2554 | } |
2555 | |
2556 | if (ep_trb == td->last_trb) |
2557 | td->urb->actual_length = requested - remaining; |
2558 | else |
2559 | td->urb->actual_length = |
			sum_trb_lengths(xhci, ep_ring, ep_trb) +
2561 | ep_trb_len - remaining; |
2562 | finish_td: |
2563 | if (remaining > requested) { |
2564 | xhci_warn(xhci, "bad transfer trb length %d in event trb\n", |
2565 | remaining); |
2566 | td->urb->actual_length = 0; |
2567 | } |
2568 | |
2569 | return finish_td(xhci, ep, ep_ring, td, trb_comp_code); |
2570 | } |
2571 | |
2572 | /* |
2573 | * If this function returns an error condition, it means it got a Transfer |
2574 | * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. |
2575 | * At this point, the host controller is probably hosed and should be reset. |
2576 | */ |
2577 | static int handle_tx_event(struct xhci_hcd *xhci, |
2578 | struct xhci_interrupter *ir, |
2579 | struct xhci_transfer_event *event) |
2580 | { |
2581 | struct xhci_virt_ep *ep; |
2582 | struct xhci_ring *ep_ring; |
2583 | unsigned int slot_id; |
2584 | int ep_index; |
2585 | struct xhci_td *td = NULL; |
2586 | dma_addr_t ep_trb_dma; |
2587 | struct xhci_segment *ep_seg; |
2588 | union xhci_trb *ep_trb; |
2589 | int status = -EINPROGRESS; |
2590 | struct xhci_ep_ctx *ep_ctx; |
2591 | u32 trb_comp_code; |
2592 | int td_num = 0; |
2593 | bool handling_skipped_tds = false; |
2594 | |
2595 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); |
2596 | ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; |
2597 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
2598 | ep_trb_dma = le64_to_cpu(event->buffer); |
2599 | |
2600 | ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
2601 | if (!ep) { |
2602 | xhci_err(xhci, "ERROR Invalid Transfer event\n"); |
2603 | goto err_out; |
2604 | } |
2605 | |
	ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
2608 | |
2609 | if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) { |
2610 | xhci_err(xhci, |
2611 | "ERROR Transfer event for disabled endpoint slot %u ep %u\n", |
2612 | slot_id, ep_index); |
2613 | goto err_out; |
2614 | } |
2615 | |
2616 | /* Some transfer events don't always point to a trb, see xhci 4.17.4 */ |
2617 | if (!ep_ring) { |
2618 | switch (trb_comp_code) { |
2619 | case COMP_STALL_ERROR: |
2620 | case COMP_USB_TRANSACTION_ERROR: |
2621 | case COMP_INVALID_STREAM_TYPE_ERROR: |
2622 | case COMP_INVALID_STREAM_ID_ERROR: |
2623 | xhci_dbg(xhci, "Stream transaction error ep %u no id\n", |
2624 | ep_index); |
2625 | if (ep->err_count++ > MAX_SOFT_RETRY) |
				xhci_handle_halted_endpoint(xhci, ep, NULL,
							    EP_HARD_RESET);
			else
				xhci_handle_halted_endpoint(xhci, ep, NULL,
							    EP_SOFT_RESET);
2631 | goto cleanup; |
2632 | case COMP_RING_UNDERRUN: |
2633 | case COMP_RING_OVERRUN: |
2634 | case COMP_STOPPED_LENGTH_INVALID: |
2635 | goto cleanup; |
2636 | default: |
2637 | xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", |
2638 | slot_id, ep_index); |
2639 | goto err_out; |
2640 | } |
2641 | } |
2642 | |
2643 | /* Count current td numbers if ep->skip is set */ |
2644 | if (ep->skip) |
		td_num += list_count_nodes(&ep_ring->td_list);
2646 | |
2647 | /* Look for common error cases */ |
2648 | switch (trb_comp_code) { |
2649 | /* Skip codes that require special handling depending on |
2650 | * transfer type |
2651 | */ |
2652 | case COMP_SUCCESS: |
2653 | if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) |
2654 | break; |
2655 | if (xhci->quirks & XHCI_TRUST_TX_LENGTH || |
2656 | ep_ring->last_td_was_short) |
2657 | trb_comp_code = COMP_SHORT_PACKET; |
2658 | else |
2659 | xhci_warn_ratelimited(xhci, |
2660 | "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n", |
2661 | slot_id, ep_index); |
2662 | break; |
2663 | case COMP_SHORT_PACKET: |
2664 | break; |
2665 | /* Completion codes for endpoint stopped state */ |
2666 | case COMP_STOPPED: |
2667 | xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n", |
2668 | slot_id, ep_index); |
2669 | break; |
2670 | case COMP_STOPPED_LENGTH_INVALID: |
2671 | xhci_dbg(xhci, |
2672 | "Stopped on No-op or Link TRB for slot %u ep %u\n", |
2673 | slot_id, ep_index); |
2674 | break; |
2675 | case COMP_STOPPED_SHORT_PACKET: |
2676 | xhci_dbg(xhci, |
2677 | "Stopped with short packet transfer detected for slot %u ep %u\n", |
2678 | slot_id, ep_index); |
2679 | break; |
2680 | /* Completion codes for endpoint halted state */ |
2681 | case COMP_STALL_ERROR: |
2682 | xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id, |
2683 | ep_index); |
2684 | status = -EPIPE; |
2685 | break; |
2686 | case COMP_SPLIT_TRANSACTION_ERROR: |
2687 | xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n", |
2688 | slot_id, ep_index); |
2689 | status = -EPROTO; |
2690 | break; |
2691 | case COMP_USB_TRANSACTION_ERROR: |
2692 | xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n", |
2693 | slot_id, ep_index); |
2694 | status = -EPROTO; |
2695 | break; |
2696 | case COMP_BABBLE_DETECTED_ERROR: |
2697 | xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n", |
2698 | slot_id, ep_index); |
2699 | status = -EOVERFLOW; |
2700 | break; |
2701 | /* Completion codes for endpoint error state */ |
2702 | case COMP_TRB_ERROR: |
2703 | xhci_warn(xhci, |
2704 | "WARN: TRB error for slot %u ep %u on endpoint\n", |
2705 | slot_id, ep_index); |
2706 | status = -EILSEQ; |
2707 | break; |
2708 | /* completion codes not indicating endpoint state change */ |
2709 | case COMP_DATA_BUFFER_ERROR: |
2710 | xhci_warn(xhci, |
2711 | "WARN: HC couldn't access mem fast enough for slot %u ep %u\n", |
2712 | slot_id, ep_index); |
2713 | status = -ENOSR; |
2714 | break; |
2715 | case COMP_BANDWIDTH_OVERRUN_ERROR: |
2716 | xhci_warn(xhci, |
2717 | "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n", |
2718 | slot_id, ep_index); |
2719 | break; |
2720 | case COMP_ISOCH_BUFFER_OVERRUN: |
2721 | xhci_warn(xhci, |
2722 | "WARN: buffer overrun event for slot %u ep %u on endpoint", |
2723 | slot_id, ep_index); |
2724 | break; |
2725 | case COMP_RING_UNDERRUN: |
2726 | /* |
2727 | * When the Isoch ring is empty, the xHC will generate |
2728 | * a Ring Overrun Event for IN Isoch endpoint or Ring |
2729 | * Underrun Event for OUT Isoch endpoint. |
2730 | */ |
2731 | xhci_dbg(xhci, "underrun event on endpoint\n"); |
		if (!list_empty(&ep_ring->td_list))
2733 | xhci_dbg(xhci, "Underrun Event for slot %d ep %d " |
2734 | "still with TDs queued?\n", |
2735 | TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), |
2736 | ep_index); |
2737 | goto cleanup; |
2738 | case COMP_RING_OVERRUN: |
2739 | xhci_dbg(xhci, "overrun event on endpoint\n"); |
		if (!list_empty(&ep_ring->td_list))
2741 | xhci_dbg(xhci, "Overrun Event for slot %d ep %d " |
2742 | "still with TDs queued?\n", |
2743 | TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), |
2744 | ep_index); |
2745 | goto cleanup; |
2746 | case COMP_MISSED_SERVICE_ERROR: |
		/*
		 * When a Missed Service Error is encountered, one or more
		 * isoc TDs may have been missed by the xHC.
		 * Set the skip flag of the ep_ring; complete the missed TDs
		 * as short transfers when the ep_ring is processed next time.
		 */
2753 | ep->skip = true; |
2754 | xhci_dbg(xhci, |
2755 | "Miss service interval error for slot %u ep %u, set skip flag\n", |
2756 | slot_id, ep_index); |
2757 | goto cleanup; |
2758 | case COMP_NO_PING_RESPONSE_ERROR: |
2759 | ep->skip = true; |
2760 | xhci_dbg(xhci, |
2761 | "No Ping response error for slot %u ep %u, Skip one Isoc TD\n", |
2762 | slot_id, ep_index); |
2763 | goto cleanup; |
2764 | |
2765 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
2766 | /* needs disable slot command to recover */ |
2767 | xhci_warn(xhci, |
2768 | "WARN: detect an incompatible device for slot %u ep %u", |
2769 | slot_id, ep_index); |
2770 | status = -EPROTO; |
2771 | break; |
2772 | default: |
2773 | if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { |
2774 | status = 0; |
2775 | break; |
2776 | } |
2777 | xhci_warn(xhci, |
2778 | "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n", |
2779 | trb_comp_code, slot_id, ep_index); |
2780 | goto cleanup; |
2781 | } |
2782 | |
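	/*
	 * Walk the TDs at the head of this ring until we find the TD that
	 * contains the event's TRB; while ep->skip is set, any isoc TDs the
	 * xHC missed are completed as skipped TDs along the way.
	 */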
2783 | do { |
2784 | /* This TRB should be in the TD at the head of this ring's |
2785 | * TD list. |
2786 | */ |
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * Don't print warnings if this is due to a stopped
			 * endpoint generating an extra completion event when
			 * the device was suspended, or an event for the last
			 * TRB of a short TD we already got a short event for.
			 * The short TD is already removed from the TD list.
			 */
2795 | |
2796 | if (!(trb_comp_code == COMP_STOPPED || |
2797 | trb_comp_code == COMP_STOPPED_LENGTH_INVALID || |
2798 | ep_ring->last_td_was_short)) { |
2799 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", |
2800 | TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), |
2801 | ep_index); |
2802 | } |
2803 | if (ep->skip) { |
2804 | ep->skip = false; |
2805 | xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n", |
2806 | slot_id, ep_index); |
2807 | } |
2808 | if (trb_comp_code == COMP_STALL_ERROR || |
2809 | xhci_requires_manual_halt_cleanup(xhci, ep_ctx, |
2810 | trb_comp_code)) { |
				xhci_handle_halted_endpoint(xhci, ep, NULL,
							    EP_HARD_RESET);
2813 | } |
2814 | goto cleanup; |
2815 | } |
2816 | |
2817 | /* We've skipped all the TDs on the ep ring when ep->skip set */ |
2818 | if (ep->skip && td_num == 0) { |
2819 | ep->skip = false; |
2820 | xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n", |
2821 | slot_id, ep_index); |
2822 | goto cleanup; |
2823 | } |
2824 | |
2825 | td = list_first_entry(&ep_ring->td_list, struct xhci_td, |
2826 | td_list); |
2827 | if (ep->skip) |
2828 | td_num--; |
2829 | |
2830 | /* Is this a TRB in the currently executing TD? */ |
		ep_seg = trb_in_td(xhci, td->start_seg, td->first_trb,
				   td->last_trb, ep_trb_dma, false);
2833 | |
		/*
		 * Skip the Force Stopped Event. The event_trb(event_dma) of
		 * FSE is not in the current TD pointed to by ep_ring->dequeue
		 * because the hardware dequeue pointer is still at the
		 * previous TRB of the current TD. The previous TRB may be a
		 * Link TRB or the last TRB of the previous TD. The command
		 * completion handler will take care of the rest.
		 */
2842 | if (!ep_seg && (trb_comp_code == COMP_STOPPED || |
2843 | trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { |
2844 | goto cleanup; |
2845 | } |
2846 | |
2847 | if (!ep_seg) { |
2848 | |
			if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2850 | skip_isoc_td(xhci, td, ep, status); |
2851 | goto cleanup; |
2852 | } |
2853 | |
2854 | /* |
2855 | * Some hosts give a spurious success event after a short |
2856 | * transfer. Ignore it. |
2857 | */ |
2858 | if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && |
2859 | ep_ring->last_td_was_short) { |
2860 | ep_ring->last_td_was_short = false; |
2861 | goto cleanup; |
2862 | } |
2863 | |
			/*
			 * xhci 4.10.2 states isoc endpoints should continue
			 * processing the next TD if there was an error mid TD.
			 * So hosts like NEC don't generate an event for the
			 * last isoc TRB even if the IOC flag is set.
			 * xhci 4.9.1 states that if there are errors in
			 * multi-TRB TDs the xHC should generate an error for
			 * that TRB, and if the xHC proceeds to the next TD it
			 * should generate an event for any TRB with the IOC
			 * flag on the way. Other hosts follow this.
			 * So this event might be for the next TD.
			 */
			if (td->error_mid_td &&
			    !list_is_last(&td->td_list, &ep_ring->td_list)) {
				struct xhci_td *td_next = list_next_entry(td, td_list);

				ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb,
						   td_next->last_trb, ep_trb_dma, false);
2881 | if (ep_seg) { |
2882 | /* give back previous TD, start handling new */ |
2883 | xhci_dbg(xhci, "Missing TD completion event after mid TD error\n"); |
2884 | ep_ring->dequeue = td->last_trb; |
2885 | ep_ring->deq_seg = td->last_trb_seg; |
					inc_deq(xhci, ep_ring);
					xhci_td_cleanup(xhci, td, ep_ring, td->status);
2888 | td = td_next; |
2889 | } |
2890 | } |
2891 | |
2892 | if (!ep_seg) { |
2893 | /* HC is busted, give up! */ |
2894 | xhci_err(xhci, |
2895 | "ERROR Transfer event TRB DMA ptr not " |
2896 | "part of current TD ep_index %d " |
2897 | "comp_code %u\n", ep_index, |
2898 | trb_comp_code); |
				trb_in_td(xhci, td->start_seg, td->first_trb,
					  td->last_trb, ep_trb_dma, true);
2901 | return -ESHUTDOWN; |
2902 | } |
2903 | } |
2904 | if (trb_comp_code == COMP_SHORT_PACKET) |
2905 | ep_ring->last_td_was_short = true; |
2906 | else |
2907 | ep_ring->last_td_was_short = false; |
2908 | |
2909 | if (ep->skip) { |
2910 | xhci_dbg(xhci, |
2911 | "Found td. Clear skip flag for slot %u ep %u.\n", |
2912 | slot_id, ep_index); |
2913 | ep->skip = false; |
2914 | } |
2915 | |
2916 | ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / |
2917 | sizeof(*ep_trb)]; |
2918 | |
		trace_xhci_handle_transfer(ep_ring,
				(struct xhci_generic_trb *) ep_trb);
2921 | |
2922 | /* |
2923 | * No-op TRB could trigger interrupts in a case where |
2924 | * a URB was killed and a STALL_ERROR happens right |
2925 | * after the endpoint ring stopped. Reset the halted |
2926 | * endpoint. Otherwise, the endpoint remains stalled |
2927 | * indefinitely. |
2928 | */ |
2929 | |
		if (trb_is_noop(ep_trb)) {
2931 | if (trb_comp_code == COMP_STALL_ERROR || |
2932 | xhci_requires_manual_halt_cleanup(xhci, ep_ctx, |
2933 | trb_comp_code)) |
				xhci_handle_halted_endpoint(xhci, ep, td,
							    EP_HARD_RESET);
2936 | goto cleanup; |
2937 | } |
2938 | |
2939 | td->status = status; |
2940 | |
2941 | /* update the urb's actual_length and give back to the core */ |
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2945 | process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event); |
2946 | else |
2947 | process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event); |
2948 | cleanup: |
2949 | handling_skipped_tds = ep->skip && |
2950 | trb_comp_code != COMP_MISSED_SERVICE_ERROR && |
2951 | trb_comp_code != COMP_NO_PING_RESPONSE_ERROR; |
2952 | |
		/*
		 * If ep->skip is set, it means there are missed TDs on the
		 * endpoint ring that need to be taken care of.
		 * Process them as short transfers until we reach the TD
		 * pointed to by the event.
		 */
2959 | } while (handling_skipped_tds); |
2960 | |
2961 | return 0; |
2962 | |
2963 | err_out: |
2964 | xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", |
2965 | (unsigned long long) xhci_trb_virt_to_dma( |
2966 | ir->event_ring->deq_seg, |
2967 | ir->event_ring->dequeue), |
2968 | lower_32_bits(le64_to_cpu(event->buffer)), |
2969 | upper_32_bits(le64_to_cpu(event->buffer)), |
2970 | le32_to_cpu(event->transfer_len), |
2971 | le32_to_cpu(event->flags)); |
2972 | return -ENODEV; |
2973 | } |
2974 | |
2975 | /* |
2976 | * This function handles one OS-owned event on the event ring. It may drop |
2977 | * xhci->lock between event processing (e.g. to pass up port status changes). |
2978 | */ |
2979 | static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter *ir, |
2980 | union xhci_trb *event) |
2981 | { |
2982 | u32 trb_type; |
2983 | |
	trace_xhci_handle_event(ir->event_ring, &event->generic);
2985 | |
2986 | /* |
2987 | * Barrier between reading the TRB_CYCLE (valid) flag before, and any |
2988 | * speculative reads of the event's flags/data below. |
2989 | */ |
2990 | rmb(); |
2991 | trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags)); |
2992 | /* FIXME: Handle more event types. */ |
2993 | |
2994 | switch (trb_type) { |
2995 | case TRB_COMPLETION: |
		handle_cmd_completion(xhci, &event->event_cmd);
2997 | break; |
2998 | case TRB_PORT_STATUS: |
2999 | handle_port_status(xhci, ir, event); |
3000 | break; |
3001 | case TRB_TRANSFER: |
3002 | handle_tx_event(xhci, ir, &event->trans_event); |
3003 | break; |
3004 | case TRB_DEV_NOTE: |
3005 | handle_device_notification(xhci, event); |
3006 | break; |
3007 | default: |
3008 | if (trb_type >= TRB_VENDOR_DEFINED_LOW) |
3009 | handle_vendor_event(xhci, event, trb_type); |
3010 | else |
3011 | xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type); |
3012 | } |
3013 | /* Any of the above functions may drop and re-acquire the lock, so check |
3014 | * to make sure a watchdog timer didn't mark the host as non-responsive. |
3015 | */ |
3016 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
3017 | xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n"); |
3018 | return -ENODEV; |
3019 | } |
3020 | |
3021 | return 0; |
3022 | } |
3023 | |
3024 | /* |
3025 | * Update Event Ring Dequeue Pointer: |
3026 | * - When all events have finished |
3027 | * - To avoid "Event Ring Full Error" condition |
3028 | */ |
3029 | static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, |
3030 | struct xhci_interrupter *ir, |
3031 | bool clear_ehb) |
3032 | { |
3033 | u64 temp_64; |
3034 | dma_addr_t deq; |
3035 | |
3036 | temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); |
3037 | deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg, |
3038 | ir->event_ring->dequeue); |
3039 | if (deq == 0) |
3040 | xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n"); |
3041 | /* |
3042 | * Per 4.9.4, Software writes to the ERDP register shall always advance |
3043 | * the Event Ring Dequeue Pointer value. |
3044 | */ |
3045 | if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK) && !clear_ehb) |
3046 | return; |
3047 | |
3048 | /* Update HC event ring dequeue pointer */ |
3049 | temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK; |
3050 | temp_64 |= deq & ERST_PTR_MASK; |
3051 | |
3052 | /* Clear the event handler busy flag (RW1C) */ |
3053 | if (clear_ehb) |
3054 | temp_64 |= ERST_EHB; |
3055 | xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue); |
3056 | } |
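The ERDP write above packs three things into one 64-bit register: the ERST segment index in the low bits, the dequeue pointer in the upper bits, and the write-1-to-clear EHB flag. Below is a minimal user-space sketch of that composition; the mask values are assumptions chosen to mirror the driver's ERST_DESI_MASK/ERST_PTR_MASK/ERST_EHB layout, not copies of xhci.h.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: bits 2:0 = ERST segment index (DESI), bit 3 = Event
 * Handler Busy (RW1C), bits 63:4 = dequeue pointer.
 */
#define ERST_DESI_MASK	0x7ULL
#define ERST_EHB	(1ULL << 3)
#define ERST_PTR_MASK	(~0xfULL)

static uint64_t compose_erdp(uint64_t deq_dma, unsigned int seg_num, int clear_ehb)
{
	uint64_t val = seg_num & ERST_DESI_MASK;

	val |= deq_dma & ERST_PTR_MASK;
	if (clear_ehb)		/* writing 1 clears the busy flag */
		val |= ERST_EHB;
	return val;
}

int main(void)
{
	printf("ERDP = %#llx\n",
	       (unsigned long long)compose_erdp(0x12345670ULL, 1, 1));
	return 0;
}
```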
3057 | |
3058 | /* Clear the interrupt pending bit for a specific interrupter. */ |
3059 | static void xhci_clear_interrupt_pending(struct xhci_hcd *xhci, |
3060 | struct xhci_interrupter *ir) |
3061 | { |
3062 | if (!ir->ip_autoclear) { |
3063 | u32 irq_pending; |
3064 | |
3065 | irq_pending = readl(&ir->ir_set->irq_pending); |
3066 | irq_pending |= IMAN_IP; |
3067 | writel(irq_pending, &ir->ir_set->irq_pending); |
3068 | } |
3069 | } |
3070 | |
3071 | /* |
3072 | * Handle all OS-owned events on an interrupter event ring. It may drop |
3073 | * and reacquire xhci->lock while processing events. |
3074 | */ |
3075 | static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir) |
3076 | { |
3077 | int event_loop = 0; |
3078 | int err; |
3079 | u64 temp; |
3080 | |
3081 | xhci_clear_interrupt_pending(xhci, ir); |
3082 | |
3083 | /* Event ring hasn't been allocated yet. */ |
3084 | if (!ir->event_ring || !ir->event_ring->dequeue) { |
3085 | xhci_err(xhci, "ERROR interrupter event ring not ready\n"); |
3086 | return -ENOMEM; |
3087 | } |
3088 | |
3089 | if (xhci->xhc_state & XHCI_STATE_DYING || |
3090 | xhci->xhc_state & XHCI_STATE_HALTED) { |
3091 | xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n"); |
3092 | |
3093 | /* Clear the event handler busy flag (RW1C) */ |
3094 | temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); |
3095 | xhci_write_64(xhci, temp | ERST_EHB, &ir->ir_set->erst_dequeue); |
3096 | return -ENODEV; |
3097 | } |
3098 | |
3099 | /* Process all OS owned event TRBs on this event ring */ |
3100 | while (unhandled_event_trb(ir->event_ring)) { |
3101 | err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue); |
3102 | |
3103 | /* |
3104 | * If half a segment of events has been handled in one go, then |
3105 | * update the ERDP and force isoc TRBs to interrupt more often. |
3106 | */ |
3107 | if (event_loop++ > TRBS_PER_SEGMENT / 2) { |
3108 | xhci_update_erst_dequeue(xhci, ir, false); |
3109 | |
3110 | if (ir->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN) |
3111 | ir->isoc_bei_interval = ir->isoc_bei_interval / 2; |
3112 | |
3113 | event_loop = 0; |
3114 | } |
3115 | |
3116 | /* Update SW event ring dequeue pointer */ |
3117 | inc_deq(xhci, ir->event_ring); |
3118 | |
3119 | if (err) |
3120 | break; |
3121 | } |
3122 | |
3123 | xhci_update_erst_dequeue(xhci, ir, true); |
3124 | |
3125 | return 0; |
3126 | } |
3127 | |
3128 | /* |
3129 | * xHCI spec says we can get an interrupt, and if the HC has an error condition, |
3130 | * we might get bad data out of the event ring. Section 4.10.2.7 has a list of |
3131 | * indicators of an event TRB error, but we check the status *first* to be safe. |
3132 | */ |
3133 | irqreturn_t xhci_irq(struct usb_hcd *hcd) |
3134 | { |
3135 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
3136 | irqreturn_t ret = IRQ_HANDLED; |
3137 | u32 status; |
3138 | |
3139 | spin_lock(&xhci->lock); |
3140 | /* Check if the xHC generated the interrupt, or the irq is shared */ |
3141 | status = readl(&xhci->op_regs->status); |
3142 | if (status == ~(u32)0) { |
3143 | xhci_hc_died(xhci); |
3144 | goto out; |
3145 | } |
3146 | |
3147 | if (!(status & STS_EINT)) { |
3148 | ret = IRQ_NONE; |
3149 | goto out; |
3150 | } |
3151 | |
3152 | if (status & STS_HCE) { |
3153 | xhci_warn(xhci, "WARNING: Host Controller Error\n"); |
3154 | goto out; |
3155 | } |
3156 | |
3157 | if (status & STS_FATAL) { |
3158 | xhci_warn(xhci, "WARNING: Host System Error\n"); |
3159 | xhci_halt(xhci); |
3160 | goto out; |
3161 | } |
3162 | |
3163 | /* |
3164 | * Clear the op reg interrupt status first, |
3165 | * so we can receive interrupts from other MSI-X interrupters. |
3166 | * Write 1 to clear the interrupt status. |
3167 | */ |
3168 | status |= STS_EINT; |
3169 | writel(status, &xhci->op_regs->status); |
3170 | |
3171 | /* This is the handler of the primary interrupter */ |
3172 | xhci_handle_events(xhci, xhci->interrupters[0]); |
3173 | out: |
3174 | spin_unlock(&xhci->lock); |
3175 | |
3176 | return ret; |
3177 | } |
3178 | |
3179 | irqreturn_t xhci_msi_irq(int irq, void *hcd) |
3180 | { |
3181 | return xhci_irq(hcd); |
3182 | } |
3183 | EXPORT_SYMBOL_GPL(xhci_msi_irq); |
3184 | |
3185 | /**** Endpoint Ring Operations ****/ |
3186 | |
3187 | /* |
3188 | * Generic function for queueing a TRB on a ring. |
3189 | * The caller must have checked to make sure there's room on the ring. |
3190 | * |
3191 | * @more_trbs_coming: Will you enqueue more TRBs before calling |
3192 | * prepare_transfer()? |
3193 | */ |
3194 | static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, |
3195 | bool more_trbs_coming, |
3196 | u32 field1, u32 field2, u32 field3, u32 field4) |
3197 | { |
3198 | struct xhci_generic_trb *trb; |
3199 | |
3200 | trb = &ring->enqueue->generic; |
3201 | trb->field[0] = cpu_to_le32(field1); |
3202 | trb->field[1] = cpu_to_le32(field2); |
3203 | trb->field[2] = cpu_to_le32(field3); |
3204 | /* make sure TRB is fully written before giving it to the controller */ |
3205 | wmb(); |
3206 | trb->field[3] = cpu_to_le32(field4); |
3207 | |
3208 | trace_xhci_queue_trb(ring, trb); |
3209 | |
3210 | inc_enq(xhci, ring, more_trbs_coming); |
3211 | } |
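The ordering in queue_trb() matters: the TRB payload must be globally visible before the word that carries the cycle (ownership) bit, which is why field[3] is written only after the wmb(). Below is a rough user-space analogue of that producer-side publish, using a C11 release store in place of wmb(); the struct only mimics the generic TRB layout and is an assumption, not the driver's type.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Mimics struct xhci_generic_trb: three payload words plus a control
 * word whose low bit is the cycle (ownership) bit.
 */
struct fake_trb {
	uint32_t field[3];
	_Atomic uint32_t control;
};

static void publish_trb(struct fake_trb *trb, uint32_t f0, uint32_t f1,
			uint32_t f2, uint32_t control_with_cycle)
{
	trb->field[0] = f0;
	trb->field[1] = f1;
	trb->field[2] = f2;
	/* the release store plays the role of wmb() + plain store above */
	atomic_store_explicit(&trb->control, control_with_cycle,
			      memory_order_release);
}

int main(void)
{
	struct fake_trb trb = { 0 };

	publish_trb(&trb, 0xaaaa, 0xbbbb, 0xcccc, 0x1 /* cycle bit set */);
	printf("control = %#x\n", atomic_load(&trb.control));
	return 0;
}
```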
3212 | |
3213 | /* |
3214 | * Does various checks on the endpoint ring and makes it ready to queue num_trbs. |
3215 | * The ring is expanded if it is starting to become full. |
3216 | */ |
3217 | static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, |
3218 | u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) |
3219 | { |
3220 | unsigned int link_trb_count = 0; |
3221 | unsigned int new_segs = 0; |
3222 | |
3223 | /* Make sure the endpoint has been added to xHC schedule */ |
3224 | switch (ep_state) { |
3225 | case EP_STATE_DISABLED: |
3226 | /* |
3227 | * USB core changed config/interfaces without notifying us, |
3228 | * or hardware is reporting the wrong state. |
3229 | */ |
3230 | xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); |
3231 | return -ENOENT; |
3232 | case EP_STATE_ERROR: |
3233 | xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); |
3234 | /* FIXME event handling code for error needs to clear it */ |
3235 | /* XXX not sure if this should be -ENOENT or not */ |
3236 | return -EINVAL; |
3237 | case EP_STATE_HALTED: |
3238 | xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); |
3239 | break; |
3240 | case EP_STATE_STOPPED: |
3241 | case EP_STATE_RUNNING: |
3242 | break; |
3243 | default: |
3244 | xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); |
3245 | /* |
3246 | * FIXME issue Configure Endpoint command to try to get the HC |
3247 | * back into a known state. |
3248 | */ |
3249 | return -EINVAL; |
3250 | } |
3251 | |
3252 | if (ep_ring != xhci->cmd_ring) { |
3253 | new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs); |
3254 | } else if (xhci_num_trbs_free(xhci, ep_ring) <= num_trbs) { |
3255 | xhci_err(xhci, "Do not support expand command ring\n"); |
3256 | return -ENOMEM; |
3257 | } |
3258 | |
3259 | if (new_segs) { |
3260 | xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, |
3261 | "ERROR no room on ep ring, try ring expansion"); |
3262 | if (xhci_ring_expansion(xhci, ep_ring, new_segs, mem_flags)) { |
3263 | xhci_err(xhci, "Ring expansion failed\n"); |
3264 | return -ENOMEM; |
3265 | } |
3266 | } |
3267 | |
3268 | while (trb_is_link(ep_ring->enqueue)) { |
3269 | /* If we're not dealing with 0.95 hardware or isoc rings |
3270 | * on AMD 0.96 host, clear the chain bit. |
3271 | */ |
3272 | if (!xhci_link_trb_quirk(xhci) && |
3273 | !(ep_ring->type == TYPE_ISOC && |
3274 | (xhci->quirks & XHCI_AMD_0x96_HOST))) |
3275 | ep_ring->enqueue->link.control &= |
3276 | cpu_to_le32(~TRB_CHAIN); |
3277 | else |
3278 | ep_ring->enqueue->link.control |= |
3279 | cpu_to_le32(TRB_CHAIN); |
3280 | |
3281 | wmb(); |
3282 | ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); |
3283 | |
3284 | /* Toggle the cycle bit after the last ring segment. */ |
3285 | if (link_trb_toggles_cycle(ep_ring->enqueue)) |
3286 | ep_ring->cycle_state ^= 1; |
3287 | |
3288 | ep_ring->enq_seg = ep_ring->enq_seg->next; |
3289 | ep_ring->enqueue = ep_ring->enq_seg->trbs; |
3290 | |
3291 | /* prevent infinite loop if all first trbs are link trbs */ |
3292 | if (link_trb_count++ > ep_ring->num_segs) { |
3293 | xhci_warn(xhci, "Ring is an endless link TRB loop\n"); |
3294 | return -EINVAL; |
3295 | } |
3296 | } |
3297 | |
3298 | if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) { |
3299 | xhci_warn(xhci, "Missing link TRB at end of ring segment\n"); |
3300 | return -EINVAL; |
3301 | } |
3302 | |
3303 | return 0; |
3304 | } |
3305 | |
3306 | static int prepare_transfer(struct xhci_hcd *xhci, |
3307 | struct xhci_virt_device *xdev, |
3308 | unsigned int ep_index, |
3309 | unsigned int stream_id, |
3310 | unsigned int num_trbs, |
3311 | struct urb *urb, |
3312 | unsigned int td_index, |
3313 | gfp_t mem_flags) |
3314 | { |
3315 | int ret; |
3316 | struct urb_priv *urb_priv; |
3317 | struct xhci_td *td; |
3318 | struct xhci_ring *ep_ring; |
3319 | struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); |
3320 | |
3321 | ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index, |
3322 | stream_id); |
3323 | if (!ep_ring) { |
3324 | xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", |
3325 | stream_id); |
3326 | return -EINVAL; |
3327 | } |
3328 | |
3329 | ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), |
3330 | num_trbs, mem_flags); |
3331 | if (ret) |
3332 | return ret; |
3333 | |
3334 | urb_priv = urb->hcpriv; |
3335 | td = &urb_priv->td[td_index]; |
3336 | |
3337 | INIT_LIST_HEAD(&td->td_list); |
3338 | INIT_LIST_HEAD(&td->cancelled_td_list); |
3339 | |
3340 | if (td_index == 0) { |
3341 | ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); |
3342 | if (unlikely(ret)) |
3343 | return ret; |
3344 | } |
3345 | |
3346 | td->urb = urb; |
3347 | /* Add this TD to the tail of the endpoint ring's TD list */ |
3348 | list_add_tail(&td->td_list, &ep_ring->td_list); |
3349 | td->start_seg = ep_ring->enq_seg; |
3350 | td->first_trb = ep_ring->enqueue; |
3351 | |
3352 | return 0; |
3353 | } |
3354 | |
3355 | unsigned int count_trbs(u64 addr, u64 len) |
3356 | { |
3357 | unsigned int num_trbs; |
3358 | |
3359 | num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)), |
3360 | TRB_MAX_BUFF_SIZE); |
3361 | if (num_trbs == 0) |
3362 | num_trbs++; |
3363 | |
3364 | return num_trbs; |
3365 | } |
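count_trbs() rounds up after adding the buffer's offset into its first 64 KiB block, because a single TRB must not cross a 64 KiB boundary. A small stand-alone example of the same arithmetic, with the macros redefined locally for illustration:

```c
#include <stdint.h>
#include <stdio.h>

#define TRB_MAX_BUFF_SIZE	(1U << 16)	/* 64 KiB per TRB */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int trbs_for(uint64_t addr, uint64_t len)
{
	unsigned int num = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
					TRB_MAX_BUFF_SIZE);
	return num ? num : 1;	/* a zero-length transfer still needs one TRB */
}

int main(void)
{
	/* 96 KiB starting 4 KiB into a block needs two TRBs; the same
	 * buffer starting 60 KiB into a block needs three, since no TRB
	 * may cross a 64 KiB boundary.
	 */
	printf("%u %u\n", trbs_for(0x1000, 96 * 1024),
	       trbs_for(0xf000, 96 * 1024));	/* prints "2 3" */
	return 0;
}
```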
3366 | |
3367 | static inline unsigned int count_trbs_needed(struct urb *urb) |
3368 | { |
3369 | return count_trbs(urb->transfer_dma, urb->transfer_buffer_length); |
3370 | } |
3371 | |
3372 | static unsigned int count_sg_trbs_needed(struct urb *urb) |
3373 | { |
3374 | struct scatterlist *sg; |
3375 | unsigned int i, len, full_len, num_trbs = 0; |
3376 | |
3377 | full_len = urb->transfer_buffer_length; |
3378 | |
3379 | for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { |
3380 | len = sg_dma_len(sg); |
3381 | num_trbs += count_trbs(sg_dma_address(sg), len); |
3382 | len = min_t(unsigned int, len, full_len); |
3383 | full_len -= len; |
3384 | if (full_len == 0) |
3385 | break; |
3386 | } |
3387 | |
3388 | return num_trbs; |
3389 | } |
3390 | |
3391 | static unsigned int count_isoc_trbs_needed(struct urb *urb, int i) |
3392 | { |
3393 | u64 addr, len; |
3394 | |
3395 | addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); |
3396 | len = urb->iso_frame_desc[i].length; |
3397 | |
3398 | return count_trbs(addr, len); |
3399 | } |
3400 | |
3401 | static void check_trb_math(struct urb *urb, int running_total) |
3402 | { |
3403 | if (unlikely(running_total != urb->transfer_buffer_length)) |
3404 | dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " |
3405 | "queued %#x (%d), asked for %#x (%d)\n", |
3406 | __func__, |
3407 | urb->ep->desc.bEndpointAddress, |
3408 | running_total, running_total, |
3409 | urb->transfer_buffer_length, |
3410 | urb->transfer_buffer_length); |
3411 | } |
3412 | |
3413 | static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, |
3414 | unsigned int ep_index, unsigned int stream_id, int start_cycle, |
3415 | struct xhci_generic_trb *start_trb) |
3416 | { |
3417 | /* |
3418 | * Pass all the TRBs to the hardware at once and make sure this write |
3419 | * isn't reordered. |
3420 | */ |
3421 | wmb(); |
3422 | if (start_cycle) |
3423 | start_trb->field[3] |= cpu_to_le32(start_cycle); |
3424 | else |
3425 | start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); |
3426 | xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); |
3427 | } |
3428 | |
3429 | static void check_interval(struct xhci_hcd *xhci, struct urb *urb, |
3430 | struct xhci_ep_ctx *ep_ctx) |
3431 | { |
3432 | int xhci_interval; |
3433 | int ep_interval; |
3434 | |
3435 | xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); |
3436 | ep_interval = urb->interval; |
3437 | |
3438 | /* Convert to microframes */ |
3439 | if (urb->dev->speed == USB_SPEED_LOW || |
3440 | urb->dev->speed == USB_SPEED_FULL) |
3441 | ep_interval *= 8; |
3442 | |
3443 | /* FIXME change this to a warning and a suggestion to use the new API |
3444 | * to set the polling interval (once the API is added). |
3445 | */ |
3446 | if (xhci_interval != ep_interval) { |
3447 | dev_dbg_ratelimited(&urb->dev->dev, |
3448 | "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", |
3449 | ep_interval, ep_interval == 1 ? "": "s", |
3450 | xhci_interval, xhci_interval == 1 ? "": "s"); |
3451 | urb->interval = xhci_interval; |
3452 | /* Convert back to frames for LS/FS devices */ |
3453 | if (urb->dev->speed == USB_SPEED_LOW || |
3454 | urb->dev->speed == USB_SPEED_FULL) |
3455 | urb->interval /= 8; |
3456 | } |
3457 | } |
3458 | |
3459 | /* |
3460 | * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt |
3461 | * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD |
3462 | * (comprised of sg list entries) can take several service intervals to |
3463 | * transmit. |
3464 | */ |
3465 | int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
3466 | struct urb *urb, int slot_id, unsigned int ep_index) |
3467 | { |
3468 | struct xhci_ep_ctx *ep_ctx; |
3469 | |
3470 | ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index); |
3471 | check_interval(xhci, urb, ep_ctx); |
3472 | |
3473 | return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); |
3474 | } |
3475 | |
3476 | /* |
3477 | * For xHCI 1.0 host controllers, TD size is the number of max packet sized |
3478 | * packets remaining in the TD (*not* including this TRB). |
3479 | * |
3480 | * Total TD packet count = total_packet_count = |
3481 | * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize) |
3482 | * |
3483 | * Packets transferred up to and including this TRB = packets_transferred = |
3484 | * rounddown(total bytes transferred including this TRB / wMaxPacketSize) |
3485 | * |
3486 | * TD size = total_packet_count - packets_transferred |
3487 | * |
3488 | * For xHCI 0.96 and older, TD size field should be the remaining bytes |
3489 | * including this TRB, right shifted by 10 |
3490 | * |
3491 | * For all hosts it must fit in bits 21:17, so it can't be bigger than 31. |
3492 | * This is taken care of in the TRB_TD_SIZE() macro |
3493 | * |
3494 | * The last TRB in a TD must have the TD size set to zero. |
3495 | */ |
3496 | static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, |
3497 | int trb_buff_len, unsigned int td_total_len, |
3498 | struct urb *urb, bool more_trbs_coming) |
3499 | { |
3500 | u32 maxp, total_packet_count; |
3501 | |
3502 | /* MTK xHCI 0.96 contains some features from 1.0 */ |
3503 | if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) |
3504 | return ((td_total_len - transferred) >> 10); |
3505 | |
3506 | /* One TRB with a zero-length data packet. */ |
3507 | if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) || |
3508 | trb_buff_len == td_total_len) |
3509 | return 0; |
3510 | |
3511 | /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */ |
3512 | if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) |
3513 | trb_buff_len = 0; |
3514 | |
3515 | maxp = usb_endpoint_maxp(&urb->ep->desc); |
3516 | total_packet_count = DIV_ROUND_UP(td_total_len, maxp); |
3517 | |
3518 | /* Queueing functions don't count the current TRB into transferred */ |
3519 | return (total_packet_count - ((transferred + trb_buff_len) / maxp)); |
3520 | } |
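A worked example of the xHCI 1.0+ TD Size rule implemented above, with made-up transfer sizes: the field reports how many wMaxPacketSize packets remain in the TD after the current TRB completes, and the last TRB of a TD always reports zero.

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int maxp = 512;		/* wMaxPacketSize */
	unsigned int td_total_len = 1636;	/* whole TD: 4 packets */
	unsigned int transferred = 0;		/* bytes queued before this TRB */
	unsigned int trb_buff_len = 1024;	/* bytes carried by this TRB */

	unsigned int total_packets = DIV_ROUND_UP(td_total_len, maxp);	 /* 4 */
	unsigned int sent_packets = (transferred + trb_buff_len) / maxp; /* 2 */

	/* TD Size for this TRB: 2 packets still to go after it completes */
	printf("TD Size = %u\n", total_packets - sent_packets);
	return 0;
}
```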
3521 | |
3522 | |
3523 | static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, |
3524 | u32 *trb_buff_len, struct xhci_segment *seg) |
3525 | { |
3526 | struct device *dev = xhci_to_hcd(xhci)->self.sysdev; |
3527 | unsigned int unalign; |
3528 | unsigned int max_pkt; |
3529 | u32 new_buff_len; |
3530 | size_t len; |
3531 | |
3532 | max_pkt = usb_endpoint_maxp(&urb->ep->desc); |
3533 | unalign = (enqd_len + *trb_buff_len) % max_pkt; |
3534 | |
3535 | /* we got lucky, last normal TRB data on segment is packet aligned */ |
3536 | if (unalign == 0) |
3537 | return 0; |
3538 | |
3539 | xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n", |
3540 | unalign, *trb_buff_len); |
3541 | |
3542 | /* is the last normal TRB alignable by splitting it? */ |
3543 | if (*trb_buff_len > unalign) { |
3544 | *trb_buff_len -= unalign; |
3545 | xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len); |
3546 | return 0; |
3547 | } |
3548 | |
3549 | /* |
3550 | * We want enqd_len + trb_buff_len to sum up to a number that is |
3551 | * divisible by the endpoint's wMaxPacketSize. IOW: |
3552 | * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0. |
3553 | */ |
3554 | new_buff_len = max_pkt - (enqd_len % max_pkt); |
3555 | |
3556 | if (new_buff_len > (urb->transfer_buffer_length - enqd_len)) |
3557 | new_buff_len = (urb->transfer_buffer_length - enqd_len); |
3558 | |
3559 | /* create a bounce buffer of at most max_pkt bytes, pointed to by the last TRB */ |
3560 | if (usb_urb_dir_out(urb)) { |
3561 | if (urb->num_sgs) { |
3562 | len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, |
3563 | seg->bounce_buf, new_buff_len, enqd_len); |
3564 | if (len != new_buff_len) |
3565 | xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", |
3566 | len, new_buff_len); |
3567 | } else { |
3568 | memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); |
3569 | } |
3570 | |
3571 | seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, |
3572 | max_pkt, DMA_TO_DEVICE); |
3573 | } else { |
3574 | seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, |
3575 | max_pkt, DMA_FROM_DEVICE); |
3576 | } |
3577 | |
3578 | if (dma_mapping_error(dev, seg->bounce_dma)) { |
3579 | /* try without aligning. Some host controllers survive */ |
3580 | xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n"); |
3581 | return 0; |
3582 | } |
3583 | *trb_buff_len = new_buff_len; |
3584 | seg->bounce_len = new_buff_len; |
3585 | seg->bounce_offs = enqd_len; |
3586 | |
3587 | xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len); |
3588 | |
3589 | return 1; |
3590 | } |
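To see the alignment decision in xhci_align_td() with concrete numbers, here is a stand-alone sketch (the byte counts are invented): the last normal TRB before a link TRB is either shortened so the segment ends on a wMaxPacketSize boundary, or redirected to a bounce buffer whose length tops the segment up to that boundary.

```c
#include <stdio.h>

int main(void)
{
	unsigned int max_pkt = 1024;	/* wMaxPacketSize */
	unsigned int enqd_len = 3000;	/* bytes already queued on this segment */
	unsigned int trb_buff_len = 50;	/* bytes left for the last normal TRB */

	unsigned int unalign = (enqd_len + trb_buff_len) % max_pkt;

	if (unalign == 0)
		printf("already packet aligned\n");
	else if (trb_buff_len > unalign)
		printf("split: shorten TRB to %u bytes\n", trb_buff_len - unalign);
	else
		/* 3000 % 1024 = 952, so a 72-byte bounce buffer rounds the
		 * segment up to 3072 = 3 * wMaxPacketSize
		 */
		printf("bounce: buffer of %u bytes\n",
		       max_pkt - enqd_len % max_pkt);
	return 0;
}
```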
3591 | |
3592 | /* This is very similar to what ehci-q.c qtd_fill() does */ |
3593 | int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
3594 | struct urb *urb, int slot_id, unsigned int ep_index) |
3595 | { |
3596 | struct xhci_ring *ring; |
3597 | struct urb_priv *urb_priv; |
3598 | struct xhci_td *td; |
3599 | struct xhci_generic_trb *start_trb; |
3600 | struct scatterlist *sg = NULL; |
3601 | bool more_trbs_coming = true; |
3602 | bool need_zero_pkt = false; |
3603 | bool first_trb = true; |
3604 | unsigned int num_trbs; |
3605 | unsigned int start_cycle, num_sgs = 0; |
3606 | unsigned int enqd_len, block_len, trb_buff_len, full_len; |
3607 | int sent_len, ret; |
3608 | u32 field, length_field, remainder; |
3609 | u64 addr, send_addr; |
3610 | |
3611 | ring = xhci_urb_to_transfer_ring(xhci, urb); |
3612 | if (!ring) |
3613 | return -EINVAL; |
3614 | |
3615 | full_len = urb->transfer_buffer_length; |
3616 | /* If we have scatter/gather list, we use it. */ |
3617 | if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) { |
3618 | num_sgs = urb->num_mapped_sgs; |
3619 | sg = urb->sg; |
3620 | addr = (u64) sg_dma_address(sg); |
3621 | block_len = sg_dma_len(sg); |
3622 | num_trbs = count_sg_trbs_needed(urb); |
3623 | } else { |
3624 | num_trbs = count_trbs_needed(urb); |
3625 | addr = (u64) urb->transfer_dma; |
3626 | block_len = full_len; |
3627 | } |
3628 | ret = prepare_transfer(xhci, xhci->devs[slot_id], |
3629 | ep_index, urb->stream_id, |
3630 | num_trbs, urb, 0, mem_flags); |
3631 | if (unlikely(ret < 0)) |
3632 | return ret; |
3633 | |
3634 | urb_priv = urb->hcpriv; |
3635 | |
3636 | /* Deal with URB_ZERO_PACKET - need one more td/trb */ |
3637 | if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1) |
3638 | need_zero_pkt = true; |
3639 | |
3640 | td = &urb_priv->td[0]; |
3641 | |
3642 | /* |
3643 | * Don't give the first TRB to the hardware (by toggling the cycle bit) |
3644 | * until we've finished creating all the other TRBs. The ring's cycle |
3645 | * state may change as we enqueue the other TRBs, so save it too. |
3646 | */ |
3647 | start_trb = &ring->enqueue->generic; |
3648 | start_cycle = ring->cycle_state; |
3649 | send_addr = addr; |
3650 | |
3651 | /* Queue the TRBs, even if they are zero-length */ |
3652 | for (enqd_len = 0; first_trb || enqd_len < full_len; |
3653 | enqd_len += trb_buff_len) { |
3654 | field = TRB_TYPE(TRB_NORMAL); |
3655 | |
3656 | /* TRB buffer should not cross 64KB boundaries */ |
3657 | trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); |
3658 | trb_buff_len = min_t(unsigned int, trb_buff_len, block_len); |
3659 | |
3660 | if (enqd_len + trb_buff_len > full_len) |
3661 | trb_buff_len = full_len - enqd_len; |
3662 | |
3663 | /* Don't change the cycle bit of the first TRB until later */ |
3664 | if (first_trb) { |
3665 | first_trb = false; |
3666 | if (start_cycle == 0) |
3667 | field |= TRB_CYCLE; |
3668 | } else |
3669 | field |= ring->cycle_state; |
3670 | |
3671 | /* Chain all the TRBs together; clear the chain bit in the last |
3672 | * TRB to indicate it's the last TRB in the chain. |
3673 | */ |
3674 | if (enqd_len + trb_buff_len < full_len) { |
3675 | field |= TRB_CHAIN; |
3676 | if (trb_is_link(ring->enqueue + 1)) { |
3677 | if (xhci_align_td(xhci, urb, enqd_len, |
3678 | &trb_buff_len, |
3679 | ring->enq_seg)) { |
3680 | send_addr = ring->enq_seg->bounce_dma; |
3681 | /* assuming TD won't span 2 segs */ |
3682 | td->bounce_seg = ring->enq_seg; |
3683 | } |
3684 | } |
3685 | } |
3686 | if (enqd_len + trb_buff_len >= full_len) { |
3687 | field &= ~TRB_CHAIN; |
3688 | field |= TRB_IOC; |
3689 | more_trbs_coming = false; |
3690 | td->last_trb = ring->enqueue; |
3691 | td->last_trb_seg = ring->enq_seg; |
3692 | if (xhci_urb_suitable_for_idt(urb)) { |
3693 | memcpy(&send_addr, urb->transfer_buffer, |
3694 | trb_buff_len); |
3695 | le64_to_cpus(&send_addr); |
3696 | field |= TRB_IDT; |
3697 | } |
3698 | } |
3699 | |
3700 | /* Only set interrupt on short packet for IN endpoints */ |
3701 | if (usb_urb_dir_in(urb)) |
3702 | field |= TRB_ISP; |
3703 | |
3704 | /* Set the TRB length, TD size, and interrupter fields. */ |
3705 | remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len, |
3706 | full_len, urb, more_trbs_coming); |
3707 | |
3708 | length_field = TRB_LEN(trb_buff_len) | |
3709 | TRB_TD_SIZE(remainder) | |
3710 | TRB_INTR_TARGET(0); |
3711 | |
3712 | queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt, |
3713 | lower_32_bits(send_addr), |
3714 | upper_32_bits(send_addr), |
3715 | length_field, |
3716 | field); |
3717 | td->num_trbs++; |
3718 | addr += trb_buff_len; |
3719 | sent_len = trb_buff_len; |
3720 | |
3721 | while (sg && sent_len >= block_len) { |
3722 | /* New sg entry */ |
3723 | --num_sgs; |
3724 | sent_len -= block_len; |
3725 | sg = sg_next(sg); |
3726 | if (num_sgs != 0 && sg) { |
3727 | block_len = sg_dma_len(sg); |
3728 | addr = (u64) sg_dma_address(sg); |
3729 | addr += sent_len; |
3730 | } |
3731 | } |
3732 | block_len -= sent_len; |
3733 | send_addr = addr; |
3734 | } |
3735 | |
3736 | if (need_zero_pkt) { |
3737 | ret = prepare_transfer(xhci, xhci->devs[slot_id], |
3738 | ep_index, urb->stream_id, |
3739 | 1, urb, 1, mem_flags); |
3740 | urb_priv->td[1].last_trb = ring->enqueue; |
3741 | urb_priv->td[1].last_trb_seg = ring->enq_seg; |
3742 | field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC; |
3743 | queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field); |
3744 | urb_priv->td[1].num_trbs++; |
3745 | } |
3746 | |
3747 | check_trb_math(urb, enqd_len); |
3748 | giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, |
3749 | start_cycle, start_trb); |
3750 | return 0; |
3751 | } |
3752 | |
3753 | /* Caller must have locked xhci->lock */ |
3754 | int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
3755 | struct urb *urb, int slot_id, unsigned int ep_index) |
3756 | { |
3757 | struct xhci_ring *ep_ring; |
3758 | int num_trbs; |
3759 | int ret; |
3760 | struct usb_ctrlrequest *setup; |
3761 | struct xhci_generic_trb *start_trb; |
3762 | int start_cycle; |
3763 | u32 field; |
3764 | struct urb_priv *urb_priv; |
3765 | struct xhci_td *td; |
3766 | |
3767 | ep_ring = xhci_urb_to_transfer_ring(xhci, urb); |
3768 | if (!ep_ring) |
3769 | return -EINVAL; |
3770 | |
3771 | /* |
3772 | * Need to copy setup packet into setup TRB, so we can't use the setup |
3773 | * DMA address. |
3774 | */ |
3775 | if (!urb->setup_packet) |
3776 | return -EINVAL; |
3777 | |
3778 | /* 1 TRB for setup, 1 for status */ |
3779 | num_trbs = 2; |
3780 | /* |
3781 | * Don't need to check if we need additional event data and normal TRBs, |
3782 | * since data in control transfers will never get bigger than 16MB |
3783 | * XXX: can we get a buffer that crosses 64KB boundaries? |
3784 | */ |
3785 | if (urb->transfer_buffer_length > 0) |
3786 | num_trbs++; |
3787 | ret = prepare_transfer(xhci, xhci->devs[slot_id], |
3788 | ep_index, urb->stream_id, |
3789 | num_trbs, urb, 0, mem_flags); |
3790 | if (ret < 0) |
3791 | return ret; |
3792 | |
3793 | urb_priv = urb->hcpriv; |
3794 | td = &urb_priv->td[0]; |
3795 | td->num_trbs = num_trbs; |
3796 | |
3797 | /* |
3798 | * Don't give the first TRB to the hardware (by toggling the cycle bit) |
3799 | * until we've finished creating all the other TRBs. The ring's cycle |
3800 | * state may change as we enqueue the other TRBs, so save it too. |
3801 | */ |
3802 | start_trb = &ep_ring->enqueue->generic; |
3803 | start_cycle = ep_ring->cycle_state; |
3804 | |
3805 | /* Queue setup TRB - see section 6.4.1.2.1 */ |
3806 | /* FIXME better way to translate setup_packet into two u32 fields? */ |
3807 | setup = (struct usb_ctrlrequest *) urb->setup_packet; |
3808 | field = 0; |
3809 | field |= TRB_IDT | TRB_TYPE(TRB_SETUP); |
3810 | if (start_cycle == 0) |
3811 | field |= 0x1; |
3812 | |
3813 | /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */ |
3814 | if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) { |
3815 | if (urb->transfer_buffer_length > 0) { |
3816 | if (setup->bRequestType & USB_DIR_IN) |
3817 | field |= TRB_TX_TYPE(TRB_DATA_IN); |
3818 | else |
3819 | field |= TRB_TX_TYPE(TRB_DATA_OUT); |
3820 | } |
3821 | } |
3822 | |
3823 | queue_trb(xhci, ep_ring, true, |
3824 | setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16, |
3825 | le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16, |
3826 | TRB_LEN(8) | TRB_INTR_TARGET(0), |
3827 | /* Immediate data in pointer */ |
3828 | field); |
3829 | |
3830 | /* If there's data, queue data TRBs */ |
3831 | /* Only set interrupt on short packet for IN endpoints */ |
3832 | if (usb_urb_dir_in(urb)) |
3833 | field = TRB_ISP | TRB_TYPE(TRB_DATA); |
3834 | else |
3835 | field = TRB_TYPE(TRB_DATA); |
3836 | |
3837 | if (urb->transfer_buffer_length > 0) { |
3838 | u32 length_field, remainder; |
3839 | u64 addr; |
3840 | |
3841 | if (xhci_urb_suitable_for_idt(urb)) { |
3842 | memcpy(&addr, urb->transfer_buffer, |
3843 | urb->transfer_buffer_length); |
3844 | le64_to_cpus(&addr); |
3845 | field |= TRB_IDT; |
3846 | } else { |
3847 | addr = (u64) urb->transfer_dma; |
3848 | } |
3849 | |
3850 | remainder = xhci_td_remainder(xhci, 0, |
3851 | urb->transfer_buffer_length, |
3852 | urb->transfer_buffer_length, |
3853 | urb, 1); |
3854 | length_field = TRB_LEN(urb->transfer_buffer_length) | |
3855 | TRB_TD_SIZE(remainder) | |
3856 | TRB_INTR_TARGET(0); |
3857 | if (setup->bRequestType & USB_DIR_IN) |
3858 | field |= TRB_DIR_IN; |
3859 | queue_trb(xhci, ep_ring, true, |
3860 | lower_32_bits(addr), |
3861 | upper_32_bits(addr), |
3862 | length_field, |
3863 | field | ep_ring->cycle_state); |
3864 | } |
3865 | |
3866 | /* Save the DMA address of the last TRB in the TD */ |
3867 | td->last_trb = ep_ring->enqueue; |
3868 | td->last_trb_seg = ep_ring->enq_seg; |
3869 | |
3870 | /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ |
3871 | /* If the device sent data, the status stage is an OUT transfer */ |
3872 | if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) |
3873 | field = 0; |
3874 | else |
3875 | field = TRB_DIR_IN; |
3876 | queue_trb(xhci, ep_ring, false, |
3877 | 0, |
3878 | 0, |
3879 | TRB_INTR_TARGET(0), |
3880 | /* Event on completion */ |
3881 | field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); |
3882 | |
3883 | giveback_first_trb(xhci, slot_id, ep_index, 0, |
3884 | start_cycle, start_trb); |
3885 | return 0; |
3886 | } |
3887 | |
3888 | /* |
3889 | * The transfer burst count field of the isochronous TRB defines the number of |
3890 | * bursts that are required to move all packets in this TD. Only SuperSpeed |
3891 | * devices can burst up to bMaxBurst number of packets per service interval. |
3892 | * This field is zero based, meaning a value of zero in the field means one |
3893 | * burst. Basically, for everything but SuperSpeed devices, this field will be |
3894 | * zero. Only xHCI 1.0 host controllers support this field. |
3895 | */ |
3896 | static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, |
3897 | struct urb *urb, unsigned int total_packet_count) |
3898 | { |
3899 | unsigned int max_burst; |
3900 | |
3901 | if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER) |
3902 | return 0; |
3903 | |
3904 | max_burst = urb->ep->ss_ep_comp.bMaxBurst; |
3905 | return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; |
3906 | } |
3907 | |
3908 | /* |
3909 | * Returns the number of packets in the last "burst" of packets. This field is |
3910 | * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so |
3911 | * the last burst packet count is equal to the total number of packets in the |
3912 | * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst |
3913 | * must contain (bMaxBurst + 1) number of packets, but the last burst can |
3914 | * contain 1 to (bMaxBurst + 1) packets. |
3915 | */ |
3916 | static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, |
3917 | struct urb *urb, unsigned int total_packet_count) |
3918 | { |
3919 | unsigned int max_burst; |
3920 | unsigned int residue; |
3921 | |
3922 | if (xhci->hci_version < 0x100) |
3923 | return 0; |
3924 | |
3925 | if (urb->dev->speed >= USB_SPEED_SUPER) { |
3926 | /* bMaxBurst is zero based: 0 means 1 packet per burst */ |
3927 | max_burst = urb->ep->ss_ep_comp.bMaxBurst; |
3928 | residue = total_packet_count % (max_burst + 1); |
3929 | /* If residue is zero, the last burst contains (max_burst + 1) |
3930 | * number of packets, but the TLBPC field is zero-based. |
3931 | */ |
3932 | if (residue == 0) |
3933 | return max_burst; |
3934 | return residue - 1; |
3935 | } |
3936 | if (total_packet_count == 0) |
3937 | return 0; |
3938 | return total_packet_count - 1; |
3939 | } |
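A worked example of the two zero-based burst fields computed above for a SuperSpeed isoc TD (the packet counts are illustrative): with bMaxBurst = 2 the device takes bursts of up to three packets, so a 7-packet TD needs three bursts and its last burst carries a single packet.

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int total_packet_count = 7;
	unsigned int max_burst = 2;	/* bMaxBurst: bursts of up to 3 packets */

	/* TBC: number of bursts required, minus one */
	unsigned int tbc = DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;

	/* TLBPC: packets in the final burst, minus one */
	unsigned int residue = total_packet_count % (max_burst + 1);
	unsigned int tlbpc = residue ? residue - 1 : max_burst;

	printf("TBC = %u, TLBPC = %u\n", tbc, tlbpc);	/* TBC = 2, TLBPC = 0 */
	return 0;
}
```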
3940 | |
3941 | /* |
3942 | * Calculates the Frame ID field of the isochronous TRB, which identifies |
3943 | * the target frame that the Interval associated with this Isochronous |
3944 | * Transfer Descriptor will start on. Refer to 4.11.2.5 in the 1.1 spec. |
3945 | * |
3946 | * Returns actual frame id on success, negative value on error. |
3947 | */ |
3948 | static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci, |
3949 | struct urb *urb, int index) |
3950 | { |
3951 | int start_frame, ist, ret = 0; |
3952 | int start_frame_id, end_frame_id, current_frame_id; |
3953 | |
3954 | if (urb->dev->speed == USB_SPEED_LOW || |
3955 | urb->dev->speed == USB_SPEED_FULL) |
3956 | start_frame = urb->start_frame + index * urb->interval; |
3957 | else |
3958 | start_frame = (urb->start_frame + index * urb->interval) >> 3; |
3959 | |
3960 | /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2): |
3961 | * |
3962 | * If bit [3] of IST is cleared to '0', software can add a TRB no |
3963 | * later than IST[2:0] Microframes before that TRB is scheduled to |
3964 | * be executed. |
3965 | * If bit [3] of IST is set to '1', software can add a TRB no later |
3966 | * than IST[2:0] Frames before that TRB is scheduled to be executed. |
3967 | */ |
3968 | ist = HCS_IST(xhci->hcs_params2) & 0x7; |
3969 | if (HCS_IST(xhci->hcs_params2) & (1 << 3)) |
3970 | ist <<= 3; |
3971 | |
3972 | /* Software shall not schedule an Isoch TD with a Frame ID value that |
3973 | * is less than the Start Frame ID or greater than the End Frame ID, |
3974 | * where: |
3975 | * |
3976 | * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048 |
3977 | * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048 |
3978 | * |
3979 | * Both the End Frame ID and Start Frame ID values are calculated |
3980 | * in microframes. When software determines the valid Frame ID value, |
3981 | * the End Frame ID value should be rounded down to the nearest Frame |
3982 | * boundary, and the Start Frame ID value should be rounded up to the |
3983 | * nearest Frame boundary. |
3984 | */ |
3985 | current_frame_id = readl(&xhci->run_regs->microframe_index); |
3986 | start_frame_id = roundup(current_frame_id + ist + 1, 8); |
3987 | end_frame_id = rounddown(current_frame_id + 895 * 8, 8); |
3988 | |
3989 | start_frame &= 0x7ff; |
3990 | start_frame_id = (start_frame_id >> 3) & 0x7ff; |
3991 | end_frame_id = (end_frame_id >> 3) & 0x7ff; |
3992 | |
3993 | xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n", |
3994 | __func__, index, readl(&xhci->run_regs->microframe_index), |
3995 | start_frame_id, end_frame_id, start_frame); |
3996 | |
3997 | if (start_frame_id < end_frame_id) { |
3998 | if (start_frame > end_frame_id || |
3999 | start_frame < start_frame_id) |
4000 | ret = -EINVAL; |
4001 | } else if (start_frame_id > end_frame_id) { |
4002 | if ((start_frame > end_frame_id && |
4003 | start_frame < start_frame_id)) |
4004 | ret = -EINVAL; |
4005 | } else { |
4006 | ret = -EINVAL; |
4007 | } |
4008 | |
4009 | if (index == 0) { |
4010 | if (ret == -EINVAL || start_frame == start_frame_id) { |
4011 | start_frame = start_frame_id + 1; |
4012 | if (urb->dev->speed == USB_SPEED_LOW || |
4013 | urb->dev->speed == USB_SPEED_FULL) |
4014 | urb->start_frame = start_frame; |
4015 | else |
4016 | urb->start_frame = start_frame << 3; |
4017 | ret = 0; |
4018 | } |
4019 | } |
4020 | |
4021 | if (ret) { |
4022 | xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n", |
4023 | start_frame, current_frame_id, index, |
4024 | start_frame_id, end_frame_id); |
4025 | xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n"); |
4026 | return ret; |
4027 | } |
4028 | |
4029 | return start_frame; |
4030 | } |
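The validity test above is a membership check in a circular window of 2048 frames, which is easy to get wrong when the window wraps past 0x7ff. A small sketch of the same check with invented window bounds:

```c
#include <stdio.h>

/* Returns 1 if frame lies inside [start_id, end_id] on the 0..0x7ff
 * circle, mirroring the three cases handled above.
 */
static int frame_in_window(int frame, int start_id, int end_id)
{
	if (start_id < end_id)		/* window does not wrap */
		return frame >= start_id && frame <= end_id;
	if (start_id > end_id)		/* window wraps past 0x7ff */
		return frame >= start_id || frame <= end_id;
	return 0;			/* degenerate window: nothing valid */
}

int main(void)
{
	/* wrapped window: frames 0x7f0..0x7ff and 0x000..0x37f are valid */
	printf("%d %d\n",
	       frame_in_window(0x010, 0x7f0, 0x37f),	/* 1 */
	       frame_in_window(0x400, 0x7f0, 0x37f));	/* 0 */
	return 0;
}
```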
4031 | |
4032 | /* Check if we should generate event interrupt for a TD in an isoc URB */ |
4033 | static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i, |
4034 | struct xhci_interrupter *ir) |
4035 | { |
4036 | if (xhci->hci_version < 0x100) |
4037 | return false; |
4038 | /* always generate an event interrupt for the last TD */ |
4039 | if (i == num_tds - 1) |
4040 | return false; |
4041 | /* |
4042 | * If AVOID_BEI is set, the host handles full event rings poorly, so |
4043 | * generate an event at least every 8th TD to clear the event ring. |
4044 | */ |
4045 | if (i && ir->isoc_bei_interval && xhci->quirks & XHCI_AVOID_BEI) |
4046 | return !!(i % ir->isoc_bei_interval); |
4047 | |
4048 | return true; |
4049 | } |
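A stand-alone sketch of the BEI decision above, using an assumed isoc_bei_interval of 8: with the AVOID_BEI quirk set, every eighth TD and the final TD still raise an event so the event ring keeps draining, while all other TDs set BEI and stay silent.

```c
#include <stdbool.h>
#include <stdio.h>

static bool block_event_intr(int num_tds, int i, int bei_interval, bool avoid_bei)
{
	if (i == num_tds - 1)
		return false;			/* always interrupt on the last TD */
	if (i && bei_interval && avoid_bei)
		return i % bei_interval != 0;	/* periodic event to drain the ring */
	return true;				/* otherwise block the event */
}

int main(void)
{
	/* For 20 TDs: prints 1 where BEI would be set, 0 where an event
	 * interrupt is generated (i = 8, 16 and the last TD).
	 */
	for (int i = 0; i < 20; i++)
		printf("%d", block_event_intr(20, i, 8, true));
	printf("\n");
	return 0;
}
```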
4050 | |
4051 | /* This is for isoc transfer */ |
4052 | static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
4053 | struct urb *urb, int slot_id, unsigned int ep_index) |
4054 | { |
4055 | struct xhci_interrupter *ir; |
4056 | struct xhci_ring *ep_ring; |
4057 | struct urb_priv *urb_priv; |
4058 | struct xhci_td *td; |
4059 | int num_tds, trbs_per_td; |
4060 | struct xhci_generic_trb *start_trb; |
4061 | bool first_trb; |
4062 | int start_cycle; |
4063 | u32 field, length_field; |
4064 | int running_total, trb_buff_len, td_len, td_remain_len, ret; |
4065 | u64 start_addr, addr; |
4066 | int i, j; |
4067 | bool more_trbs_coming; |
4068 | struct xhci_virt_ep *xep; |
4069 | int frame_id; |
4070 | |
4071 | xep = &xhci->devs[slot_id]->eps[ep_index]; |
4072 | ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; |
4073 | ir = xhci->interrupters[0]; |
4074 | |
4075 | num_tds = urb->number_of_packets; |
4076 | if (num_tds < 1) { |
4077 | xhci_dbg(xhci, "Isoc URB with zero packets?\n"); |
4078 | return -EINVAL; |
4079 | } |
4080 | start_addr = (u64) urb->transfer_dma; |
4081 | start_trb = &ep_ring->enqueue->generic; |
4082 | start_cycle = ep_ring->cycle_state; |
4083 | |
4084 | urb_priv = urb->hcpriv; |
4085 | /* Queue the TRBs for each TD, even if they are zero-length */ |
4086 | for (i = 0; i < num_tds; i++) { |
4087 | unsigned int total_pkt_count, max_pkt; |
4088 | unsigned int burst_count, last_burst_pkt_count; |
4089 | u32 sia_frame_id; |
4090 | |
4091 | first_trb = true; |
4092 | running_total = 0; |
4093 | addr = start_addr + urb->iso_frame_desc[i].offset; |
4094 | td_len = urb->iso_frame_desc[i].length; |
4095 | td_remain_len = td_len; |
4096 | max_pkt = usb_endpoint_maxp(&urb->ep->desc); |
4097 | total_pkt_count = DIV_ROUND_UP(td_len, max_pkt); |
4098 | |
4099 | /* A zero-length transfer still involves at least one packet. */ |
4100 | if (total_pkt_count == 0) |
4101 | total_pkt_count++; |
4102 | burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count); |
4103 | last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci, |
4104 | urb, total_pkt_count); |
4105 | |
4106 | trbs_per_td = count_isoc_trbs_needed(urb, i); |
4107 | |
4108 | ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, |
4109 | urb->stream_id, trbs_per_td, urb, i, mem_flags); |
4110 | if (ret < 0) { |
4111 | if (i == 0) |
4112 | return ret; |
4113 | goto cleanup; |
4114 | } |
4115 | td = &urb_priv->td[i]; |
4116 | td->num_trbs = trbs_per_td; |
4117 | /* use SIA by default; overwrite it if a frame ID is used */ |
4118 | sia_frame_id = TRB_SIA; |
4119 | if (!(urb->transfer_flags & URB_ISO_ASAP) && |
4120 | HCC_CFC(xhci->hcc_params)) { |
4121 | frame_id = xhci_get_isoc_frame_id(xhci, urb, i); |
4122 | if (frame_id >= 0) |
4123 | sia_frame_id = TRB_FRAME_ID(frame_id); |
4124 | } |
4125 | /* |
4126 | * Set isoc specific data for the first TRB in a TD. |
4127 | * Prevent HW from getting the TRBs by keeping the cycle state |
4128 | * inverted in the first TDs isoc TRB. |
4129 | */ |
4130 | field = TRB_TYPE(TRB_ISOC) | |
4131 | TRB_TLBPC(last_burst_pkt_count) | |
4132 | sia_frame_id | |
4133 | (i ? ep_ring->cycle_state : !start_cycle); |
4134 | |
4135 | /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */ |
4136 | if (!xep->use_extended_tbc) |
4137 | field |= TRB_TBC(burst_count); |
4138 | |
4139 | /* fill the rest of the TRB fields, and remaining normal TRBs */ |
4140 | for (j = 0; j < trbs_per_td; j++) { |
4141 | u32 remainder = 0; |
4142 | |
4143 | /* only first TRB is isoc, overwrite otherwise */ |
4144 | if (!first_trb) |
4145 | field = TRB_TYPE(TRB_NORMAL) | |
4146 | ep_ring->cycle_state; |
4147 | |
4148 | /* Only set interrupt on short packet for IN EPs */ |
4149 | if (usb_urb_dir_in(urb)) |
4150 | field |= TRB_ISP; |
4151 | |
4152 | /* Set the chain bit for all except the last TRB */ |
4153 | if (j < trbs_per_td - 1) { |
4154 | more_trbs_coming = true; |
4155 | field |= TRB_CHAIN; |
4156 | } else { |
4157 | more_trbs_coming = false; |
4158 | td->last_trb = ep_ring->enqueue; |
4159 | td->last_trb_seg = ep_ring->enq_seg; |
4160 | field |= TRB_IOC; |
4161 | if (trb_block_event_intr(xhci, num_tds, i, ir)) |
4162 | field |= TRB_BEI; |
4163 | } |
4164 | /* Calculate TRB length */ |
4165 | trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); |
4166 | if (trb_buff_len > td_remain_len) |
4167 | trb_buff_len = td_remain_len; |
4168 | |
4169 | /* Set the TRB length, TD size, & interrupter fields. */ |
4170 | remainder = xhci_td_remainder(xhci, running_total, |
4171 | trb_buff_len, td_len, |
4172 | urb, more_trbs_coming); |
4173 | |
4174 | length_field = TRB_LEN(trb_buff_len) | |
4175 | TRB_INTR_TARGET(0); |
4176 | |
4177 | /* xhci 1.1 with ETE uses TD Size field for TBC */ |
4178 | if (first_trb && xep->use_extended_tbc) |
4179 | length_field |= TRB_TD_SIZE_TBC(burst_count); |
4180 | else |
4181 | length_field |= TRB_TD_SIZE(remainder); |
4182 | first_trb = false; |
4183 | |
4184 | queue_trb(xhci, ep_ring, more_trbs_coming, |
4185 | lower_32_bits(addr), |
4186 | upper_32_bits(addr), |
4187 | length_field, |
4188 | field); |
4189 | running_total += trb_buff_len; |
4190 | |
4191 | addr += trb_buff_len; |
4192 | td_remain_len -= trb_buff_len; |
4193 | } |
4194 | |
4195 | /* Check TD length */ |
4196 | if (running_total != td_len) { |
4197 | xhci_err(xhci, "ISOC TD length unmatch\n"); |
4198 | ret = -EINVAL; |
4199 | goto cleanup; |
4200 | } |
4201 | } |
4202 | |
4203 | /* store the next frame id */ |
4204 | if (HCC_CFC(xhci->hcc_params)) |
4205 | xep->next_frame_id = urb->start_frame + num_tds * urb->interval; |
4206 | |
4207 | if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { |
4208 | if (xhci->quirks & XHCI_AMD_PLL_FIX) |
4209 | usb_amd_quirk_pll_disable(); |
4210 | } |
4211 | xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; |
4212 | |
4213 | giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, |
4214 | start_cycle, start_trb); |
4215 | return 0; |
4216 | cleanup: |
4217 | /* Clean up a partially enqueued isoc transfer. */ |
4218 | |
4219 | for (i--; i >= 0; i--) |
4220 | list_del_init(&urb_priv->td[i].td_list); |
4221 | |
4222 | /* Use the first TD as a temporary variable to turn the TDs we've queued |
4223 | * into No-ops with a software-owned cycle bit. That way the hardware |
4224 | * won't accidentally start executing bogus TDs when we partially |
4225 | * overwrite them. td->first_trb and td->start_seg are already set. |
4226 | */ |
4227 | urb_priv->td[0].last_trb = ep_ring->enqueue; |
4228 | /* Every TRB except the first & last will have its cycle bit flipped. */ |
4229 | td_to_noop(xhci, ep_ring, &urb_priv->td[0], true); |
4230 | |
4231 | /* Reset the ring enqueue back to the first TRB and its cycle bit. */ |
4232 | ep_ring->enqueue = urb_priv->td[0].first_trb; |
4233 | ep_ring->enq_seg = urb_priv->td[0].start_seg; |
4234 | ep_ring->cycle_state = start_cycle; |
4235 | usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); |
4236 | return ret; |
4237 | } |
4238 | |
4239 | /* |
4240 | * Check transfer ring to guarantee there is enough room for the urb. |
4241 | * Update ISO URB start_frame and interval. |
4242 | * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to |
4243 | * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or |
4244 | * Contiguous Frame ID is not supported by HC. |
4245 | */ |
4246 | int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, |
4247 | struct urb *urb, int slot_id, unsigned int ep_index) |
4248 | { |
4249 | struct xhci_virt_device *xdev; |
4250 | struct xhci_ring *ep_ring; |
4251 | struct xhci_ep_ctx *ep_ctx; |
4252 | int start_frame; |
4253 | int num_tds, num_trbs, i; |
4254 | int ret; |
4255 | struct xhci_virt_ep *xep; |
4256 | int ist; |
4257 | |
4258 | xdev = xhci->devs[slot_id]; |
4259 | xep = &xhci->devs[slot_id]->eps[ep_index]; |
4260 | ep_ring = xdev->eps[ep_index].ring; |
4261 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); |
4262 | |
4263 | num_trbs = 0; |
4264 | num_tds = urb->number_of_packets; |
4265 | for (i = 0; i < num_tds; i++) |
4266 | num_trbs += count_isoc_trbs_needed(urb, i); |
4267 | |
4268 | /* Check the ring to guarantee there is enough room for the whole urb. |
4269 | * Do not insert any TD of the URB into the ring if the check fails. |
4270 | */ |
4271 | ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), |
4272 | num_trbs, mem_flags); |
4273 | if (ret) |
4274 | return ret; |
4275 | |
4276 | /* |
4277 | * Check interval value. This should be done before we start to |
4278 | * calculate the start frame value. |
4279 | */ |
4280 | check_interval(xhci, urb, ep_ctx); |
4281 | |
4282 | /* Calculate the start frame and put it in urb->start_frame. */ |
4283 | if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { |
4284 | if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) { |
4285 | urb->start_frame = xep->next_frame_id; |
4286 | goto skip_start_over; |
4287 | } |
4288 | } |
4289 | |
4290 | start_frame = readl(&xhci->run_regs->microframe_index); |
4291 | start_frame &= 0x3fff; |
4292 | /* |
4293 | * Round up to the next frame and account for the time before the TRB |
4294 | * really gets scheduled by the hardware. |
4295 | */ |
4296 | ist = HCS_IST(xhci->hcs_params2) & 0x7; |
4297 | if (HCS_IST(xhci->hcs_params2) & (1 << 3)) |
4298 | ist <<= 3; |
4299 | start_frame += ist + XHCI_CFC_DELAY; |
4300 | start_frame = roundup(start_frame, 8); |
4301 | |
4302 | /* |
4303 | * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT |
4304 | * is greater than 8 microframes. |
4305 | */ |
4306 | if (urb->dev->speed == USB_SPEED_LOW || |
4307 | urb->dev->speed == USB_SPEED_FULL) { |
4308 | start_frame = roundup(start_frame, urb->interval << 3); |
4309 | urb->start_frame = start_frame >> 3; |
4310 | } else { |
4311 | start_frame = roundup(start_frame, urb->interval); |
4312 | urb->start_frame = start_frame; |
4313 | } |
4314 | |
4315 | skip_start_over: |
4316 | |
4317 | return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index); |
4318 | } |
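To make the start-frame arithmetic above concrete, here is a small sketch with assumed values: the real XHCI_CFC_DELAY constant and IST come from the driver and the HCSPARAMS2 register, so the numbers below are invented. The current microframe index is padded by the scheduling threshold plus the driver's fudge factor, rounded up to a frame boundary, then rounded up to the endpoint's service interval.

```c
#include <stdio.h>

#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int mfindex = 0x2345 & 0x3fff;	/* MFINDEX snapshot (assumed) */
	unsigned int ist = 4;			/* scheduling threshold, uframes */
	unsigned int cfc_delay = 7;		/* stand-in for XHCI_CFC_DELAY */
	unsigned int interval = 16;		/* high-speed interval, uframes */

	unsigned int start = mfindex + ist + cfc_delay;

	start = roundup(start, 8);		/* next frame boundary */
	start = roundup(start, interval);	/* next service interval */
	printf("start_frame (microframes) = %u\n", start);
	return 0;
}
```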
4319 | |
4320 | /**** Command Ring Operations ****/ |
4321 | |
4322 | /* Generic function for queueing a command TRB on the command ring. |
4323 | * Check to make sure there's room on the command ring for one command TRB. |
4324 | * Also check that there's room reserved for commands that must not fail. |
4325 | * If this is a command that must not fail, meaning command_must_succeed = TRUE, |
4326 | * then only check for the number of reserved spots. |
4327 | * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB |
4328 | * because the command event handler may want to resubmit a failed command. |
4329 | */ |
4330 | static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4331 | u32 field1, u32 field2, |
4332 | u32 field3, u32 field4, bool command_must_succeed) |
4333 | { |
4334 | int reserved_trbs = xhci->cmd_ring_reserved_trbs; |
4335 | int ret; |
4336 | |
4337 | if ((xhci->xhc_state & XHCI_STATE_DYING) || |
4338 | (xhci->xhc_state & XHCI_STATE_HALTED)) { |
4339 | xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); |
4340 | return -ESHUTDOWN; |
4341 | } |
4342 | |
4343 | if (!command_must_succeed) |
4344 | reserved_trbs++; |
4345 | |
4346 | ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, |
4347 | reserved_trbs, GFP_ATOMIC); |
4348 | if (ret < 0) { |
4349 | xhci_err(xhci, "ERR: No room for command on command ring\n"); |
4350 | if (command_must_succeed) |
4351 | xhci_err(xhci, "ERR: Reserved TRB counting for " |
4352 | "unfailable commands failed.\n"); |
4353 | return ret; |
4354 | } |
4355 | |
4356 | cmd->command_trb = xhci->cmd_ring->enqueue; |
4357 | |
4358 | /* if there are no other commands queued we start the timeout timer */ |
4359 | if (list_empty(&xhci->cmd_list)) { |
4360 | xhci->current_cmd = cmd; |
4361 | xhci_mod_cmd_timer(xhci); |
4362 | } |
4363 | |
4364 | list_add_tail(&cmd->cmd_list, &xhci->cmd_list); |
4365 | |
4366 | queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, |
4367 | field4 | xhci->cmd_ring->cycle_state); |
4368 | return 0; |
4369 | } |
4370 | |
4371 | /* Queue a slot enable or disable request on the command ring */ |
4372 | int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4373 | u32 trb_type, u32 slot_id) |
4374 | { |
4375 | return queue_command(xhci, cmd, 0, 0, 0, |
4376 | TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false); |
4377 | } |
4378 | |
4379 | /* Queue an address device command TRB */ |
4380 | int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4381 | dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup) |
4382 | { |
4383 | return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), |
4384 | upper_32_bits(in_ctx_ptr), 0, |
4385 | TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id) |
4386 | | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false); |
4387 | } |
4388 | |
4389 | int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4390 | u32 field1, u32 field2, u32 field3, u32 field4) |
4391 | { |
4392 | return queue_command(xhci, cmd, field1, field2, field3, field4, false); |
4393 | } |
4394 | |
4395 | /* Queue a reset device command TRB */ |
4396 | int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4397 | u32 slot_id) |
4398 | { |
4399 | return queue_command(xhci, cmd, 0, 0, 0, |
4400 | TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id), |
4401 | false); |
4402 | } |
4403 | |
4404 | /* Queue a configure endpoint command TRB */ |
4405 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, |
4406 | struct xhci_command *cmd, dma_addr_t in_ctx_ptr, |
4407 | u32 slot_id, bool command_must_succeed) |
4408 | { |
4409 | return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), |
4410 | upper_32_bits(in_ctx_ptr), 0, |
4411 | TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id), |
4412 | command_must_succeed); |
4413 | } |
4414 | |
4415 | /* Queue an evaluate context command TRB */ |
4416 | int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4417 | dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) |
4418 | { |
4419 | return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), |
4420 | upper_32_bits(in_ctx_ptr), 0, |
4421 | TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id), |
4422 | command_must_succeed); |
4423 | } |
4424 | |
4425 | /* |
4426 | * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop |
4427 | * activity on an endpoint that is about to be suspended. |
4428 | */ |
4429 | int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4430 | int slot_id, unsigned int ep_index, int suspend) |
4431 | { |
4432 | u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); |
4433 | u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); |
4434 | u32 type = TRB_TYPE(TRB_STOP_RING); |
4435 | u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend); |
4436 | |
4437 | return queue_command(xhci, cmd, 0, 0, 0, |
4438 | trb_slot_id | trb_ep_index | type | trb_suspend, false); |
4439 | } |
4440 | |
4441 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4442 | int slot_id, unsigned int ep_index, |
4443 | enum xhci_ep_reset_type reset_type) |
4444 | { |
4445 | u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); |
4446 | u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); |
4447 | u32 type = TRB_TYPE(TRB_RESET_EP); |
4448 | |
4449 | if (reset_type == EP_SOFT_RESET) |
4450 | type |= TRB_TSP; |
4451 | |
4452 | return queue_command(xhci, cmd, 0, 0, 0, |
4453 | trb_slot_id | trb_ep_index | type, false); |
4454 | } |
4455 |