// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kstrtox.h>
#include <linux/list.h>
#include <linux/nls.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <linux/io-64-nonatomic-lo-hi.h>

#include <asm/byteorder.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
	kfree(ctx);
}

/* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
	kfree(ring);
}

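/*
 * Build the UTF-16LE serial, product and manufacturer string descriptors and
 * string descriptor 0 (LANGID), and return their lengths packed one per byte
 * (string0 in the lowest byte) for the info context length field.
 */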
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor *s_desc;
	u32 string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0: */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;
	string_length += 4;

	return string_length;
}

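/*
 * Program the DbC info context (string descriptor addresses and lengths),
 * both bulk endpoint contexts, and the DCCP/DEVINFO registers.
 */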
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context *info;
	struct xhci_ep_ctx *ep_ctx;
	u32 dev_info;
	dma_addr_t deq, dma;
	unsigned int max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
	writel(dev_info, &dbc->regs->devinfo2);
}

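/*
 * Complete a request: remove it from the pending list, unmap its buffer and
 * invoke its ->complete() callback with dbc->lock temporarily dropped.
 */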
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}

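/*
 * Turn the request's TRB into a no-op (preserving its cycle bit) and give the
 * request back with -ESHUTDOWN.
 */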
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb *trb = req->trb;

	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
	struct dbc_request *req;

	if (direction != BULK_IN &&
	    direction != BULK_OUT)
		return NULL;

	if (!dbc)
		return NULL;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

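/*
 * Write one TRB at the enqueue pointer and advance it, wrapping back to the
 * start of the segment and toggling the cycle state at the link TRB.
 */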
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

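/*
 * Queue a single Normal TRB for the request and ring the doorbell. The cycle
 * bit is written last, after a write barrier, so the controller never sees a
 * partially written TRB.
 */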
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = req->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = req->dma;
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

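/* Map the request buffer and queue it on the endpoint ring; dbc->lock held. */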
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int ret;
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long flags;
	struct xhci_dbc *dbc = req->dbc;
	int ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep *dep;

	dep = &dbc->eps[direction];
	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
}

static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
			  erst->erst_dma_addr);
	erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	/* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

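/*
 * Allocate a single-segment ring. Transfer rings get a link TRB at the end of
 * the segment pointing back to its start; the event ring does not.
 */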
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring, 1);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}

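/*
 * Allocate the event and transfer rings, ERST, contexts and string table,
 * program the event ring registers and move the DbC to DS_INITIALIZED.
 */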
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct device *dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(*dbc->string);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	dma_free_coherent(dbc->dev, dbc->string_size, dbc->string, dbc->string_dma);
	dbc->string = NULL;

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

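/* Reset the DbC, allocate its data structures and enable it; dbc->lock held. */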
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	u32 ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work,
				msecs_to_jiffies(dbc->poll_interval));
}

static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
	case DS_STALLED:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);
	if (ret)
		return;

	xhci_dbc_mem_cleanup(dbc);
	pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
}

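/*
 * Report DbC port change events and acknowledge them by writing the register
 * back; the reset change bit is deliberately left for the state machine.
 */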
static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32 portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* The port reset change bit is cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

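/*
 * Handle a transfer event: match it to a pending request by TRB DMA address
 * and give the request back with the corresponding status.
 */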
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ?
			get_out_ep(dbc) : get_in_ep(dbc);
	ring = dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}

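/*
 * Run the DbC state machine and, once configured, consume events from the
 * event ring. Called with dbc->lock held; the return value tells the caller
 * which driver callback, if any, to invoke.
 */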
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	struct dbc_ep *dep;
	union xhci_trb *evt;
	u32 ctrl, portsc;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:

		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			dev_info(dbc->dev, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

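/*
 * Polling work: handle pending events, notify the function driver on
 * configure/disconnect transitions, and re-arm itself unless an error was
 * seen. Polls faster while transfers are pending.
 */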
static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;
	unsigned int poll_interval;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	poll_interval = dbc->poll_interval;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		/* set fast poll rate if there are pending data transfers */
		if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
		    !list_empty(&dbc->eps[BULK_IN].list_pending))
			poll_interval = 1;
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work,
			 msecs_to_jiffies(poll_interval));
}

static const char * const dbc_state_strings[DS_MAX] = {
	[DS_DISABLED] = "disabled",
	[DS_INITIALIZED] = "initialized",
	[DS_ENABLED] = "enabled",
	[DS_CONNECTED] = "connected",
	[DS_CONFIGURED] = "configured",
	[DS_STALLED] = "stalled",
};

static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
		return sysfs_emit(buf, "unknown\n");

	return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd *xhci;
	struct xhci_dbc *dbc;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (sysfs_streq(buf, "enable"))
		xhci_dbc_start(dbc);
	else if (sysfs_streq(buf, "disable"))
		xhci_dbc_stop(dbc);
	else
		return -EINVAL;

	return count;
}

static ssize_t dbc_idVendor_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->idVendor);
}

static ssize_t dbc_idVendor_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u16 value;
	u32 dev_info;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idVendor = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_idProduct_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->idProduct);
}

static ssize_t dbc_idProduct_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u16 value;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idProduct = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu)) | value;
	writel(dev_info, ptr);
	return size;
}

static ssize_t dbc_bcdDevice_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);
}

static ssize_t dbc_bcdDevice_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u16 value;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bcdDevice = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);
}

static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u8 value;
	int ret;

	/* bInterfaceProtocol is 8 bit, but... */
	ret = kstrtou8(buf, 0, &value);
	if (ret)
		return ret;

	/* ...xhci only supports values 0 and 1 */
	if (value > 1)
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bInterfaceProtocol = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffu)) | value;
	writel(dev_info, ptr);

	return size;
}

static DEVICE_ATTR_RW(dbc);
static DEVICE_ATTR_RW(dbc_idVendor);
static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);

static struct attribute *dbc_dev_attrs[] = {
	&dev_attr_dbc.attr,
	&dev_attr_dbc_idVendor.attr,
	&dev_attr_dbc_idProduct.attr,
	&dev_attr_dbc_bcdDevice.attr,
	&dev_attr_dbc_bInterfaceProtocol.attr,
	NULL
};
ATTRIBUTE_GROUPS(dbc_dev);

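/*
 * Allocate and initialize a struct xhci_dbc and create its sysfs attributes.
 * Returns NULL on failure or if the DbC is already enabled.
 */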
struct xhci_dbc *
xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
{
	struct xhci_dbc *dbc;
	int ret;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return NULL;

	dbc->regs = base;
	dbc->dev = dev;
	dbc->driver = driver;
	dbc->idProduct = DBC_PRODUCT_ID;
	dbc->idVendor = DBC_VENDOR_ID;
	dbc->bcdDevice = DBC_DEVICE_REV;
	dbc->bInterfaceProtocol = DBC_PROTOCOL;
	dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;

	if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
		goto err;

	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
	if (ret)
		goto err;

	return dbc;
err:
	kfree(dbc);
	return NULL;
}

/* undo what xhci_alloc_dbc() did */
void xhci_dbc_remove(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;
	/* stop hw, stop wq and call dbc->ops->stop() */
	xhci_dbc_stop(dbc);

	/* remove sysfs files */
	sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);

	kfree(dbc);
}

int xhci_create_dbc_dev(struct xhci_hcd *xhci)
{
	struct device *dev;
	void __iomem *base;
	int ret;
	int dbc_cap_offs;

	/* create all parameters needed resembling a dbc device */
	dev = xhci_to_hcd(xhci)->self.controller;
	base = &xhci->cap_regs->hc_capbase;

	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	/* already allocated and in use */
	if (xhci->dbc)
		return -EBUSY;

	ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);

	return ret;
}

void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
{
	unsigned long flags;

	if (!xhci->dbc)
		return;

	xhci_dbc_tty_remove(xhci->dbc);
	spin_lock_irqsave(&xhci->lock, flags);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

#ifdef CONFIG_PM
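/* Stop the DbC for system suspend; note whether it must be restarted on resume. */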
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(dbc);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int ret = 0;
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(dbc);
	}

	return ret;
}
#endif /* CONFIG_PM */

int xhci_dbc_init(void)
{
	return dbc_tty_init();
}

void xhci_dbc_exit(void)
{
	dbc_tty_exit();
}
1298 | |