// SPDX-License-Identifier: GPL-2.0+
/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * https://www.gaisler.com/products/grlib/grip.pdf
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints, each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME	"gr_udc"
#define DRIVER_DESC	"Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;

#define gr_read32(x)	(ioread32be((x)))
#define gr_write32(x, v)	(iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
        ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status)	usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl) \
        ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
         GR_EPCTRL_BUFSZ_SCALER)
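
/*
 * Worked example with hypothetical values (the actual field layout and
 * scaler are defined in gr_udc.h): if the BUFSZ field of epctrl reads 4
 * and GR_EPCTRL_BUFSZ_SCALER is 8, GR_BUFFER_SIZE(epctrl) evaluates to a
 * 32 byte hardware buffer.
 */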

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
        static const char *const names[] = {
                [GR_EP0_DISCONNECT] = "disconnect",
                [GR_EP0_SETUP] = "setup",
                [GR_EP0_IDATA] = "idata",
                [GR_EP0_ODATA] = "odata",
                [GR_EP0_ISTATUS] = "istatus",
                [GR_EP0_OSTATUS] = "ostatus",
                [GR_EP0_STALL] = "stall",
                [GR_EP0_SUSPEND] = "suspend",
        };

        if (state < 0 || state >= ARRAY_SIZE(names))
                return "UNKNOWN";

        return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
                                struct gr_request *req)
{
        int buflen = ep->is_in ? req->req.length : req->req.actual;
        int rowlen = 32;
        int plen = min(rowlen, buflen);

        dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
                (buflen > plen ? " (truncated)" : ""));
        print_hex_dump_debug("   ", DUMP_PREFIX_NONE,
                             rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index, u16 length)
{
        dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
                 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
                                struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */

/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
        u32 epctrl = gr_read32(&ep->regs->epctrl);
        u32 epstat = gr_read32(&ep->regs->epstat);
        int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
        struct gr_request *req;

        seq_printf(seq, "%s:\n", ep->ep.name);
        seq_printf(seq, "  mode = %s\n", gr_modestring[mode]);
        seq_printf(seq, "  halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
        seq_printf(seq, "  disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
        seq_printf(seq, "  valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
        seq_printf(seq, "  dma_start = %d\n", ep->dma_start);
        seq_printf(seq, "  stopped = %d\n", ep->stopped);
        seq_printf(seq, "  wedged = %d\n", ep->wedged);
        seq_printf(seq, "  callback = %d\n", ep->callback);
        seq_printf(seq, "  maxpacket = %d\n", ep->ep.maxpacket);
        seq_printf(seq, "  maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
        seq_printf(seq, "  bytes_per_buffer = %d\n", ep->bytes_per_buffer);
        if (mode == 1 || mode == 3)
                seq_printf(seq, "  nt = %d\n",
                           (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);

        seq_printf(seq, "  Buffer 0: %s %s%d\n",
                   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
                   epstat & GR_EPSTAT_BS ? " " : "selected ",
                   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
        seq_printf(seq, "  Buffer 1: %s %s%d\n",
                   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
                   epstat & GR_EPSTAT_BS ? "selected " : " ",
                   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

        if (list_empty(&ep->queue)) {
                seq_puts(seq, "  Queue: empty\n\n");
                return;
        }

        seq_puts(seq, "  Queue:\n");
        list_for_each_entry(req, &ep->queue, queue) {
                struct gr_dma_desc *desc;
                struct gr_dma_desc *next;

                seq_printf(seq, "    0x%p: 0x%p %d %d\n", req,
                           &req->req.buf, req->req.actual, req->req.length);

                next = req->first_desc;
                do {
                        desc = next;
                        next = desc->next_desc;
                        seq_printf(seq, "    %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
                                   desc == req->curr_desc ? 'c' : ' ',
                                   desc, desc->paddr, desc->ctrl, desc->data);
                } while (desc != req->last_desc);
        }
        seq_puts(seq, "\n");
}

static int gr_dfs_show(struct seq_file *seq, void *v)
{
        struct gr_udc *dev = seq->private;
        u32 control = gr_read32(&dev->regs->control);
        u32 status = gr_read32(&dev->regs->status);
        struct gr_ep *ep;

        seq_printf(seq, "usb state = %s\n",
                   usb_state_string(dev->gadget.state));
        seq_printf(seq, "address = %d\n",
                   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
        seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
        seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
        seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
        seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
        seq_printf(seq, "test_mode = %d\n", dev->test_mode);
        seq_puts(seq, "\n");

        list_for_each_entry(ep, &dev->ep_list, ep_list)
                gr_seq_ep_show(seq, ep);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(gr_dfs);

static void gr_dfs_create(struct gr_udc *dev)
{
        const char *name = "gr_udc_state";
        struct dentry *root;

        root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
        debugfs_create_file(name, 0444, root, dev, &gr_dfs_fops);
}
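
/*
 * With the usual debugfs mount point, the file created above should show
 * up as /sys/kernel/debug/usb/<device name>/gr_udc_state (usb_debug_root
 * being the "usb" directory in debugfs).
 */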

static void gr_dfs_delete(struct gr_udc *dev)
{
        debugfs_lookup_and_remove(dev_name(dev->dev), usb_debug_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */

/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
        dma_addr_t paddr;
        struct gr_dma_desc *dma_desc;

        dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
        if (!dma_desc) {
                dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
                return NULL;
        }

        dma_desc->paddr = paddr;

        return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
                                    struct gr_dma_desc *desc)
{
        dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
        struct gr_dma_desc *desc;
        struct gr_dma_desc *next;

        next = req->first_desc;
        if (!next)
                return;

        do {
                desc = next;
                next = desc->next_desc;
                gr_free_dma_desc(dev, desc);
        } while (desc != req->last_desc);

        req->first_desc = NULL;
        req->curr_desc = NULL;
        req->last_desc = NULL;
}
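
/*
 * Note that the walk above stops once last_desc has been freed: the
 * next_desc pointer of the final descriptor is read but never followed,
 * so the chain needs no NULL termination.
 */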

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function/
 * setup packet handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
                              int status)
        __releases(&dev->lock)
        __acquires(&dev->lock)
{
        struct gr_udc *dev;

        list_del_init(&req->queue);

        if (likely(req->req.status == -EINPROGRESS))
                req->req.status = status;
        else
                status = req->req.status;

        dev = ep->dev;
        usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
        gr_free_dma_desc_chain(dev, req);

        if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
                req->req.actual = req->req.length;
        } else if (req->oddlen && req->req.actual > req->evenlen) {
                /*
                 * Copy to user buffer in this case where length was not evenly
                 * divisible by ep->ep.maxpacket and the last descriptor was
                 * actually used.
                 */
                char *buftail = ((char *)req->req.buf + req->evenlen);

                memcpy(buftail, ep->tailbuf, req->oddlen);

                if (req->req.actual > req->req.length) {
                        /* We got more data than was requested */
                        dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
                                ep->ep.name);
                        gr_dbgprint_request("OVFL", ep, req);
                        req->req.status = -EOVERFLOW;
                }
        }

        if (!status) {
                if (ep->is_in)
                        gr_dbgprint_request("SENT", ep, req);
                else
                        gr_dbgprint_request("RECV", ep, req);
        }

        /* Prevent changes to ep->queue during callback */
        ep->callback = 1;
        if (req == dev->ep0reqo && !status) {
                if (req->setup)
                        gr_ep0_setup(dev, req);
                else
                        dev_err(dev->dev,
                                "Unexpected non setup packet on ep0out\n");
        } else if (req->req.complete) {
                spin_unlock(&dev->lock);

                usb_gadget_giveback_request(&ep->ep, &req->req);

                spin_lock(&dev->lock);
        }
        ep->callback = 0;
}

static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
        struct gr_request *req;

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
                return NULL;

        INIT_LIST_HEAD(&req->queue);

        return &req->req;
}

/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
        struct gr_request *req;
        u32 dmactrl;

        if (list_empty(&ep->queue)) {
                ep->dma_start = 0;
                return;
        }

        req = list_first_entry(&ep->queue, struct gr_request, queue);

        /* A descriptor should already have been allocated */
        BUG_ON(!req->curr_desc);

        /*
         * The DMA controller can not handle smaller OUT buffers than
         * ep->ep.maxpacket. It could lead to buffer overruns if an
         * unexpectedly long packet is received. Therefore an internal bounce
         * buffer gets used when such a request gets enabled.
         */
        if (!ep->is_in && req->oddlen)
                req->last_desc->data = ep->tailbuf_paddr;

        wmb(); /* Make sure all is settled before handing it over to DMA */

        /* Set the descriptor pointer in the hardware */
        gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

        /* Announce available descriptors */
        dmactrl = gr_read32(&ep->regs->dmactrl);
        gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

        ep->dma_start = 1;
}

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
        struct gr_request *req;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        gr_finish_request(ep, req, status);
        gr_start_dma(ep); /* Regardless of ep->dma_start */
}
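
/*
 * gr_start_dma() is called unconditionally above: the completion callback
 * invoked from gr_finish_request() may have queued new requests, so DMA
 * must be (re)started for the next request in the queue even though
 * ep->dma_start is still set from the request that just finished.
 */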

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
        u32 dmactrl;

        dmactrl = gr_read32(&ep->regs->dmactrl);
        gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}

/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware can not be instructed to handle
 * a smaller buffer than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
                           dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
        struct gr_dma_desc *desc;

        desc = gr_alloc_dma_desc(ep, gfp_flags);
        if (!desc)
                return -ENOMEM;

        desc->data = data;
        if (ep->is_in)
                desc->ctrl =
                        (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
        else
                desc->ctrl = GR_DESC_OUT_CTRL_IE;

        if (!req->first_desc) {
                req->first_desc = desc;
                req->curr_desc = desc;
        } else {
                req->last_desc->next_desc = desc;
                req->last_desc->next = desc->paddr;
                req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
        }
        req->last_desc = desc;

        return 0;
}
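
/*
 * For example, a request spanning three buffers results in three calls to
 * gr_add_dma_desc(), leaving first_desc -> d2 -> d3 == last_desc, with the
 * hardware addresses linked through the next fields. Which descriptors
 * start out enabled depends on the direction; see the two list setup
 * functions below.
 */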

/*
 * Sets up a chain of struct gr_dma_desc pointing to buffers that together
 * cover req->req.length bytes of the buffer at DMA address req->req.dma for
 * the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
                                  gfp_t gfp_flags)
{
        u16 bytes_left; /* Bytes left to provide descriptors for */
        u16 bytes_used; /* Bytes accommodated for */
        int ret = 0;

        req->first_desc = NULL; /* Signals that no allocation is done yet */
        bytes_left = req->req.length;
        bytes_used = 0;
        while (bytes_left > 0) {
                dma_addr_t start = req->req.dma + bytes_used;
                u16 size = min(bytes_left, ep->bytes_per_buffer);

                if (size < ep->bytes_per_buffer) {
                        /* Prepare using bounce buffer */
                        req->evenlen = req->req.length - bytes_left;
                        req->oddlen = size;
                }

                ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
                if (ret)
                        goto alloc_err;

                bytes_left -= size;
                bytes_used += size;
        }

        req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

        return 0;

alloc_err:
        gr_free_dma_desc_chain(ep->dev, req);

        return ret;
}

/*
 * Sets up a chain of struct gr_dma_desc pointing to buffers that together
 * cover req->req.length bytes of the buffer at DMA address req->req.dma for
 * the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware
 * splits this up into several payloads automatically. Moreover,
 * ep->bytes_per_buffer is always set to a multiple of the maximum payload
 * (restricted to the valid number of maximum payloads during high bandwidth
 * isochronous or interrupt transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been
 * pushed to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
                                 gfp_t gfp_flags)
{
        u16 bytes_left; /* Bytes left in req to provide descriptors for */
        u16 bytes_used; /* Bytes in req accommodated for */
        int ret = 0;

        req->first_desc = NULL; /* Signals that no allocation is done yet */
        bytes_left = req->req.length;
        bytes_used = 0;
        do { /* Allow for zero length packets */
                dma_addr_t start = req->req.dma + bytes_used;
                u16 size = min(bytes_left, ep->bytes_per_buffer);

                ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
                if (ret)
                        goto alloc_err;

                bytes_left -= size;
                bytes_used += size;
        } while (bytes_left > 0);

        /*
         * Send an extra zero length packet to indicate that no more data is
         * available when req->req.zero is set and the data length is an even
         * multiple of ep->ep.maxpacket.
         */
        if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
                ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
                if (ret)
                        goto alloc_err;
        }

        /*
         * For IN packets we only want to know when the last packet has been
         * transmitted (not just put into internal buffers).
         */
        req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

        return 0;

alloc_err:
        gr_free_dma_desc_chain(ep->dev, req);

        return ret;
}
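
/*
 * Worked example (assuming bytes_per_buffer equals maxpacket): an IN
 * request of 2 * maxpacket bytes with req.zero set ends up as three
 * descriptors - two full buffers plus a zero length one - and only the
 * final descriptor carries GR_DESC_IN_CTRL_PI.
 */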

/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
        struct gr_udc *dev = ep->dev;
        int ret;

        if (unlikely(!ep->ep.desc && ep->num != 0)) {
                dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
                return -EINVAL;
        }

        if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
                dev_err(dev->dev,
                        "Invalid request for %s: buf=%p list_empty=%d\n",
                        ep->ep.name, req->req.buf, list_empty(&req->queue));
                return -EINVAL;
        }

        if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
                dev_err(dev->dev, "-ESHUTDOWN");
                return -ESHUTDOWN;
        }

        /* Can't touch registers when suspended */
        if (dev->ep0state == GR_EP0_SUSPEND) {
                dev_err(dev->dev, "-EBUSY");
                return -EBUSY;
        }

        /* Set up DMA mapping in case the caller didn't */
        ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
        if (ret) {
                dev_err(dev->dev, "usb_gadget_map_request");
                return ret;
        }

        if (ep->is_in)
                ret = gr_setup_in_desc_list(ep, req, gfp_flags);
        else
                ret = gr_setup_out_desc_list(ep, req, gfp_flags);
        if (ret)
                return ret;

        req->req.status = -EINPROGRESS;
        req->req.actual = 0;
        list_add_tail(&req->queue, &ep->queue);

        /* Start DMA if not started, otherwise interrupt handler handles it */
        if (!ep->dma_start && likely(!ep->stopped))
                gr_start_dma(ep);

        return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
                               gfp_t gfp_flags)
{
        if (ep->is_in)
                gr_dbgprint_request("RESP", ep, req);

        return gr_queue(ep, req, gfp_flags);
}

/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
        struct gr_request *req;

        ep->stopped = 1;
        ep->dma_start = 0;
        gr_abort_dma(ep);

        while (!list_empty(&ep->queue)) {
                req = list_first_entry(&ep->queue, struct gr_request, queue);
                gr_finish_request(ep, req, -ESHUTDOWN);
        }
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
        gr_write32(&ep->regs->epctrl, 0);
        gr_write32(&ep->regs->dmactrl, 0);

        ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
        ep->ep.desc = NULL;
        ep->stopped = 1;
        ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
        u32 epctrl;

        epctrl = gr_read32(&dev->epo[0].regs->epctrl);
        gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
        epctrl = gr_read32(&dev->epi[0].regs->epctrl);
        gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

        dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
        u32 epctrl;
        int retval = 0;

        if (ep->num && !ep->ep.desc)
                return -EINVAL;

        if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
                return -EOPNOTSUPP;

        /* Never actually halt ep0, and therefore never clear halt for ep0 */
        if (!ep->num) {
                if (halt && !fromhost) {
                        /* ep0 halt from gadget - generate protocol stall */
                        gr_control_stall(ep->dev);
                        dev_dbg(ep->dev->dev, "EP: stall ep0\n");
                        return 0;
                }
                return -EINVAL;
        }

        dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
                (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

        epctrl = gr_read32(&ep->regs->epctrl);
        if (halt) {
                /* Set HALT */
                gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
                ep->stopped = 1;
                if (wedge)
                        ep->wedged = 1;
        } else {
                gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
                ep->stopped = 0;
                ep->wedged = 0;

                /* Things might have been queued up in the meantime */
                if (!ep->dma_start)
                        gr_start_dma(ep);
        }

        return retval;
}

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
        if (dev->ep0state != value)
                dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
                         gr_ep0state_string(value));
        dev->ep0state = value;
}

/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
        gr_write32(&dev->regs->control, 0);
        wmb(); /* Make sure that we do not deny one of our interrupts */
        dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
        struct gr_ep *ep;

        list_for_each_entry(ep, &dev->ep_list, ep_list)
                gr_ep_nuke(ep);

        gr_disable_interrupts_and_pullup(dev);

        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
        usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}

/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
                                     struct usb_request *_req)
{
        struct gr_ep *ep;
        struct gr_udc *dev;
        u32 control;

        ep = container_of(_ep, struct gr_ep, ep);
        dev = ep->dev;

        spin_lock(&dev->lock);

        control = gr_read32(&dev->regs->control);
        control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
        gr_write32(&dev->regs->control, control);

        spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
        /* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
                          void (*complete)(struct usb_ep *ep,
                                           struct usb_request *req))
{
        u8 *reqbuf = dev->ep0reqi->req.buf;
        int status;
        int i;

        for (i = 0; i < length; i++)
                reqbuf[i] = buf[i];
        dev->ep0reqi->req.length = length;
        dev->ep0reqi->req.complete = complete;

        status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
        if (status < 0)
                dev_err(dev->dev,
                        "Could not queue ep0in setup response: %d\n", status);

        return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
        __le16 le_response = cpu_to_le16(response);

        return gr_ep0_respond(dev, (u8 *)&le_response, 2,
                              gr_ep0_dummy_complete);
}

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
        return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. It writes the new
 * address to the control register, which the hardware updates internally
 * when the next IN packet is ACKed.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
        u32 control;

        control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
        control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
        control |= GR_CONTROL_SU;
        gr_write32(&dev->regs->control, control);
}
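
/*
 * The deferred address update matters for the status stage:
 * gr_device_request() below calls gr_set_address() first and then queues
 * the status ZLP, so the device keeps answering on the old address until
 * that IN transaction is ACKed, as USB requires.
 */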

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
                             u16 value, u16 index)
{
        u16 response;
        u8 test;

        switch (request) {
        case USB_REQ_SET_ADDRESS:
                dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
                gr_set_address(dev, value & 0xff);
                if (value)
                        usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
                else
                        usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
                return gr_ep0_respond_empty(dev);

        case USB_REQ_GET_STATUS:
                /* Self powered | remote wakeup */
                response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
                return gr_ep0_respond_u16(dev, response);

        case USB_REQ_SET_FEATURE:
                switch (value) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        /* Allow remote wakeup */
                        dev->remote_wakeup = 1;
                        return gr_ep0_respond_empty(dev);

                case USB_DEVICE_TEST_MODE:
                        /* The hardware does not support USB_TEST_FORCE_ENABLE */
                        test = index >> 8;
                        if (test >= USB_TEST_J && test <= USB_TEST_PACKET) {
                                dev->test_mode = test;
                                return gr_ep0_respond(dev, NULL, 0,
                                                      gr_ep0_testmode_complete);
                        }
                }
                break;

        case USB_REQ_CLEAR_FEATURE:
                switch (value) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        /* Disallow remote wakeup */
                        dev->remote_wakeup = 0;
                        return gr_ep0_respond_empty(dev);
                }
                break;
        }

        return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
                                u16 value, u16 index)
{
        if (dev->gadget.state != USB_STATE_CONFIGURED)
                return -1;

        /*
         * Should return STALL for invalid interfaces, but the UDC driver does
         * not know anything about that. However, many gadget drivers do not
         * handle GET_STATUS so we need to take care of that.
         */

        switch (request) {
        case USB_REQ_GET_STATUS:
                return gr_ep0_respond_u16(dev, 0x0000);

        case USB_REQ_SET_FEATURE:
        case USB_REQ_CLEAR_FEATURE:
                /*
                 * No possible valid standard requests. Still let gadget
                 * drivers have a go at it.
                 */
                break;
        }

        return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index)
{
        struct gr_ep *ep;
        int status;
        int halted;
        u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
        u8 is_in = index & USB_ENDPOINT_DIR_MASK;

        if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
                return -1;

        if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
                return -1;

        ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

        switch (request) {
        case USB_REQ_GET_STATUS:
                halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
                return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

        case USB_REQ_SET_FEATURE:
                switch (value) {
                case USB_ENDPOINT_HALT:
                        status = gr_ep_halt_wedge(ep, 1, 0, 1);
                        if (status >= 0)
                                status = gr_ep0_respond_empty(dev);
                        return status;
                }
                break;

        case USB_REQ_CLEAR_FEATURE:
                switch (value) {
                case USB_ENDPOINT_HALT:
                        if (ep->wedged)
                                return -1;
                        status = gr_ep_halt_wedge(ep, 0, 0, 1);
                        if (status >= 0)
                                status = gr_ep0_respond_empty(dev);
                        return status;
                }
                break;
        }

        return 1; /* Delegate the rest */
}

/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
        int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

        if (ret)
                dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
                        ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
        __releases(&dev->lock)
        __acquires(&dev->lock)
{
        union {
                struct usb_ctrlrequest ctrl;
                u8 raw[8];
                u32 word[2];
        } u;
        u8 type;
        u8 request;
        u16 value;
        u16 index;
        u16 length;
        int i;
        int status;

        /* Restore from ep0 halt */
        if (dev->ep0state == GR_EP0_STALL) {
                gr_set_ep0state(dev, GR_EP0_SETUP);
                if (!req->req.actual)
                        goto out;
        }

        if (dev->ep0state == GR_EP0_ISTATUS) {
                gr_set_ep0state(dev, GR_EP0_SETUP);
                if (req->req.actual > 0)
                        dev_dbg(dev->dev,
                                "Unexpected setup packet at state %s\n",
                                gr_ep0state_string(GR_EP0_ISTATUS));
                else
                        goto out; /* Got expected ZLP */
        } else if (dev->ep0state != GR_EP0_SETUP) {
                dev_info(dev->dev,
                         "Unexpected ep0out request at state %s - stalling\n",
                         gr_ep0state_string(dev->ep0state));
                gr_control_stall(dev);
                gr_set_ep0state(dev, GR_EP0_SETUP);
                goto out;
        } else if (!req->req.actual) {
                dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
                        gr_ep0state_string(dev->ep0state));
                goto out;
        }

        /* Handle SETUP packet */
        for (i = 0; i < req->req.actual; i++)
                u.raw[i] = ((u8 *)req->req.buf)[i];

        type = u.ctrl.bRequestType;
        request = u.ctrl.bRequest;
        value = le16_to_cpu(u.ctrl.wValue);
        index = le16_to_cpu(u.ctrl.wIndex);
        length = le16_to_cpu(u.ctrl.wLength);

        gr_dbgprint_devreq(dev, type, request, value, index, length);

        /* Check for data stage */
        if (length) {
                if (type & USB_DIR_IN)
                        gr_set_ep0state(dev, GR_EP0_IDATA);
                else
                        gr_set_ep0state(dev, GR_EP0_ODATA);
        }

        status = 1; /* Positive status flags delegation */
        if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
                switch (type & USB_RECIP_MASK) {
                case USB_RECIP_DEVICE:
                        status = gr_device_request(dev, type, request,
                                                   value, index);
                        break;
                case USB_RECIP_ENDPOINT:
                        status = gr_endpoint_request(dev, type, request,
                                                     value, index);
                        break;
                case USB_RECIP_INTERFACE:
                        status = gr_interface_request(dev, type, request,
                                                      value, index);
                        break;
                }
        }

        if (status > 0) {
                spin_unlock(&dev->lock);

                dev_vdbg(dev->dev, "DELEGATE\n");
                status = dev->driver->setup(&dev->gadget, &u.ctrl);

                spin_lock(&dev->lock);
        }

        /* Generate STALL on both ep0out and ep0in if requested */
        if (unlikely(status < 0)) {
                dev_vdbg(dev->dev, "STALL\n");
                gr_control_stall(dev);
        }

        if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
            request == USB_REQ_SET_CONFIGURATION) {
                if (!value) {
                        dev_dbg(dev->dev, "STATUS: deconfigured\n");
                        usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
                } else if (status >= 0) {
                        /* Not configured unless gadget OKs it */
                        dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
                        usb_gadget_set_state(&dev->gadget,
                                             USB_STATE_CONFIGURED);
                }
        }

        /* Get ready for next stage */
        if (dev->ep0state == GR_EP0_ODATA)
                gr_set_ep0state(dev, GR_EP0_OSTATUS);
        else if (dev->ep0state == GR_EP0_IDATA)
                gr_set_ep0state(dev, GR_EP0_ISTATUS);
        else
                gr_set_ep0state(dev, GR_EP0_SETUP);

out:
        gr_ep0out_requeue(dev);
}
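
/*
 * Summary of the ep0 transitions driven by the function above (derived
 * from this driver's code, not from the GRUSBDC manual):
 *
 *   SETUP --(wLength > 0, IN)---> IDATA --> ISTATUS --(ZLP out)--> SETUP
 *   SETUP --(wLength > 0, OUT)--> ODATA --> OSTATUS --(short/full)-> SETUP
 *   SETUP --(wLength == 0)------> SETUP  (status ZLP queued on ep0in)
 *   any -----(request failed)---> STALL --> SETUP
 *
 * The OSTATUS -> SETUP step happens in gr_handle_out_ep() below, the
 * ISTATUS -> SETUP and STALL -> SETUP steps at the top of gr_ep0_setup()
 * itself. GR_EP0_SUSPEND and GR_EP0_DISCONNECT are entered from the
 * status change handling further down.
 */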

/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
        u32 control;

        dev->gadget.speed = GR_SPEED(status);
        usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

        /* Turn on full interrupts and pullup */
        control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
                   GR_CONTROL_SP | GR_CONTROL_EP);
        gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
        u32 status;

        dev->irq_enabled = 1;
        wmb(); /* Make sure we do not ignore an interrupt */
        gr_write32(&dev->regs->control, GR_CONTROL_VI);

        /* Take care of the case we are already plugged in at this point */
        status = gr_read32(&dev->regs->status);
        if (status & GR_STATUS_VB)
                gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
        gr_stop_activity(dev);

        /* Report disconnect */
        if (dev->driver && dev->driver->disconnect) {
                spin_unlock(&dev->lock);

                dev->driver->disconnect(&dev->gadget);

                spin_lock(&dev->lock);
        }

        gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
        gr_set_address(dev, 0);
        gr_set_ep0state(dev, GR_EP0_SETUP);
        usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
        dev->gadget.speed = GR_SPEED(status);

        gr_ep_nuke(&dev->epo[0]);
        gr_ep_nuke(&dev->epi[0]);
        dev->epo[0].stopped = 0;
        dev->epi[0].stopped = 0;
        gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
        struct gr_request *req;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        if (!req->last_desc)
                return 0;

        if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
                return 0; /* Not put in hardware buffers yet */

        if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
                return 0; /* Not transmitted yet, still in hardware buffers */

        /* Write complete */
        gr_dma_advance(ep, 0);

        return 1;
}
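
/*
 * Note the two-step completion check above: the last descriptor must have
 * been consumed by the DMA engine (EN cleared) and both hardware buffers
 * must have drained onto the bus (B0/B1 cleared) before an IN request is
 * considered written.
 */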

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
        u32 ep_dmactrl;
        u32 ctrl;
        u16 len;
        struct gr_request *req;
        struct gr_udc *dev = ep->dev;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        if (!req->curr_desc)
                return 0;

        ctrl = READ_ONCE(req->curr_desc->ctrl);
        if (ctrl & GR_DESC_OUT_CTRL_EN)
                return 0; /* Not received yet */

        /* Read complete */
        len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
        req->req.actual += len;
        if (ctrl & GR_DESC_OUT_CTRL_SE)
                req->setup = 1;

        if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
                /* Short packet or >= expected size - we are done */

                if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
                        /*
                         * Send a status stage ZLP to ack the DATA stage in the
                         * OUT direction. This needs to be done before
                         * gr_dma_advance as that can lead to a call to
                         * ep0_setup that can change dev->ep0state.
                         */
                        gr_ep0_respond_empty(dev);
                        gr_set_ep0state(dev, GR_EP0_SETUP);
                }

                gr_dma_advance(ep, 0);
        } else {
                /* Not done yet. Enable the next descriptor to receive more. */
                req->curr_desc = req->curr_desc->next_desc;
                req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

                ep_dmactrl = gr_read32(&ep->regs->dmactrl);
                gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
        }

        return 1;
}

/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
        u32 status = gr_read32(&dev->regs->status);
        int handled = 0;
        int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
                         dev->gadget.state == USB_STATE_ATTACHED);

        /* VBUS valid detected */
        if (!powstate && (status & GR_STATUS_VB)) {
                dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
                gr_vbus_connected(dev, status);
                handled = 1;
        }

        /* Disconnect */
        if (powstate && !(status & GR_STATUS_VB)) {
                dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
                gr_vbus_disconnected(dev);
                handled = 1;
        }

        /* USB reset detected */
        if (status & GR_STATUS_UR) {
                dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
                        GR_SPEED_STR(status));
                gr_write32(&dev->regs->status, GR_STATUS_UR);
                gr_udc_usbreset(dev, status);
                handled = 1;
        }

        /* Speed change */
        if (dev->gadget.speed != GR_SPEED(status)) {
                dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
                        GR_SPEED_STR(status));
                dev->gadget.speed = GR_SPEED(status);
                handled = 1;
        }

        /* Going into suspend */
        if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
                dev_dbg(dev->dev, "STATUS: USB suspend\n");
                gr_set_ep0state(dev, GR_EP0_SUSPEND);
                dev->suspended_from = dev->gadget.state;
                usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

                if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
                    dev->driver && dev->driver->suspend) {
                        spin_unlock(&dev->lock);

                        dev->driver->suspend(&dev->gadget);

                        spin_lock(&dev->lock);
                }
                handled = 1;
        }

        /* Coming out of suspend */
        if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
                dev_dbg(dev->dev, "STATUS: USB resume\n");
                if (dev->suspended_from == USB_STATE_POWERED)
                        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
                else
                        gr_set_ep0state(dev, GR_EP0_SETUP);
                usb_gadget_set_state(&dev->gadget, dev->suspended_from);

                if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
                    dev->driver && dev->driver->resume) {
                        spin_unlock(&dev->lock);

                        dev->driver->resume(&dev->gadget);

                        spin_lock(&dev->lock);
                }
                handled = 1;
        }

        return handled;
}

/* Non-interrupt context irq handler */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
        struct gr_udc *dev = _dev;
        struct gr_ep *ep;
        int handled = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);

        if (!dev->irq_enabled)
                goto out;

        /*
         * Check IN ep interrupts. We check these before the OUT eps because
         * some gadgets reuse a request that might still be outstanding and
         * then needs to be completed first (mainly setup requests).
         */
        for (i = 0; i < dev->nepi; i++) {
                ep = &dev->epi[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_in_ep(ep) || handled;
        }

        /* Check OUT ep interrupts */
        for (i = 0; i < dev->nepo; i++) {
                ep = &dev->epo[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_out_ep(ep) || handled;
        }

        /* Check status interrupts */
        handled = gr_handle_state_changes(dev) || handled;

        /*
         * Check AMBA DMA errors. Only check if we didn't find anything else to
         * handle because this shouldn't happen if we did everything right.
         */
        if (!handled) {
                list_for_each_entry(ep, &dev->ep_list, ep_list) {
                        if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
                                dev_err(dev->dev,
                                        "AMBA Error occurred for %s\n",
                                        ep->ep.name);
                                handled = 1;
                        }
                }
        }

out:
        spin_unlock_irqrestore(&dev->lock, flags);

        return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Interrupt context irq handler */
static irqreturn_t gr_irq(int irq, void *_dev)
{
        struct gr_udc *dev = _dev;

        if (!dev->irq_enabled)
                return IRQ_NONE;

        return IRQ_WAKE_THREAD;
}
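
/*
 * The pair above is evidently intended for request_threaded_irq() (the
 * probe code is outside this excerpt): gr_irq() runs in hard irq context
 * and only checks whether the core has its interrupts enabled, while
 * gr_irq_handler() does the actual work in thread context under
 * dev->lock.
 */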

/* ---------------------------------------------------------------------- */
/* USB ep ops */

/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_enable(struct usb_ep *_ep,
                        const struct usb_endpoint_descriptor *desc)
{
        struct gr_udc *dev;
        struct gr_ep *ep;
        u8 mode;
        u8 nt;
        u16 max;
        u16 buffer_size = 0;
        u32 epctrl;

        ep = container_of(_ep, struct gr_ep, ep);
        if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;

        dev = ep->dev;

        /* 'ep0' IN and OUT are reserved */
        if (ep == &dev->epo[0] || ep == &dev->epi[0])
                return -EINVAL;

        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        /* Make sure we are clear for enabling */
        epctrl = gr_read32(&ep->regs->epctrl);
        if (epctrl & GR_EPCTRL_EV)
                return -EBUSY;

        /* Check that directions match */
        if (!ep->is_in != !usb_endpoint_dir_in(desc))
                return -EINVAL;

        /* Check ep num */
        if ((!ep->is_in && ep->num >= dev->nepo) ||
            (ep->is_in && ep->num >= dev->nepi))
                return -EINVAL;

        if (usb_endpoint_xfer_control(desc)) {
                mode = 0;
        } else if (usb_endpoint_xfer_isoc(desc)) {
                mode = 1;
        } else if (usb_endpoint_xfer_bulk(desc)) {
                mode = 2;
        } else if (usb_endpoint_xfer_int(desc)) {
                mode = 3;
        } else {
                dev_err(dev->dev, "Unknown transfer type for %s\n",
                        ep->ep.name);
                return -EINVAL;
        }

        /*
         * Bits 10-0 set the max payload. 12-11 set the number of
         * additional transactions.
         */
        max = usb_endpoint_maxp(desc);
        nt = usb_endpoint_maxp_mult(desc) - 1;
        buffer_size = GR_BUFFER_SIZE(epctrl);
        if (nt && (mode == 0 || mode == 2)) {
                dev_err(dev->dev,
                        "%s mode: multiple trans./microframe not valid\n",
                        (mode == 2 ? "Bulk" : "Control"));
                return -EINVAL;
        } else if (nt == 0x3) {
                dev_err(dev->dev,
                        "Invalid value 0x3 for additional trans./microframe\n");
                return -EINVAL;
        } else if ((nt + 1) * max > buffer_size) {
                dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
                        buffer_size, (nt + 1), max);
                return -EINVAL;
        } else if (max == 0) {
                dev_err(dev->dev, "Max payload cannot be set to 0\n");
                return -EINVAL;
        } else if (max > ep->ep.maxpacket_limit) {
                dev_err(dev->dev, "Requested max payload %d > limit %d\n",
                        max, ep->ep.maxpacket_limit);
                return -EINVAL;
        }

        spin_lock(&ep->dev->lock);

        if (!ep->stopped) {
                spin_unlock(&ep->dev->lock);
                return -EBUSY;
        }

        ep->stopped = 0;
        ep->wedged = 0;
        ep->ep.desc = desc;
        ep->ep.maxpacket = max;
        ep->dma_start = 0;

        if (nt) {
                /*
                 * Maximum possible size of all payloads in one microframe
                 * regardless of direction when using high-bandwidth mode.
                 */
                ep->bytes_per_buffer = (nt + 1) * max;
        } else if (ep->is_in) {
                /*
                 * The biggest multiple of maximum packet size that fits into
                 * the buffer. The hardware will split up into many packets in
                 * the IN direction.
                 */
                ep->bytes_per_buffer = (buffer_size / max) * max;
        } else {
                /*
                 * Only single packets will be placed in the buffers in the
                 * OUT direction.
                 */
                ep->bytes_per_buffer = max;
        }

        epctrl = (max << GR_EPCTRL_MAXPL_POS)
                | (nt << GR_EPCTRL_NT_POS)
                | (mode << GR_EPCTRL_TT_POS)
                | GR_EPCTRL_EV;
        if (ep->is_in)
                epctrl |= GR_EPCTRL_PI;
        gr_write32(&ep->regs->epctrl, epctrl);

        gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

        spin_unlock(&ep->dev->lock);

        dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
                ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
        return 0;
}
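
/*
 * Illustrative bytes_per_buffer outcomes (values hypothetical): a
 * high-bandwidth isochronous endpoint with max = 1024 and nt = 2 gets
 * 3 * 1024, provided the hardware buffer is at least that large; a bulk
 * IN endpoint with max = 512 and a 1024 byte hardware buffer gets 1024;
 * a bulk OUT endpoint always gets max.
 */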
1597 | |
1598 | /* Disable endpoint. Not for ep0in and ep0out that are handled separately. */ |
1599 | static int gr_ep_disable(struct usb_ep *_ep) |
1600 | { |
1601 | struct gr_ep *ep; |
1602 | struct gr_udc *dev; |
1603 | unsigned long flags; |
1604 | |
1605 | ep = container_of(_ep, struct gr_ep, ep); |
1606 | if (!_ep || !ep->ep.desc) |
1607 | return -ENODEV; |
1608 | |
1609 | dev = ep->dev; |
1610 | |
1611 | /* 'ep0' IN and OUT are reserved */ |
1612 | if (ep == &dev->epo[0] || ep == &dev->epi[0]) |
1613 | return -EINVAL; |
1614 | |
1615 | if (dev->ep0state == GR_EP0_SUSPEND) |
1616 | return -EBUSY; |
1617 | |
1618 | dev_dbg(ep->dev->dev, "EP: disable %s\n" , ep->ep.name); |
1619 | |
1620 | spin_lock_irqsave(&dev->lock, flags); |
1621 | |
1622 | gr_ep_nuke(ep); |
1623 | gr_ep_reset(ep); |
1624 | ep->ep.desc = NULL; |
1625 | |
1626 | spin_unlock_irqrestore(lock: &dev->lock, flags); |
1627 | |
1628 | return 0; |
1629 | } |
1630 | |
1631 | /* |
1632 | * Frees a request, but not any DMA buffers associated with it |
1633 | * (gr_finish_request should already have taken care of that). |
1634 | */ |
1635 | static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req) |
1636 | { |
1637 | struct gr_request *req; |
1638 | |
1639 | if (!_ep || !_req) |
1640 | return; |
1641 | req = container_of(_req, struct gr_request, req); |
1642 | |
1643 | /* Leads to memory leak */ |
1644 | WARN(!list_empty(&req->queue), |
1645 | "request not dequeued properly before freeing\n" ); |
1646 | |
1647 | kfree(objp: req); |
1648 | } |
1649 | |
1650 | /* Queue a request from the gadget */ |
1651 | static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req, |
1652 | gfp_t gfp_flags) |
1653 | { |
1654 | struct gr_ep *ep; |
1655 | struct gr_request *req; |
1656 | struct gr_udc *dev; |
1657 | int ret; |
1658 | |
1659 | if (unlikely(!_ep || !_req)) |
1660 | return -EINVAL; |
1661 | |
1662 | ep = container_of(_ep, struct gr_ep, ep); |
1663 | req = container_of(_req, struct gr_request, req); |
1664 | dev = ep->dev; |
1665 | |
1666 | spin_lock(lock: &ep->dev->lock); |
1667 | |
1668 | /* |
1669 | * The ep0 pointer in the gadget struct is used both for ep0in and |
1670 | * ep0out. In a data stage in the out direction ep0out needs to be used |
1671 | * instead of the default ep0in. Completion functions might use |
1672 | * driver_data, so that needs to be copied as well. |
1673 | */ |
1674 | if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) { |
1675 | ep = &dev->epo[0]; |
1676 | ep->ep.driver_data = dev->epi[0].ep.driver_data; |
1677 | } |
1678 | |
1679 | if (ep->is_in) |
1680 | gr_dbgprint_request(str: "EXTERN" , ep, req); |
1681 | |
1682 | ret = gr_queue(ep, req, GFP_ATOMIC); |
1683 | |
1684 | spin_unlock(lock: &ep->dev->lock); |
1685 | |
1686 | return ret; |
1687 | } |

/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req = NULL, *iter;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}
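
/*
 * Note that a successfully dequeued request completes with status
 * -ECONNRESET, which is the gadget API convention for requests removed via
 * usb_ep_dequeue() rather than finished by the hardware.
 */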

/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	int ret;
	struct gr_ep *ep;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}
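
/*
 * Halt vs wedge: a halted endpoint STALLs until the condition is cleared,
 * which the host can do with CLEAR_FEATURE(ENDPOINT_HALT). A wedged endpoint
 * keeps STALLing even across such host requests and is only released when
 * the gadget driver itself clears the halt.
 */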

/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 bytes = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	epstat = gr_read32(&ep->regs->epstat);

	if (epstat & GR_EPSTAT_B0)
		bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return bytes;
}
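
/*
 * The core double-buffers each endpoint, so the total is the sum of the byte
 * counts of the two hardware buffers, each counted only when its valid bit
 * (B0/B1) is set. For example, with both buffers holding a full 512-byte
 * packet this returns 1024.
 */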

/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);

	epctrl = gr_read32(&ep->regs->epctrl);
	epctrl |= GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}

static const struct usb_ep_ops gr_ep_ops = {
	.enable = gr_ep_enable,
	.disable = gr_ep_disable,

	.alloc_request = gr_alloc_request,
	.free_request = gr_free_request,

	.queue = gr_queue_ext,
	.dequeue = gr_dequeue,

	.set_halt = gr_set_halt,
	.set_wedge = gr_set_wedge,
	.fifo_status = gr_fifo_status,
	.fifo_flush = gr_fifo_flush,
};
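
/*
 * This ops table is attached to every endpoint in gr_ep_init() via
 * ep->ep.ops; the gadget core then reaches the handlers above through the
 * usb_ep_*() wrappers.
 */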

/* ---------------------------------------------------------------------- */
/* USB Gadget ops */

static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}

static int gr_wakeup(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	/* Remote wakeup feature not enabled by host */
	if (!dev->remote_wakeup)
		return -EINVAL;

	spin_lock(&dev->lock);

	gr_write32(&dev->regs->control,
		   gr_read32(&dev->regs->control) | GR_CONTROL_RW);

	spin_unlock(&dev->lock);

	return 0;
}
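
/*
 * dev->remote_wakeup is expected to mirror the host's
 * SET_FEATURE(DEVICE_REMOTE_WAKEUP) state, tracked in the driver's SETUP
 * handling; setting GR_CONTROL_RW asks the core to signal resume on the bus.
 */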

static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	if (is_on)
		control |= GR_CONTROL_EP;
	else
		control &= ~GR_CONTROL_EP;
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);

	return 0;
}
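
/*
 * GR_CONTROL_EP controls the soft-connect (D+ pullup): the UDC core calls
 * this from usb_gadget_connect()/usb_gadget_disconnect() so the device can
 * appear on or vanish from the bus without a physical unplug.
 */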

static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static const struct usb_gadget_ops gr_ops = {
	.get_frame = gr_get_frame,
	.wakeup = gr_wakeup,
	.pullup = gr_pullup,
	.udc_start = gr_udc_start,
	.udc_stop = gr_udc_stop,
	/* Other operations not supported */
};

/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};

/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		if (!_req)
			return -ENOMEM;

		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!buf) {
			gr_free_request(&ep->ep, _req);
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;

		ep->ep.caps.type_control = true;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	if (is_in)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;

	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
					 &ep->tailbuf_paddr, GFP_ATOMIC);
	if (!ep->tailbuf)
		return -ENOMEM;

	return 0;
}
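
/*
 * The tailbuf allocated above is, presumably, a per-endpoint bounce buffer:
 * since the DMA engine works in whole hardware buffers, an OUT transfer whose
 * final chunk is smaller than maxpacket can be received there and copied into
 * the request buffer afterwards, instead of risking an overrun of the
 * gadget's buffer.
 */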

/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);

	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}

	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}
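
/*
 * The per-endpoint buffer sizes probed above may be given in the device tree
 * and default to 1024 bytes when a property or index is absent. An
 * illustrative node fragment (values are examples only):
 *
 *	epobufsizes = <1024 512 512>;
 *	epibufsizes = <1024 1024>;
 */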

static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
{
	struct gr_ep *ep;

	if (is_in)
		ep = &dev->epi[num];
	else
		ep = &dev->epo[num];

	if (ep->tailbuf)
		dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
				  ep->tailbuf, ep->tailbuf_paddr);
}

static void gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);
	int i;

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver) {
		dev_err(&pdev->dev,
			"Driver still in use but removing anyhow\n");
		return;
	}

	gr_dfs_delete(dev);
	dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);

	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	for (i = 0; i < dev->nepo; i++)
		gr_ep_remove(dev, i, 0);
	for (i = 0; i < dev->nepi; i++)
		gr_ep_remove(dev, i, 1);
}

static int gr_request_irq(struct gr_udc *dev, int irq)
{
	return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
					 IRQF_SHARED, driver_name, dev);
}
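
/*
 * All core interrupt lines share the same handler pair: gr_irq runs in hard
 * interrupt context (typically just deciding whether the core raised the
 * interrupt and waking the thread), while gr_irq_handler does the actual
 * event processing in a threaded handler. IRQF_SHARED allows the lines to be
 * shared with other devices.
 */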

static int gr_probe(struct platform_device *pdev)
{
	struct gr_udc *dev;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &pdev->dev;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0)
		return dev->irq;

	/* Some core configurations have separate irqs for IN and OUT events */
	dev->irqi = platform_get_irq(pdev, 1);
	if (dev->irqi > 0) {
		dev->irqo = platform_get_irq(pdev, 2);
		if (dev->irqo < 0)
			return dev->irqo;
	} else {
		dev->irqi = 0;
	}

	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	platform_set_drvdata(pdev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;

	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool\n");
		return -ENOMEM;
	}

	/* Inside lock so that no gadget can use this udc until probe is done */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc\n");
		goto out;
	}
	dev->added = 1;

	spin_lock(&dev->lock);

	retval = gr_udc_init(dev);
	if (retval) {
		spin_unlock(&dev->lock);
		goto out;
	}

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	spin_unlock(&dev->lock);

	gr_dfs_create(dev);

	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	if (retval)
		gr_remove(pdev);

	return retval;
}

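/*
 * Matching is done on the device-tree node name (as used for GRLIB AMBA
 * plug&play devices on LEON systems) rather than on a compatible string;
 * "01_021" is presumably the vendor/device ID form of the same core.
 */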
static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{.name = "01_021"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove_new = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");