// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2017 Broadcom

/*
 * Broadcom FlexRM Mailbox Driver
 *
 * Each Broadcom FlexSparx4 offload engine is implemented as an
 * extension to the Broadcom FlexRM ring manager. The FlexRM ring
 * manager provides a set of rings which can be used to submit
 * work to a FlexSparx4 offload engine.
 *
 * This driver creates a mailbox controller using a set of FlexRM
 * rings where each mailbox channel represents a separate FlexRM ring.
 */

#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

/* ====== FlexRM register defines ===== */

/* FlexRM configuration */
#define RING_REGS_SIZE 0x10000
#define RING_DESC_SIZE 8
#define RING_DESC_INDEX(offset) \
	((offset) / RING_DESC_SIZE)
#define RING_DESC_OFFSET(index) \
	((index) * RING_DESC_SIZE)
#define RING_MAX_REQ_COUNT 1024
#define RING_BD_ALIGN_ORDER 12
#define RING_BD_ALIGN_CHECK(addr) \
	(!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
#define RING_BD_TOGGLE_INVALID(offset) \
	(((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
#define RING_BD_TOGGLE_VALID(offset) \
	(!RING_BD_TOGGLE_INVALID(offset))
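/*
 * Worked example (editor's note): the TOGGLE value alternates per
 * 4 KiB page of BD memory, since bit RING_BD_ALIGN_ORDER (12) of the
 * offset selects it. Offsets 0x0000..0x0fff give
 * RING_BD_TOGGLE_VALID() == 1, offsets 0x1000..0x1fff give 0, and so
 * on.
 */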
#define RING_BD_DESC_PER_REQ 32
#define RING_BD_DESC_COUNT \
	(RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
#define RING_BD_SIZE \
	(RING_BD_DESC_COUNT * RING_DESC_SIZE)
#define RING_CMPL_ALIGN_ORDER 13
#define RING_CMPL_DESC_COUNT RING_MAX_REQ_COUNT
#define RING_CMPL_SIZE \
	(RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
#define RING_VER_MAGIC 0x76303031
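/*
 * Sizing arithmetic implied by the macros above (editor's note):
 * RING_BD_DESC_COUNT = 1024 * 32 = 32768 descriptors, so
 * RING_BD_SIZE = 32768 * 8 = 256 KiB of BD memory per ring, while
 * RING_CMPL_SIZE = 1024 * 8 = 8 KiB of completion memory per ring.
 */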

/* Per-Ring register offsets */
#define RING_VER 0x000
#define RING_BD_START_ADDR 0x004
#define RING_BD_READ_PTR 0x008
#define RING_BD_WRITE_PTR 0x00c
#define RING_BD_READ_PTR_DDR_LS 0x010
#define RING_BD_READ_PTR_DDR_MS 0x014
#define RING_CMPL_START_ADDR 0x018
#define RING_CMPL_WRITE_PTR 0x01c
#define RING_NUM_REQ_RECV_LS 0x020
#define RING_NUM_REQ_RECV_MS 0x024
#define RING_NUM_REQ_TRANS_LS 0x028
#define RING_NUM_REQ_TRANS_MS 0x02c
#define RING_NUM_REQ_OUTSTAND 0x030
#define RING_CONTROL 0x034
#define RING_FLUSH_DONE 0x038
#define RING_MSI_ADDR_LS 0x03c
#define RING_MSI_ADDR_MS 0x040
#define RING_MSI_CONTROL 0x048
#define RING_BD_READ_PTR_DDR_CONTROL 0x04c
#define RING_MSI_DATA_VALUE 0x064

/* Register RING_BD_START_ADDR fields */
#define BD_LAST_UPDATE_HW_SHIFT 28
#define BD_LAST_UPDATE_HW_MASK 0x1
#define BD_START_ADDR_VALUE(pa) \
	((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
#define BD_START_ADDR_DECODE(val) \
	((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)

/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa) \
	((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))

/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL 12
#define CONTROL_FLUSH_SHIFT 5
#define CONTROL_ACTIVE_SHIFT 4
#define CONTROL_RATE_ADAPT_MASK 0xf
#define CONTROL_RATE_DYNAMIC 0x0
#define CONTROL_RATE_FAST 0x8
#define CONTROL_RATE_MEDIUM 0x9
#define CONTROL_RATE_SLOW 0xa
#define CONTROL_RATE_IDLE 0xb

/* Register RING_FLUSH_DONE fields */
#define FLUSH_DONE_MASK 0x1

/* Register RING_MSI_CONTROL fields */
#define MSI_TIMER_VAL_SHIFT 16
#define MSI_TIMER_VAL_MASK 0xffff
#define MSI_ENABLE_SHIFT 15
#define MSI_ENABLE_MASK 0x1
#define MSI_COUNT_SHIFT 0
#define MSI_COUNT_MASK 0x3ff

/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT 16
#define BD_READ_PTR_DDR_TIMER_VAL_MASK 0xffff
#define BD_READ_PTR_DDR_ENABLE_SHIFT 15
#define BD_READ_PTR_DDR_ENABLE_MASK 0x1

/* ====== FlexRM ring descriptor defines ===== */

/* Completion descriptor format */
#define CMPL_OPAQUE_SHIFT 0
#define CMPL_OPAQUE_MASK 0xffff
#define CMPL_ENGINE_STATUS_SHIFT 16
#define CMPL_ENGINE_STATUS_MASK 0xffff
#define CMPL_DME_STATUS_SHIFT 32
#define CMPL_DME_STATUS_MASK 0xffff
#define CMPL_RM_STATUS_SHIFT 48
#define CMPL_RM_STATUS_MASK 0xffff
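/*
 * Decode example (editor's note): per the shift/mask pairs above, a
 * 64-bit completion descriptor carries the request id (opaque) in
 * bits [15:0], the engine status in [31:16], the DME status in
 * [47:32], and the RM status in [63:48].
 */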

/* Completion DME status code */
#define DME_STATUS_MEM_COR_ERR BIT(0)
#define DME_STATUS_MEM_UCOR_ERR BIT(1)
#define DME_STATUS_FIFO_UNDERFLOW BIT(2)
#define DME_STATUS_FIFO_OVERFLOW BIT(3)
#define DME_STATUS_RRESP_ERR BIT(4)
#define DME_STATUS_BRESP_ERR BIT(5)
#define DME_STATUS_ERROR_MASK (DME_STATUS_MEM_COR_ERR | \
			       DME_STATUS_MEM_UCOR_ERR | \
			       DME_STATUS_FIFO_UNDERFLOW | \
			       DME_STATUS_FIFO_OVERFLOW | \
			       DME_STATUS_RRESP_ERR | \
			       DME_STATUS_BRESP_ERR)

/* Completion RM status code */
#define RM_STATUS_CODE_SHIFT 0
#define RM_STATUS_CODE_MASK 0x3ff
#define RM_STATUS_CODE_GOOD 0x0
#define RM_STATUS_CODE_AE_TIMEOUT 0x3ff

/* General descriptor format */
#define DESC_TYPE_SHIFT 60
#define DESC_TYPE_MASK 0xf
#define DESC_PAYLOAD_SHIFT 0
#define DESC_PAYLOAD_MASK 0x0fffffffffffffff

/* Null descriptor format */
#define NULL_TYPE 0
#define NULL_TOGGLE_SHIFT 58
#define NULL_TOGGLE_MASK 0x1

/* Header descriptor format */
#define HEADER_TYPE 1
#define HEADER_TOGGLE_SHIFT 58
#define HEADER_TOGGLE_MASK 0x1
#define HEADER_STARTPKT_SHIFT 57
#define HEADER_STARTPKT_MASK 0x1
#define HEADER_ENDPKT_SHIFT 56
#define HEADER_ENDPKT_MASK 0x1
#define HEADER_BDCOUNT_SHIFT 36
#define HEADER_BDCOUNT_MASK 0x1f
#define HEADER_BDCOUNT_MAX HEADER_BDCOUNT_MASK
#define HEADER_FLAGS_SHIFT 16
#define HEADER_FLAGS_MASK 0xffff
#define HEADER_OPAQUE_SHIFT 0
#define HEADER_OPAQUE_MASK 0xffff
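/*
 * Resulting HEADER descriptor bit layout (editor's note, derived from
 * the shift/mask pairs above): [63:60] type, [58] toggle,
 * [57] startpkt, [56] endpkt, [40:36] bdcount, [31:16] flags, and
 * [15:0] opaque (used by this driver to carry the request id).
 */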

/* Source (SRC) descriptor format */
#define SRC_TYPE 2
#define SRC_LENGTH_SHIFT 44
#define SRC_LENGTH_MASK 0xffff
#define SRC_ADDR_SHIFT 0
#define SRC_ADDR_MASK 0x00000fffffffffff

/* Destination (DST) descriptor format */
#define DST_TYPE 3
#define DST_LENGTH_SHIFT 44
#define DST_LENGTH_MASK 0xffff
#define DST_ADDR_SHIFT 0
#define DST_ADDR_MASK 0x00000fffffffffff

/* Immediate (IMM) descriptor format */
#define IMM_TYPE 4
#define IMM_DATA_SHIFT 0
#define IMM_DATA_MASK 0x0fffffffffffffff

/* Next pointer (NPTR) descriptor format */
#define NPTR_TYPE 5
#define NPTR_TOGGLE_SHIFT 58
#define NPTR_TOGGLE_MASK 0x1
#define NPTR_ADDR_SHIFT 0
#define NPTR_ADDR_MASK 0x00000fffffffffff

/* Mega source (MSRC) descriptor format */
#define MSRC_TYPE 6
#define MSRC_LENGTH_SHIFT 44
#define MSRC_LENGTH_MASK 0xffff
#define MSRC_ADDR_SHIFT 0
#define MSRC_ADDR_MASK 0x00000fffffffffff

/* Mega destination (MDST) descriptor format */
#define MDST_TYPE 7
#define MDST_LENGTH_SHIFT 44
#define MDST_LENGTH_MASK 0xffff
#define MDST_ADDR_SHIFT 0
#define MDST_ADDR_MASK 0x00000fffffffffff

/* Source with tlast (SRCT) descriptor format */
#define SRCT_TYPE 8
#define SRCT_LENGTH_SHIFT 44
#define SRCT_LENGTH_MASK 0xffff
#define SRCT_ADDR_SHIFT 0
#define SRCT_ADDR_MASK 0x00000fffffffffff

/* Destination with tlast (DSTT) descriptor format */
#define DSTT_TYPE 9
#define DSTT_LENGTH_SHIFT 44
#define DSTT_LENGTH_MASK 0xffff
#define DSTT_ADDR_SHIFT 0
#define DSTT_ADDR_MASK 0x00000fffffffffff

/* Immediate with tlast (IMMT) descriptor format */
#define IMMT_TYPE 10
#define IMMT_DATA_SHIFT 0
#define IMMT_DATA_MASK 0x0fffffffffffffff

/* Descriptor helper macros */
#define DESC_DEC(_d, _s, _m) (((_d) >> (_s)) & (_m))
#define DESC_ENC(_d, _v, _s, _m) \
do { \
	(_d) &= ~((u64)(_m) << (_s)); \
	(_d) |= (((u64)(_v) & (_m)) << (_s)); \
} while (0)
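/*
 * Usage sketch (editor's note): building a SRC descriptor by hand
 * would look like the following, where dma_addr is a placeholder DMA
 * address:
 *
 *	u64 d = 0;
 *	DESC_ENC(d, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
 *	DESC_ENC(d, 64, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
 *	DESC_ENC(d, dma_addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);
 *
 * which is exactly what flexrm_src_desc() below does.
 */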

/* ====== FlexRM data structures ===== */

struct flexrm_ring {
	/* Unprotected members */
	int num;
	struct flexrm_mbox *mbox;
	void __iomem *regs;
	bool irq_requested;
	unsigned int irq;
	cpumask_t irq_aff_hint;
	unsigned int msi_timer_val;
	unsigned int msi_count_threshold;
	struct brcm_message *requests[RING_MAX_REQ_COUNT];
	void *bd_base;
	dma_addr_t bd_dma_base;
	u32 bd_write_offset;
	void *cmpl_base;
	dma_addr_t cmpl_dma_base;
	/* Atomic stats */
	atomic_t msg_send_count;
	atomic_t msg_cmpl_count;
	/* Protected members */
	spinlock_t lock;
	DECLARE_BITMAP(requests_bmap, RING_MAX_REQ_COUNT);
	u32 cmpl_read_offset;
};

struct flexrm_mbox {
	struct device *dev;
	void __iomem *regs;
	u32 num_rings;
	struct flexrm_ring *rings;
	struct dma_pool *bd_pool;
	struct dma_pool *cmpl_pool;
	struct dentry *root;
	struct mbox_controller controller;
};

/* ====== FlexRM ring descriptor helper routines ===== */

static u64 flexrm_read_desc(void *desc_ptr)
{
	return le64_to_cpu(*((u64 *)desc_ptr));
}

static void flexrm_write_desc(void *desc_ptr, u64 desc)
{
	*((u64 *)desc_ptr) = cpu_to_le64(desc);
}

static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
{
	return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
}

static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
{
	u32 status;

	status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
			  CMPL_DME_STATUS_MASK);
	if (status & DME_STATUS_ERROR_MASK)
		return -EIO;

	status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
			  CMPL_RM_STATUS_MASK);
	status &= RM_STATUS_CODE_MASK;
	if (status == RM_STATUS_CODE_AE_TIMEOUT)
		return -ETIMEDOUT;

	return 0;
}

static bool flexrm_is_next_table_desc(void *desc_ptr)
{
	u64 desc = flexrm_read_desc(desc_ptr);
	u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);

	return (type == NPTR_TYPE) ? true : false;
}

static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
{
	u64 desc = 0;

	DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
	DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);

	return desc;
}

static u64 flexrm_null_desc(u32 toggle)
{
	u64 desc = 0;

	DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);

	return desc;
}

static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
{
	u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;

	if (nhcnt % HEADER_BDCOUNT_MAX)
		hcnt += 1;

	return hcnt;
}
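/*
 * Worked example (editor's note): with HEADER_BDCOUNT_MAX = 31,
 * nhcnt = 40 gives hcnt = 40/31 + 1 = 2, matching the two HEADER
 * descriptors that flexrm_enqueue_desc() emits at nhpos 0 and 31,
 * while nhcnt = 31 gives exactly 1.
 */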

static void flexrm_flip_header_toggle(void *desc_ptr)
{
	u64 desc = flexrm_read_desc(desc_ptr);

	if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
		desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
	else
		desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);

	flexrm_write_desc(desc_ptr, desc);
}

static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
			      u32 bdcount, u32 flags, u32 opaque)
{
	u64 desc = 0;

	DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
	DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
	DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
	DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
	DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
	DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);

	return desc;
}

static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
				u64 desc, void **desc_ptr, u32 *toggle,
				void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;

	/* Sanity check */
	if (nhcnt <= nhpos)
		return;

	/*
	 * Each request or packet starts with a HEADER descriptor followed
	 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
	 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
	 * following a HEADER descriptor is represented by the BDCOUNT field
	 * of the HEADER descriptor. The max value of the BDCOUNT field is
	 * 31, which means we can only have 31 non-HEADER descriptors
	 * following one HEADER descriptor.
	 *
	 * In general use, the number of non-HEADER descriptors can easily
	 * go beyond 31. To tackle this situation, we have packet (or
	 * request) extension bits (STARTPKT and ENDPKT) in the HEADER
	 * descriptor.
	 *
	 * To use packet extension, the first HEADER descriptor of a request
	 * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
	 * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
	 * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
	 * TOGGLE bit of the first HEADER will be set to the invalid state
	 * to ensure that FlexRM does not start fetching descriptors until
	 * all descriptors are enqueued. The user of this function will flip
	 * the TOGGLE bit of the first HEADER after all descriptors are
	 * enqueued.
	 */

	if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
		/* Prepare the header descriptor */
		nhavail = (nhcnt - nhpos);
		_toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
		_startpkt = (nhpos == 0) ? 0x1 : 0x0;
		_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
		_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
			   nhavail : HEADER_BDCOUNT_MAX;
		d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
				       _bdcount, 0x0, reqid);

		/* Write header descriptor */
		flexrm_write_desc(*desc_ptr, d);

		/* Point to next descriptor */
		*desc_ptr += sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;

		/* Skip next pointer descriptors */
		while (flexrm_is_next_table_desc(*desc_ptr)) {
			*toggle = (*toggle) ? 0 : 1;
			*desc_ptr += sizeof(desc);
			if (*desc_ptr == end_desc)
				*desc_ptr = start_desc;
		}
	}

	/* Write desired descriptor */
	flexrm_write_desc(*desc_ptr, desc);

	/* Point to next descriptor */
	*desc_ptr += sizeof(desc);
	if (*desc_ptr == end_desc)
		*desc_ptr = start_desc;

	/* Skip next pointer descriptors */
	while (flexrm_is_next_table_desc(*desc_ptr)) {
		*toggle = (*toggle) ? 0 : 1;
		*desc_ptr += sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;
	}
}
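/*
 * Walk-through (editor's note): for a request with nhcnt = 40, the
 * function above emits a HEADER at nhpos 0 (STARTPKT=1, ENDPKT=0,
 * BDCOUNT=31, TOGGLE initially in the invalid state) followed by 31
 * non-HEADER descriptors, then a HEADER at nhpos 31 (STARTPKT=0,
 * ENDPKT=1, BDCOUNT=9) followed by the remaining 9 descriptors.
 */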

static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
	DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);

	return desc;
}

static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
{
	u64 desc = 0;

	DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
	DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);

	return desc;
}

static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
	DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);

	return desc;
}

static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
{
	u64 desc = 0;

	DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
	DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);

	return desc;
}

static u64 flexrm_imm_desc(u64 data)
{
	u64 desc = 0;

	DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);

	return desc;
}

static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
	DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);

	return desc;
}

static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
	DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);

	return desc;
}

static u64 flexrm_immt_desc(u64 data)
{
	u64 desc = 0;

	DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);

	return desc;
}

static bool flexrm_spu_sanity_check(struct brcm_message *msg)
{
	struct scatterlist *sg;

	if (!msg->spu.src || !msg->spu.dst)
		return false;
	for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > SRC_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MSRC_LENGTH_MASK * 16))
				return false;
		}
	}
	for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > DST_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MDST_LENGTH_MASK * 16))
				return false;
		}
	}

	return true;
}
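/*
 * Editor's note on the length rule above: a scatterlist entry whose
 * length is not a multiple of 16 must fit a plain SRC/DST descriptor
 * (length in bytes, at most 0xffff), whereas a 16-byte-multiple entry
 * uses the MSRC/MDST form, which encodes length/16 and therefore
 * allows up to 16 * 0xffff bytes.
 */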

static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 cnt = 0;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			cnt++;
			dst_target = src_sg->length;
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			cnt++;
			if (dst_sg->length < dst_target)
				dst_target -= dst_sg->length;
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	return cnt;
}

static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
{
	int rc;

	rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			DMA_TO_DEVICE);
	if (!rc)
		return -EIO;

	rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
			DMA_FROM_DEVICE);
	if (!rc) {
		dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			     DMA_TO_DEVICE);
		return -EIO;
	}

	return 0;
}

static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
		     DMA_TO_DEVICE);
}

static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
				    u32 reqid, void *desc_ptr, u32 toggle,
				    void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhpos = 0;
	void *orig_desc_ptr = desc_ptr;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			if (sg_dma_len(src_sg) & 0xf)
				d = flexrm_src_desc(sg_dma_address(src_sg),
						    sg_dma_len(src_sg));
			else
				d = flexrm_msrc_desc(sg_dma_address(src_sg),
						     sg_dma_len(src_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
			dst_target = sg_dma_len(src_sg);
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			if (sg_dma_len(dst_sg) & 0xf)
				d = flexrm_dst_desc(sg_dma_address(dst_sg),
						    sg_dma_len(dst_sg));
			else
				d = flexrm_mdst_desc(sg_dma_address(dst_sg),
						     sg_dma_len(dst_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
			if (sg_dma_len(dst_sg) < dst_target)
				dst_target -= sg_dma_len(dst_sg);
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toggle(orig_desc_ptr);

	return desc_ptr;
}

static bool flexrm_sba_sanity_check(struct brcm_message *msg)
{
	u32 i;

	if (!msg->sba.cmds || !msg->sba.cmds_count)
		return false;

	for (i = 0; i < msg->sba.cmds_count; i++) {
		if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		     (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
		    (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
			return false;
	}

	return true;
}

static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 i, cnt;

	cnt = 0;
	for (i = 0; i < msg->sba.cmds_count; i++) {
		cnt++;

		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
			cnt++;

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
			cnt++;

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
			cnt++;
	}

	return cnt;
}

static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
				    u32 reqid, void *desc_ptr, u32 toggle,
				    void *start_desc, void *end_desc)
{
	u64 d;
	u32 i, nhpos = 0;
	struct brcm_sba_command *c;
	void *orig_desc_ptr = desc_ptr;

	/* Convert SBA commands into descriptors */
	for (i = 0; i < msg->sba.cmds_count; i++) {
		c = &msg->sba.cmds[i];

		if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
			/* Destination response descriptor */
			d = flexrm_dst_desc(c->resp, c->resp_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		} else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
			/* Destination response with tlast descriptor */
			d = flexrm_dstt_desc(c->resp, c->resp_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
			/* Destination with tlast descriptor */
			d = flexrm_dstt_desc(c->data, c->data_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if (c->flags & BRCM_SBA_CMD_TYPE_B) {
			/* Command as immediate descriptor */
			d = flexrm_imm_desc(c->cmd);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		} else {
			/* Command as immediate descriptor with tlast */
			d = flexrm_immt_desc(c->cmd);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
		    (c->flags & BRCM_SBA_CMD_TYPE_C)) {
			/* Source with tlast descriptor */
			d = flexrm_srct_desc(c->data, c->data_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toggle(orig_desc_ptr);

	return desc_ptr;
}

static bool flexrm_sanity_check(struct brcm_message *msg)
{
	if (!msg)
		return false;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_sanity_check(msg);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_sanity_check(msg);
	default:
		return false;
	};
}

static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	if (!msg)
		return 0;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_estimate_nonheader_desc_count(msg);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_estimate_nonheader_desc_count(msg);
	default:
		return 0;
	};
}

static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return -EINVAL;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_dma_map(dev, msg);
	default:
		break;
	}

	return 0;
}

static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		flexrm_spu_dma_unmap(dev, msg);
		break;
	default:
		break;
	}
}

static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
				u32 reqid, void *desc_ptr, u32 toggle,
				void *start_desc, void *end_desc)
{
	if (!msg || !desc_ptr || !start_desc || !end_desc)
		return ERR_PTR(-ENOTSUPP);

	if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
		return ERR_PTR(-ERANGE);

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_write_descs(msg, nhcnt, reqid,
					      desc_ptr, toggle,
					      start_desc, end_desc);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_write_descs(msg, nhcnt, reqid,
					      desc_ptr, toggle,
					      start_desc, end_desc);
	default:
		return ERR_PTR(-ENOTSUPP);
	};
}

/* ====== FlexRM driver helper routines ===== */

static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox,
					   struct seq_file *file)
{
	int i;
	const char *state;
	struct flexrm_ring *ring;

	seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
		   "Ring#", "State", "BD_Addr", "BD_Size",
		   "Cmpl_Addr", "Cmpl_Size");

	for (i = 0; i < mbox->num_rings; i++) {
		ring = &mbox->rings[i];
		if (readl(ring->regs + RING_CONTROL) &
		    BIT(CONTROL_ACTIVE_SHIFT))
			state = "active";
		else
			state = "inactive";
		seq_printf(file,
			   "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
			   ring->num, state,
			   (unsigned long long)ring->bd_dma_base,
			   (u32)RING_BD_SIZE,
			   (unsigned long long)ring->cmpl_dma_base,
			   (u32)RING_CMPL_SIZE);
	}
}

static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox,
					  struct seq_file *file)
{
	int i;
	u32 val, bd_read_offset;
	struct flexrm_ring *ring;

	seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
		   "Ring#", "BD_Read", "BD_Write",
		   "Cmpl_Read", "Submitted", "Completed");

	for (i = 0; i < mbox->num_rings; i++) {
		ring = &mbox->rings[i];
		bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
		val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
		bd_read_offset *= RING_DESC_SIZE;
		bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
					ring->bd_dma_base);
		seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
			   ring->num,
			   (u32)bd_read_offset,
			   (u32)ring->bd_write_offset,
			   (u32)ring->cmpl_read_offset,
			   (u32)atomic_read(&ring->msg_send_count),
			   (u32)atomic_read(&ring->msg_cmpl_count));
	}
}

static int flexrm_new_request(struct flexrm_ring *ring,
			      struct brcm_message *batch_msg,
			      struct brcm_message *msg)
{
	void *next;
	unsigned long flags;
	u32 val, count, nhcnt;
	u32 read_offset, write_offset;
	bool exit_cleanup = false;
	int ret = 0, reqid;

	/* Do sanity check on message */
	if (!flexrm_sanity_check(msg))
		return -EIO;
	msg->error = 0;

	/* Reserve a request id; bail out if the ring is already full */
	spin_lock_irqsave(&ring->lock, flags);
	reqid = bitmap_find_free_region(ring->requests_bmap,
					RING_MAX_REQ_COUNT, 0);
	spin_unlock_irqrestore(&ring->lock, flags);
	if (reqid < 0)
		return -ENOSPC;
	ring->requests[reqid] = msg;

	/* Do DMA mappings for the message */
	ret = flexrm_dma_map(ring->mbox->dev, msg);
	if (ret < 0) {
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
		return ret;
	}

	/* Determine current HW BD read offset */
	read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
	val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
	read_offset *= RING_DESC_SIZE;
	read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);

	/*
	 * Number of required descriptors = number of non-header descriptors +
	 *				    number of header descriptors +
	 *				    1x null descriptor
	 */
	nhcnt = flexrm_estimate_nonheader_desc_count(msg);
	count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;
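	/*
	 * Worked example (editor's note): a message needing nhcnt = 3
	 * non-header descriptors requires
	 * count = 1 (header) + 3 + 1 (null) = 5 ring slots.
	 */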

	/* Check for available descriptor space. */
	write_offset = ring->bd_write_offset;
	while (count) {
		if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
			count--;
		write_offset += RING_DESC_SIZE;
		if (write_offset == RING_BD_SIZE)
			write_offset = 0x0;
		if (write_offset == read_offset)
			break;
	}
	if (count) {
		ret = -ENOSPC;
		exit_cleanup = true;
		goto exit;
	}

	/* Write descriptors to ring */
	next = flexrm_write_descs(msg, nhcnt, reqid,
			ring->bd_base + ring->bd_write_offset,
			RING_BD_TOGGLE_VALID(ring->bd_write_offset),
			ring->bd_base, ring->bd_base + RING_BD_SIZE);
	if (IS_ERR(next)) {
		ret = PTR_ERR(next);
		exit_cleanup = true;
		goto exit;
	}

	/* Save ring BD write offset */
	ring->bd_write_offset = (unsigned long)(next - ring->bd_base);

	/* Increment number of messages sent */
	atomic_inc_return(&ring->msg_send_count);

exit:
	/* Update error status in message */
	msg->error = ret;

	/* Cleanup if we failed */
	if (exit_cleanup) {
		flexrm_dma_unmap(ring->mbox->dev, msg);
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
	}

	return ret;
}

static int flexrm_process_completions(struct flexrm_ring *ring)
{
	u64 desc;
	int err, count = 0;
	unsigned long flags;
	struct brcm_message *msg = NULL;
	u32 reqid, cmpl_read_offset, cmpl_write_offset;
	struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];

	spin_lock_irqsave(&ring->lock, flags);

	/*
	 * Get current completion read and write offset
	 *
	 * Note: We should read completion write pointer at least once
	 * after we get a MSI interrupt because HW maintains internal
	 * MSI status which will allow next MSI interrupt only after
	 * completion write pointer is read.
	 */
	cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	cmpl_write_offset *= RING_DESC_SIZE;
	cmpl_read_offset = ring->cmpl_read_offset;
	ring->cmpl_read_offset = cmpl_write_offset;

	spin_unlock_irqrestore(&ring->lock, flags);

	/* For each completed request notify mailbox clients */
	reqid = 0;
	while (cmpl_read_offset != cmpl_write_offset) {
		/* Dequeue next completion descriptor */
		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));

		/* Next read offset */
		cmpl_read_offset += RING_DESC_SIZE;
		if (cmpl_read_offset == RING_CMPL_SIZE)
			cmpl_read_offset = 0;

		/* Decode error from completion descriptor */
		err = flexrm_cmpl_desc_to_error(desc);
		if (err < 0) {
			dev_warn(ring->mbox->dev,
				 "ring%d got completion desc=0x%lx with error %d\n",
				 ring->num, (unsigned long)desc, err);
		}

		/* Determine request id from completion descriptor */
		reqid = flexrm_cmpl_desc_to_reqid(desc);

		/* Determine message pointer based on reqid */
		msg = ring->requests[reqid];
		if (!msg) {
			dev_warn(ring->mbox->dev,
				 "ring%d null msg pointer for completion desc=0x%lx\n",
				 ring->num, (unsigned long)desc);
			continue;
		}

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = err;
		mbox_chan_received_data(chan, msg);

		/* Increment number of completions processed */
		atomic_inc_return(&ring->msg_cmpl_count);
		count++;
	}

	return count;
}

/* ====== FlexRM Debugfs callbacks ====== */

static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset)
{
	struct flexrm_mbox *mbox = dev_get_drvdata(file->private);

	/* Write config in file */
	flexrm_write_config_in_seqfile(mbox, file);

	return 0;
}

static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset)
{
	struct flexrm_mbox *mbox = dev_get_drvdata(file->private);

	/* Write stats in file */
	flexrm_write_stats_in_seqfile(mbox, file);

	return 0;
}

/* ====== FlexRM interrupt handler ===== */

static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
{
	/* We only have MSI for completions so just wakeup IRQ thread */
	/* Ring related errors will be informed via completion descriptors */

	return IRQ_WAKE_THREAD;
}

static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
{
	flexrm_process_completions(dev_id);

	return IRQ_HANDLED;
}

/* ====== FlexRM mailbox callbacks ===== */

static int flexrm_send_data(struct mbox_chan *chan, void *data)
{
	int i, rc;
	struct flexrm_ring *ring = chan->con_priv;
	struct brcm_message *msg = data;

	if (msg->type == BRCM_MESSAGE_BATCH) {
		for (i = msg->batch.msgs_queued;
		     i < msg->batch.msgs_count; i++) {
			rc = flexrm_new_request(ring, msg,
						&msg->batch.msgs[i]);
			if (rc) {
				msg->error = rc;
				return rc;
			}
			msg->batch.msgs_queued++;
		}
		return 0;
	}

	return flexrm_new_request(ring, NULL, data);
}

static bool flexrm_peek_data(struct mbox_chan *chan)
{
	int cnt = flexrm_process_completions(chan->con_priv);

	return (cnt > 0) ? true : false;
}

static int flexrm_startup(struct mbox_chan *chan)
{
	u64 d;
	u32 val, off;
	int ret = 0;
	dma_addr_t next_addr;
	struct flexrm_ring *ring = chan->con_priv;

	/* Allocate BD memory */
	ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
				       GFP_KERNEL, &ring->bd_dma_base);
	if (!ring->bd_base) {
		dev_err(ring->mbox->dev,
			"can't allocate BD memory for ring%d\n",
			ring->num);
		ret = -ENOMEM;
		goto fail;
	}

	/* Configure next table pointer entries in BD memory */
	for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
		next_addr = off + RING_DESC_SIZE;
		if (next_addr == RING_BD_SIZE)
			next_addr = 0;
		next_addr += ring->bd_dma_base;
		if (RING_BD_ALIGN_CHECK(next_addr))
			d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
						   next_addr);
		else
			d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
		flexrm_write_desc(ring->bd_base + off, d);
	}
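	/*
	 * Editor's note: the net effect of the loop above is that the
	 * last 8-byte slot of every 4 KiB page of BD memory holds a
	 * next-table-pointer (NPTR) descriptor chaining to the next
	 * page (wrapping to the first page at RING_BD_SIZE), while all
	 * other slots start out as null descriptors, so the HW walks
	 * BD memory as a circular list.
	 */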

	/* Allocate completion memory */
	ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
					  GFP_KERNEL, &ring->cmpl_dma_base);
	if (!ring->cmpl_base) {
		dev_err(ring->mbox->dev,
			"can't allocate completion memory for ring%d\n",
			ring->num);
		ret = -ENOMEM;
		goto fail_free_bd_memory;
	}

	/* Request IRQ */
	if (ring->irq == UINT_MAX) {
		dev_err(ring->mbox->dev,
			"ring%d IRQ not available\n", ring->num);
		ret = -ENODEV;
		goto fail_free_cmpl_memory;
	}
	ret = request_threaded_irq(ring->irq,
				   flexrm_irq_event,
				   flexrm_irq_thread,
				   0, dev_name(ring->mbox->dev), ring);
	if (ret) {
		dev_err(ring->mbox->dev,
			"failed to request ring%d IRQ\n", ring->num);
		goto fail_free_cmpl_memory;
	}
	ring->irq_requested = true;

	/* Set IRQ affinity hint */
	ring->irq_aff_hint = CPU_MASK_NONE;
	val = ring->mbox->num_rings;
	val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
	cpumask_set_cpu((ring->num / val) % num_online_cpus(),
			&ring->irq_aff_hint);
	ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint);
	if (ret) {
		dev_err(ring->mbox->dev,
			"failed to set IRQ affinity hint for ring%d\n",
			ring->num);
		goto fail_free_irq;
	}

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Program BD start address */
	val = BD_START_ADDR_VALUE(ring->bd_dma_base);
	writel_relaxed(val, ring->regs + RING_BD_START_ADDR);

	/* BD write pointer will be the same as HW write pointer */
	ring->bd_write_offset =
			readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
	ring->bd_write_offset *= RING_DESC_SIZE;

	/* Program completion start address */
	val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
	writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);

	/* Completion read pointer will be the same as HW write pointer */
	ring->cmpl_read_offset =
			readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	ring->cmpl_read_offset *= RING_DESC_SIZE;

	/* Read ring Tx, Rx, and Outstanding counts to clear */
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);

	/* Configure RING_MSI_CONTROL */
	val = 0;
	val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
	val |= BIT(MSI_ENABLE_SHIFT);
	val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
	writel_relaxed(val, ring->regs + RING_MSI_CONTROL);

	/* Enable/activate ring */
	val = BIT(CONTROL_ACTIVE_SHIFT);
	writel_relaxed(val, ring->regs + RING_CONTROL);

	/* Reset stats to zero */
	atomic_set(&ring->msg_send_count, 0);
	atomic_set(&ring->msg_cmpl_count, 0);

	return 0;

fail_free_irq:
	free_irq(ring->irq, ring);
	ring->irq_requested = false;
fail_free_cmpl_memory:
	dma_pool_free(ring->mbox->cmpl_pool,
		      ring->cmpl_base, ring->cmpl_dma_base);
	ring->cmpl_base = NULL;
fail_free_bd_memory:
	dma_pool_free(ring->mbox->bd_pool,
		      ring->bd_base, ring->bd_dma_base);
	ring->bd_base = NULL;
fail:
	return ret;
}

static void flexrm_shutdown(struct mbox_chan *chan)
{
	u32 reqid;
	unsigned int timeout;
	struct brcm_message *msg;
	struct flexrm_ring *ring = chan->con_priv;

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Set ring flush state */
	timeout = 1000; /* timeout of 1s */
	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
		       ring->regs + RING_CONTROL);
	do {
		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		    FLUSH_DONE_MASK)
			break;
		mdelay(1);
	} while (--timeout);
	if (!timeout)
		dev_err(ring->mbox->dev,
			"setting ring%d flush state timed out\n", ring->num);

	/* Clear ring flush state */
	timeout = 1000; /* timeout of 1s */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);
	do {
		if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		      FLUSH_DONE_MASK))
			break;
		mdelay(1);
	} while (--timeout);
	if (!timeout)
		dev_err(ring->mbox->dev,
			"clearing ring%d flush state timed out\n", ring->num);

	/* Abort all in-flight requests */
	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
		msg = ring->requests[reqid];
		if (!msg)
			continue;

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = -EIO;
		mbox_chan_received_data(chan, msg);
	}

	/* Clear requests bitmap */
	bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);

	/* Release IRQ */
	if (ring->irq_requested) {
		irq_update_affinity_hint(ring->irq, NULL);
		free_irq(ring->irq, ring);
		ring->irq_requested = false;
	}

	/* Free-up completion descriptor ring */
	if (ring->cmpl_base) {
		dma_pool_free(ring->mbox->cmpl_pool,
			      ring->cmpl_base, ring->cmpl_dma_base);
		ring->cmpl_base = NULL;
	}

	/* Free-up BD descriptor ring */
	if (ring->bd_base) {
		dma_pool_free(ring->mbox->bd_pool,
			      ring->bd_base, ring->bd_dma_base);
		ring->bd_base = NULL;
	}
}

static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
	.send_data = flexrm_send_data,
	.startup = flexrm_startup,
	.shutdown = flexrm_shutdown,
	.peek_data = flexrm_peek_data,
};

static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr,
					      const struct of_phandle_args *pa)
{
	struct mbox_chan *chan;
	struct flexrm_ring *ring;

	if (pa->args_count < 3)
		return ERR_PTR(-EINVAL);

	if (pa->args[0] >= cntlr->num_chans)
		return ERR_PTR(-ENOENT);

	if (pa->args[1] > MSI_COUNT_MASK)
		return ERR_PTR(-EINVAL);

	if (pa->args[2] > MSI_TIMER_VAL_MASK)
		return ERR_PTR(-EINVAL);

	chan = &cntlr->chans[pa->args[0]];
	ring = chan->con_priv;
	ring->msi_count_threshold = pa->args[1];
	ring->msi_timer_val = pa->args[2];

	return chan;
}
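/*
 * Editor's note: a client's mboxes property is thus expected to carry
 * three cells, e.g. (hypothetical node name and values):
 *
 *	mboxes = <&flexrm_mbox 0 0x1 0xffff>;
 *
 * i.e. ring number 0, MSI count threshold 1, MSI timer value 0xffff.
 */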

/* ====== FlexRM platform driver ===== */

static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct flexrm_mbox *mbox = dev_get_drvdata(dev);
	struct flexrm_ring *ring = &mbox->rings[desc->msi_index];

	/* Configure per-Ring MSI registers */
	writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
	writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
	writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
}

static int flexrm_mbox_probe(struct platform_device *pdev)
{
	int index, ret = 0;
	void __iomem *regs;
	void __iomem *regs_end;
	struct resource *iomem;
	struct flexrm_ring *ring;
	struct flexrm_mbox *mbox;
	struct device *dev = &pdev->dev;

	/* Allocate driver mailbox struct */
	mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
	if (!mbox) {
		ret = -ENOMEM;
		goto fail;
	}
	mbox->dev = dev;
	platform_set_drvdata(pdev, mbox);

	/* Get resource for registers and map registers of all rings */
	mbox->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &iomem);
	if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
		ret = -ENODEV;
		goto fail;
	} else if (IS_ERR(mbox->regs)) {
		ret = PTR_ERR(mbox->regs);
		goto fail;
	}
	regs_end = mbox->regs + resource_size(iomem);

	/* Scan and count available rings */
	mbox->num_rings = 0;
	for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
		if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC)
			mbox->num_rings++;
	}
	if (!mbox->num_rings) {
		ret = -ENODEV;
		goto fail;
	}

	/* Allocate driver ring structs */
	ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		ret = -ENOMEM;
		goto fail;
	}
	mbox->rings = ring;

	/* Initialize members of driver ring structs */
	regs = mbox->regs;
	for (index = 0; index < mbox->num_rings; index++) {
		ring = &mbox->rings[index];
		ring->num = index;
		ring->mbox = mbox;
		while ((regs < regs_end) &&
		       (readl_relaxed(regs + RING_VER) != RING_VER_MAGIC))
			regs += RING_REGS_SIZE;
		if (regs_end <= regs) {
			ret = -ENODEV;
			goto fail;
		}
		ring->regs = regs;
		regs += RING_REGS_SIZE;
		ring->irq = UINT_MAX;
		ring->irq_requested = false;
		ring->msi_timer_val = MSI_TIMER_VAL_MASK;
		ring->msi_count_threshold = 0x1;
		memset(ring->requests, 0, sizeof(ring->requests));
		ring->bd_base = NULL;
		ring->bd_dma_base = 0;
		ring->cmpl_base = NULL;
		ring->cmpl_dma_base = 0;
		atomic_set(&ring->msg_send_count, 0);
		atomic_set(&ring->msg_cmpl_count, 0);
		spin_lock_init(&ring->lock);
		bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
		ring->cmpl_read_offset = 0;
	}

	/* FlexRM is capable of 40-bit physical addresses only */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			goto fail;
	}

	/* Create DMA pool for ring BD memory */
	mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
					1 << RING_BD_ALIGN_ORDER, 0);
	if (!mbox->bd_pool) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Create DMA pool for ring completion memory */
	mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
					  1 << RING_CMPL_ALIGN_ORDER, 0);
	if (!mbox->cmpl_pool) {
		ret = -ENOMEM;
		goto fail_destroy_bd_pool;
	}

	/* Allocate platform MSIs for each ring */
	ret = platform_device_msi_init_and_alloc_irqs(dev, mbox->num_rings,
						      flexrm_mbox_msi_write);
	if (ret)
		goto fail_destroy_cmpl_pool;

	/* Save allocated IRQ numbers for each ring */
	for (index = 0; index < mbox->num_rings; index++)
		mbox->rings[index].irq = msi_get_virq(dev, index);

	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL);

	/* Create debugfs config entry */
	debugfs_create_devm_seqfile(mbox->dev, "config", mbox->root,
				    flexrm_debugfs_conf_show);

	/* Create debugfs stats entry */
	debugfs_create_devm_seqfile(mbox->dev, "stats", mbox->root,
				    flexrm_debugfs_stats_show);

skip_debugfs:

	/* Initialize mailbox controller */
	mbox->controller.txdone_irq = false;
	mbox->controller.txdone_poll = false;
	mbox->controller.ops = &flexrm_mbox_chan_ops;
	mbox->controller.dev = dev;
	mbox->controller.num_chans = mbox->num_rings;
	mbox->controller.of_xlate = flexrm_mbox_of_xlate;
	mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
				sizeof(*mbox->controller.chans), GFP_KERNEL);
	if (!mbox->controller.chans) {
		ret = -ENOMEM;
		goto fail_free_debugfs_root;
	}
	for (index = 0; index < mbox->num_rings; index++)
		mbox->controller.chans[index].con_priv = &mbox->rings[index];

	/* Register mailbox controller */
	ret = devm_mbox_controller_register(dev, &mbox->controller);
	if (ret)
		goto fail_free_debugfs_root;

	dev_info(dev, "registered flexrm mailbox with %d channels\n",
		 mbox->controller.num_chans);

	return 0;

fail_free_debugfs_root:
	debugfs_remove_recursive(mbox->root);
	platform_device_msi_free_irqs_all(dev);
fail_destroy_cmpl_pool:
	dma_pool_destroy(mbox->cmpl_pool);
fail_destroy_bd_pool:
	dma_pool_destroy(mbox->bd_pool);
fail:
	return ret;
}

static void flexrm_mbox_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct flexrm_mbox *mbox = platform_get_drvdata(pdev);

	debugfs_remove_recursive(mbox->root);

	platform_device_msi_free_irqs_all(dev);

	dma_pool_destroy(mbox->cmpl_pool);
	dma_pool_destroy(mbox->bd_pool);
}

static const struct of_device_id flexrm_mbox_of_match[] = {
	{ .compatible = "brcm,iproc-flexrm-mbox", },
	{},
};
MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match);

static struct platform_driver flexrm_mbox_driver = {
	.driver = {
		.name = "brcm-flexrm-mbox",
		.of_match_table = flexrm_mbox_of_match,
	},
	.probe = flexrm_mbox_probe,
	.remove_new = flexrm_mbox_remove,
};
module_platform_driver(flexrm_mbox_driver);

MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver");
MODULE_LICENSE("GPL v2");
1685 | |