1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. |
4 | */ |
5 | |
6 | #include <soc/tegra/ivc.h> |
7 | |
8 | #define TEGRA_IVC_ALIGN 64 |
9 | |
10 | /* |
11 | * IVC channel reset protocol. |
12 | * |
13 | * Each end uses its tx_channel.state to indicate its synchronization state. |
14 | */ |
15 | enum tegra_ivc_state { |
16 | /* |
17 | * This value is zero for backwards compatibility with services that |
18 | * assume channels to be initially zeroed. Such channels are in an |
19 | * initially valid state, but cannot be asynchronously reset, and must |
20 | * maintain a valid state at all times. |
21 | * |
22 | * The transmitting end can enter the established state from the sync or |
23 | * ack state when it observes the receiving endpoint in the ack or |
 * established state, indicating that it has cleared the counters in our
25 | * rx_channel. |
26 | */ |
27 | TEGRA_IVC_STATE_ESTABLISHED = 0, |
28 | |
29 | /* |
30 | * If an endpoint is observed in the sync state, the remote endpoint is |
31 | * allowed to clear the counters it owns asynchronously with respect to |
32 | * the current endpoint. Therefore, the current endpoint is no longer |
33 | * allowed to communicate. |
34 | */ |
35 | TEGRA_IVC_STATE_SYNC, |
36 | |
37 | /* |
38 | * When the transmitting end observes the receiving end in the sync |
39 | * state, it can clear the w_count and r_count and transition to the ack |
40 | * state. If the remote endpoint observes us in the ack state, it can |
41 | * return to the established state once it has cleared its counters. |
42 | */ |
43 | TEGRA_IVC_STATE_ACK |
44 | }; |
45 | |
46 | /* |
 * This structure is divided into two cache-aligned parts, the first of which
 * is only written through the tx.channel pointer, while the second is only
 * written through the rx.channel pointer. This delineates ownership of the
 * cache lines, which is critical to performance and necessary in non-cache
 * coherent implementations.
52 | */ |
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;
		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};
70 | |
#define tegra_ivc_header_read_field(hdr, field) \
	iosys_map_rd_field(hdr, 0, struct tegra_ivc_header, field)

#define tegra_ivc_header_write_field(hdr, field, value) \
	iosys_map_wr_field(hdr, 0, struct tegra_ivc_header, field, value)
76 | |
77 | static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys) |
78 | { |
79 | if (!ivc->peer) |
80 | return; |
81 | |
	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
84 | } |
85 | |
86 | static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys) |
87 | { |
88 | if (!ivc->peer) |
89 | return; |
90 | |
	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
93 | } |
94 | |
95 | static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, struct iosys_map *map) |
96 | { |
97 | /* |
98 | * This function performs multiple checks on the same values with |
99 | * security implications, so create snapshots with READ_ONCE() to |
100 | * ensure that these checks use the same values. |
101 | */ |
102 | u32 tx = tegra_ivc_header_read_field(map, tx.count); |
103 | u32 rx = tegra_ivc_header_read_field(map, rx.count); |
104 | |
105 | /* |
106 | * Perform an over-full check to prevent denial of service attacks |
107 | * where a server could be easily fooled into believing that there's |
108 | * an extremely large number of frames ready, since receivers are not |
109 | * expected to check for full or over-full conditions. |
110 | * |
111 | * Although the channel isn't empty, this is an invalid case caused by |
112 | * a potentially malicious peer, so returning empty is safer, because |
113 | * it gives the impression that the channel has gone silent. |
114 | */ |
115 | if (tx - rx > ivc->num_frames) |
116 | return true; |
117 | |
118 | return tx == rx; |
119 | } |
120 | |
121 | static inline bool tegra_ivc_full(struct tegra_ivc *ivc, struct iosys_map *map) |
122 | { |
123 | u32 tx = tegra_ivc_header_read_field(map, tx.count); |
124 | u32 rx = tegra_ivc_header_read_field(map, rx.count); |
125 | |
126 | /* |
127 | * Invalid cases where the counters indicate that the queue is over |
128 | * capacity also appear full. |
129 | */ |
130 | return tx - rx >= ivc->num_frames; |
131 | } |
132 | |
133 | static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, struct iosys_map *map) |
134 | { |
135 | u32 tx = tegra_ivc_header_read_field(map, tx.count); |
136 | u32 rx = tegra_ivc_header_read_field(map, rx.count); |
137 | |
138 | /* |
139 | * This function isn't expected to be used in scenarios where an |
140 | * over-full situation can lead to denial of service attacks. See the |
141 | * comment in tegra_ivc_empty() for an explanation about special |
142 | * over-full considerations. |
143 | */ |
144 | return tx - rx; |
145 | } |
146 | |
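/*
 * Publish one more transmitted frame: increment the tx counter in the shared
 * header and advance the local write position, wrapping to frame zero.
 */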
147 | static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc) |
148 | { |
149 | unsigned int count = tegra_ivc_header_read_field(&ivc->tx.map, tx.count); |
150 | |
151 | tegra_ivc_header_write_field(&ivc->tx.map, tx.count, count + 1); |
152 | |
153 | if (ivc->tx.position == ivc->num_frames - 1) |
154 | ivc->tx.position = 0; |
155 | else |
156 | ivc->tx.position++; |
157 | } |
158 | |
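/*
 * Consume one received frame: increment the rx counter in the shared header
 * and advance the local read position, wrapping to frame zero.
 */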
159 | static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc) |
160 | { |
161 | unsigned int count = tegra_ivc_header_read_field(&ivc->rx.map, rx.count); |
162 | |
163 | tegra_ivc_header_write_field(&ivc->rx.map, rx.count, count + 1); |
164 | |
165 | if (ivc->rx.position == ivc->num_frames - 1) |
166 | ivc->rx.position = 0; |
167 | else |
168 | ivc->rx.position++; |
169 | } |
170 | |
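/*
 * Returns 0 if the channel is established and at least one frame is pending
 * for the receiver, -ECONNRESET while a reset is in progress and -ENOSPC if
 * the channel is empty.
 */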
171 | static inline int tegra_ivc_check_read(struct tegra_ivc *ivc) |
172 | { |
173 | unsigned int offset = offsetof(struct tegra_ivc_header, tx.count); |
174 | unsigned int state; |
175 | |
176 | /* |
177 | * tx.channel->state is set locally, so it is not synchronized with |
178 | * state from the remote peer. The remote peer cannot reset its |
179 | * transmit counters until we've acknowledged its synchronization |
180 | * request, so no additional synchronization is required because an |
181 | * asynchronous transition of rx.channel->state to |
182 | * TEGRA_IVC_STATE_ACK is not allowed. |
183 | */ |
184 | state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state); |
185 | if (state != TEGRA_IVC_STATE_ESTABLISHED) |
186 | return -ECONNRESET; |
187 | |
188 | /* |
189 | * Avoid unnecessary invalidations when performing repeated accesses |
190 | * to an IVC channel by checking the old queue pointers first. |
191 | * |
192 | * Synchronization is only necessary when these pointers indicate |
193 | * empty or full. |
194 | */ |
	if (!tegra_ivc_empty(ivc, &ivc->rx.map))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, &ivc->rx.map))
201 | return -ENOSPC; |
202 | |
203 | return 0; |
204 | } |
205 | |
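/*
 * The transmit-side counterpart of tegra_ivc_check_read(): returns 0 if the
 * channel is established and a free frame is available for writing.
 */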
206 | static inline int tegra_ivc_check_write(struct tegra_ivc *ivc) |
207 | { |
208 | unsigned int offset = offsetof(struct tegra_ivc_header, rx.count); |
209 | unsigned int state; |
210 | |
211 | state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state); |
212 | if (state != TEGRA_IVC_STATE_ESTABLISHED) |
213 | return -ECONNRESET; |
214 | |
	if (!tegra_ivc_full(ivc, &ivc->tx.map))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, &ivc->tx.map))
221 | return -ENOSPC; |
222 | |
223 | return 0; |
224 | } |
225 | |
static int tegra_ivc_frame_virt(struct tegra_ivc *ivc, const struct iosys_map *header,
				unsigned int frame, struct iosys_map *map)
228 | { |
229 | size_t offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame; |
230 | |
231 | if (WARN_ON(frame >= ivc->num_frames)) |
232 | return -EINVAL; |
233 | |
234 | *map = IOSYS_MAP_INIT_OFFSET(header, offset); |
235 | |
236 | return 0; |
237 | } |
238 | |
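/* compute the DMA address of a frame, which follows the channel header */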
239 | static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc, |
240 | dma_addr_t phys, |
241 | unsigned int frame) |
242 | { |
243 | unsigned long offset; |
244 | |
245 | offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame; |
246 | |
247 | return phys + offset; |
248 | } |
249 | |
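/*
 * Invalidate CPU caches for (part of) a frame before reading data that the
 * remote end may have written. This is a no-op without a peer device.
 */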
250 | static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc, |
251 | dma_addr_t phys, |
252 | unsigned int frame, |
253 | unsigned int offset, |
254 | size_t size) |
255 | { |
256 | if (!ivc->peer || WARN_ON(frame >= ivc->num_frames)) |
257 | return; |
258 | |
259 | phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset; |
260 | |
	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
262 | } |
263 | |
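/*
 * Flush CPU caches for (part of) a frame after writing data so that it
 * becomes visible to the remote end. This is a no-op without a peer device.
 */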
264 | static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc, |
265 | dma_addr_t phys, |
266 | unsigned int frame, |
267 | unsigned int offset, |
268 | size_t size) |
269 | { |
270 | if (!ivc->peer || WARN_ON(frame >= ivc->num_frames)) |
271 | return; |
272 | |
273 | phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset; |
274 | |
	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
276 | } |
277 | |
278 | /* directly peek at the next frame rx'ed */ |
279 | int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map) |
280 | { |
281 | int err; |
282 | |
283 | if (WARN_ON(ivc == NULL)) |
284 | return -EINVAL; |
285 | |
286 | err = tegra_ivc_check_read(ivc); |
287 | if (err < 0) |
288 | return err; |
289 | |
290 | /* |
291 | * Order observation of ivc->rx.position potentially indicating new |
292 | * data before data read. |
293 | */ |
294 | smp_rmb(); |
295 | |
	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, &ivc->rx.map, ivc->rx.position, map);
300 | } |
301 | EXPORT_SYMBOL(tegra_ivc_read_get_next_frame); |
302 | |
303 | int tegra_ivc_read_advance(struct tegra_ivc *ivc) |
304 | { |
305 | unsigned int rx = offsetof(struct tegra_ivc_header, rx.count); |
306 | unsigned int tx = offsetof(struct tegra_ivc_header, tx.count); |
307 | int err; |
308 | |
309 | /* |
310 | * No read barriers or synchronization here: the caller is expected to |
311 | * have already observed the channel non-empty. This check is just to |
312 | * catch programming errors. |
313 | */ |
314 | err = tegra_ivc_check_read(ivc); |
315 | if (err < 0) |
316 | return err; |
317 | |
318 | tegra_ivc_advance_rx(ivc); |
319 | |
	tegra_ivc_flush(ivc, ivc->rx.phys + rx);
321 | |
322 | /* |
323 | * Ensure our write to ivc->rx.position occurs before our read from |
324 | * ivc->tx.position. |
325 | */ |
326 | smp_mb(); |
327 | |
328 | /* |
329 | * Notify only upon transition from full to non-full. The available |
330 | * count can only asynchronously increase, so the worst possible |
331 | * side-effect will be a spurious notification. |
332 | */ |
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, &ivc->rx.map) == ivc->num_frames - 1)
336 | ivc->notify(ivc, ivc->notify_data); |
337 | |
338 | return 0; |
339 | } |
340 | EXPORT_SYMBOL(tegra_ivc_read_advance); |
341 | |
342 | /* directly poke at the next frame to be tx'ed */ |
343 | int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map) |
344 | { |
345 | int err; |
346 | |
347 | err = tegra_ivc_check_write(ivc); |
348 | if (err < 0) |
349 | return err; |
350 | |
	return tegra_ivc_frame_virt(ivc, &ivc->tx.map, ivc->tx.position, map);
352 | } |
353 | EXPORT_SYMBOL(tegra_ivc_write_get_next_frame); |
354 | |
355 | /* advance the tx buffer */ |
356 | int tegra_ivc_write_advance(struct tegra_ivc *ivc) |
357 | { |
358 | unsigned int tx = offsetof(struct tegra_ivc_header, tx.count); |
359 | unsigned int rx = offsetof(struct tegra_ivc_header, rx.count); |
360 | int err; |
361 | |
362 | err = tegra_ivc_check_write(ivc); |
363 | if (err < 0) |
364 | return err; |
365 | |
	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);
368 | |
369 | /* |
370 | * Order any possible stores to the frame before update of |
371 | * ivc->tx.position. |
372 | */ |
373 | smp_wmb(); |
374 | |
375 | tegra_ivc_advance_tx(ivc); |
	tegra_ivc_flush(ivc, ivc->tx.phys + tx);
377 | |
378 | /* |
379 | * Ensure our write to ivc->tx.position occurs before our read from |
380 | * ivc->rx.position. |
381 | */ |
382 | smp_mb(); |
383 | |
384 | /* |
385 | * Notify only upon transition from empty to non-empty. The available |
386 | * count can only asynchronously decrease, so the worst possible |
387 | * side-effect will be a spurious notification. |
388 | */ |
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, &ivc->tx.map) == 1)
392 | ivc->notify(ivc, ivc->notify_data); |
393 | |
394 | return 0; |
395 | } |
396 | EXPORT_SYMBOL(tegra_ivc_write_advance); |
397 | |
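/*
 * Initiate the reset protocol by entering the SYNC state, publishing the new
 * state to shared memory and notifying the remote end.
 */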
398 | void tegra_ivc_reset(struct tegra_ivc *ivc) |
399 | { |
400 | unsigned int offset = offsetof(struct tegra_ivc_header, tx.count); |
401 | |
402 | tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_SYNC); |
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
404 | ivc->notify(ivc, ivc->notify_data); |
405 | } |
406 | EXPORT_SYMBOL(tegra_ivc_reset); |
407 | |
408 | /* |
409 | * ======================================================= |
410 | * IVC State Transition Table - see tegra_ivc_notified() |
411 | * ======================================================= |
412 | * |
413 | * local remote action |
414 | * ----- ------ ----------------------------------- |
415 | * SYNC EST <none> |
416 | * SYNC ACK reset counters; move to EST; notify |
417 | * SYNC SYNC reset counters; move to ACK; notify |
418 | * ACK EST move to EST; notify |
419 | * ACK ACK move to EST; notify |
420 | * ACK SYNC reset counters; move to ACK; notify |
421 | * EST EST <none> |
422 | * EST ACK <none> |
423 | * EST SYNC reset counters; move to ACK; notify |
424 | * |
425 | * =============================================================== |
426 | */ |
427 | |
428 | int tegra_ivc_notified(struct tegra_ivc *ivc) |
429 | { |
430 | unsigned int offset = offsetof(struct tegra_ivc_header, tx.count); |
431 | enum tegra_ivc_state rx_state, tx_state; |
432 | |
433 | /* Copy the receiver's state out of shared memory. */ |
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
435 | rx_state = tegra_ivc_header_read_field(&ivc->rx.map, tx.state); |
436 | tx_state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state); |
437 | |
438 | if (rx_state == TEGRA_IVC_STATE_SYNC) { |
439 | offset = offsetof(struct tegra_ivc_header, tx.count); |
440 | |
441 | /* |
442 | * Order observation of TEGRA_IVC_STATE_SYNC before stores |
443 | * clearing tx.channel. |
444 | */ |
445 | smp_rmb(); |
446 | |
447 | /* |
448 | * Reset tx.channel counters. The remote end is in the SYNC |
449 | * state and won't make progress until we change our state, |
450 | * so the counters are not in use at this time. |
451 | */ |
452 | tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0); |
453 | tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0); |
454 | |
455 | ivc->tx.position = 0; |
456 | ivc->rx.position = 0; |
457 | |
458 | /* |
459 | * Ensure that counters appear cleared before new state can be |
460 | * observed. |
461 | */ |
462 | smp_wmb(); |
463 | |
464 | /* |
465 | * Move to ACK state. We have just cleared our counters, so it |
466 | * is now safe for the remote end to start using these values. |
467 | */ |
468 | tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ACK); |
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);
470 | |
471 | /* |
472 | * Notify remote end to observe state transition. |
473 | */ |
474 | ivc->notify(ivc, ivc->notify_data); |
475 | |
476 | } else if (tx_state == TEGRA_IVC_STATE_SYNC && |
477 | rx_state == TEGRA_IVC_STATE_ACK) { |
478 | offset = offsetof(struct tegra_ivc_header, tx.count); |
479 | |
480 | /* |
481 | * Order observation of ivc_state_sync before stores clearing |
482 | * tx_channel. |
483 | */ |
484 | smp_rmb(); |
485 | |
486 | /* |
487 | * Reset tx.channel counters. The remote end is in the ACK |
488 | * state and won't make progress until we change our state, |
489 | * so the counters are not in use at this time. |
490 | */ |
491 | tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0); |
492 | tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0); |
493 | |
494 | ivc->tx.position = 0; |
495 | ivc->rx.position = 0; |
496 | |
497 | /* |
498 | * Ensure that counters appear cleared before new state can be |
499 | * observed. |
500 | */ |
501 | smp_wmb(); |
502 | |
503 | /* |
504 | * Move to ESTABLISHED state. We know that the remote end has |
505 | * already cleared its counters, so it is safe to start |
506 | * writing/reading on this channel. |
507 | */ |
508 | tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED); |
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);
510 | |
511 | /* |
512 | * Notify remote end to observe state transition. |
513 | */ |
514 | ivc->notify(ivc, ivc->notify_data); |
515 | |
516 | } else if (tx_state == TEGRA_IVC_STATE_ACK) { |
517 | offset = offsetof(struct tegra_ivc_header, tx.count); |
518 | |
519 | /* |
520 | * At this point, we have observed the peer to be in either |
521 | * the ACK or ESTABLISHED state. Next, order observation of |
522 | * peer state before storing to tx.channel. |
523 | */ |
524 | smp_rmb(); |
525 | |
526 | /* |
527 | * Move to ESTABLISHED state. We know that we have previously |
528 | * cleared our counters, and we know that the remote end has |
529 | * cleared its counters, so it is safe to start writing/reading |
530 | * on this channel. |
531 | */ |
532 | tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED); |
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);
534 | |
535 | /* |
536 | * Notify remote end to observe state transition. |
537 | */ |
538 | ivc->notify(ivc, ivc->notify_data); |
539 | |
540 | } else { |
541 | /* |
542 | * There is no need to handle any further action. Either the |
543 | * channel is already fully established, or we are waiting for |
544 | * the remote end to catch up with our current state. Refer |
545 | * to the diagram in "IVC State Transition Table" above. |
546 | */ |
547 | } |
548 | |
549 | if (tx_state != TEGRA_IVC_STATE_ESTABLISHED) |
550 | return -EAGAIN; |
551 | |
552 | return 0; |
553 | } |
554 | EXPORT_SYMBOL(tegra_ivc_notified); |
555 | |
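/* round up a size to the alignment required for IVC headers and frames */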
556 | size_t tegra_ivc_align(size_t size) |
557 | { |
558 | return ALIGN(size, TEGRA_IVC_ALIGN); |
559 | } |
560 | EXPORT_SYMBOL(tegra_ivc_align); |
561 | |
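/*
 * Compute the total shared memory required for one queue: the frame data
 * plus the cache-aligned header that precedes it.
 */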
562 | unsigned tegra_ivc_total_queue_size(unsigned queue_size) |
563 | { |
564 | if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) { |
565 | pr_err("%s: queue_size (%u) must be %u-byte aligned\n" , |
566 | __func__, queue_size, TEGRA_IVC_ALIGN); |
567 | return 0; |
568 | } |
569 | |
570 | return queue_size + sizeof(struct tegra_ivc_header); |
571 | } |
572 | EXPORT_SYMBOL(tegra_ivc_total_queue_size); |
573 | |
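/*
 * Validate the channel geometry: queue base addresses and the frame size
 * must be cache-line aligned, the total data size must fit in 32 bits and
 * the two queue regions must not overlap.
 */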
574 | static int tegra_ivc_check_params(unsigned long rx, unsigned long tx, |
575 | unsigned int num_frames, size_t frame_size) |
576 | { |
577 | BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count), |
578 | TEGRA_IVC_ALIGN)); |
579 | BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count), |
580 | TEGRA_IVC_ALIGN)); |
581 | BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header), |
582 | TEGRA_IVC_ALIGN)); |
583 | |
584 | if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) { |
585 | pr_err("num_frames * frame_size overflows\n" ); |
586 | return -EINVAL; |
587 | } |
588 | |
589 | if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) { |
590 | pr_err("frame size not adequately aligned: %zu\n" , frame_size); |
591 | return -EINVAL; |
592 | } |
593 | |
594 | /* |
595 | * The headers must at least be aligned enough for counters |
596 | * to be accessed atomically. |
597 | */ |
598 | if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) { |
599 | pr_err("IVC channel start not aligned: %#lx\n" , rx); |
600 | return -EINVAL; |
601 | } |
602 | |
603 | if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) { |
604 | pr_err("IVC channel start not aligned: %#lx\n" , tx); |
605 | return -EINVAL; |
606 | } |
607 | |
608 | if (rx < tx) { |
609 | if (rx + frame_size * num_frames > tx) { |
610 | pr_err("queue regions overlap: %#lx + %zx > %#lx\n" , |
611 | rx, frame_size * num_frames, tx); |
612 | return -EINVAL; |
613 | } |
614 | } else { |
615 | if (tx + frame_size * num_frames > rx) { |
616 | pr_err("queue regions overlap: %#lx + %zx > %#lx\n" , |
617 | tx, frame_size * num_frames, rx); |
618 | return -EINVAL; |
619 | } |
620 | } |
621 | |
622 | return 0; |
623 | } |
624 | |
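/*
 * Local helpers for iosys_map structures, which may wrap either I/O memory
 * or system memory.
 */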
625 | static inline void iosys_map_copy(struct iosys_map *dst, const struct iosys_map *src) |
626 | { |
627 | *dst = *src; |
628 | } |
629 | |
630 | static inline unsigned long iosys_map_get_address(const struct iosys_map *map) |
631 | { |
632 | if (map->is_iomem) |
633 | return (unsigned long)map->vaddr_iomem; |
634 | |
635 | return (unsigned long)map->vaddr; |
636 | } |
637 | |
638 | static inline void *iosys_map_get_vaddr(const struct iosys_map *map) |
639 | { |
640 | if (WARN_ON(map->is_iomem)) |
641 | return NULL; |
642 | |
643 | return map->vaddr; |
644 | } |
645 | |
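/*
 * Set up an IVC channel. If a peer device is supplied, both queues are
 * mapped for DMA to that device and rx_phys/tx_phys are ignored; otherwise
 * the caller must provide the DMA addresses of the queues.
 */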
646 | int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, const struct iosys_map *rx, |
647 | dma_addr_t rx_phys, const struct iosys_map *tx, dma_addr_t tx_phys, |
648 | unsigned int num_frames, size_t frame_size, |
649 | void (*notify)(struct tegra_ivc *ivc, void *data), |
650 | void *data) |
651 | { |
652 | size_t queue_size; |
653 | int err; |
654 | |
655 | if (WARN_ON(!ivc || !notify)) |
656 | return -EINVAL; |
657 | |
658 | /* |
659 | * All sizes that can be returned by communication functions should |
660 | * fit in an int. |
661 | */ |
662 | if (frame_size > INT_MAX) |
663 | return -E2BIG; |
664 | |
	err = tegra_ivc_check_params(iosys_map_get_address(rx), iosys_map_get_address(tx),
				     num_frames, frame_size);
667 | if (err < 0) |
668 | return err; |
669 | |
670 | queue_size = tegra_ivc_total_queue_size(num_frames * frame_size); |
671 | |
672 | if (peer) { |
673 | ivc->rx.phys = dma_map_single(peer, iosys_map_get_vaddr(rx), queue_size, |
674 | DMA_BIDIRECTIONAL); |
		if (dma_mapping_error(peer, ivc->rx.phys))
676 | return -ENOMEM; |
677 | |
678 | ivc->tx.phys = dma_map_single(peer, iosys_map_get_vaddr(tx), queue_size, |
679 | DMA_BIDIRECTIONAL); |
		if (dma_mapping_error(peer, ivc->tx.phys)) {
681 | dma_unmap_single(peer, ivc->rx.phys, queue_size, |
682 | DMA_BIDIRECTIONAL); |
683 | return -ENOMEM; |
684 | } |
685 | } else { |
686 | ivc->rx.phys = rx_phys; |
687 | ivc->tx.phys = tx_phys; |
688 | } |
689 | |
	iosys_map_copy(&ivc->rx.map, rx);
	iosys_map_copy(&ivc->tx.map, tx);
692 | ivc->peer = peer; |
693 | ivc->notify = notify; |
694 | ivc->notify_data = data; |
695 | ivc->frame_size = frame_size; |
696 | ivc->num_frames = num_frames; |
697 | |
698 | /* |
699 | * These values aren't necessarily correct until the channel has been |
700 | * reset. |
701 | */ |
702 | ivc->tx.position = 0; |
703 | ivc->rx.position = 0; |
704 | |
705 | return 0; |
706 | } |
707 | EXPORT_SYMBOL(tegra_ivc_init); |
708 | |
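/*
 * Undo tegra_ivc_init(): unmap both queues from the peer device if one was
 * supplied.
 */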
709 | void tegra_ivc_cleanup(struct tegra_ivc *ivc) |
710 | { |
711 | if (ivc->peer) { |
712 | size_t size = tegra_ivc_total_queue_size(ivc->num_frames * |
713 | ivc->frame_size); |
714 | |
715 | dma_unmap_single(ivc->peer, ivc->rx.phys, size, |
716 | DMA_BIDIRECTIONAL); |
717 | dma_unmap_single(ivc->peer, ivc->tx.phys, size, |
718 | DMA_BIDIRECTIONAL); |
719 | } |
720 | } |
721 | EXPORT_SYMBOL(tegra_ivc_cleanup); |
722 | |