// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio Transport driver for Arm System Control and Management Interface
 * (SCMI).
 *
 * Copyright (C) 2020-2022 OpenSynergy.
 * Copyright (C) 2021-2022 ARM Ltd.
 */

/**
 * DOC: Theory of Operation
 *
 * The scmi-virtio transport implements a driver for the virtio SCMI device.
 *
 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
 * channel (virtio eventq, P2A channel). Each channel is implemented through a
 * virtqueue. Access to each virtqueue is protected by spinlocks.
 */

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>

#include "common.h"

#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
#define VIRTIO_SCMI_MAX_MSG_SIZE	128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE \
	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
#define DESCRIPTORS_PER_TX_MSG		2
/**
 * struct scmi_vio_channel - Transport channel information
 *
 * @vqueue: Associated virtqueue
 * @cinfo: SCMI Tx or Rx channel
 * @free_lock: Protects access to the @free_list.
 * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
 * @deferred_tx_work: Worker for TX deferred replies processing
 * @deferred_tx_wq: Workqueue for TX deferred replies
 * @pending_lock: Protects access to the @pending_cmds_list.
 * @pending_cmds_list: List of pre-fetched commands queued for later processing
 * @is_rx: Whether channel is an Rx channel
 * @max_msg: Maximum number of pending messages for this channel.
 * @lock: Protects access to all members except users, free_list and
 *	  pending_cmds_list.
 * @shutdown_done: A reference to a completion used when freeing this channel.
 * @users: A reference count to currently active users of this channel.
 */
struct scmi_vio_channel {
	struct virtqueue *vqueue;
	struct scmi_chan_info *cinfo;
	/* lock to protect access to the free list. */
	spinlock_t free_lock;
	struct list_head free_list;
	/* lock to protect access to the pending list. */
	spinlock_t pending_lock;
	struct list_head pending_cmds_list;
	struct work_struct deferred_tx_work;
	struct workqueue_struct *deferred_tx_wq;
	bool is_rx;
	unsigned int max_msg;
	/*
	 * Lock to protect access to all members except users, free_list and
	 * pending_cmds_list
	 */
	spinlock_t lock;
	struct completion *shutdown_done;
	refcount_t users;
};

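/*
 * Polling state of a message: a message starts as VIO_MSG_NOT_POLLED and is
 * moved to VIO_MSG_POLLING when a polled transmission is set up; it then
 * transitions either to VIO_MSG_POLL_DONE, once its reply has been dequeued,
 * or to VIO_MSG_POLL_TIMEOUT, if the polling path gives up on it before a
 * reply has been seen.
 */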
enum poll_states {
	VIO_MSG_NOT_POLLED,
	VIO_MSG_POLL_TIMEOUT,
	VIO_MSG_POLLING,
	VIO_MSG_POLL_DONE,
};

/**
 * struct scmi_vio_msg - Transport PDU information
 *
 * @request: SDU used for commands
 * @input: SDU used for (delayed) responses and notifications
 * @list: List which scmi_vio_msg may be part of
 * @rx_len: Input SDU size in bytes, once input has been received
 * @poll_idx: Last used index registered for polling purposes if this message
 *	      transaction reply was configured for polling.
 * @poll_status: Polling state for this message.
 * @poll_lock: A lock to protect @poll_status
 * @users: A reference count to track this message's users and avoid premature
 *	   freeing (and reuse) when polling and IRQ execution paths interleave.
 */
struct scmi_vio_msg {
	struct scmi_msg_payld *request;
	struct scmi_msg_payld *input;
	struct list_head list;
	unsigned int rx_len;
	unsigned int poll_idx;
	enum poll_states poll_status;
	/* Lock to protect access to poll_status */
	spinlock_t poll_lock;
	refcount_t users;
};

/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;

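/* Bind the SCMI channel info to this virtio channel and mark it in use. */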
static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
				   struct scmi_chan_info *cinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&vioch->lock, flags);
	cinfo->transport_info = vioch;
	/* Indirectly setting channel not available any more */
	vioch->cinfo = cinfo;
	spin_unlock_irqrestore(&vioch->lock, flags);

	refcount_set(&vioch->users, 1);
}

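/*
 * Channel users refcounting: any path about to operate on a channel grabs a
 * reference with scmi_vio_channel_acquire() and drops it with
 * scmi_vio_channel_release(); the last release while a shutdown is pending
 * completes @shutdown_done, unblocking scmi_vio_channel_cleanup_sync().
 */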
static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
{
	return refcount_inc_not_zero(&vioch->users);
}

static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
{
	if (refcount_dec_and_test(&vioch->users)) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->lock, flags);
		if (vioch->shutdown_done) {
			vioch->cinfo = NULL;
			complete(vioch->shutdown_done);
		}
		spin_unlock_irqrestore(&vioch->lock, flags);
	}
}

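/*
 * Drop the initial reference taken at channel-ready time and wait for any
 * in-flight user (e.g. a concurrent RX callback) to release the channel
 * before declaring it fully shut down.
 */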
static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);

	/*
	 * Prepare to wait for the last release if not already released
	 * or in progress.
	 */
	spin_lock_irqsave(&vioch->lock, flags);
	if (!vioch->cinfo || vioch->shutdown_done) {
		spin_unlock_irqrestore(&vioch->lock, flags);
		return;
	}

	vioch->shutdown_done = &vioch_shutdown_done;
	if (!vioch->is_rx && vioch->deferred_tx_wq)
		/* Cannot be kicked anymore after this... */
		vioch->deferred_tx_wq = NULL;
	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	/* Let any possibly concurrent RX path release the channel */
	wait_for_completion(vioch->shutdown_done);
}

/* Assumes to be called with vio channel acquired already */
static struct scmi_vio_msg *
scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	struct scmi_vio_msg *msg;

	spin_lock_irqsave(&vioch->free_lock, flags);
	if (list_empty(&vioch->free_list)) {
		spin_unlock_irqrestore(&vioch->free_lock, flags);
		return NULL;
	}

	msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
	list_del_init(&msg->list);
	spin_unlock_irqrestore(&vioch->free_lock, flags);

	/* Still no users, no need to acquire poll_lock */
	msg->poll_status = VIO_MSG_NOT_POLLED;
	refcount_set(&msg->users, 1);

	return msg;
}

static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
{
	return refcount_inc_not_zero(&msg->users);
}

/* Assumes to be called with vio channel acquired already */
static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
					struct scmi_vio_msg *msg)
{
	bool ret;

	ret = refcount_dec_and_test(&msg->users);
	if (ret) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->free_lock, flags);
		list_add_tail(&msg->list, &vioch->free_list);
		spin_unlock_irqrestore(&vioch->free_lock, flags);
	}

	return ret;
}

static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}

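/* Expose a free Rx buffer to the virtio device by adding it to the eventq. */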
static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
			       struct scmi_vio_msg *msg)
{
	struct scatterlist sg_in;
	int rc;
	unsigned long flags;
	struct device *dev = &vioch->vqueue->vdev->dev;

	sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	return rc;
}

/*
 * Assume to be called with channel already acquired or not ready at all;
 * vioch->lock MUST NOT have been already acquired.
 */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
				  struct scmi_vio_msg *msg)
{
	if (vioch->is_rx)
		scmi_vio_feed_vq_rx(vioch, msg);
	else
		scmi_vio_msg_release(vioch, msg);
}

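/*
 * Virtqueue completion callback, shared by the Tx and Rx virtqueues: it
 * drains all used buffers, dispatches each one to the SCMI core via
 * scmi_rx_callback() and then either re-feeds it to the Rx virtqueue or
 * returns it to the Tx free list.
 */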
static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
	unsigned long flags;
	unsigned int length;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg;
	bool cb_enabled = true;

	if (WARN_ON_ONCE(!vqueue->vdev->priv))
		return;
	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];

	for (;;) {
		if (!scmi_vio_channel_acquire(vioch))
			return;

		spin_lock_irqsave(&vioch->lock, flags);
		if (cb_enabled) {
			virtqueue_disable_cb(vqueue);
			cb_enabled = false;
		}

		msg = virtqueue_get_buf(vqueue, &length);
		if (!msg) {
			if (virtqueue_enable_cb(vqueue)) {
				spin_unlock_irqrestore(&vioch->lock, flags);
				scmi_vio_channel_release(vioch);
				return;
			}
			cb_enabled = true;
		}
		spin_unlock_irqrestore(&vioch->lock, flags);

		if (msg) {
			msg->rx_len = length;
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);

			scmi_finalize_message(vioch, msg);
		}

		/*
		 * Release vio channel between loop iterations to allow
		 * virtio_chan_free() to eventually fully release it when
		 * shutting down; in such a case, any outstanding message will
		 * be ignored since this loop will bail out at the next
		 * iteration.
		 */
		scmi_vio_channel_release(vioch);
	}
}

static void scmi_vio_deferred_tx_worker(struct work_struct *work)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg, *tmp;

	vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);

	if (!scmi_vio_channel_acquire(vioch))
		return;

	/*
	 * Process pre-fetched messages: these could be non-polled messages or
	 * late timed-out replies to polled messages dequeued by chance while
	 * polling for some other messages: this worker is in charge of
	 * processing the valid non-expired messages and, in any case, of
	 * finally freeing all of them.
	 */
	spin_lock_irqsave(&vioch->pending_lock, flags);

	/* Scan the list of possibly pre-fetched messages during polling. */
	list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
		list_del(&msg->list);

		/*
		 * Channel is acquired here (cannot vanish) and this message
		 * is no more processed elsewhere so no poll_lock needed.
		 */
		if (msg->poll_status == VIO_MSG_NOT_POLLED)
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);

		/* Free the processed message once done */
		scmi_vio_msg_release(vioch, msg);
	}

	spin_unlock_irqrestore(&vioch->pending_lock, flags);

	/* Process possibly still pending messages */
	scmi_vio_complete_cb(vioch->vqueue);

	scmi_vio_channel_release(vioch);
}

static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };

static vq_callback_t *scmi_vio_complete_callbacks[] = {
	scmi_vio_complete_cb,
	scmi_vio_complete_cb
};

static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
{
	struct scmi_vio_channel *vioch = base_cinfo->transport_info;

	return vioch->max_msg;
}

static int virtio_link_supplier(struct device *dev)
{
	if (!scmi_vdev) {
		dev_notice(dev,
			   "Deferring probe after not finding a bound scmi-virtio device\n");
		return -EPROBE_DEFER;
	}

	if (!device_link_add(dev, &scmi_vdev->dev,
			     DL_FLAG_AUTOREMOVE_CONSUMER)) {
		dev_err(dev, "Adding link to supplier virtio device failed\n");
		return -ECANCELED;
	}

	return 0;
}

static bool virtio_chan_available(struct device_node *of_node, int idx)
{
	struct scmi_vio_channel *channels, *vioch = NULL;

	if (WARN_ON_ONCE(!scmi_vdev))
		return false;

	channels = (struct scmi_vio_channel *)scmi_vdev->priv;

	switch (idx) {
	case VIRTIO_SCMI_VQ_TX:
		vioch = &channels[VIRTIO_SCMI_VQ_TX];
		break;
	case VIRTIO_SCMI_VQ_RX:
		if (scmi_vio_have_vq_rx(scmi_vdev))
			vioch = &channels[VIRTIO_SCMI_VQ_RX];
		break;
	default:
		return false;
	}

	return vioch && !vioch->cinfo;
}

static void scmi_destroy_tx_workqueue(void *deferred_tx_wq)
{
	destroy_workqueue(deferred_tx_wq);
}

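/*
 * Allocate the per-channel message pool (and, for Tx, the deferred worker
 * used by the polling path), then mark the channel ready for the SCMI core.
 */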
static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			     bool tx)
{
	struct scmi_vio_channel *vioch;
	int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
	int i;

	if (!scmi_vdev)
		return -EPROBE_DEFER;

	vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];

	/* Setup a deferred worker for polling. */
	if (tx && !vioch->deferred_tx_wq) {
		int ret;

		vioch->deferred_tx_wq =
			alloc_workqueue(dev_name(&scmi_vdev->dev),
					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
					0);
		if (!vioch->deferred_tx_wq)
			return -ENOMEM;

		ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue,
					       vioch->deferred_tx_wq);
		if (ret)
			return ret;

		INIT_WORK(&vioch->deferred_tx_work,
			  scmi_vio_deferred_tx_worker);
	}

	for (i = 0; i < vioch->max_msg; i++) {
		struct scmi_vio_msg *msg;

		msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		if (tx) {
			msg->request = devm_kzalloc(dev,
						    VIRTIO_SCMI_MAX_PDU_SIZE,
						    GFP_KERNEL);
			if (!msg->request)
				return -ENOMEM;
			spin_lock_init(&msg->poll_lock);
			refcount_set(&msg->users, 1);
		}

		msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
					  GFP_KERNEL);
		if (!msg->input)
			return -ENOMEM;

		scmi_finalize_message(vioch, msg);
	}

	scmi_vio_channel_ready(vioch, cinfo);

	return 0;
}

static int virtio_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	/*
	 * Break device to inhibit further traffic flowing while shutting down
	 * the channels: doing it later holding vioch->lock creates unsafe
	 * locking dependency chains as reported by LOCKDEP.
	 */
	virtio_break_device(vioch->vqueue->vdev);
	scmi_vio_channel_cleanup_sync(vioch);

	return 0;
}

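/*
 * Queue one request/response buffer pair on the Tx virtqueue; for polled
 * transfers also snapshot the current last-used index and take an extra
 * message reference on behalf of the polling path.
 */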
static int virtio_send_message(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scatterlist sg_out;
	struct scatterlist sg_in;
	struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
	unsigned long flags;
	int rc;
	struct scmi_vio_msg *msg;

	if (!scmi_vio_channel_acquire(vioch))
		return -EINVAL;

	msg = scmi_virtio_get_free_msg(vioch);
	if (!msg) {
		scmi_vio_channel_release(vioch);
		return -EBUSY;
	}

	msg_tx_prepare(msg->request, xfer);

	sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
	sg_init_one(&sg_in, msg->input, msg_response_size(xfer));

	spin_lock_irqsave(&vioch->lock, flags);

	/*
	 * If polling was requested for this transaction:
	 *  - retrieve last used index (will be used as polling reference)
	 *  - bind the polled message to the xfer via .priv
	 *  - grab an additional msg refcount for the poll-path
	 */
	if (xfer->hdr.poll_completion) {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		/* Still no users, no need to acquire poll_lock */
		msg->poll_status = VIO_MSG_POLLING;
		scmi_vio_msg_acquire(msg);
		/* Ensure initialized msg is visibly bound to xfer */
		smp_store_mb(xfer->priv, msg);
	}

	rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(vioch->cinfo->dev,
			"failed to add to TX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	if (rc) {
		/* Ensure order between xfer->priv clear and vq feeding */
		smp_store_mb(xfer->priv, NULL);
		if (xfer->hdr.poll_completion)
			scmi_vio_msg_release(vioch, msg);
		scmi_vio_msg_release(vioch, msg);
	}

	scmi_vio_channel_release(vioch);

	return rc;
}

static void virtio_fetch_response(struct scmi_chan_info *cinfo,
				  struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_response(msg->input, msg->rx_len, xfer);
}

static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
				      size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
}

/**
 * virtio_mark_txdone - Mark transmission done
 *
 * @cinfo: SCMI channel info
 * @ret: Transmission return code
 * @xfer: Transfer descriptor
 *
 * Free only completed polling transfer messages.
 *
 * Note that in the SCMI VirtIO transport we never explicitly release still
 * outstanding but timed-out messages by forcibly re-adding them to the
 * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
 * TX deferred worker, eventually clean up such messages once, finally, a late
 * reply is received and discarded (if ever).
 *
 * This approach was deemed preferable since those pending timed-out buffers
 * are still effectively owned by the SCMI platform VirtIO device even after
 * timeout expiration: forcibly freeing and reusing them before they have been
 * returned explicitly by the SCMI platform could lead to subtle bugs due to
 * message corruption.
 * An SCMI platform VirtIO device which never returns message buffers is
 * anyway broken and will quickly lead to exhaustion of available messages.
 *
 * For this same reason, here, we take care to free only the polled messages
 * that have indeed been replied to (and only if not already processed on the
 * IRQ path - the initial scmi_vio_msg_release() takes care of this), and also
 * any timed-out polled message that appears to have been at least dequeued
 * from the virtqueues (VIO_MSG_POLL_DONE): this is needed since such messages
 * won't be freed elsewhere. Any other polled message is marked as
 * VIO_MSG_POLL_TIMEOUT.
 *
 * Possible late replies to timed-out polled messages will be eventually freed
 * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if
 * dequeued on some other polling path.
 */
static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			       struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scmi_vio_msg *msg = xfer->priv;

	if (!msg || !scmi_vio_channel_acquire(vioch))
		return;

	/* Ensure msg is unbound from xfer anyway at this point */
	smp_store_mb(xfer->priv, NULL);

	/* Must be a polled xfer and not already freed on the IRQ path */
	if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
		scmi_vio_channel_release(vioch);
		return;
	}

	spin_lock_irqsave(&msg->poll_lock, flags);
	/* Do not free a timed-out polled message while it is still in flight */
	if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
		scmi_vio_msg_release(vioch, msg);
	else if (msg->poll_status == VIO_MSG_POLLING)
		msg->poll_status = VIO_MSG_POLL_TIMEOUT;
	spin_unlock_irqrestore(&msg->poll_lock, flags);

	scmi_vio_channel_release(vioch);
}

/**
 * virtio_poll_done - Provide polling support for VirtIO transport
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being polled for.
 *
 * VirtIO core provides a polling mechanism based only on last used indexes:
 * this means that it is possible to poll the virtqueues waiting for something
 * new to arrive from the host side, but the only way to check if the freshly
 * arrived buffer was indeed what we were waiting for is to compare the newly
 * arrived message descriptor with the one we are polling on.
 *
 * As a consequence it can happen to dequeue something different from the
 * buffer we were poll-waiting for: if that is the case such early fetched
 * buffers are then added to the @pending_cmds_list for later processing by a
 * dedicated deferred worker.
 *
 * So, basically, once something new is spotted we proceed to de-queue all the
 * freshly received used buffers until we find the one we were polling on, or
 * we have 'seemingly' emptied the virtqueue; if some buffers are still pending
 * in the vqueue at the end of the polling loop (possible due to inherent races
 * in virtqueues handling mechanisms), we similarly kick the deferred worker
 * and let it process those, to avoid indefinitely looping in the .poll_done
 * busy-waiting helper.
 *
 * Finally, we delegate to the deferred worker also the final free of any timed
 * out reply to a polled message that we should dequeue.
 *
 * Note that, since we do NOT have a per-message suppress notification
 * mechanism, the message we are polling for could be alternatively delivered
 * via usual IRQ callbacks on another core which happened to have IRQs enabled
 * while we are actively polling for it here: in such a case it will be handled
 * as such by scmi_rx_callback() and the polling loop in the SCMI Core TX path
 * will be transparently terminated anyway.
 *
 * Return: True once polling has successfully completed.
 */
static bool virtio_poll_done(struct scmi_chan_info *cinfo,
			     struct scmi_xfer *xfer)
{
	bool pending, found = false;
	unsigned int length, any_prefetched = 0;
	unsigned long flags;
	struct scmi_vio_msg *next_msg, *msg = xfer->priv;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	if (!msg)
		return true;

	/*
	 * Processed already by other polling loop on another CPU ?
	 *
	 * Note that this message is acquired on the poll path so cannot vanish
	 * while inside this loop iteration even if concurrently processed on
	 * the IRQ path.
	 *
	 * Avoid acquiring poll_lock since poll_status can be changed in a
	 * relevant manner only later in this same thread of execution: any
	 * other possible changes made concurrently by other polling loops or
	 * by a reply delivered on the IRQ path have no meaningful impact on
	 * this loop iteration: in other words it is harmless to allow this
	 * possible race, but let us avoid spinlocking with irqs off in this
	 * initial part of the polling loop.
	 */
	if (msg->poll_status == VIO_MSG_POLL_DONE)
		return true;

	if (!scmi_vio_channel_acquire(vioch))
		return true;

	/* Has cmdq index moved at all ? */
	pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	if (!pending) {
		scmi_vio_channel_release(vioch);
		return false;
	}

	spin_lock_irqsave(&vioch->lock, flags);
	virtqueue_disable_cb(vioch->vqueue);

	/*
	 * Process all new messages till the polled-for message is found OR
	 * the vqueue is empty.
	 */
	while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
		bool next_msg_done = false;

		/*
		 * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
		 * that it can be properly freed even on timeout in mark_txdone.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_POLLING) {
			next_msg->poll_status = VIO_MSG_POLL_DONE;
			next_msg_done = true;
		}
		spin_unlock(&next_msg->poll_lock);

		next_msg->rx_len = length;
		/* Is the message we were polling for ? */
		if (next_msg == msg) {
			found = true;
			break;
		} else if (next_msg_done) {
			/* Skip the rest if this was another polled msg */
			continue;
		}

		/*
		 * Enqueue for later processing any non-polled message and any
		 * timed-out polled one that we happen to have dequeued.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
		    next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
			spin_unlock(&next_msg->poll_lock);

			any_prefetched++;
			spin_lock(&vioch->pending_lock);
			list_add_tail(&next_msg->list,
				      &vioch->pending_cmds_list);
			spin_unlock(&vioch->pending_lock);
		} else {
			spin_unlock(&next_msg->poll_lock);
		}
	}

	/*
	 * When the polling loop has successfully terminated, if something
	 * else was queued in the meantime, it will be served by a deferred
	 * worker OR by the normal IRQ/callback OR by other poll loops.
	 *
	 * If we are still looking for the polled reply, the polling index has
	 * to be updated to the current vqueue last used index.
	 */
	if (found) {
		pending = !virtqueue_enable_cb(vioch->vqueue);
	} else {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	}

	if (vioch->deferred_tx_wq && (any_prefetched || pending))
		queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);

	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	return found;
}

static const struct scmi_transport_ops scmi_virtio_ops = {
	.link_supplier = virtio_link_supplier,
	.chan_available = virtio_chan_available,
	.chan_setup = virtio_chan_setup,
	.chan_free = virtio_chan_free,
	.get_max_msg = virtio_get_max_msg,
	.send_message = virtio_send_message,
	.fetch_response = virtio_fetch_response,
	.fetch_notification = virtio_fetch_notification,
	.mark_txdone = virtio_mark_txdone,
	.poll_done = virtio_poll_done,
};

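/*
 * Probe the (single) SCMI virtio device: discover the available virtqueues,
 * initialize one scmi_vio_channel per virtqueue and derive the maximum
 * number of pending messages from the vring size.
 */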
static int scmi_vio_probe(struct virtio_device *vdev)
{
	struct device *dev = &vdev->dev;
	struct scmi_vio_channel *channels;
	bool have_vq_rx;
	int vq_cnt;
	int i;
	int ret;
	struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];

	/* Only one SCMI VirtIO device allowed */
	if (scmi_vdev) {
		dev_err(dev,
			"One SCMI Virtio device was already initialized: only one allowed.\n");
		return -EBUSY;
	}

	have_vq_rx = scmi_vio_have_vq_rx(vdev);
	vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;

	channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	if (have_vq_rx)
		channels[VIRTIO_SCMI_VQ_RX].is_rx = true;

	ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
			      scmi_vio_vqueue_names, NULL);
	if (ret) {
		dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
		return ret;
	}

	for (i = 0; i < vq_cnt; i++) {
		unsigned int sz;

		spin_lock_init(&channels[i].lock);
		spin_lock_init(&channels[i].free_lock);
		INIT_LIST_HEAD(&channels[i].free_list);
		spin_lock_init(&channels[i].pending_lock);
		INIT_LIST_HEAD(&channels[i].pending_cmds_list);
		channels[i].vqueue = vqs[i];

		sz = virtqueue_get_vring_size(channels[i].vqueue);
		/* Tx messages need multiple descriptors. */
		if (!channels[i].is_rx)
			sz /= DESCRIPTORS_PER_TX_MSG;

		if (sz > MSG_TOKEN_MAX) {
			dev_info(dev,
				 "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
				 channels[i].is_rx ? "rx" : "tx",
				 sz, MSG_TOKEN_MAX);
			sz = MSG_TOKEN_MAX;
		}
		channels[i].max_msg = sz;
	}

	vdev->priv = channels;
	/* Ensure initialized scmi_vdev is visible */
	smp_store_mb(scmi_vdev, vdev);

	return 0;
}

static void scmi_vio_remove(struct virtio_device *vdev)
{
	/*
	 * Once we get here, virtio_chan_free() will have already been called by
	 * the SCMI core for any existing channel and, as a consequence, all the
	 * virtio channels will have been already marked NOT ready, causing any
	 * outstanding message on any vqueue to be ignored by complete_cb: now
	 * we can just stop processing buffers and destroy the vqueues.
	 */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
	/* Ensure scmi_vdev is visible as NULL */
	smp_store_mb(scmi_vdev, NULL);
}

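/* Reject legacy-only devices when strict VirtIO 1.x compliance is configured. */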
static int scmi_vio_validate(struct virtio_device *vdev)
{
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev,
			"device does not comply with spec version 1.x\n");
		return -EINVAL;
	}
#endif
	return 0;
}

static unsigned int features[] = {
	VIRTIO_SCMI_F_P2A_CHANNELS,
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
	{ 0 }
};

static struct virtio_driver virtio_scmi_driver = {
	.driver.name = "scmi-virtio",
	.driver.owner = THIS_MODULE,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = scmi_vio_probe,
	.remove = scmi_vio_remove,
	.validate = scmi_vio_validate,
};

static int __init virtio_scmi_init(void)
{
	return register_virtio_driver(&virtio_scmi_driver);
}

static void virtio_scmi_exit(void)
{
	unregister_virtio_driver(&virtio_scmi_driver);
}

const struct scmi_desc scmi_virtio_desc = {
	.transport_init = virtio_scmi_init,
	.transport_exit = virtio_scmi_exit,
	.ops = &scmi_virtio_ops,
	/* for non-realtime virtio devices */
	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
	.max_msg = 0, /* overridden by virtio_get_max_msg() */
	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};