// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Raw mode support
 *
 * Copyright (C) 2022 ARM Ltd.
 */
/**
 * DOC: Theory of operation
 *
 * When enabled, the SCMI Raw mode support exposes a userspace API which
 * allows a user application to send and receive SCMI commands, replies and
 * notifications through injection and snooping of bare SCMI messages in
 * binary little-endian format.
 *
 * Such injected SCMI transactions will then be routed through the SCMI core
 * stack towards the SCMI backend server using whatever SCMI transport is
 * currently configured on the system under test.
 *
 * It is meant to help in running any sort of SCMI backend server testing, no
 * matter where the server is placed, as long as it is normally reachable via
 * the transport configured on the system.
 *
 * It is activated by a Kernel configuration option since it is NOT meant to
 * be used in production but only during development and in CI deployments.
 *
 * In order to avoid possible interference between the SCMI Raw transactions
 * originated from a test-suite and the normal operations of the SCMI drivers,
 * when Raw mode is enabled, by default, all the regular SCMI drivers are
 * inhibited, unless CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX is enabled: in this
 * latter case the regular SCMI stack drivers will be loaded as usual and it is
 * up to the user of this interface to take care of manually inhibiting the
 * regular SCMI drivers in order to avoid interference during the test runs.
 *
 * The exposed API is as follows.
 *
 * All SCMI Raw entries are rooted under a common /raw debugfs top directory
 * which in turn is rooted under the corresponding underlying SCMI instance.
 *
 * /sys/kernel/debug/scmi/
 * `-- 0
 *     |-- atomic_threshold_us
 *     |-- instance_name
 *     |-- raw
 *     |   |-- channels
 *     |   |   |-- 0x10
 *     |   |   |   |-- message
 *     |   |   |   `-- message_async
 *     |   |   `-- 0x13
 *     |   |       |-- message
 *     |   |       `-- message_async
 *     |   |-- errors
 *     |   |-- message
 *     |   |-- message_async
 *     |   |-- notification
 *     |   `-- reset
 *     `-- transport
 *         |-- is_atomic
 *         |-- max_msg_size
 *         |-- max_rx_timeout_ms
 *         |-- rx_max_msg
 *         |-- tx_max_msg
 *         `-- type
 *
 * where:
 *
 * - errors: used to read back timed-out and unexpected replies
 * - message*: used to send sync/async commands and read back immediate and
 *   delayed responses (if any)
 * - notification: used to read any notification being emitted by the system
 *   (if previously enabled by the user app)
 * - reset: used to flush the queues of messages (of any kind) still pending
 *   to be read; this is useful at test-suite start/stop to get rid of any
 *   unread messages from the previous run.
 *
 * with the per-channel entries rooted at /channels being present only on a
 * system where multiple transport channels have been configured.
 *
 * Such per-channel entries can be used to explicitly choose a specific channel
 * for SCMI bare message injection, in contrast with the general entries above
 * where, instead, the selection of the proper channel to use is automatically
 * performed based on the protocol embedded in the injected message and on how
 * the transport is configured on the system.
 *
 * Note that other common general entries are available under transport/ to let
 * the user applications properly set their expectations in terms of timeouts
 * and message characteristics.
 *
 * Each write to the message* entries causes one command request to be built
 * and sent while the replies or delayed response are read back from those same
 * entries one message at a time (receiving an EOF at each message boundary).
 *
 * The user application running the test is in charge of handling timeouts
 * on replies and properly choosing SCMI sequence numbers for the outgoing
 * requests (using the same sequence number is supported but discouraged).
 *
 * Injection of multiple in-flight requests is supported as long as the user
 * application uses properly distinct sequence numbers for concurrent requests
 * and takes care to properly manage all the related issues about concurrency
 * and command/reply pairing. Keep in mind, though, that the real level of
 * parallelism attainable in such a scenario depends on the characteristics
 * of the underlying transport being used.
 *
 * Since the SCMI core regular stack is partially used to deliver and collect
 * the messages, late replies arrived after timeouts and any other sort of
 * unexpected message can be identified by the SCMI core as usual and they will
 * be reported as messages under "errors" for later analysis.
 */
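
/*
 * A minimal userspace usage sketch (illustrative only: the helper name,
 * message bytes and error handling below are assumptions, not part of this
 * interface's contract; a little-endian host is assumed too):
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int inject_base_version(void)
 *	{
 *		// BASE (0x10) PROTOCOL_VERSION (0x0) command with token 1
 *		// and no payload: the bare message is just the LE32 header.
 *		uint32_t hdr = (1u << 18) | (0x10u << 10) | 0x0;
 *		struct pollfd pfd;
 *		uint8_t rx[128];
 *		int fd;
 *
 *		fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *
 *		if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
 *			return -1;
 *
 *		// Wait for the reply to be snooped, then read it back:
 *		// EOF marks the message boundary.
 *		pfd.fd = fd;
 *		pfd.events = POLLIN;
 *		poll(&pfd, 1, 1000);
 *
 *		return read(fd, rx, sizeof(rx));
 *	}
 */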

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "common.h"

#include "raw_mode.h"

#include <trace/events/scmi.h>

#define SCMI_XFER_RAW_MAX_RETRIES	10

/**
 * struct scmi_raw_queue - Generic Raw queue descriptor
 *
 * @free_bufs: A freelist listhead used to keep unused raw buffers
 * @free_bufs_lock: Spinlock used to protect access to @free_bufs
 * @msg_q: A listhead to a queue of snooped messages waiting to be read out
 * @msg_q_lock: Spinlock used to protect access to @msg_q
 * @wq: A waitqueue used to wait and poll on the related @msg_q
 */
struct scmi_raw_queue {
	struct list_head free_bufs;
	/* Protect the free_bufs list */
	spinlock_t free_bufs_lock;
	struct list_head msg_q;
	/* Protect the msg_q list */
	spinlock_t msg_q_lock;
	wait_queue_head_t wq;
};

/**
 * struct scmi_raw_mode_info - Structure holding SCMI Raw instance data
 *
 * @id: Sequential Raw instance ID.
 * @handle: Pointer to the SCMI entity handle to use
 * @desc: Pointer to the transport descriptor to use
 * @tx_max_msg: Maximum number of concurrent TX in-flight messages
 * @q: An array of Raw queue descriptors
 * @chans_q: An XArray mapping optional additional per-channel queues
 * @free_waiters: Head of the freelist of unused waiters
 * @free_mtx: A mutex to protect the waiters freelist
 * @active_waiters: Head of the list of currently active and used waiters
 * @active_mtx: A mutex to protect the active waiters list
 * @waiters_work: A work descriptor to be used with the workqueue machinery
 * @wait_wq: A workqueue reference to the created workqueue
 * @dentry: Top debugfs root dentry for SCMI Raw
 * @gid: A group ID used for devres accounting
 *
 * Note that this descriptor is passed back to the core after SCMI Raw is
 * initialized as an opaque handle to be used by subsequent SCMI Raw call
 * hooks.
 */
struct scmi_raw_mode_info {
	unsigned int id;
	const struct scmi_handle *handle;
	const struct scmi_desc *desc;
	int tx_max_msg;
	struct scmi_raw_queue *q[SCMI_RAW_MAX_QUEUE];
	struct xarray chans_q;
	struct list_head free_waiters;
	/* Protect the free_waiters list */
	struct mutex free_mtx;
	struct list_head active_waiters;
	/* Protect the active_waiters list */
	struct mutex active_mtx;
	struct work_struct waiters_work;
	struct workqueue_struct *wait_wq;
	struct dentry *dentry;
	void *gid;
};

/**
 * struct scmi_xfer_raw_waiter - Structure to describe an xfer to be waited for
 *
 * @start_jiffies: The timestamp in jiffies of when this structure was queued.
 * @cinfo: A reference to the channel to use for this transaction
 * @xfer: A reference to the xfer to be waited for
 * @async_response: A completion optionally used for async waits: it will be
 *		    set up by @scmi_do_xfer_raw_start, if needed, to be
 *		    pointed at by xfer->async_done.
 * @node: A list node.
 */
struct scmi_xfer_raw_waiter {
	unsigned long start_jiffies;
	struct scmi_chan_info *cinfo;
	struct scmi_xfer *xfer;
	struct completion async_response;
	struct list_head node;
};

/**
 * struct scmi_raw_buffer - Structure to hold a full SCMI message
 *
 * @max_len: The maximum allowed message size (header included) that can be
 *	     stored into @msg
 * @msg: A message buffer used to collect a full message grabbed from an xfer.
 * @node: A list node.
 */
struct scmi_raw_buffer {
	size_t max_len;
	struct scmi_msg msg;
	struct list_head node;
};

/**
 * struct scmi_dbg_raw_data - Structure holding data needed by the debugfs
 * layer
 *
 * @chan_id: The preferred channel to use: if zero, the channel is
 *	     automatically selected based on the protocol.
 * @raw: A reference to the Raw instance.
 * @tx: A message buffer used to collect the TX message on write.
 * @tx_size: The effective size of the TX message.
 * @tx_req_size: The final expected size of the complete TX message.
 * @rx: A message buffer used to collect the RX message on read.
 * @rx_size: The effective size of the RX message.
 */
struct scmi_dbg_raw_data {
	u8 chan_id;
	struct scmi_raw_mode_info *raw;
	struct scmi_msg tx;
	size_t tx_size;
	size_t tx_req_size;
	struct scmi_msg rx;
	size_t rx_size;
};

static struct scmi_raw_queue *
scmi_raw_queue_select(struct scmi_raw_mode_info *raw, unsigned int idx,
		      unsigned int chan_id)
{
	if (!chan_id)
		return raw->q[idx];

	return xa_load(&raw->chans_q, chan_id);
}

static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
{
	unsigned long flags;
	struct scmi_raw_buffer *rb = NULL;
	struct list_head *head = &q->free_bufs;

	spin_lock_irqsave(&q->free_bufs_lock, flags);
	if (!list_empty(head)) {
		rb = list_first_entry(head, struct scmi_raw_buffer, node);
		list_del_init(&rb->node);
	}
	spin_unlock_irqrestore(&q->free_bufs_lock, flags);

	return rb;
}

static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
				struct scmi_raw_buffer *rb)
{
	unsigned long flags;

	/* Reset to full buffer length */
	rb->msg.len = rb->max_len;

	spin_lock_irqsave(&q->free_bufs_lock, flags);
	list_add_tail(&rb->node, &q->free_bufs);
	spin_unlock_irqrestore(&q->free_bufs_lock, flags);
}

static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
				    struct scmi_raw_buffer *rb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->msg_q_lock, flags);
	list_add_tail(&rb->node, &q->msg_q);
	spin_unlock_irqrestore(&q->msg_q_lock, flags);

	wake_up_interruptible(&q->wq);
}

static struct scmi_raw_buffer *
scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
{
	struct scmi_raw_buffer *rb = NULL;

	if (!list_empty(&q->msg_q)) {
		rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
		list_del_init(&rb->node);
	}

	return rb;
}

static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
{
	unsigned long flags;
	struct scmi_raw_buffer *rb;

	spin_lock_irqsave(&q->msg_q_lock, flags);
	rb = scmi_raw_buffer_dequeue_unlocked(q);
	spin_unlock_irqrestore(&q->msg_q_lock, flags);

	return rb;
}

static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
{
	struct scmi_raw_buffer *rb;

	do {
		rb = scmi_raw_buffer_dequeue(q);
		if (rb)
			scmi_raw_buffer_put(q, rb);
	} while (rb);
}

static struct scmi_xfer_raw_waiter *
scmi_xfer_raw_waiter_get(struct scmi_raw_mode_info *raw, struct scmi_xfer *xfer,
			 struct scmi_chan_info *cinfo, bool async)
{
	struct scmi_xfer_raw_waiter *rw = NULL;

	mutex_lock(&raw->free_mtx);
	if (!list_empty(&raw->free_waiters)) {
		rw = list_first_entry(&raw->free_waiters,
				      struct scmi_xfer_raw_waiter, node);
		list_del_init(&rw->node);

		if (async) {
			reinit_completion(&rw->async_response);
			xfer->async_done = &rw->async_response;
		}

		rw->cinfo = cinfo;
		rw->xfer = xfer;
	}
	mutex_unlock(&raw->free_mtx);

	return rw;
}

static void scmi_xfer_raw_waiter_put(struct scmi_raw_mode_info *raw,
				     struct scmi_xfer_raw_waiter *rw)
{
	if (rw->xfer) {
		rw->xfer->async_done = NULL;
		rw->xfer = NULL;
	}

	mutex_lock(&raw->free_mtx);
	list_add_tail(&rw->node, &raw->free_waiters);
	mutex_unlock(&raw->free_mtx);
}

static void scmi_xfer_raw_waiter_enqueue(struct scmi_raw_mode_info *raw,
					 struct scmi_xfer_raw_waiter *rw)
{
	/* A timestamp for the deferred worker to know how much this has aged */
	rw->start_jiffies = jiffies;

	trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id,
				      rw->xfer->hdr.protocol_id,
				      rw->xfer->hdr.seq,
				      raw->desc->max_rx_timeout_ms,
				      rw->xfer->hdr.poll_completion);

	mutex_lock(&raw->active_mtx);
	list_add_tail(&rw->node, &raw->active_waiters);
	mutex_unlock(&raw->active_mtx);

	/* kick waiter work */
	queue_work(raw->wait_wq, &raw->waiters_work);
}

static struct scmi_xfer_raw_waiter *
scmi_xfer_raw_waiter_dequeue(struct scmi_raw_mode_info *raw)
{
	struct scmi_xfer_raw_waiter *rw = NULL;

	mutex_lock(&raw->active_mtx);
	if (!list_empty(&raw->active_waiters)) {
		rw = list_first_entry(&raw->active_waiters,
				      struct scmi_xfer_raw_waiter, node);
		list_del_init(&rw->node);
	}
	mutex_unlock(&raw->active_mtx);

	return rw;
}

/**
 * scmi_xfer_raw_worker - Work function to wait for Raw xfers completions
 *
 * @work: A reference to the work.
 *
 * In SCMI Raw mode, once a user-provided injected SCMI message is sent, we
 * cannot wait for its response (if any) in the context of the injection
 * routines, so as not to leave the userspace write syscall, which delivered
 * the SCMI message to send, pending until a reply is eventually received.
 * Userspace should and will poll/wait instead on the read syscalls, which
 * will be in charge of reading a received reply (if any).
 *
 * Even though reply messages are collected and reported into the SCMI Raw
 * layer on the RX path, we nonetheless have to properly wait for their
 * completion as usual (and for the async_response completion too, if needed)
 * in order to properly release the xfer structure at the end: to do this out
 * of the context of the write/send, these waiting jobs are delegated to this
 * deferred worker.
 *
 * Any sent xfer, to be waited for, is timestamped and queued for later
 * consumption by this worker: queue aging is accounted for while choosing a
 * timeout for the completion, BUT we do not really care here if we end up
 * accidentally waiting for a bit too long.
 */
static void scmi_xfer_raw_worker(struct work_struct *work)
{
	struct scmi_raw_mode_info *raw;
	struct device *dev;
	unsigned long max_tmo;

	raw = container_of(work, struct scmi_raw_mode_info, waiters_work);
	dev = raw->handle->dev;
	max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms);

	do {
		int ret = 0;
		unsigned int timeout_ms;
		unsigned long aging;
		struct scmi_xfer *xfer;
		struct scmi_xfer_raw_waiter *rw;
		struct scmi_chan_info *cinfo;

		rw = scmi_xfer_raw_waiter_dequeue(raw);
		if (!rw)
			return;

		cinfo = rw->cinfo;
		xfer = rw->xfer;
		/*
		 * Waiters are queued by wait-deadline at the end, so some of
		 * them could have been already expired when processed, BUT we
		 * have to check the completion status anyway just in case a
		 * virtually expired (aged) transaction was indeed completed
		 * fine and we'll have to wait for the asynchronous part (if
		 * any): for this reason a 1 ms timeout is used for already
		 * expired/aged xfers.
		 */
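		/*
		 * Worked example with hypothetical numbers: given a 30ms
		 * max_rx_timeout_ms and a waiter that already spent 10ms
		 * queued, wait at most the residual 20ms; a waiter aged
		 * beyond 30ms is checked with the minimal 1ms timeout.
		 */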
		aging = jiffies - rw->start_jiffies;
		timeout_ms = max_tmo > aging ?
			jiffies_to_msecs(max_tmo - aging) : 1;

		ret = scmi_xfer_raw_wait_for_message_response(cinfo, xfer,
							      timeout_ms);
		if (!ret && xfer->hdr.status)
			ret = scmi_to_linux_errno(xfer->hdr.status);

		if (raw->desc->ops->mark_txdone)
			raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);

		trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
				    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

		/* Wait also for an async delayed response if needed */
		if (!ret && xfer->async_done) {
			unsigned long tmo = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);

			if (!wait_for_completion_timeout(xfer->async_done, tmo))
				dev_err(dev,
					"timed out in RAW delayed resp - HDR:%08X\n",
					pack_scmi_header(&xfer->hdr));
		}

		/* Release waiter and xfer */
		scmi_xfer_raw_put(raw->handle, xfer);
		scmi_xfer_raw_waiter_put(raw, rw);
	} while (1);
}

static void scmi_xfer_raw_reset(struct scmi_raw_mode_info *raw)
{
	int i;

	dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n");

	for (i = 0; i < SCMI_RAW_MAX_QUEUE; i++)
		scmi_raw_buffer_queue_flush(raw->q[i]);
}

/**
 * scmi_xfer_raw_get_init - A helper to build a valid xfer from the provided
 * bare SCMI message.
 *
 * @raw: A reference to the Raw instance.
 * @buf: A buffer containing the whole SCMI message to send (including the
 *	 header) in little-endian binary format.
 * @len: Length of the message in @buf.
 * @p: A pointer to return the initialized Raw xfer.
 *
 * After an xfer is picked from the TX pool and filled in with the message
 * content, the xfer is registered as pending with the core in the usual way
 * using the original sequence number provided by the user with the message.
 *
 * Note that, in case the testing user application is NOT using distinct
 * sequence numbers between successive SCMI messages, such registration could
 * fail temporarily if the previous message using the same sequence number
 * has not been released yet; in such a case we just wait and retry.
 *
 * Return: 0 on Success
 */
static int scmi_xfer_raw_get_init(struct scmi_raw_mode_info *raw, void *buf,
				  size_t len, struct scmi_xfer **p)
{
	u32 msg_hdr;
	size_t tx_size;
	struct scmi_xfer *xfer;
	int ret, retry = SCMI_XFER_RAW_MAX_RETRIES;
	struct device *dev = raw->handle->dev;

	if (!buf || len < sizeof(u32))
		return -EINVAL;

	tx_size = len - sizeof(u32);
	/* Ensure we have sane transfer sizes */
	if (tx_size > raw->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_raw_get(raw->handle);
	if (IS_ERR(xfer)) {
		dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n");
		return PTR_ERR(xfer);
	}

	/* Build xfer from the provided SCMI bare LE message */
	msg_hdr = le32_to_cpu(*((__le32 *)buf));
	unpack_scmi_header(msg_hdr, &xfer->hdr);
	xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr);
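	/*
	 * As a reference (assuming the usual SCMI header layout): msg_hdr
	 * packs msg_id in bits [7:0], type in [9:8], protocol_id in [17:10]
	 * and the token in [27:18], so e.g. 0x00044000 would be a BASE
	 * (0x10) PROTOCOL_VERSION (0x0) command carrying token 1.
	 */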
	/* Polling not supported */
	xfer->hdr.poll_completion = false;
	xfer->hdr.status = SCMI_SUCCESS;
	xfer->tx.len = tx_size;
	xfer->rx.len = raw->desc->max_msg_size;
	/* Clear the whole TX buffer */
	memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size);
	if (xfer->tx.len)
		memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len);
	*p = xfer;

	/*
	 * In flight registration can temporarily fail in case of Raw messages
	 * if the user injects messages without using monotonically increasing
	 * sequence numbers since, in Raw mode, the xfer (and the token) is
	 * finally released later by a deferred worker. Just retry for a while.
	 */
	do {
		ret = scmi_xfer_raw_inflight_register(raw->handle, xfer);
		if (ret) {
			dev_dbg(dev,
				"...retrying[%d] inflight registration\n",
				retry);
			msleep(raw->desc->max_rx_timeout_ms /
			       SCMI_XFER_RAW_MAX_RETRIES);
		}
	} while (ret && --retry);

	if (ret) {
		dev_warn(dev,
			 "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n",
			 xfer->hdr.seq, msg_hdr);
		scmi_xfer_raw_put(raw->handle, xfer);
	}

	return ret;
}

/**
 * scmi_do_xfer_raw_start - A helper to send a valid raw xfer
 *
 * @raw: A reference to the Raw instance.
 * @xfer: The xfer to send
 * @chan_id: The channel ID to use; if zero, the channel is automatically
 *	     selected based on the protocol used.
 * @async: A flag stating if an asynchronous command is required.
 *
 * This function sends a previously built raw xfer using an appropriate
 * channel and queues the related waiting work.
 *
 * Note that we need to know explicitly if the required command is meant to be
 * asynchronous in kind since we have to properly set up the waiter
 * (and deducing this from the payload is weak and does not scale given there
 * is NOT a common header-flag stating if the command is asynchronous or not).
 *
 * Return: 0 on Success
 */
static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
				  struct scmi_xfer *xfer, u8 chan_id,
				  bool async)
{
	int ret;
	struct scmi_chan_info *cinfo;
	struct scmi_xfer_raw_waiter *rw;
	struct device *dev = raw->handle->dev;

	if (!chan_id)
		chan_id = xfer->hdr.protocol_id;
	else
		xfer->flags |= SCMI_XFER_FLAG_CHAN_SET;

	cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id);
	if (IS_ERR(cinfo))
		return PTR_ERR(cinfo);

	rw = scmi_xfer_raw_waiter_get(raw, xfer, cinfo, async);
	if (!rw) {
		dev_warn(dev, "RAW - Cannot get a free waiter !\n");
		return -ENOMEM;
	}

	/* True ONLY if also supported by transport. */
	if (is_polling_enabled(cinfo, raw->desc))
		xfer->hdr.poll_completion = true;

	reinit_completion(&xfer->done);
	/* Make sure xfer state update is visible before sending */
	smp_store_mb(xfer->state, SCMI_XFER_SENT_OK);

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = raw->desc->ops->send_message(rw->cinfo, xfer);
	if (ret) {
		dev_err(dev, "Failed to send RAW message %d\n", ret);
		scmi_xfer_raw_waiter_put(raw, rw);
		return ret;
	}

	trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "cmnd", xfer->hdr.seq,
			    xfer->hdr.status,
			    xfer->tx.buf, xfer->tx.len);

	scmi_xfer_raw_waiter_enqueue(raw, rw);

	return ret;
}

/**
 * scmi_raw_message_send - A helper to build and send an SCMI command using
 * the provided SCMI bare message buffer
 *
 * @raw: A reference to the Raw instance.
 * @buf: A buffer containing the whole SCMI message to send (including the
 *	 header) in little-endian binary format.
 * @len: Length of the message in @buf.
 * @chan_id: The channel ID to use.
 * @async: A flag stating if an asynchronous command is required.
 *
 * Return: 0 on Success
 */
static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
				 void *buf, size_t len, u8 chan_id, bool async)
{
	int ret;
	struct scmi_xfer *xfer;

	ret = scmi_xfer_raw_get_init(raw, buf, len, &xfer);
	if (ret)
		return ret;

	ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
	if (ret)
		scmi_xfer_raw_put(raw->handle, xfer);

	return ret;
}

static struct scmi_raw_buffer *
scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
{
	unsigned long flags;
	struct scmi_raw_buffer *rb;

	spin_lock_irqsave(&q->msg_q_lock, flags);
	while (list_empty(&q->msg_q)) {
		spin_unlock_irqrestore(&q->msg_q_lock, flags);

		if (o_nonblock)
			return ERR_PTR(-EAGAIN);

		if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q)))
			return ERR_PTR(-ERESTARTSYS);

		spin_lock_irqsave(&q->msg_q_lock, flags);
	}

	rb = scmi_raw_buffer_dequeue_unlocked(q);

	spin_unlock_irqrestore(&q->msg_q_lock, flags);

	return rb;
}

/**
 * scmi_raw_message_receive - A helper to dequeue and report the next
 * available enqueued raw message payload that has been collected.
 *
 * @raw: A reference to the Raw instance.
 * @buf: A buffer in which to return the whole received SCMI message,
 *	 represented in little-endian binary format.
 * @len: Length of @buf.
 * @size: The effective size of the message copied into @buf
 * @idx: The index of the queue to pick the next queued message from.
 * @chan_id: The channel ID to use.
 * @o_nonblock: A flag to request a non-blocking message dequeue.
 *
 * Return: 0 on Success
 */
static int scmi_raw_message_receive(struct scmi_raw_mode_info *raw,
				    void *buf, size_t len, size_t *size,
				    unsigned int idx, unsigned int chan_id,
				    bool o_nonblock)
{
	int ret = 0;
	struct scmi_raw_buffer *rb;
	struct scmi_raw_queue *q;

	q = scmi_raw_queue_select(raw, idx, chan_id);
	if (!q)
		return -ENODEV;

	rb = scmi_raw_message_dequeue(q, o_nonblock);
	if (IS_ERR(rb)) {
		dev_dbg(raw->handle->dev, "RAW - No message available!\n");
		return PTR_ERR(rb);
	}

	if (rb->msg.len <= len) {
		memcpy(buf, rb->msg.buf, rb->msg.len);
		*size = rb->msg.len;
	} else {
		ret = -ENOSPC;
	}

	scmi_raw_buffer_put(q, rb);

	return ret;
}

/* SCMI Raw debugfs helpers */

static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
					     char __user *buf,
					     size_t count, loff_t *ppos,
					     unsigned int idx)
{
	ssize_t cnt;
	struct scmi_dbg_raw_data *rd = filp->private_data;

	if (!rd->rx_size) {
		int ret;

		ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len,
					       &rd->rx_size, idx, rd->chan_id,
					       filp->f_flags & O_NONBLOCK);
		if (ret) {
			rd->rx_size = 0;
			return ret;
		}

		/* Reset any previous filepos change, including writes */
		*ppos = 0;
	} else if (*ppos == rd->rx_size) {
		/* Return EOF once the whole message has been read out */
		rd->rx_size = 0;
		return 0;
	}

	cnt = simple_read_from_buffer(buf, count, ppos,
				      rd->rx.buf, rd->rx_size);

	return cnt;
}

static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
					      const char __user *buf,
					      size_t count, loff_t *ppos,
					      bool async)
{
	int ret;
	struct scmi_dbg_raw_data *rd = filp->private_data;

	if (count > rd->tx.len - rd->tx_size)
		return -ENOSPC;

	/* On first write attempt @count carries the total full message size. */
	if (!rd->tx_size)
		rd->tx_req_size = count;

	/*
	 * Gather a full message, possibly across multiple interrupted writes,
	 * before sending it with a single RAW xfer.
	 */
	if (rd->tx_size < rd->tx_req_size) {
		ssize_t cnt;

		cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos,
					     buf, count);
		if (cnt < 0)
			return cnt;

		rd->tx_size += cnt;
		if (cnt < count)
			return cnt;
	}

	ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
				    rd->chan_id, async);

	/* Reset ppos for next message ... */
	rd->tx_size = 0;
	*ppos = 0;

	return ret ?: count;
}
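
/*
 * A sender is thus expected to loop until its whole message has been
 * consumed; a minimal userspace sketch of such a loop (illustrative only:
 * the send_raw() helper below is hypothetical and not part of this file):
 *
 *	static ssize_t send_raw(int fd, const uint8_t *msg, size_t len)
 *	{
 *		size_t sent = 0;
 *
 *		while (sent < len) {
 *			ssize_t n = write(fd, msg + sent, len - sent);
 *
 *			if (n < 0) {
 *				if (errno == EINTR)
 *					continue;
 *				return -1;
 *			}
 *			sent += n;
 *		}
 *
 *		return sent;
 *	}
 */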

static __poll_t scmi_test_dbg_raw_common_poll(struct file *filp,
					      struct poll_table_struct *wait,
					      unsigned int idx)
{
	unsigned long flags;
	struct scmi_dbg_raw_data *rd = filp->private_data;
	struct scmi_raw_queue *q;
	__poll_t mask = 0;

	q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id);
	if (!q)
		return mask;

	poll_wait(filp, &q->wq, wait);

	spin_lock_irqsave(&q->msg_q_lock, flags);
	if (!list_empty(&q->msg_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&q->msg_q_lock, flags);

	return mask;
}

static ssize_t scmi_dbg_raw_mode_message_read(struct file *filp,
					      char __user *buf,
					      size_t count, loff_t *ppos)
{
	return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
					     SCMI_RAW_REPLY_QUEUE);
}

static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
					       const char __user *buf,
					       size_t count, loff_t *ppos)
{
	return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
}

static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
					       struct poll_table_struct *wait)
{
	return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_REPLY_QUEUE);
}

static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
{
	u8 id;
	struct scmi_raw_mode_info *raw;
	struct scmi_dbg_raw_data *rd;
	const char *id_str = filp->f_path.dentry->d_parent->d_name.name;

	if (!inode->i_private)
		return -ENODEV;

	raw = inode->i_private;
	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	rd->rx.len = raw->desc->max_msg_size + sizeof(u32);
	rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL);
	if (!rd->rx.buf) {
		kfree(rd);
		return -ENOMEM;
	}

	rd->tx.len = raw->desc->max_msg_size + sizeof(u32);
	rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL);
	if (!rd->tx.buf) {
		kfree(rd->rx.buf);
		kfree(rd);
		return -ENOMEM;
	}

	/* Grab channel ID from debugfs entry naming if any */
	if (!kstrtou8(id_str, 16, &id))
		rd->chan_id = id;

	rd->raw = raw;
	filp->private_data = rd;

	return nonseekable_open(inode, filp);
}

static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
{
	struct scmi_dbg_raw_data *rd = filp->private_data;

	kfree(rd->rx.buf);
	kfree(rd->tx.buf);
	kfree(rd);

	return 0;
}

static ssize_t scmi_dbg_raw_mode_reset_write(struct file *filp,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	struct scmi_dbg_raw_data *rd = filp->private_data;

	scmi_xfer_raw_reset(rd->raw);

	return count;
}

static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
	.open = scmi_dbg_raw_mode_open,
	.release = scmi_dbg_raw_mode_release,
	.write = scmi_dbg_raw_mode_reset_write,
	.llseek = no_llseek,
	.owner = THIS_MODULE,
};

static const struct file_operations scmi_dbg_raw_mode_message_fops = {
	.open = scmi_dbg_raw_mode_open,
	.release = scmi_dbg_raw_mode_release,
	.read = scmi_dbg_raw_mode_message_read,
	.write = scmi_dbg_raw_mode_message_write,
	.poll = scmi_dbg_raw_mode_message_poll,
	.llseek = no_llseek,
	.owner = THIS_MODULE,
};

static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
						     const char __user *buf,
						     size_t count, loff_t *ppos)
{
	return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
}

static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
	.open = scmi_dbg_raw_mode_open,
	.release = scmi_dbg_raw_mode_release,
	.read = scmi_dbg_raw_mode_message_read,
	.write = scmi_dbg_raw_mode_message_async_write,
	.poll = scmi_dbg_raw_mode_message_poll,
	.llseek = no_llseek,
	.owner = THIS_MODULE,
};

static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
						 char __user *buf,
						 size_t count, loff_t *ppos)
{
	return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
					     SCMI_RAW_NOTIF_QUEUE);
}

static __poll_t
scmi_test_dbg_raw_mode_notif_poll(struct file *filp,
				  struct poll_table_struct *wait)
{
	return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_NOTIF_QUEUE);
}

static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
	.open = scmi_dbg_raw_mode_open,
	.release = scmi_dbg_raw_mode_release,
	.read = scmi_test_dbg_raw_mode_notif_read,
	.poll = scmi_test_dbg_raw_mode_notif_poll,
	.llseek = no_llseek,
	.owner = THIS_MODULE,
};

static ssize_t scmi_test_dbg_raw_mode_errors_read(struct file *filp,
						  char __user *buf,
						  size_t count, loff_t *ppos)
{
	return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
					     SCMI_RAW_ERRS_QUEUE);
}

static __poll_t
scmi_test_dbg_raw_mode_errors_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_ERRS_QUEUE);
}

static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
	.open = scmi_dbg_raw_mode_open,
	.release = scmi_dbg_raw_mode_release,
	.read = scmi_test_dbg_raw_mode_errors_read,
	.poll = scmi_test_dbg_raw_mode_errors_poll,
	.llseek = no_llseek,
	.owner = THIS_MODULE,
};

static struct scmi_raw_queue *
scmi_raw_queue_init(struct scmi_raw_mode_info *raw)
{
	int i;
	struct scmi_raw_buffer *rb;
	struct device *dev = raw->handle->dev;
	struct scmi_raw_queue *q;

	q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL);
	if (!rb)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&q->free_bufs_lock);
	INIT_LIST_HEAD(&q->free_bufs);
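	/* Pre-populate the freelist, one buffer per possible in-flight message */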
	for (i = 0; i < raw->tx_max_msg; i++, rb++) {
		rb->max_len = raw->desc->max_msg_size + sizeof(u32);
		rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL);
		if (!rb->msg.buf)
			return ERR_PTR(-ENOMEM);
		scmi_raw_buffer_put(q, rb);
	}

	spin_lock_init(&q->msg_q_lock);
	INIT_LIST_HEAD(&q->msg_q);
	init_waitqueue_head(&q->wq);

	return q;
}

static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
{
	int i;
	struct scmi_xfer_raw_waiter *rw;
	struct device *dev = raw->handle->dev;

	rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL);
	if (!rw)
		return -ENOMEM;

	raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
				       WQ_UNBOUND | WQ_FREEZABLE |
				       WQ_HIGHPRI | WQ_SYSFS, 0, raw->id);
	if (!raw->wait_wq)
		return -ENOMEM;

	mutex_init(&raw->free_mtx);
	INIT_LIST_HEAD(&raw->free_waiters);
	mutex_init(&raw->active_mtx);
	INIT_LIST_HEAD(&raw->active_waiters);

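	/* Pre-populate the waiters freelist, one waiter per in-flight TX message */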
	for (i = 0; i < raw->tx_max_msg; i++, rw++) {
		init_completion(&rw->async_response);
		scmi_xfer_raw_waiter_put(raw, rw);
	}
	INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker);

	return 0;
}

static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
			       u8 *channels, int num_chans)
{
	int ret, idx;
	void *gid;
	struct device *dev = raw->handle->dev;

	gid = devres_open_group(dev, NULL, GFP_KERNEL);
	if (!gid)
		return -ENOMEM;

	for (idx = 0; idx < SCMI_RAW_MAX_QUEUE; idx++) {
		raw->q[idx] = scmi_raw_queue_init(raw);
		if (IS_ERR(raw->q[idx])) {
			ret = PTR_ERR(raw->q[idx]);
			goto err;
		}
	}

	xa_init(&raw->chans_q);
	if (num_chans > 1) {
		int i;

		for (i = 0; i < num_chans; i++) {
			struct scmi_raw_queue *q;

			q = scmi_raw_queue_init(raw);
			if (IS_ERR(q)) {
				ret = PTR_ERR(q);
				goto err_xa;
			}

			ret = xa_insert(&raw->chans_q, channels[i], q,
					GFP_KERNEL);
			if (ret) {
				dev_err(dev,
					"Fail to allocate Raw queue 0x%02X\n",
					channels[i]);
				goto err_xa;
			}
		}
	}

	ret = scmi_xfer_raw_worker_init(raw);
	if (ret)
		goto err_xa;

	devres_close_group(dev, gid);
	raw->gid = gid;

	return 0;

err_xa:
	xa_destroy(&raw->chans_q);
err:
	devres_release_group(dev, gid);
	return ret;
}

/**
 * scmi_raw_mode_init - Function to initialize the SCMI Raw stack
 *
 * @handle: Pointer to SCMI entity handle
 * @top_dentry: A reference to the top Raw debugfs dentry
 * @instance_id: The ID of the underlying SCMI platform instance represented by
 *		 this Raw instance
 * @channels: The list of the existing channels
 * @num_chans: The number of entries in @channels
 * @desc: Reference to the transport operations
 * @tx_max_msg: Max number of in-flight messages allowed by the transport
 *
 * This function prepares the SCMI Raw stack and creates the debugfs API.
 *
 * Return: An opaque handle to the Raw instance on Success, an ERR_PTR otherwise
 */
void *scmi_raw_mode_init(const struct scmi_handle *handle,
			 struct dentry *top_dentry, int instance_id,
			 u8 *channels, int num_chans,
			 const struct scmi_desc *desc, int tx_max_msg)
{
	int ret;
	struct scmi_raw_mode_info *raw;
	struct device *dev;

	if (!handle || !desc)
		return ERR_PTR(-EINVAL);

	dev = handle->dev;
	raw = devm_kzalloc(dev, sizeof(*raw), GFP_KERNEL);
	if (!raw)
		return ERR_PTR(-ENOMEM);

	raw->handle = handle;
	raw->desc = desc;
	raw->tx_max_msg = tx_max_msg;
	raw->id = instance_id;

	ret = scmi_raw_mode_setup(raw, channels, num_chans);
	if (ret) {
		devm_kfree(dev, raw);
		return ERR_PTR(ret);
	}

	raw->dentry = debugfs_create_dir("raw", top_dentry);

	debugfs_create_file("reset", 0200, raw->dentry, raw,
			    &scmi_dbg_raw_mode_reset_fops);

	debugfs_create_file("message", 0600, raw->dentry, raw,
			    &scmi_dbg_raw_mode_message_fops);

	debugfs_create_file("message_async", 0600, raw->dentry, raw,
			    &scmi_dbg_raw_mode_message_async_fops);

	debugfs_create_file("notification", 0400, raw->dentry, raw,
			    &scmi_dbg_raw_mode_notification_fops);

	debugfs_create_file("errors", 0400, raw->dentry, raw,
			    &scmi_dbg_raw_mode_errors_fops);

	/*
	 * Expose per-channel entries if multiple channels available.
	 * Just ignore errors while setting up these interfaces since we
	 * have anyway already a working core Raw support.
	 */
	if (num_chans > 1) {
		int i;
		struct dentry *top_chans;

		top_chans = debugfs_create_dir("channels", raw->dentry);

		for (i = 0; i < num_chans; i++) {
			char cdir[8];
			struct dentry *chd;

			snprintf(cdir, 8, "0x%02X", channels[i]);
			chd = debugfs_create_dir(cdir, top_chans);

			debugfs_create_file("message", 0600, chd, raw,
					    &scmi_dbg_raw_mode_message_fops);

			debugfs_create_file("message_async", 0600, chd, raw,
					    &scmi_dbg_raw_mode_message_async_fops);
		}
	}

	dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id);

	return raw;
}

/**
 * scmi_raw_mode_cleanup - Function to cleanup the SCMI Raw stack
 *
 * @r: An opaque handle to an initialized SCMI Raw instance
 */
void scmi_raw_mode_cleanup(void *r)
{
	struct scmi_raw_mode_info *raw = r;

	if (!raw)
		return;

	debugfs_remove_recursive(raw->dentry);

	cancel_work_sync(&raw->waiters_work);
	destroy_workqueue(raw->wait_wq);
	xa_destroy(&raw->chans_q);
}

static int scmi_xfer_raw_collect(void *msg, size_t *msg_len,
				 struct scmi_xfer *xfer)
{
	__le32 *m;
	size_t msg_size;

	if (!xfer || !msg || !msg_len)
		return -EINVAL;

	/* Account for hdr ...*/
	msg_size = xfer->rx.len + sizeof(u32);
	/* ... and status if needed */
	if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
		msg_size += sizeof(u32);

	if (msg_size > *msg_len)
		return -ENOSPC;

	m = msg;
	*m = cpu_to_le32(pack_scmi_header(&xfer->hdr));
	if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
		*++m = cpu_to_le32(xfer->hdr.status);

	memcpy(++m, xfer->rx.buf, xfer->rx.len);

	*msg_len = msg_size;

	return 0;
}
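
/*
 * The layout produced above is what userspace reads back: one LE32 header
 * word, an LE32 status word for anything but notifications, then the raw
 * payload. As a hypothetical example, a reply carrying a 4-byte payload is
 * reported as a 12-byte message: HDR | STATUS | PAYLOAD.
 */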

/**
 * scmi_raw_message_report - Helper to report back valid responses and
 * notifications to raw message requests.
 *
 * @r: An opaque reference to the raw instance configuration
 * @xfer: The xfer containing the message to be reported
 * @idx: The index of the queue.
 * @chan_id: The channel ID to use.
 *
 * If Raw mode is enabled, this is called from the SCMI core on the regular RX
 * path to save and enqueue the response/notification payload carried by this
 * xfer into a dedicated scmi_raw_buffer for later consumption by the user.
 *
 * This way the caller can free the related xfer immediately afterwards and the
 * user can read back the raw message payload at its own pace (if ever) without
 * holding an xfer for too long.
 */
void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
			     unsigned int idx, unsigned int chan_id)
{
	int ret;
	unsigned long flags;
	struct scmi_raw_buffer *rb;
	struct device *dev;
	struct scmi_raw_queue *q;
	struct scmi_raw_mode_info *raw = r;

	if (!raw || (idx == SCMI_RAW_REPLY_QUEUE && !SCMI_XFER_IS_RAW(xfer)))
		return;

	dev = raw->handle->dev;
	q = scmi_raw_queue_select(raw, idx,
				  SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
	if (!q) {
		dev_warn(dev,
			 "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
			 idx, chan_id);
		return;
	}

	/*
	 * Grab the msg_q_lock upfront to avoid a possible race between
	 * realizing the free list was empty and effectively picking the next
	 * buffer to use from the oldest one enqueued and still unread on this
	 * msg_q.
	 *
	 * Note that nowhere else are these locks taken together, so there is
	 * no risk of deadlock due to inversion.
	 */
	spin_lock_irqsave(&q->msg_q_lock, flags);
	rb = scmi_raw_buffer_get(q);
	if (!rb) {
		/*
		 * Immediate and delayed replies to previously injected Raw
		 * commands MUST be read back from userspace to free the
		 * buffers: if this is not happening something is seriously
		 * broken and must be fixed at the application level: complain
		 * loudly.
		 */
		if (idx == SCMI_RAW_REPLY_QUEUE) {
			spin_unlock_irqrestore(&q->msg_q_lock, flags);
			dev_warn(dev,
				 "RAW[%d] - Buffers exhausted. Dropping report.\n",
				 idx);
			return;
		}

		/*
		 * Notifications and errors queues are instead handled in a
		 * circular manner: unread old buffers are just overwritten by
		 * newer ones.
		 *
		 * The main reason for this is that notifications originated
		 * by Raw requests cannot be distinguished from normal ones,
		 * so the Raw buffer queues risk being flooded and depleted by
		 * notifications if mistakenly left enabled, or when in
		 * coexistence mode.
		 */
		rb = scmi_raw_buffer_dequeue_unlocked(q);
		if (WARN_ON(!rb)) {
			spin_unlock_irqrestore(&q->msg_q_lock, flags);
			return;
		}

		/* Reset to full buffer length */
		rb->msg.len = rb->max_len;

		dev_warn_once(dev,
			      "RAW[%d] - Buffers exhausted. Re-using oldest.\n",
			      idx);
	}
	spin_unlock_irqrestore(&q->msg_q_lock, flags);

	ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer);
	if (ret) {
		dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n");
		scmi_raw_buffer_put(q, rb);
		return;
	}

	scmi_raw_buffer_enqueue(q, rb);
}

static void scmi_xfer_raw_fill(struct scmi_raw_mode_info *raw,
			       struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer, u32 msg_hdr)
{
	/* Unpack received HDR as it is */
	unpack_scmi_header(msg_hdr, &xfer->hdr);
	xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);

	memset(xfer->rx.buf, 0x00, xfer->rx.len);

	raw->desc->ops->fetch_response(cinfo, xfer);
}

/**
 * scmi_raw_error_report - Helper to report back timed-out or generally
 * unexpected replies.
 *
 * @r: An opaque reference to the raw instance configuration
 * @cinfo: A reference to the channel to use to retrieve the broken xfer
 * @msg_hdr: The SCMI message header of the message to fetch and report
 * @priv: Any private data related to the xfer.
 *
 * If Raw mode is enabled, this is called from the SCMI core on the RX path in
 * case of errors to save and enqueue the bad message payload carried by the
 * message that has just been received.
 *
 * Note that we have to manually fetch any available payload into a temporary
 * xfer to be able to save and enqueue the message, since the regular RX error
 * path that called this would not have fetched the message payload, having
 * classified it as an error.
 */
void scmi_raw_error_report(void *r, struct scmi_chan_info *cinfo,
			   u32 msg_hdr, void *priv)
{
	struct scmi_xfer xfer;
	struct scmi_raw_mode_info *raw = r;

	if (!raw)
		return;

	xfer.rx.len = raw->desc->max_msg_size;
	xfer.rx.buf = kzalloc(xfer.rx.len, GFP_ATOMIC);
	if (!xfer.rx.buf) {
		dev_info(raw->handle->dev,
			 "Cannot report Raw error for HDR:0x%X - ENOMEM\n",
			 msg_hdr);
		return;
	}

	/* Any transport-provided priv must be passed back down to transport */
	if (priv)
		/* Ensure priv is visible */
		smp_store_mb(xfer.priv, priv);

	scmi_xfer_raw_fill(raw, cinfo, &xfer, msg_hdr);
	scmi_raw_message_report(raw, &xfer, SCMI_RAW_ERRS_QUEUE, 0);

	kfree(xfer.rx.buf);
}
