// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_setup_regs - initialize control queue registers
 * @cq: pointer to the specific control queue
 * @q_create_info: struct containing info for the queue being initialized
 */
static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
				 struct idpf_ctlq_create_info *q_create_info)
{
	/* set control queue registers in our local struct */
	cq->reg.head = q_create_info->reg.head;
	cq->reg.tail = q_create_info->reg.tail;
	cq->reg.len = q_create_info->reg.len;
	cq->reg.bah = q_create_info->reg.bah;
	cq->reg.bal = q_create_info->reg.bal;
	cq->reg.len_mask = q_create_info->reg.len_mask;
	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
	cq->reg.head_mask = q_create_info->reg.head_mask;
}

/**
 * idpf_ctlq_init_regs - Initialize control queue registers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 * @is_rxq: true if receive control queue, false otherwise
 *
 * Initialize registers. The caller is expected to have already initialized the
 * descriptor ring memory and buffer memory
 */
static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
				bool is_rxq)
{
	/* Update tail to post pre-allocated buffers for rx queues */
	if (is_rxq)
		wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));

	/* For non-Mailbox control queues only TAIL needs to be set */
	if (cq->q_id != -1)
		return;

	/* Clear Head for both send and receive */
	wr32(hw, cq->reg.head, 0);

	/* set starting point */
	wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
	wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}

/**
 * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
 * pointers
 * @cq: pointer to the specific Control queue
 *
 * Record the address of the receive queue DMA buffers in the descriptors.
 * The buffers must have been previously allocated.
 */
static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
	int i;

	for (i = 0; i < cq->ring_size; i++) {
		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];

		/* No buffer to post to descriptor, continue */
		if (!bi)
			continue;

		desc->flags =
			cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
		desc->opcode = 0;
		desc->datalen = cpu_to_le16(bi->size);
		desc->ret_val = 0;
		desc->v_opcode_dtype = 0;
		desc->v_retval = 0;
		desc->params.indirect.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.indirect.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.indirect.param0 = 0;
		desc->params.indirect.sw_cookie = 0;
		desc->params.indirect.v_flags = 0;
	}
}

/**
 * idpf_ctlq_shutdown - shutdown the CQ
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for any control queue
 */
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	mutex_lock(&cq->cq_lock);

	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);

	/* Set ring_size to 0 to indicate uninitialized queue */
	cq->ring_size = 0;

	mutex_unlock(&cq->cq_lock);
	mutex_destroy(&cq->cq_lock);
}

/**
 * idpf_ctlq_add - add one control queue
 * @hw: pointer to hardware struct
 * @qinfo: info for queue to be created
 * @cq_out: (output) double pointer to control queue to be created
 *
 * Allocate and initialize a control queue and add it to the control queue list.
 * The allocated/initialized queue is passed back to the caller through cq_out
 * if no errors occur.
 *
 * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
 */
int idpf_ctlq_add(struct idpf_hw *hw,
		  struct idpf_ctlq_create_info *qinfo,
		  struct idpf_ctlq_info **cq_out)
{
	struct idpf_ctlq_info *cq;
	bool is_rxq = false;
	int err;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return -ENOMEM;

	cq->cq_type = qinfo->type;
	cq->q_id = qinfo->id;
	cq->buf_size = qinfo->buf_size;
	cq->ring_size = qinfo->len;

	cq->next_to_use = 0;
	cq->next_to_clean = 0;
	cq->next_to_post = cq->ring_size - 1;

	switch (qinfo->type) {
	case IDPF_CTLQ_TYPE_MAILBOX_RX:
		is_rxq = true;
		fallthrough;
	case IDPF_CTLQ_TYPE_MAILBOX_TX:
		err = idpf_ctlq_alloc_ring_res(hw, cq);
		break;
	default:
		err = -EBADR;
		break;
	}

	if (err)
		goto init_free_q;

	if (is_rxq) {
		idpf_ctlq_init_rxq_bufs(cq);
	} else {
		/* Allocate the array of msg pointers for TX queues */
		cq->bi.tx_msg = kcalloc(qinfo->len,
					sizeof(struct idpf_ctlq_msg *),
					GFP_KERNEL);
		if (!cq->bi.tx_msg) {
			err = -ENOMEM;
			goto init_dealloc_q_mem;
		}
	}

	idpf_ctlq_setup_regs(cq, qinfo);

	idpf_ctlq_init_regs(hw, cq, is_rxq);

	mutex_init(&cq->cq_lock);

	list_add(&cq->cq_list, &hw->cq_list_head);

	*cq_out = cq;

	return 0;

init_dealloc_q_mem:
	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);
init_free_q:
	kfree(cq);

	return err;
}
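
/*
 * Example (illustrative sketch, not part of the driver): adding one more
 * mailbox queue after idpf_ctlq_init() has run. Only the mailbox types are
 * accepted here; anything else fails with -EBADR. Sizes are placeholders and
 * the .reg offsets are device-specific.
 *
 *	struct idpf_ctlq_create_info qinfo = {
 *		.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
 *		.id = -1,		// mailbox id, see idpf_ctlq_init_regs()
 *		.len = 64,		// number of descriptors
 *		.buf_size = 4096,	// plus .reg.* register offsets
 *	};
 *	struct idpf_ctlq_info *new_cq;
 *	int err = idpf_ctlq_add(hw, &qinfo, &new_cq);
 */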

/**
 * idpf_ctlq_remove - deallocate and remove specified control queue
 * @hw: pointer to hardware struct
 * @cq: pointer to control queue to be removed
 */
void idpf_ctlq_remove(struct idpf_hw *hw,
		      struct idpf_ctlq_info *cq)
{
	list_del(&cq->cq_list);
	idpf_ctlq_shutdown(hw, cq);
	kfree(cq);
}

/**
 * idpf_ctlq_init - main initialization routine for all control queues
 * @hw: pointer to hardware struct
 * @num_q: number of queues to initialize
 * @q_info: array of structs containing info for each queue to be initialized
 *
 * This initializes any number and any type of control queues. This is an all
 * or nothing routine; if one fails, all previously allocated queues will be
 * destroyed. This must be called prior to using the individual add/remove
 * APIs.
 */
int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
		   struct idpf_ctlq_create_info *q_info)
{
	struct idpf_ctlq_info *cq, *tmp;
	int err;
	int i;

	INIT_LIST_HEAD(&hw->cq_list_head);

	for (i = 0; i < num_q; i++) {
		struct idpf_ctlq_create_info *qinfo = q_info + i;

		err = idpf_ctlq_add(hw, qinfo, &cq);
		if (err)
			goto init_destroy_qs;
	}

	return 0;

init_destroy_qs:
	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
		idpf_ctlq_remove(hw, cq);

	return err;
}
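
/*
 * Example (illustrative sketch, not part of the driver): describing the
 * mailbox TX/RX pair up front and handing the whole array to idpf_ctlq_init().
 * Queue length, buffer size and register offsets are placeholders.
 *
 *	struct idpf_ctlq_create_info q_info[] = {
 *		{ .type = IDPF_CTLQ_TYPE_MAILBOX_TX, .id = -1,
 *		  .len = 64, .buf_size = 4096 },	// plus .reg.* offsets
 *		{ .type = IDPF_CTLQ_TYPE_MAILBOX_RX, .id = -1,
 *		  .len = 64, .buf_size = 4096 },	// plus .reg.* offsets
 *	};
 *	int err = idpf_ctlq_init(hw, ARRAY_SIZE(q_info), q_info);
 *
 * On failure every queue created so far has already been removed; a
 * successful init is undone with idpf_ctlq_deinit().
 */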

/**
 * idpf_ctlq_deinit - destroy all control queues
 * @hw: pointer to hw struct
 */
void idpf_ctlq_deinit(struct idpf_hw *hw)
{
	struct idpf_ctlq_info *cq, *tmp;

	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
		idpf_ctlq_remove(hw, cq);
}

/**
 * idpf_ctlq_send - send command to Control Queue (CTQ)
 * @hw: pointer to hw struct
 * @cq: handle to control queue struct to send on
 * @num_q_msg: number of messages to send on control queue
 * @q_msg: pointer to array of queue messages to be sent
 *
 * The caller is expected to allocate DMAable buffers and pass them to the
 * send routine via the q_msg struct / control queue specific data struct.
 * The control queue will hold a reference to each send message until
 * the completion for that message has been cleaned.
 */
int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
{
	struct idpf_ctlq_desc *desc;
	int num_desc_avail;
	int err = 0;
	int i;

	mutex_lock(&cq->cq_lock);

	/* Ensure there are enough descriptors to send all messages */
	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
		err = -ENOSPC;
		goto err_unlock;
	}

	for (i = 0; i < num_q_msg; i++) {
		struct idpf_ctlq_msg *msg = &q_msg[i];

		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);

		desc->opcode = cpu_to_le16(msg->opcode);
		desc->pfid_vfid = cpu_to_le16(msg->func_id);

		desc->v_opcode_dtype = cpu_to_le32(msg->cookie.mbx.chnl_opcode);
		desc->v_retval = cpu_to_le32(msg->cookie.mbx.chnl_retval);

		desc->flags = cpu_to_le16((msg->host_id & IDPF_HOST_ID_MASK) <<
					  IDPF_CTLQ_FLAG_HOST_ID_S);
		if (msg->data_len) {
			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;

			desc->datalen |= cpu_to_le16(msg->data_len);
			desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_BUF);
			desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_RD);

			/* Update the address values in the desc with the pa
			 * value for respective buffer
			 */
			desc->params.indirect.addr_high =
				cpu_to_le32(upper_32_bits(buff->pa));
			desc->params.indirect.addr_low =
				cpu_to_le32(lower_32_bits(buff->pa));

			memcpy(&desc->params, msg->ctx.indirect.context,
			       IDPF_INDIRECT_CTX_SIZE);
		} else {
			memcpy(&desc->params, msg->ctx.direct,
			       IDPF_DIRECT_CTX_SIZE);
		}

		/* Store buffer info */
		cq->bi.tx_msg[cq->next_to_use] = msg;

		(cq->next_to_use)++;
		if (cq->next_to_use == cq->ring_size)
			cq->next_to_use = 0;
	}

	/* Force memory write to complete before letting hardware
	 * know that there are new descriptors to fetch.
	 */
	dma_wmb();

	wr32(hw, cq->reg.tail, cq->next_to_use);

err_unlock:
	mutex_unlock(&cq->cq_lock);

	return err;
}
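
/*
 * Example (illustrative sketch, not part of the driver): sending one indirect
 * (buffered) message on the mailbox TX queue. The DMA buffer, opcode values
 * and payload length are caller-owned assumptions.
 *
 *	struct idpf_ctlq_msg msg = { };
 *
 *	msg.opcode = mbx_opcode;		// device mailbox opcode
 *	msg.data_len = payload_len;		// bytes already written to dma_buf
 *	msg.cookie.mbx.chnl_opcode = chnl_op;	// virtchnl opcode echoed back
 *	msg.ctx.indirect.payload = dma_buf;	// caller-allocated idpf_dma_mem
 *
 *	err = idpf_ctlq_send(hw, tx_cq, 1, &msg);
 *
 * The msg and its buffer must remain valid until idpf_ctlq_clean_sq() hands
 * them back after hardware writes the descriptor back.
 */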

/**
 * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
 * requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
		       struct idpf_ctlq_msg *msg_status[])
{
	struct idpf_ctlq_desc *desc;
	u16 i, num_to_clean;
	u16 ntc, desc_err;

	if (*clean_count == 0)
		return 0;
	if (*clean_count > cq->ring_size)
		return -EBADR;

	mutex_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *clean_count;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
			break;

		/* strip off FW internal code */
		desc_err = le16_to_cpu(desc->ret_val) & 0xff;

		msg_status[i] = cq->bi.tx_msg[ntc];
		msg_status[i]->status = desc_err;

		cq->bi.tx_msg[ntc] = NULL;

		/* Zero out any stale data */
		memset(desc, 0, sizeof(*desc));

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	mutex_unlock(&cq->cq_lock);

	/* Return number of descriptors actually cleaned */
	*clean_count = i;

	return 0;
}
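
/*
 * Example (illustrative sketch, not part of the driver): reclaiming completed
 * send messages after an interrupt or poll. Array sizing and the error/free
 * handling are assumptions about the caller.
 *
 *	struct idpf_ctlq_msg *done[16];
 *	u16 cleaned = ARRAY_SIZE(done);
 *	u16 i;
 *
 *	err = idpf_ctlq_clean_sq(tx_cq, &cleaned, done);
 *	for (i = 0; i < cleaned; i++) {
 *		if (done[i]->status)
 *			pr_debug("mailbox send failed: %u\n", done[i]->status);
 *		// caller now frees or reuses done[i] and its DMA buffer
 *	}
 */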

/**
 * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
 * @hw: pointer to hw struct
 * @cq: pointer to control queue handle
 * @buff_count: (input|output) input is number of buffers caller is trying to
 * return; output is number of buffers that were not posted
 * @buffs: array of pointers to dma mem structs to be given to hardware
 *
 * Caller uses this function to return DMA buffers to the descriptor ring after
 * consuming them; buff_count will be the number of buffers.
 *
 * Note: this function needs to be called after a receive call even
 * if there are no DMA buffers to be returned, i.e. buff_count = 0,
 * buffs = NULL to support direct commands
 */
int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			    u16 *buff_count, struct idpf_dma_mem **buffs)
{
	struct idpf_ctlq_desc *desc;
	u16 ntp = cq->next_to_post;
	bool buffs_avail = false;
	u16 tbp = ntp + 1;
	int i = 0;

	if (*buff_count > cq->ring_size)
		return -EBADR;

	if (*buff_count > 0)
		buffs_avail = true;

	mutex_lock(&cq->cq_lock);

	if (tbp >= cq->ring_size)
		tbp = 0;

	if (tbp == cq->next_to_clean)
		/* Nothing to do */
		goto post_buffs_out;

	/* Post buffers for as many as provided or up until the last one used */
	while (ntp != cq->next_to_clean) {
		desc = IDPF_CTLQ_DESC(cq, ntp);

		if (cq->bi.rx_buff[ntp])
			goto fill_desc;
		if (!buffs_avail) {
			/* If the caller hasn't given us any buffers or
			 * there are none left, search the ring itself
			 * for an available buffer to move to this
			 * entry starting at the next entry in the ring
			 */
			tbp = ntp + 1;

			/* Wrap ring if necessary */
			if (tbp >= cq->ring_size)
				tbp = 0;

			while (tbp != cq->next_to_clean) {
				if (cq->bi.rx_buff[tbp]) {
					cq->bi.rx_buff[ntp] =
						cq->bi.rx_buff[tbp];
					cq->bi.rx_buff[tbp] = NULL;

					/* Found a buffer, no need to
					 * search anymore
					 */
					break;
				}

				/* Wrap ring if necessary */
				tbp++;
				if (tbp >= cq->ring_size)
					tbp = 0;
			}

			if (tbp == cq->next_to_clean)
				goto post_buffs_out;
		} else {
			/* Give back pointer to DMA buffer */
			cq->bi.rx_buff[ntp] = buffs[i];
			i++;

			if (i >= *buff_count)
				buffs_avail = false;
		}

fill_desc:
		desc->flags =
			cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);

		/* Post buffers to descriptor */
		desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);
		desc->params.indirect.addr_high =
			cpu_to_le32(upper_32_bits(cq->bi.rx_buff[ntp]->pa));
		desc->params.indirect.addr_low =
			cpu_to_le32(lower_32_bits(cq->bi.rx_buff[ntp]->pa));

		ntp++;
		if (ntp == cq->ring_size)
			ntp = 0;
	}

post_buffs_out:
	/* Only update tail if buffers were actually posted */
	if (cq->next_to_post != ntp) {
		if (ntp)
			/* Update next_to_post to ntp - 1 since current ntp
			 * will not have a buffer
			 */
			cq->next_to_post = ntp - 1;
		else
			/* Wrap to end of ring since current ntp is 0 */
			cq->next_to_post = cq->ring_size - 1;

		dma_wmb();

		wr32(hw, cq->reg.tail, cq->next_to_post);
	}

	mutex_unlock(&cq->cq_lock);

	/* return the number of buffers that were not posted */
	*buff_count = *buff_count - i;

	return 0;
}
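
/*
 * Example (illustrative sketch, not part of the driver): handing one consumed
 * payload buffer back to the RX ring. The buffer pointer is the one returned
 * by idpf_ctlq_recv() in msg.ctx.indirect.payload.
 *
 *	struct idpf_dma_mem *bufs[] = { msg.ctx.indirect.payload };
 *	u16 nbufs = ARRAY_SIZE(bufs);
 *
 *	err = idpf_ctlq_post_rx_buffs(hw, rx_cq, &nbufs, bufs);
 *	// on return, nbufs is the number of buffers that were NOT posted
 */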

/**
 * idpf_ctlq_recv - receive control queue message call back
 * @cq: pointer to control queue handle to receive on
 * @num_q_msg: (input|output) input number of messages that should be received;
 * output number of messages actually received
 * @q_msg: (output) array of received control queue messages on this q;
 * needs to be pre-allocated by caller for as many messages as requested
 *
 * Called by interrupt handler or polling mechanism. Caller is expected
 * to free buffers
 */
int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
		   struct idpf_ctlq_msg *q_msg)
{
	u16 num_to_clean, ntc, flags;
	struct idpf_ctlq_desc *desc;
	int err = 0;
	u16 i;

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *num_q_msg;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		flags = le16_to_cpu(desc->flags);

		if (!(flags & IDPF_CTLQ_FLAG_DD))
			break;

		q_msg[i].vmvf_type = (flags &
				      (IDPF_CTLQ_FLAG_FTYPE_VM |
				       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
				      IDPF_CTLQ_FLAG_FTYPE_S;

		if (flags & IDPF_CTLQ_FLAG_ERR)
			err = -EBADMSG;

		q_msg[i].cookie.mbx.chnl_opcode =
			le32_to_cpu(desc->v_opcode_dtype);
		q_msg[i].cookie.mbx.chnl_retval =
			le32_to_cpu(desc->v_retval);

		q_msg[i].opcode = le16_to_cpu(desc->opcode);
		q_msg[i].data_len = le16_to_cpu(desc->datalen);
		q_msg[i].status = le16_to_cpu(desc->ret_val);

		if (desc->datalen) {
			memcpy(q_msg[i].ctx.indirect.context,
			       &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE);

			/* Assign pointer to dma buffer to ctlq_msg array
			 * to be given to upper layer
			 */
			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];

			/* Zero out pointer to DMA buffer info;
			 * will be repopulated by post buffers API
			 */
			cq->bi.rx_buff[ntc] = NULL;
		} else {
			memcpy(q_msg[i].ctx.direct, desc->params.raw,
			       IDPF_DIRECT_CTX_SIZE);
		}

		/* Zero out stale data in descriptor */
		memset(desc, 0, sizeof(struct idpf_ctlq_desc));

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	mutex_unlock(&cq->cq_lock);

	*num_q_msg = i;
	if (*num_q_msg == 0)
		err = -ENOMSG;

	return err;
}
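
/*
 * Example (illustrative sketch, not part of the driver): a simple polling
 * receive loop. Even when no payload buffers are handed back yet,
 * idpf_ctlq_post_rx_buffs() is still called with a zero count so descriptors
 * consumed by direct commands are re-armed.
 *
 *	struct idpf_ctlq_msg msgs[4];
 *	u16 nmsg = ARRAY_SIZE(msgs);
 *	u16 nbufs = 0;
 *
 *	err = idpf_ctlq_recv(rx_cq, &nmsg, msgs);
 *	if (err == -ENOMSG)
 *		return 0;			// nothing pending
 *	// -EBADMSG means at least one message carried the error flag
 *	// process msgs[0..nmsg-1], then return their payload buffers
 *	idpf_ctlq_post_rx_buffs(hw, rx_cq, &nbufs, NULL);
 */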
619 | |