// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 * iavf_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void iavf_adminq_init_regs(struct iavf_hw *hw)
{
	/* set head and tail registers in our local struct */
	hw->aq.asq.tail = IAVF_VF_ATQT1;
	hw->aq.asq.head = IAVF_VF_ATQH1;
	hw->aq.asq.len = IAVF_VF_ATQLEN1;
	hw->aq.asq.bal = IAVF_VF_ATQBAL1;
	hw->aq.asq.bah = IAVF_VF_ATQBAH1;
	hw->aq.arq.tail = IAVF_VF_ARQT1;
	hw->aq.arq.head = IAVF_VF_ARQH1;
	hw->aq.arq.len = IAVF_VF_ARQLEN1;
	hw->aq.arq.bal = IAVF_VF_ARQBAL1;
	hw->aq.arq.bah = IAVF_VF_ARQBAH1;
}

/**
 * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 iavf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 iavf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * iavf_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
static void iavf_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * iavf_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
static void iavf_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	enum iavf_status ret_code;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
					  (hw->aq.num_arq_entries *
					  sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
	struct iavf_dma_mem *bi;
	enum iavf_status ret_code;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * iavf_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void iavf_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * iavf_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void iavf_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * iavf_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * iavf_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * iavf_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	int i;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_asq_regs(hw);
	if (ret_code)
		goto init_free_asq_bufs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_free_asq_bufs:
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

init_adminq_free_rings:
	iavf_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * iavf_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	int i;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_arq_regs(hw);
	if (ret_code)
		goto init_free_arq_bufs;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_free_arq_bufs:
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
	iavf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * iavf_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}

/**
 * iavf_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}

/**
 * iavf_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *  - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* Set up register offsets */
	iavf_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = iavf_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = iavf_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	iavf_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}
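
/* Usage sketch (illustrative, not part of this file): callers are expected
 * to fill in the sizing fields before init and to tear down with
 * iavf_shutdown_adminq(). The constant names below mirror what the iavf
 * driver uses elsewhere; treat the exact names as assumptions here.
 *
 *	hw->aq.num_asq_entries = IAVF_AQ_LEN;
 *	hw->aq.num_arq_entries = IAVF_AQ_LEN;
 *	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
 *	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
 *	status = iavf_init_adminq(hw);
 */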

/**
 * iavf_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	iavf_shutdown_asq(hw);
	iavf_shutdown_arq(hw);

	return 0;
}

/**
 * iavf_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
static u16 iavf_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &hw->aq.asq;
	struct iavf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct iavf_aq_desc desc_cb;
	struct iavf_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			IAVF_ADMINQ_CALLBACK cb_func =
				(IAVF_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct iavf_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}
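
/* Note on the return value above: IAVF_DESC_UNUSED() (from iavf_adminq.h)
 * computes the number of free ring slots with wrap-around, roughly
 *
 *	((next_to_clean > next_to_use) ? 0 : count)
 *		+ next_to_clean - next_to_use - 1
 *
 * so one slot always stays unused to distinguish a full ring from an
 * empty one. This expansion is a paraphrase, not a verbatim copy.
 */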

/**
 * iavf_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * iavf_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc.
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
				       struct iavf_aq_desc *desc,
				       void *buff, /* can be NULL */
				       u16 buff_size,
				       struct iavf_asq_cmd_details *cmd_details)
{
	struct iavf_dma_mem *dma_buff = NULL;
	struct iavf_asq_cmd_details *details;
	struct iavf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum iavf_status status = 0;
	u16 retval = 0;
	u32 val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = IAVF_AQ_RC_OK;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie. The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct iavf_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = IAVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = IAVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (iavf_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = IAVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff) {
		dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			udelay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
			status = 0;
		else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
			status = IAVF_ERR_NOT_READY;
		else
			status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

/**
 * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
}
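
/* A direct (buffer-less) command is typically built with the helper above
 * and handed to iavf_asq_send_command(); a minimal sketch, assuming an
 * opcode such as iavf_aqc_opc_queue_shutdown from iavf_adminq_cmd.h:
 *
 *	struct iavf_aq_desc desc;
 *	enum iavf_status status;
 *
 *	iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
 *	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
 */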

/**
 * iavf_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
					struct iavf_arq_event_info *e,
					u16 *pending)
{
	u16 ntc = hw->aq.arq.next_to_clean;
	struct iavf_aq_desc *desc;
	enum iavf_status ret_code = 0;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = IAVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & IAVF_AQ_FLAG_ERR) {
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct iavf_aq_desc));

	desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
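
/* Typical caller loop (a sketch, with the surrounding task context assumed):
 * drain events until the queue reports no work, reusing one buffer of
 * arq_buf_size bytes for the message payload.
 *
 *	struct iavf_arq_event_info event;
 *	enum iavf_status ret;
 *	u16 pending;
 *
 *	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	do {
 *		ret = iavf_clean_arq_element(hw, &event, &pending);
 *		if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
 *			break;
 *		...handle event.desc and event.msg_buf here...
 *	} while (pending);
 *	kfree(event.msg_buf);
 */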
947 | |